diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ae231ce6..63656570 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -12,7 +12,7 @@ build: script: - ./bootstrap - - ./configure --with-linux-dir=/usr/src/linux-obj/$(uname -i)/default --disable-8139too --enable-tty --with-devices=2 --enable-ccat + - ./configure --with-linux-dir=/usr/src/linux-obj/$(uname -i)/default --enable-tty --with-devices=2 --enable-ccat - make -j8 all modules - make DISTCHECK_CONFIGURE_FLAGS="--with-linux-dir=/usr/src/linux-obj/$(uname -i)/default" distcheck @@ -54,7 +54,7 @@ doxygen: GIT_SUBMODULE_STRATEGY: recursive script: - ./bootstrap - - ./configure --with-linux-dir=/usr/src/linux-obj/$(uname -i)/default --disable-8139too --enable-tty --with-devices=2 --enable-ccat + - ./configure --with-linux-dir=/usr/src/linux-obj/$(uname -i)/default --enable-tty --with-devices=2 --enable-ccat - make doc - mv doxygen-output/html/ html/ artifacts: @@ -83,7 +83,7 @@ pdf: update docs server: stage: deploy rules: - - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PROJECT_NAMESPACE == "etherlab.org" + - if: $CI_COMMIT_BRANCH == "stable-1.5" && $CI_PROJECT_NAMESPACE == "etherlab.org" trigger: etherlab.org/docs # from 'Workflows/MergeRequest-Pipelines.gitlab-ci.yml', but on all branches diff --git a/Makefile.am b/Makefile.am index a8901217..e9022353 100644 --- a/Makefile.am +++ b/Makefile.am @@ -30,7 +30,6 @@ include $(top_srcdir)/Makefile.kbuild ACLOCAL_AMFLAGS = -I m4 AM_DISTCHECK_CONFIGURE_FLAGS = \ - --disable-8139too \ --with-systemdsystemunitdir=$$dc_install_base/$(systemdsystemunitdir) SUBDIRS = \ diff --git a/configure.ac b/configure.ac index b9fdc798..1b5f8ee3 100644 --- a/configure.ac +++ b/configure.ac @@ -209,7 +209,7 @@ AC_ARG_ENABLE([8139too], ;; esac ], - [enable8139too=$enablekernel] + [enable8139too=0] # disabled by default ) AM_CONDITIONAL(ENABLE_8139TOO, test "x$enable8139too" = "x1") @@ -432,6 +432,54 @@ fi AC_SUBST(KERNEL_E1000E,[$kernele1000e]) AC_SUBST(E1000E_LAYOUT, [$e1000elayout]) +#------------------------------------------------------------------------------ +# genet driver +#------------------------------------------------------------------------------ + +AC_ARG_ENABLE([genet], + AS_HELP_STRING([--enable-genet], + [Enable genet driver (for Raspi 4)]), + [ + case "${enableval}" in + yes) enablegenet=1 + ;; + no) enablegenet=0 + ;; + *) AC_MSG_ERROR([Invalid value for --enable-genet]) + ;; + esac + ], + [enablegenet=0] # disabled by default +) + +AM_CONDITIONAL(ENABLE_GENET, test "x$enablegenet" = "x1") +AC_SUBST(ENABLE_GENET,[$enablegenet]) + +AC_ARG_WITH([genet-kernel], + AC_HELP_STRING( + [--with-genet-kernel=], + [genet kernel (only if differing)] + ), + [ + kernelgenet=[$withval] + ], + [ + kernelgenet=$linuxversion + ] +) + +if test "x${enablegenet}" = "x1"; then + AC_MSG_CHECKING([for kernel for genet driver]) + + if test ! 
-f "${srcdir}/devices/genet/bcmgenet-${kernelgenet}-orig.c"; then + AC_MSG_ERROR([kernel $kernelgenet not available for genet driver!]) + fi + + AC_MSG_RESULT([$kernelgenet]) +fi + +AC_SUBST(KERNEL_GENET,[$kernelgenet]) + #------------------------------------------------------------------------------ # igb driver #------------------------------------------------------------------------------ @@ -479,6 +527,52 @@ if test "x${enableigb}" = "x1"; then fi AC_SUBST(KERNEL_IGB,[$kerneligb]) +#------------------------------------------------------------------------------ +# igc driver +#------------------------------------------------------------------------------ + +AC_ARG_ENABLE([igc], + AS_HELP_STRING([--enable-igc], + [Enable igc driver]), + [ + case "${enableval}" in + yes) enableigc=1 + ;; + no) enableigc=0 + ;; + *) AC_MSG_ERROR([Invalid value for --enable-igc]) + ;; + esac + ], + [enableigc=0] # disabled by default +) + +AM_CONDITIONAL(ENABLE_IGC, test "x$enableigc" = "x1") +AC_SUBST(ENABLE_IGC,[$enableigc]) + +AC_ARG_WITH([igc-kernel], + AS_HELP_STRING([--with-igc-kernel=], + [igc kernel (only if differing)] + ), + [ + kerneligc=[$withval] + ], + [ + kerneligc=$linuxversion + ] +) + +if test "x${enableigc}" = "x1"; then + AC_MSG_CHECKING([for kernel for igc driver]) + + if test ! -f "${srcdir}/devices/igc/igc_main-${kerneligc}-orig.c"; then + AC_MSG_ERROR([kernel $kerneligc not available for igc driver!]) + fi + + AC_MSG_RESULT([$kerneligc]) +fi + +AC_SUBST(KERNEL_IGC,[$kerneligc]) #------------------------------------------------------------------------------ # r8169 driver @@ -520,16 +614,25 @@ if test "x${enable_r8169}" = "x1"; then AC_MSG_CHECKING([for kernel for r8169 driver]) kernels=`ls -1 ${srcdir}/devices/ | grep -oE "^r8169-.*-" | cut -d "-" -f 2 | uniq` + kernels_subdir=`ls -1 ${srcdir}/devices/r8169/ | grep -oE "^r8169_main-.*-" | cut -d "-" -f 2 | uniq` found=0 + found_subdir=0 for k in $kernels; do if test "$kernel_r8169" = "$k"; then found=1 fi done + for k in $kernels_subdir; do + if test "$kernel_r8169" = "$k"; then + found=1 + found_subdir=1 + fi + done if test $found -ne 1; then AC_MSG_ERROR([kernel $kernel_r8169 not available for r8169 driver!]) fi + AC_SUBST(R8169_IN_SUBDIR, [$found_subdir]) AC_MSG_RESULT([$kernel_r8169]) fi @@ -1122,8 +1225,14 @@ AC_CONFIG_FILES([ devices/e1000/Makefile devices/e1000e/Kbuild devices/e1000e/Makefile + devices/genet/Kbuild + devices/genet/Makefile devices/igb/Kbuild devices/igb/Makefile + devices/igc/Kbuild + devices/igc/Makefile + devices/r8169/Kbuild + devices/r8169/Makefile ethercat.spec examples/Kbuild examples/Makefile diff --git a/devices/8139too-6.1-ethercat.c b/devices/8139too-6.1-ethercat.c new file mode 100644 index 00000000..de9ed1ed --- /dev/null +++ b/devices/8139too-6.1-ethercat.c @@ -0,0 +1,2835 @@ +/****************************************************************************** + * + * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH + * + * This file is part of the IgH EtherCAT Master. + * + * The IgH EtherCAT Master is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + * The IgH EtherCAT Master is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with the IgH EtherCAT Master; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * --- + * + * The license mentioned above concerns the source code only. Using the + * EtherCAT technology and brand is only permitted in compliance with the + * industrial property and similar rights of Beckhoff Automation GmbH. + * + * vim: noexpandtab + * + *****************************************************************************/ + +/** + \file + EtherCAT driver for RTL8139-compatible NICs. +*/ + +/*****************************************************************************/ + +/* + Former documentation: + + 8139too.c: A RealTek RTL-8139 Fast Ethernet driver for Linux. + + Maintained by Jeff Garzik + Copyright 2000-2002 Jeff Garzik + + Much code comes from Donald Becker's rtl8139.c driver, + versions 1.13 and older. This driver was originally based + on rtl8139.c version 1.07. Header of rtl8139.c version 1.13: + + ---------- + + Written 1997-2001 by Donald Becker. + This software may be used and distributed according to the + terms of the GNU General Public License (GPL), incorporated + herein by reference. Drivers based on or derived from this + code fall under the GPL and must retain the authorship, + copyright and license notice. This file is not a complete + program and may only be used when the entire operating + system is licensed under the GPL. + + This driver is for boards based on the RTL8129 and RTL8139 + PCI ethernet chips. + + The author may be reached as becker@scyld.com, or C/O Scyld + Computing Corporation 410 Severn Ave., Suite 210 Annapolis + MD 21403 + + Support and updates available at + http://www.scyld.com/network/rtl8139.html + + Twister-tuning table provided by Kinston + . + + ---------- + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + Contributors: + + Donald Becker - he wrote the original driver, kudos to him! + (but please don't e-mail him for support, this isn't his driver) + + Tigran Aivazian - bug fixes, skbuff free cleanup + + Martin Mares - suggestions for PCI cleanup + + David S. Miller - PCI DMA and softnet updates + + Ernst Gill - fixes ported from BSD driver + + Daniel Kobras - identified specific locations of + posted MMIO write bugginess + + Gerard Sharp - bug fix, testing and feedback + + David Ford - Rx ring wrap fix + + Dan DeMaggio - swapped RTL8139 cards with me, and allowed me + to find and fix a crucial bug on older chipsets. + + Donald Becker/Chris Butterworth/Marcus Westergren - + Noticed various Rx packet size-related buglets. + + Santiago Garcia Mantinan - testing and feedback + + Jens David - 2.2.x kernel backports + + Martin Dennett - incredibly helpful insight on undocumented + features of the 8139 chips + + Jean-Jacques Michel - bug fix + + Tobias Ringström - Rx interrupt status checking suggestion + + Andrew Morton - Clear blocked signals, avoid + buffer overrun setting current->comm. + + Kalle Olavi Niemitalo - Wake-on-LAN ioctls + + Robert Kuebel - Save kernel thread from dying on any signal. 
+ + Submitting bug reports: + + "rtl8139-diag -mmmaaavvveefN" output + enable RTL8139_DEBUG below, and look at 'dmesg' or kernel log + +*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DRV_NAME "ec_8139too" +#define DRV_VERSION "0.9.28" + + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/compiler.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/completion.h> +#include <linux/crc32.h> +#include <linux/io.h> +#include <linux/uaccess.h> +#include <linux/gfp.h> +#include <linux/if_vlan.h> +#include <asm/irq.h> + +#include "../globals.h" +#include "ecdev.h" + +#define RTL8139_DRIVER_NAME DRV_NAME \ + " EtherCAT-capable Fast Ethernet driver " \ + DRV_VERSION ", master " EC_MASTER_VERSION + +#define PFX DRV_NAME ": " + +/* Default Message level */ +#define RTL8139_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + + +/* define to 1, 2 or 3 to enable copious debugging info */ +#define RTL8139_DEBUG 0 + +/* define to 1 to disable lightweight runtime debugging checks */ +#undef RTL8139_NDEBUG + + +#ifdef RTL8139_NDEBUG +# define assert(expr) do {} while (0) +#else +# define assert(expr) \ + if (unlikely(!(expr))) { \ + pr_err("Assertion failed! %s,%s,%s,line=%d\n", \ + #expr, __FILE__, __func__, __LINE__); \ + } +#endif + + +/* A few user-configurable values. */ +/* media options */ +#define MAX_UNITS 8 +static int media[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; +static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; + +/* Whether to use MMIO or PIO. Default to MMIO. */ +#ifdef CONFIG_8139TOO_PIO +static bool use_io = true; +#else +static bool use_io = false; +#endif + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. */ +static int multicast_filter_limit = 32; + +/* bitmapped message enable number */ +static int debug = -1; + +/* + * Receive ring size + * Warning: 64K ring has hardware issues and may lock up. + */ +#if defined(CONFIG_SH_DREAMCAST) +#define RX_BUF_IDX 0 /* 8K ring */ +#else +#define RX_BUF_IDX 2 /* 32K ring */ +#endif +#define RX_BUF_LEN (8192 << RX_BUF_IDX) +#define RX_BUF_PAD 16 +#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */ + +#if RX_BUF_LEN == 65536 +#define RX_BUF_TOT_LEN RX_BUF_LEN +#else +#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD) +#endif + +/* Number of Tx descriptor registers. */ +#define NUM_TX_DESC 4 + +/* max supported ethernet frame size -- must be at least (dev->mtu+18+4).*/ +#define MAX_ETH_FRAME_SIZE 1792 + +/* max supported payload size */ +#define MAX_ETH_DATA_SIZE (MAX_ETH_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN) + +/* Size of the Tx bounce buffers -- must be at least (dev->mtu+18+4). */ +#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE +#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC) + +/* PCI Tuning Parameters + Threshold is bytes transferred to chip before transmission starts. */ +#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */ + +/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */ +#define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. */ +#define RX_DMA_BURST 7 /* Maximum PCI burst, '6' is 1024 */ +#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ +#define TX_RETRY 8 /* 0-15. retries = 16 + (TX_RETRY * 16) */ + +/* Operational parameters that usually are not changed. */ +/* Time in jiffies before concluding the transmitter is hung. 
*/ +#define TX_TIMEOUT (6*HZ) + + +enum { + HAS_MII_XCVR = 0x010000, + HAS_CHIP_XCVR = 0x020000, + HAS_LNK_CHNG = 0x040000, +}; + +#define RTL_NUM_STATS 4 /* number of ETHTOOL_GSTATS u64's */ +#define RTL_REGS_VER 1 /* version of reg. data in ETHTOOL_GREGS */ +#define RTL_MIN_IO_SIZE 0x80 +#define RTL8139B_IO_SIZE 256 + +#define RTL8129_CAPS HAS_MII_XCVR +#define RTL8139_CAPS (HAS_CHIP_XCVR|HAS_LNK_CHNG) + +typedef enum { + RTL8139 = 0, + RTL8129, +} board_t; + + +/* indexed by board_t, above */ +static const struct { + const char *name; + u32 hw_flags; +} board_info[] = { + { "RealTek RTL8139", RTL8139_CAPS }, + { "RealTek RTL8129", RTL8129_CAPS }, +}; + + +static const struct pci_device_id rtl8139_pci_tbl[] = { + {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1186, 0x1300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1186, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x13d1, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1259, 0xa117, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1259, 0xa11e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x14ea, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x14ea, 0xab07, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x11db, 0x1234, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1432, 0x9130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x02ac, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x16ec, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + +#ifdef CONFIG_SH_SECUREEDGE5410 + /* Bogus 8139 silicon reports 8129 without external PROM :-( */ + {0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, +#endif +#ifdef CONFIG_8139TOO_8129 + {0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8129 }, +#endif + + /* some crazy cards report invalid vendor ids like + * 0x0001 here. The other ids are valid and constant, + * so we simply don't match on the main vendor id. + */ + {PCI_ANY_ID, 0x8139, 0x10ec, 0x8139, 0, 0, RTL8139 }, + {PCI_ANY_ID, 0x8139, 0x1186, 0x1300, 0, 0, RTL8139 }, + {PCI_ANY_ID, 0x8139, 0x13d1, 0xab06, 0, 0, RTL8139 }, + + {0,} +}; + +/* prevent driver from being loaded automatically */ +//MODULE_DEVICE_TABLE (pci, rtl8139_pci_tbl); + +static struct { + const char str[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "early_rx" }, + { "tx_buf_mapped" }, + { "tx_timeouts" }, + { "rx_lost_in_ring" }, +}; + +/* The rest of these values should never change. */ + +/* Symbolic offsets to registers. */ +enum RTL8139_registers { + MAC0 = 0, /* Ethernet hardware address. */ + MAR0 = 8, /* Multicast filter. */ + TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */ + TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */ + RxBuf = 0x30, + ChipCmd = 0x37, + RxBufPtr = 0x38, + RxBufAddr = 0x3A, + IntrMask = 0x3C, + IntrStatus = 0x3E, + TxConfig = 0x40, + RxConfig = 0x44, + Timer = 0x48, /* A general-purpose counter. */ + RxMissed = 0x4C, /* 24 bits valid, write clears. 
*/ + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + TimerInt = 0x54, + MediaStatus = 0x58, + Config3 = 0x59, + Config4 = 0x5A, /* absent on RTL-8139A */ + HltClk = 0x5B, + MultiIntr = 0x5C, + TxSummary = 0x60, + BasicModeCtrl = 0x62, + BasicModeStatus = 0x64, + NWayAdvert = 0x66, + NWayLPAR = 0x68, + NWayExpansion = 0x6A, + /* Undocumented registers, but required for proper operation. */ + FIFOTMS = 0x70, /* FIFO Control and test. */ + CSCR = 0x74, /* Chip Status and Configuration Register. */ + PARA78 = 0x78, + FlashReg = 0xD4, /* Communication with Flash ROM, four bytes. */ + PARA7c = 0x7c, /* Magic transceiver parameter register. */ + Config5 = 0xD8, /* absent on RTL-8139A */ +}; + +enum ClearBitMasks { + MultiIntrClear = 0xF000, + ChipCmdClear = 0xE2, + Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1), +}; + +enum ChipCmdBits { + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, +}; + +/* Interrupt register bits, using my own meaningful names. */ +enum IntrStatusBits { + PCIErr = 0x8000, + PCSTimeout = 0x4000, + RxFIFOOver = 0x40, + RxUnderrun = 0x20, + RxOverflow = 0x10, + TxErr = 0x08, + TxOK = 0x04, + RxErr = 0x02, + RxOK = 0x01, + + RxAckBits = RxFIFOOver | RxOverflow | RxOK, +}; + +enum TxStatusBits { + TxHostOwns = 0x2000, + TxUnderrun = 0x4000, + TxStatOK = 0x8000, + TxOutOfWindow = 0x20000000, + TxAborted = 0x40000000, + TxCarrierLost = 0x80000000, +}; +enum RxStatusBits { + RxMulticast = 0x8000, + RxPhysical = 0x4000, + RxBroadcast = 0x2000, + RxBadSymbol = 0x0020, + RxRunt = 0x0010, + RxTooLong = 0x0008, + RxCRCErr = 0x0004, + RxBadAlign = 0x0002, + RxStatusOK = 0x0001, +}; + +/* Bits in RxConfig. */ +enum rx_mode_bits { + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +}; + +/* Bits in TxConfig. */ +enum tx_config_bits { + /* Interframe Gap Time. 
Only TxIFG96 doesn't violate IEEE 802.3 */ + TxIFGShift = 24, + TxIFG84 = (0 << TxIFGShift), /* 8.4us / 840ns (10 / 100Mbps) */ + TxIFG88 = (1 << TxIFGShift), /* 8.8us / 880ns (10 / 100Mbps) */ + TxIFG92 = (2 << TxIFGShift), /* 9.2us / 920ns (10 / 100Mbps) */ + TxIFG96 = (3 << TxIFGShift), /* 9.6us / 960ns (10 / 100Mbps) */ + + TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */ + TxCRC = (1 << 16), /* DISABLE Tx pkt CRC append */ + TxClearAbt = (1 << 0), /* Clear abort (WO) */ + TxDMAShift = 8, /* DMA burst value (0-7) is shifted X many bits */ + TxRetryShift = 4, /* TXRR value (0-15) is shifted X many bits */ + + TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */ +}; + +/* Bits in Config1 */ +enum Config1Bits { + Cfg1_PM_Enable = 0x01, + Cfg1_VPD_Enable = 0x02, + Cfg1_PIO = 0x04, + Cfg1_MMIO = 0x08, + LWAKE = 0x10, /* not on 8139, 8139A */ + Cfg1_Driver_Load = 0x20, + Cfg1_LED0 = 0x40, + Cfg1_LED1 = 0x80, + SLEEP = (1 << 1), /* only on 8139, 8139A */ + PWRDN = (1 << 0), /* only on 8139, 8139A */ +}; + +/* Bits in Config3 */ +enum Config3Bits { + Cfg3_FBtBEn = (1 << 0), /* 1 = Fast Back to Back */ + Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */ + Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */ + Cfg3_CardB_En = (1 << 3), /* 1 = enable CardBus registers */ + Cfg3_LinkUp = (1 << 4), /* 1 = wake up on link up */ + Cfg3_Magic = (1 << 5), /* 1 = wake up on Magic Packet (tm) */ + Cfg3_PARM_En = (1 << 6), /* 0 = software can set twister parameters */ + Cfg3_GNTSel = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */ +}; + +/* Bits in Config4 */ +enum Config4Bits { + LWPTN = (1 << 2), /* not on 8139, 8139A */ +}; + +/* Bits in Config5 */ +enum Config5Bits { + Cfg5_PME_STS = (1 << 0), /* 1 = PCI reset resets PME_Status */ + Cfg5_LANWake = (1 << 1), /* 1 = enable LANWake signal */ + Cfg5_LDPS = (1 << 2), /* 0 = save power when link is down */ + Cfg5_FIFOAddrPtr= (1 << 3), /* Realtek internal SRAM testing */ + Cfg5_UWF = (1 << 4), /* 1 = accept unicast wakeup frame */ + Cfg5_MWF = (1 << 5), /* 1 = accept multicast wakeup frame */ + Cfg5_BWF = (1 << 6), /* 1 = accept broadcast wakeup frame */ +}; + +enum RxConfigBits { + /* rx fifo threshold */ + RxCfgFIFOShift = 13, + RxCfgFIFONone = (7 << RxCfgFIFOShift), + + /* Max DMA burst */ + RxCfgDMAShift = 8, + RxCfgDMAUnlimited = (7 << RxCfgDMAShift), + + /* rx ring buffer length */ + RxCfgRcv8K = 0, + RxCfgRcv16K = (1 << 11), + RxCfgRcv32K = (1 << 12), + RxCfgRcv64K = (1 << 11) | (1 << 12), + + /* Disable packet wrap at end of Rx buffer. (not possible with 64k) */ + RxNoWrap = (1 << 7), +}; + +/* Twister tuning parameters from RealTek. + Completely undocumented, but required to tune bad links on some boards. 
*/ +enum CSCRBits { + CSCR_LinkOKBit = 0x0400, + CSCR_LinkChangeBit = 0x0800, + CSCR_LinkStatusBits = 0x0f000, + CSCR_LinkDownOffCmd = 0x003c0, + CSCR_LinkDownCmd = 0x0f3c0, +}; + +enum Cfg9346Bits { + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xC0, +}; + +typedef enum { + CH_8139 = 0, + CH_8139_K, + CH_8139A, + CH_8139A_G, + CH_8139B, + CH_8130, + CH_8139C, + CH_8100, + CH_8100B_8139D, + CH_8101, +} chip_t; + +enum chip_flags { + HasHltClk = (1 << 0), + HasLWake = (1 << 1), +}; + +#define HW_REVID(b30, b29, b28, b27, b26, b23, b22) \ + (b30<<30 | b29<<29 | b28<<28 | b27<<27 | b26<<26 | b23<<23 | b22<<22) +#define HW_REVID_MASK HW_REVID(1, 1, 1, 1, 1, 1, 1) + +/* directly indexed by chip_t, above */ +static const struct { + const char *name; + u32 version; /* from RTL8139C/RTL8139D docs */ + u32 flags; +} rtl_chip_info[] = { + { "RTL-8139", + HW_REVID(1, 0, 0, 0, 0, 0, 0), + HasHltClk, + }, + + { "RTL-8139 rev K", + HW_REVID(1, 1, 0, 0, 0, 0, 0), + HasHltClk, + }, + + { "RTL-8139A", + HW_REVID(1, 1, 1, 0, 0, 0, 0), + HasHltClk, /* XXX undocumented? */ + }, + + { "RTL-8139A rev G", + HW_REVID(1, 1, 1, 0, 0, 1, 0), + HasHltClk, /* XXX undocumented? */ + }, + + { "RTL-8139B", + HW_REVID(1, 1, 1, 1, 0, 0, 0), + HasLWake, + }, + + { "RTL-8130", + HW_REVID(1, 1, 1, 1, 1, 0, 0), + HasLWake, + }, + + { "RTL-8139C", + HW_REVID(1, 1, 1, 0, 1, 0, 0), + HasLWake, + }, + + { "RTL-8100", + HW_REVID(1, 1, 1, 1, 0, 1, 0), + HasLWake, + }, + + { "RTL-8100B/8139D", + HW_REVID(1, 1, 1, 0, 1, 0, 1), + HasHltClk /* XXX undocumented? */ + | HasLWake, + }, + + { "RTL-8101", + HW_REVID(1, 1, 1, 0, 1, 1, 1), + HasLWake, + }, +}; + +struct rtl_extra_stats { + unsigned long early_rx; + unsigned long tx_buf_mapped; + unsigned long tx_timeouts; + unsigned long rx_lost_in_ring; +}; + +struct rtl8139_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + +struct rtl8139_private { + void __iomem *mmio_addr; + int drv_flags; + struct pci_dev *pci_dev; + u32 msg_enable; + struct napi_struct napi; + struct net_device *dev; + + unsigned char *rx_ring; + unsigned int cur_rx; /* RX buf index of next pkt */ + struct rtl8139_stats rx_stats; + dma_addr_t rx_ring_dma; + + unsigned int tx_flag; + unsigned long cur_tx; + unsigned long dirty_tx; + struct rtl8139_stats tx_stats; + unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */ + unsigned char *tx_bufs; /* Tx bounce buffer region. */ + dma_addr_t tx_bufs_dma; + + signed char phys[4]; /* MII device addresses. */ + + /* Twister tune state. */ + char twistie, twist_row, twist_col; + + unsigned int watchdog_fired : 1; + unsigned int default_port : 4; /* Last dev->if_port value. */ + unsigned int have_thread : 1; + + spinlock_t lock; + spinlock_t rx_lock; + + chip_t chipset; + u32 rx_config; + struct rtl_extra_stats xstats; + + struct delayed_work thread; + + struct mii_if_info mii; + unsigned int regs_len; + unsigned long fifo_copy_timeout; + + ec_device_t *ecdev; +}; + +MODULE_AUTHOR("Florian Pose "); +MODULE_DESCRIPTION("RealTek RTL-8139 EtherCAT driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(EC_MASTER_VERSION); + +module_param(use_io, bool, 0); +MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 
0=MMIO 1=PIO"); +module_param(multicast_filter_limit, int, 0); +module_param_array(media, int, NULL, 0); +module_param_array(full_duplex, int, NULL, 0); +module_param(debug, int, 0); +MODULE_PARM_DESC (debug, "8139too bitmapped message enable number"); +MODULE_PARM_DESC (multicast_filter_limit, "8139too maximum number of filtered multicast addresses"); +MODULE_PARM_DESC (media, "8139too: Bits 4+9: force full duplex, bit 5: 100Mbps"); +MODULE_PARM_DESC (full_duplex, "8139too: Force full duplex for board(s) (1)"); + +void ec_poll(struct net_device *); + +static int read_eeprom (void __iomem *ioaddr, int location, int addr_len); +static int rtl8139_open (struct net_device *dev); +static int mdio_read (struct net_device *dev, int phy_id, int location); +static void mdio_write (struct net_device *dev, int phy_id, int location, + int val); +static void rtl8139_start_thread(struct rtl8139_private *tp); +static void rtl8139_tx_timeout (struct net_device *dev, unsigned int txqueue); +static void rtl8139_init_ring (struct net_device *dev); +static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb, + struct net_device *dev); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8139_poll_controller(struct net_device *dev); +#endif +static int rtl8139_set_mac_address(struct net_device *dev, void *p); +static int rtl8139_poll(struct napi_struct *napi, int budget); +static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance); +static int rtl8139_close (struct net_device *dev); +static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); +static void rtl8139_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); +static void rtl8139_set_rx_mode (struct net_device *dev); +static void __set_rx_mode (struct net_device *dev); +static void rtl8139_hw_start (struct net_device *dev); +static void rtl8139_thread (struct work_struct *work); +static void rtl8139_tx_timeout_task(struct work_struct *work); +static const struct ethtool_ops rtl8139_ethtool_ops; + +/* write MMIO register, with flush */ +/* Flush avoids rtl8139 bug w/ posted MMIO writes */ +#define RTL_W8_F(reg, val8) do { iowrite8 ((val8), ioaddr + (reg)); ioread8 (ioaddr + (reg)); } while (0) +#define RTL_W16_F(reg, val16) do { iowrite16 ((val16), ioaddr + (reg)); ioread16 (ioaddr + (reg)); } while (0) +#define RTL_W32_F(reg, val32) do { iowrite32 ((val32), ioaddr + (reg)); ioread32 (ioaddr + (reg)); } while (0) + +/* write MMIO register */ +#define RTL_W8(reg, val8) iowrite8 ((val8), ioaddr + (reg)) +#define RTL_W16(reg, val16) iowrite16 ((val16), ioaddr + (reg)) +#define RTL_W32(reg, val32) iowrite32 ((val32), ioaddr + (reg)) + +/* read MMIO register */ +#define RTL_R8(reg) ioread8 (ioaddr + (reg)) +#define RTL_R16(reg) ioread16 (ioaddr + (reg)) +#define RTL_R32(reg) ioread32 (ioaddr + (reg)) + + +static const u16 rtl8139_intr_mask = + PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver | + TxErr | TxOK | RxErr | RxOK; + +static const u16 rtl8139_norx_intr_mask = + PCIErr | PCSTimeout | RxUnderrun | + TxErr | TxOK | RxErr ; + +#if RX_BUF_IDX == 0 +static const unsigned int rtl8139_rx_config = + RxCfgRcv8K | RxNoWrap | + (RX_FIFO_THRESH << RxCfgFIFOShift) | + (RX_DMA_BURST << RxCfgDMAShift); +#elif RX_BUF_IDX == 1 +static const unsigned int rtl8139_rx_config = + RxCfgRcv16K | RxNoWrap | + (RX_FIFO_THRESH << RxCfgFIFOShift) | + (RX_DMA_BURST << RxCfgDMAShift); +#elif RX_BUF_IDX == 2 +static const unsigned int rtl8139_rx_config = + RxCfgRcv32K | RxNoWrap | + (RX_FIFO_THRESH << RxCfgFIFOShift) | + 
(RX_DMA_BURST << RxCfgDMAShift); +#elif RX_BUF_IDX == 3 +static const unsigned int rtl8139_rx_config = + RxCfgRcv64K | + (RX_FIFO_THRESH << RxCfgFIFOShift) | + (RX_DMA_BURST << RxCfgDMAShift); +#else +#error "Invalid configuration for 8139_RXBUF_IDX" +#endif + +static const unsigned int rtl8139_tx_config = + TxIFG96 | (TX_DMA_BURST << TxDMAShift) | (TX_RETRY << TxRetryShift); + +static void __rtl8139_cleanup_dev (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + struct pci_dev *pdev; + + assert (dev != NULL); + assert (tp->pci_dev != NULL); + pdev = tp->pci_dev; + + if (tp->mmio_addr) + pci_iounmap (pdev, tp->mmio_addr); + + /* it's ok to call this even if we have no regions to free */ + pci_release_regions (pdev); + + free_netdev(dev); +} + + +static void rtl8139_chip_reset (void __iomem *ioaddr) +{ + int i; + + /* Soft reset the chip. */ + RTL_W8 (ChipCmd, CmdReset); + + /* Check that the chip has finished the reset. */ + for (i = 1000; i > 0; i--) { + barrier(); + if ((RTL_R8 (ChipCmd) & CmdReset) == 0) + break; + udelay (10); + } +} + + +static struct net_device *rtl8139_init_board(struct pci_dev *pdev) +{ + struct device *d = &pdev->dev; + void __iomem *ioaddr; + struct net_device *dev; + struct rtl8139_private *tp; + u8 tmp8; + int rc, disable_dev_on_err = 0; + unsigned int i, bar; + unsigned long io_len; + u32 version; + static const struct { + unsigned long mask; + char *type; + } res[] = { + { IORESOURCE_IO, "PIO" }, + { IORESOURCE_MEM, "MMIO" } + }; + + assert (pdev != NULL); + + /* dev and priv zeroed in alloc_etherdev */ + dev = alloc_etherdev (sizeof (*tp)); + if (dev == NULL) + return ERR_PTR(-ENOMEM); + + SET_NETDEV_DEV(dev, &pdev->dev); + + tp = netdev_priv(dev); + tp->pci_dev = pdev; + + /* enable device (incl. PCI PM wakeup and hotplug setup) */ + rc = pci_enable_device (pdev); + if (rc) + goto err_out; + + disable_dev_on_err = 1; + rc = pci_request_regions (pdev, DRV_NAME); + if (rc) + goto err_out; + + pci_set_master (pdev); + + u64_stats_init(&tp->rx_stats.syncp); + u64_stats_init(&tp->tx_stats.syncp); + +retry: + /* PIO bar register comes first. */ + bar = !use_io; + + io_len = pci_resource_len(pdev, bar); + + dev_dbg(d, "%s region size = 0x%02lX\n", res[bar].type, io_len); + + if (!(pci_resource_flags(pdev, bar) & res[bar].mask)) { + dev_err(d, "region #%d not a %s resource, aborting\n", bar, + res[bar].type); + rc = -ENODEV; + goto err_out; + } + if (io_len < RTL_MIN_IO_SIZE) { + dev_err(d, "Invalid PCI %s region size(s), aborting\n", + res[bar].type); + rc = -ENODEV; + goto err_out; + } + + ioaddr = pci_iomap(pdev, bar, 0); + if (!ioaddr) { + dev_err(d, "cannot map %s\n", res[bar].type); + if (!use_io) { + use_io = true; + goto retry; + } + rc = -ENODEV; + goto err_out; + } + tp->regs_len = io_len; + tp->mmio_addr = ioaddr; + + /* Bring old chips out of low-power mode. 
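Writing 'R' here restarts the chip clock; rtl8139_init_one later writes 'H' on HasHltClk chips to halt it again until the interface is opened. 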
*/ + RTL_W8 (HltClk, 'R'); + + /* check for missing/broken hardware */ + if (RTL_R32 (TxConfig) == 0xFFFFFFFF) { + dev_err(&pdev->dev, "Chip not responding, ignoring board\n"); + rc = -EIO; + goto err_out; + } + + /* identify chip attached to board */ + version = RTL_R32 (TxConfig) & HW_REVID_MASK; + for (i = 0; i < ARRAY_SIZE (rtl_chip_info); i++) + if (version == rtl_chip_info[i].version) { + tp->chipset = i; + goto match; + } + + /* if unknown chip, assume array element #0, original RTL-8139 in this case */ + i = 0; + dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n"); + dev_dbg(&pdev->dev, "TxConfig = 0x%x\n", RTL_R32 (TxConfig)); + tp->chipset = 0; + +match: + pr_debug("chipset id (%d) == index %d, '%s'\n", + version, i, rtl_chip_info[i].name); + + if (tp->chipset >= CH_8139B) { + u8 new_tmp8 = tmp8 = RTL_R8 (Config1); + pr_debug("PCI PM wakeup\n"); + if ((rtl_chip_info[tp->chipset].flags & HasLWake) && + (tmp8 & LWAKE)) + new_tmp8 &= ~LWAKE; + new_tmp8 |= Cfg1_PM_Enable; + if (new_tmp8 != tmp8) { + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W8 (Config1, tmp8); + RTL_W8 (Cfg9346, Cfg9346_Lock); + } + if (rtl_chip_info[tp->chipset].flags & HasLWake) { + tmp8 = RTL_R8 (Config4); + if (tmp8 & LWPTN) { + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W8 (Config4, tmp8 & ~LWPTN); + RTL_W8 (Cfg9346, Cfg9346_Lock); + } + } + } else { + pr_debug("Old chip wakeup\n"); + tmp8 = RTL_R8 (Config1); + tmp8 &= ~(SLEEP | PWRDN); + RTL_W8 (Config1, tmp8); + } + + rtl8139_chip_reset (ioaddr); + + return dev; + +err_out: + __rtl8139_cleanup_dev (dev); + if (disable_dev_on_err) + pci_disable_device (pdev); + return ERR_PTR(rc); +} + +static int rtl8139_set_features(struct net_device *dev, netdev_features_t features) +{ + struct rtl8139_private *tp = netdev_priv(dev); + unsigned long flags; + netdev_features_t changed = features ^ dev->features; + void __iomem *ioaddr = tp->mmio_addr; + + if (!(changed & (NETIF_F_RXALL))) + return 0; + + spin_lock_irqsave(&tp->lock, flags); + + if (changed & NETIF_F_RXALL) { + int rx_mode = tp->rx_config; + if (features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + else + rx_mode &= ~(AcceptErr | AcceptRunt); + tp->rx_config = rtl8139_rx_config | rx_mode; + RTL_W32_F(RxConfig, tp->rx_config); + } + + spin_unlock_irqrestore(&tp->lock, flags); + + return 0; +} + +static const struct net_device_ops rtl8139_netdev_ops = { + .ndo_open = rtl8139_open, + .ndo_stop = rtl8139_close, + .ndo_get_stats64 = rtl8139_get_stats64, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = rtl8139_set_mac_address, + .ndo_start_xmit = rtl8139_start_xmit, + .ndo_set_rx_mode = rtl8139_set_rx_mode, + .ndo_eth_ioctl = netdev_ioctl, + .ndo_tx_timeout = rtl8139_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8139_poll_controller, +#endif + .ndo_set_features = rtl8139_set_features, +}; + +static int rtl8139_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev = NULL; + struct rtl8139_private *tp; + __le16 addr[ETH_ALEN / 2]; + int i, addr_len, option; + void __iomem *ioaddr; + static int board_idx = -1; + + assert (pdev != NULL); + assert (ent != NULL); + + board_idx++; + + /* when we're built into the kernel, the driver version message + * is only printed if at least one 8139 board has been found + */ +#ifndef MODULE + { + static int printed_version; + if (!printed_version++) + pr_info(RTL8139_DRIVER_NAME "\n"); + } +#endif + + if (pdev->vendor == PCI_VENDOR_ID_REALTEK && + pdev->device == 
PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) { + dev_info(&pdev->dev, + "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip, use 8139cp\n", + pdev->vendor, pdev->device, pdev->revision); + return -ENODEV; + } + + if (pdev->vendor == PCI_VENDOR_ID_REALTEK && + pdev->device == PCI_DEVICE_ID_REALTEK_8139 && + pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS && + pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) { + pr_info("OQO Model 2 detected. Forcing PIO\n"); + use_io = true; + } + + dev = rtl8139_init_board (pdev); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + assert (dev != NULL); + tp = netdev_priv(dev); + tp->dev = dev; + + ioaddr = tp->mmio_addr; + assert (ioaddr != NULL); + + addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6; + for (i = 0; i < 3; i++) + addr[i] = cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len)); + eth_hw_addr_set(dev, (u8 *)addr); + + /* The Rtl8139-specific entries in the device structure. */ + dev->netdev_ops = &rtl8139_netdev_ops; + dev->ethtool_ops = &rtl8139_ethtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + netif_napi_add(dev, &tp->napi, rtl8139_poll); + + /* note: the hardware is not capable of sg/csum/highdma, however + * through the use of skb_copy_and_csum_dev we enable these + * features + */ + dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA; + dev->vlan_features = dev->features; + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + /* MTU range: 68 - 1770 */ + dev->min_mtu = ETH_MIN_MTU; + dev->max_mtu = MAX_ETH_DATA_SIZE; + + /* tp zeroed and aligned in alloc_etherdev */ + tp = netdev_priv(dev); + + /* note: tp->chipset set in rtl8139_init_board */ + tp->drv_flags = board_info[ent->driver_data].hw_flags; + tp->mmio_addr = ioaddr; + tp->msg_enable = + (debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1)); + spin_lock_init (&tp->lock); + spin_lock_init (&tp->rx_lock); + INIT_DELAYED_WORK(&tp->thread, rtl8139_thread); + tp->mii.dev = dev; + tp->mii.mdio_read = mdio_read; + tp->mii.mdio_write = mdio_write; + tp->mii.phy_id_mask = 0x3f; + tp->mii.reg_num_mask = 0x1f; + + /* dev is fully set up and ready to use now */ + + // offer device to EtherCAT master module + tp->ecdev = ecdev_offer(dev, ec_poll, THIS_MODULE); + + if (!tp->ecdev) { + pr_debug("about to register device named %s (%p)...\n", + dev->name, dev); + i = register_netdev (dev); + if (i) goto err_out; + } + + pci_set_drvdata (pdev, dev); + + pr_info("%s: %s at 0x%lx, %pM, IRQ %d\n", + dev->name, + board_info[ent->driver_data].name, + dev->base_addr, + dev->dev_addr, + dev->irq); + + pr_debug("%s: Identified 8139 chip type '%s'\n", + dev->name, rtl_chip_info[tp->chipset].name); + + /* Find the connected MII xcvrs. + Doing this in open() would allow detecting external xcvrs later, but + takes too much time. */ +#ifdef CONFIG_8139TOO_8129 + if (tp->drv_flags & HAS_MII_XCVR) { + int phy, phy_idx = 0; + for (phy = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) { + int mii_status = mdio_read(dev, phy, 1); + if (mii_status != 0xffff && mii_status != 0x0000) { + u16 advertising = mdio_read(dev, phy, 4); + tp->phys[phy_idx++] = phy; + pr_info("%s: MII transceiver %d status 0x%4.4x advertising %4.4x.\n", + dev->name, phy, mii_status, advertising); + } + } + if (phy_idx == 0) { + pr_info("%s: No MII transceivers found! Assuming SYM transceiver.\n", + dev->name); + tp->phys[0] = 32; + } + } else +#endif + tp->phys[0] = 32; + tp->mii.phy_id = tp->phys[0]; + + /* The lower four bits are the media type. 
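Bit 5 of the media[] option selects 100 Mbps and bits 4/9 force full duplex, which is how the option value is decoded just below. 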
*/ + option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx]; + if (option > 0) { + tp->mii.full_duplex = (option & 0x210) ? 1 : 0; + tp->default_port = option & 0xFF; + if (tp->default_port) + tp->mii.force_media = 1; + } + if (board_idx < MAX_UNITS && full_duplex[board_idx] > 0) + tp->mii.full_duplex = full_duplex[board_idx]; + if (tp->mii.full_duplex) { + pr_info("%s: Media type forced to Full Duplex.\n", dev->name); + /* Changing the MII-advertised media because might prevent + re-connection. */ + tp->mii.force_media = 1; + } + if (tp->default_port) { + pr_info(" Forcing %dMbps %s-duplex operation.\n", + (option & 0x20 ? 100 : 10), + (option & 0x10 ? "full" : "half")); + mdio_write(dev, tp->phys[0], 0, + ((option & 0x20) ? 0x2000 : 0) | /* 100Mbps? */ + ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */ + } + + /* Put the chip into low-power mode. */ + if (rtl_chip_info[tp->chipset].flags & HasHltClk) + RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */ + + if (tp->ecdev) { + i = ecdev_open(tp->ecdev); + if (i) { + ecdev_withdraw(tp->ecdev); + goto err_out; + } + } + + return 0; + +err_out: + __rtl8139_cleanup_dev (dev); + pci_disable_device (pdev); + return i; +} + + +static void rtl8139_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata (pdev); + struct rtl8139_private *tp = netdev_priv(dev); + + assert (dev != NULL); + + if (tp->ecdev) { + ecdev_close(tp->ecdev); + ecdev_withdraw(tp->ecdev); + } + else { + cancel_delayed_work_sync(&tp->thread); + + unregister_netdev (dev); + } + + __rtl8139_cleanup_dev (dev); + pci_disable_device (pdev); +} + + +/* Serial EEPROM section. */ + +/* EEPROM_Ctrl bits. */ +#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */ +#define EE_CS 0x08 /* EEPROM chip select. */ +#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */ +#define EE_WRITE_0 0x00 +#define EE_WRITE_1 0x02 +#define EE_DATA_READ 0x01 /* EEPROM chip data out. */ +#define EE_ENB (0x80 | EE_CS) + +/* Delay between EEPROM clock transitions. + No extra delay is needed with 33Mhz PCI, but 66Mhz may change this. + */ + +#define eeprom_delay() (void)RTL_R8(Cfg9346) + +/* The EEPROM commands include the alway-set leading bit. */ +#define EE_WRITE_CMD (5) +#define EE_READ_CMD (6) +#define EE_ERASE_CMD (7) + +static int read_eeprom(void __iomem *ioaddr, int location, int addr_len) +{ + int i; + unsigned retval = 0; + int read_cmd = location | (EE_READ_CMD << addr_len); + + RTL_W8 (Cfg9346, EE_ENB & ~EE_CS); + RTL_W8 (Cfg9346, EE_ENB); + eeprom_delay (); + + /* Shift the read command bits out. */ + for (i = 4 + addr_len; i >= 0; i--) { + int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0; + RTL_W8 (Cfg9346, EE_ENB | dataval); + eeprom_delay (); + RTL_W8 (Cfg9346, EE_ENB | dataval | EE_SHIFT_CLK); + eeprom_delay (); + } + RTL_W8 (Cfg9346, EE_ENB); + eeprom_delay (); + + for (i = 16; i > 0; i--) { + RTL_W8 (Cfg9346, EE_ENB | EE_SHIFT_CLK); + eeprom_delay (); + retval = + (retval << 1) | ((RTL_R8 (Cfg9346) & EE_DATA_READ) ? 1 : + 0); + RTL_W8 (Cfg9346, EE_ENB); + eeprom_delay (); + } + + /* Terminate the EEPROM access. */ + RTL_W8(Cfg9346, 0); + eeprom_delay (); + + return retval; +} + +/* MII serial management: mostly bogus for now. */ +/* Read and write the MII management registers using software-generated + serial MDIO protocol. + The maximum data clock rate is 2.5 Mhz. The minimum timing is usually + met by back-to-back PCI I/O cycles, but we insert a delay to avoid + "overclocking" issues. 
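The mdio_delay() macro below provides that delay as a dummy read of the Config4 register. 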
*/ +#define MDIO_DIR 0x80 +#define MDIO_DATA_OUT 0x04 +#define MDIO_DATA_IN 0x02 +#define MDIO_CLK 0x01 +#define MDIO_WRITE0 (MDIO_DIR) +#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT) + +#define mdio_delay() RTL_R8(Config4) + + +static const char mii_2_8139_map[8] = { + BasicModeCtrl, + BasicModeStatus, + 0, + 0, + NWayAdvert, + NWayLPAR, + NWayExpansion, + 0 +}; + + +#ifdef CONFIG_8139TOO_8129 +/* Syncronize the MII management interface by shifting 32 one bits out. */ +static void mdio_sync (void __iomem *ioaddr) +{ + int i; + + for (i = 32; i >= 0; i--) { + RTL_W8 (Config4, MDIO_WRITE1); + mdio_delay (); + RTL_W8 (Config4, MDIO_WRITE1 | MDIO_CLK); + mdio_delay (); + } +} +#endif + +static int mdio_read (struct net_device *dev, int phy_id, int location) +{ + struct rtl8139_private *tp = netdev_priv(dev); + int retval = 0; +#ifdef CONFIG_8139TOO_8129 + void __iomem *ioaddr = tp->mmio_addr; + int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; + int i; +#endif + + if (phy_id > 31) { /* Really a 8139. Use internal registers. */ + void __iomem *ioaddr = tp->mmio_addr; + return location < 8 && mii_2_8139_map[location] ? + RTL_R16 (mii_2_8139_map[location]) : 0; + } + +#ifdef CONFIG_8139TOO_8129 + mdio_sync (ioaddr); + /* Shift the read command bits out. */ + for (i = 15; i >= 0; i--) { + int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0; + + RTL_W8 (Config4, MDIO_DIR | dataval); + mdio_delay (); + RTL_W8 (Config4, MDIO_DIR | dataval | MDIO_CLK); + mdio_delay (); + } + + /* Read the two transition, 16 data, and wire-idle bits. */ + for (i = 19; i > 0; i--) { + RTL_W8 (Config4, 0); + mdio_delay (); + retval = (retval << 1) | ((RTL_R8 (Config4) & MDIO_DATA_IN) ? 1 : 0); + RTL_W8 (Config4, MDIO_CLK); + mdio_delay (); + } +#endif + + return (retval >> 1) & 0xffff; +} + + +static void mdio_write (struct net_device *dev, int phy_id, int location, + int value) +{ + struct rtl8139_private *tp = netdev_priv(dev); +#ifdef CONFIG_8139TOO_8129 + void __iomem *ioaddr = tp->mmio_addr; + int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value; + int i; +#endif + + if (phy_id > 31) { /* Really a 8139. Use internal registers. */ + void __iomem *ioaddr = tp->mmio_addr; + if (location == 0) { + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W16 (BasicModeCtrl, value); + RTL_W8 (Cfg9346, Cfg9346_Lock); + } else if (location < 8 && mii_2_8139_map[location]) + RTL_W16 (mii_2_8139_map[location], value); + return; + } + +#ifdef CONFIG_8139TOO_8129 + mdio_sync (ioaddr); + + /* Shift the command bits out. */ + for (i = 31; i >= 0; i--) { + int dataval = + (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; + RTL_W8 (Config4, dataval); + mdio_delay (); + RTL_W8 (Config4, dataval | MDIO_CLK); + mdio_delay (); + } + /* Clear out extra bits. 
*/ + for (i = 2; i > 0; i--) { + RTL_W8 (Config4, 0); + mdio_delay (); + RTL_W8 (Config4, MDIO_CLK); + mdio_delay (); + } +#endif +} + + +static int rtl8139_open (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + const int irq = tp->pci_dev->irq; + int retval; + + if (!tp->ecdev) { + retval = request_irq(irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev); + if (retval) + return retval; + } + + tp->tx_bufs = dma_alloc_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, + &tp->tx_bufs_dma, GFP_KERNEL); + tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, + &tp->rx_ring_dma, GFP_KERNEL); + if (tp->tx_bufs == NULL || tp->rx_ring == NULL) { + if (!tp->ecdev) { + free_irq(irq, dev); + } + + if (tp->tx_bufs) + dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, + tp->tx_bufs, tp->tx_bufs_dma); + if (tp->rx_ring) + dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, + tp->rx_ring, tp->rx_ring_dma); + + return -ENOMEM; + + } + + napi_enable(&tp->napi); + + tp->mii.full_duplex = tp->mii.force_media; + tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000; + + rtl8139_init_ring (dev); + rtl8139_hw_start (dev); + if (!tp->ecdev) { + netif_start_queue (dev); + } + + netif_dbg(tp, ifup, dev, + "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n", + __func__, + (unsigned long long)pci_resource_start (tp->pci_dev, 1), + irq, RTL_R8 (MediaStatus), + tp->mii.full_duplex ? "full" : "half"); + + if (!tp->ecdev) { + rtl8139_start_thread(tp); + } + + return 0; +} + + +static void rtl_check_media (struct net_device *dev, unsigned int init_media) +{ + struct rtl8139_private *tp = netdev_priv(dev); + + if (tp->ecdev) { + void __iomem *ioaddr = tp->mmio_addr; + u16 state = RTL_R16(BasicModeStatus) & BMSR_LSTATUS; + ecdev_set_link(tp->ecdev, state ? 1 : 0); + } + else { + if (tp->phys[0] >= 0) { + mii_check_media(&tp->mii, netif_msg_link(tp), init_media); + } + } +} + +/* Start the hardware at open or resume. */ +static void rtl8139_hw_start (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 i; + u8 tmp; + + /* Bring old chips out of low-power mode. */ + if (rtl_chip_info[tp->chipset].flags & HasHltClk) + RTL_W8 (HltClk, 'R'); + + rtl8139_chip_reset (ioaddr); + + /* unlock Config[01234] and BMCR register writes */ + RTL_W8_F (Cfg9346, Cfg9346_Unlock); + /* Restore our idea of the MAC address. */ + RTL_W32_F (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); + RTL_W32_F (MAC0 + 4, le16_to_cpu (*(__le16 *) (dev->dev_addr + 4))); + + tp->cur_rx = 0; + + /* init Rx ring buffer DMA address */ + RTL_W32_F (RxBuf, tp->rx_ring_dma); + + /* Must enable Tx/Rx before setting transfer thresholds! */ + RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); + + tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys; + RTL_W32 (RxConfig, tp->rx_config); + RTL_W32 (TxConfig, rtl8139_tx_config); + + rtl_check_media (dev, 1); + + if (tp->chipset >= CH_8139B) { + /* Disable magic packet scanning, which is enabled + * when PM is enabled in Config1. It can be reenabled + * via ETHTOOL_SWOL if desired. 
*/ + RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic); + } + + netdev_dbg(dev, "init buffer addresses\n"); + + /* Lock Config[01234] and BMCR register writes */ + RTL_W8 (Cfg9346, Cfg9346_Lock); + + /* init Tx buffer DMA addresses */ + for (i = 0; i < NUM_TX_DESC; i++) + RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs)); + + RTL_W32 (RxMissed, 0); + + rtl8139_set_rx_mode (dev); + + /* no early-rx interrupts */ + RTL_W16 (MultiIntr, RTL_R16 (MultiIntr) & MultiIntrClear); + + /* make sure RxTx has started */ + tmp = RTL_R8 (ChipCmd); + if ((!(tmp & CmdRxEnb)) || (!(tmp & CmdTxEnb))) + RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); + + if (!tp->ecdev) { + /* Enable all known interrupts by setting the interrupt mask. */ + RTL_W16 (IntrMask, rtl8139_intr_mask); + } +} + + +/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ +static void rtl8139_init_ring (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + int i; + + tp->cur_rx = 0; + tp->cur_tx = 0; + tp->dirty_tx = 0; + + for (i = 0; i < NUM_TX_DESC; i++) + tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE]; +} + + +/* This must be global for CONFIG_8139TOO_TUNE_TWISTER case */ +static int next_tick = 3 * HZ; + +#ifndef CONFIG_8139TOO_TUNE_TWISTER +static inline void rtl8139_tune_twister (struct net_device *dev, + struct rtl8139_private *tp) {} +#else +enum TwisterParamVals { + PARA78_default = 0x78fa8388, + PARA7c_default = 0xcb38de43, /* param[0][3] */ + PARA7c_xxx = 0xcb38de43, +}; + +static const unsigned long param[4][4] = { + {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43}, + {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83}, + {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83}, + {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83} +}; + +static void rtl8139_tune_twister (struct net_device *dev, + struct rtl8139_private *tp) +{ + int linkcase; + void __iomem *ioaddr = tp->mmio_addr; + + /* This is a complicated state machine to configure the "twister" for + impedance/echos based on the cable length. + All of this is magic and undocumented. + */ + switch (tp->twistie) { + case 1: + if (RTL_R16 (CSCR) & CSCR_LinkOKBit) { + /* We have link beat, let us tune the twister. */ + RTL_W16 (CSCR, CSCR_LinkDownOffCmd); + tp->twistie = 2; /* Change to state 2. */ + next_tick = HZ / 10; + } else { + /* Just put in some reasonable defaults for when beat returns. */ + RTL_W16 (CSCR, CSCR_LinkDownCmd); + RTL_W32 (FIFOTMS, 0x20); /* Turn on cable test mode. */ + RTL_W32 (PARA78, PARA78_default); + RTL_W32 (PARA7c, PARA7c_default); + tp->twistie = 0; /* Bail from future actions. */ + } + break; + case 2: + /* Read how long it took to hear the echo. */ + linkcase = RTL_R16 (CSCR) & CSCR_LinkStatusBits; + if (linkcase == 0x7000) + tp->twist_row = 3; + else if (linkcase == 0x3000) + tp->twist_row = 2; + else if (linkcase == 0x1000) + tp->twist_row = 1; + else + tp->twist_row = 0; + tp->twist_col = 0; + tp->twistie = 3; /* Change to state 2. */ + next_tick = HZ / 10; + break; + case 3: + /* Put out four tuning parameters, one per 100msec. */ + if (tp->twist_col == 0) + RTL_W16 (FIFOTMS, 0); + RTL_W32 (PARA7c, param[(int) tp->twist_row] + [(int) tp->twist_col]); + next_tick = HZ / 10; + if (++tp->twist_col >= 4) { + /* For short cables we are done. + For long cables (row == 3) check for mistune. */ + tp->twistie = + (tp->twist_row == 3) ? 4 : 0; + } + break; + case 4: + /* Special case for long cables: check for mistune. 
*/ + if ((RTL_R16 (CSCR) & + CSCR_LinkStatusBits) == 0x7000) { + tp->twistie = 0; + break; + } else { + RTL_W32 (PARA7c, 0xfb38de03); + tp->twistie = 5; + next_tick = HZ / 10; + } + break; + case 5: + /* Retune for shorter cable (column 2). */ + RTL_W32 (FIFOTMS, 0x20); + RTL_W32 (PARA78, PARA78_default); + RTL_W32 (PARA7c, PARA7c_default); + RTL_W32 (FIFOTMS, 0x00); + tp->twist_row = 2; + tp->twist_col = 0; + tp->twistie = 3; + next_tick = HZ / 10; + break; + + default: + /* do nothing */ + break; + } +} +#endif /* CONFIG_8139TOO_TUNE_TWISTER */ + +static inline void rtl8139_thread_iter (struct net_device *dev, + struct rtl8139_private *tp, + void __iomem *ioaddr) +{ + int mii_lpa; + + mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA); + + if (!tp->mii.force_media && mii_lpa != 0xffff) { + int duplex = ((mii_lpa & LPA_100FULL) || + (mii_lpa & 0x01C0) == 0x0040); + if (tp->mii.full_duplex != duplex) { + tp->mii.full_duplex = duplex; + + if (mii_lpa) { + netdev_info(dev, "Setting %s-duplex based on MII #%d link partner ability of %04x\n", + tp->mii.full_duplex ? "full" : "half", + tp->phys[0], mii_lpa); + } else { + netdev_info(dev, "media is unconnected, link down, or incompatible connection\n"); + } +#if 0 + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W8 (Config1, tp->mii.full_duplex ? 0x60 : 0x20); + RTL_W8 (Cfg9346, Cfg9346_Lock); +#endif + } + } + + next_tick = HZ * 60; + + rtl8139_tune_twister (dev, tp); + + netdev_dbg(dev, "Media selection tick, Link partner %04x\n", + RTL_R16(NWayLPAR)); + netdev_dbg(dev, "Other registers are IntMask %04x IntStatus %04x\n", + RTL_R16(IntrMask), RTL_R16(IntrStatus)); + netdev_dbg(dev, "Chip config %02x %02x\n", + RTL_R8(Config0), RTL_R8(Config1)); +} + +static void rtl8139_thread (struct work_struct *work) +{ + struct rtl8139_private *tp = + container_of(work, struct rtl8139_private, thread.work); + struct net_device *dev = tp->mii.dev; + unsigned long thr_delay = next_tick; + + rtnl_lock(); + + if (!netif_running(dev)) + goto out_unlock; + + if (tp->watchdog_fired) { + tp->watchdog_fired = 0; + rtl8139_tx_timeout_task(work); + } else + rtl8139_thread_iter(dev, tp, tp->mmio_addr); + + if (tp->have_thread) + schedule_delayed_work(&tp->thread, thr_delay); +out_unlock: + rtnl_unlock (); +} + +static void rtl8139_start_thread(struct rtl8139_private *tp) +{ + tp->twistie = 0; + if (tp->chipset == CH_8139_K) + tp->twistie = 1; + else if (tp->drv_flags & HAS_LNK_CHNG) + return; + + tp->have_thread = 1; + tp->watchdog_fired = 0; + + schedule_delayed_work(&tp->thread, next_tick); +} + +static inline void rtl8139_tx_clear (struct rtl8139_private *tp) +{ + tp->cur_tx = 0; + tp->dirty_tx = 0; + + /* XXX account for unsent Tx packets in tp->stats.tx_dropped */ +} + +static void rtl8139_tx_timeout_task (struct work_struct *work) +{ + struct rtl8139_private *tp = + container_of(work, struct rtl8139_private, thread.work); + struct net_device *dev = tp->mii.dev; + void __iomem *ioaddr = tp->mmio_addr; + int i; + u8 tmp8; + + napi_disable(&tp->napi); + netif_stop_queue(dev); + synchronize_rcu(); + + netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n", + RTL_R8(ChipCmd), RTL_R16(IntrStatus), + RTL_R16(IntrMask), RTL_R8(MediaStatus)); + /* Emit info to figure out what went wrong. */ + netdev_dbg(dev, "Tx queue start entry %ld dirty entry %ld\n", + tp->cur_tx, tp->dirty_tx); + for (i = 0; i < NUM_TX_DESC; i++) + netdev_dbg(dev, "Tx descriptor %d is %08x%s\n", + i, RTL_R32(TxStatus0 + (i * 4)), + i == tp->dirty_tx % NUM_TX_DESC ? 
+ " (queue head)" : ""); + + tp->xstats.tx_timeouts++; + + /* disable Tx ASAP, if not already */ + tmp8 = RTL_R8 (ChipCmd); + if (tmp8 & CmdTxEnb) + RTL_W8 (ChipCmd, CmdRxEnb); + + if (tp->ecdev) { + rtl8139_tx_clear (tp); + rtl8139_hw_start (dev); + } + else { + spin_lock_bh(&tp->rx_lock); + /* Disable interrupts by clearing the interrupt mask. */ + RTL_W16 (IntrMask, 0x0000); + + /* Stop a shared interrupt from scavenging while we are. */ + spin_lock_irq(&tp->lock); + rtl8139_tx_clear (tp); + spin_unlock_irq(&tp->lock); + + /* ...and finally, reset everything */ + napi_enable(&tp->napi); + rtl8139_hw_start(dev); + netif_wake_queue(dev); + + spin_unlock_bh(&tp->rx_lock); + } +} + +static void rtl8139_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct rtl8139_private *tp = netdev_priv(dev); + + tp->watchdog_fired = 1; + if (!tp->ecdev && !tp->have_thread) { + INIT_DELAYED_WORK(&tp->thread, rtl8139_thread); + schedule_delayed_work(&tp->thread, next_tick); + } +} + +static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb, + struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned int entry; + unsigned int len = skb->len; + unsigned long flags = 0; + + /* Calculate the next Tx descriptor entry. */ + entry = tp->cur_tx % NUM_TX_DESC; + + /* Note: the chip doesn't have auto-pad! */ + if (likely(len < TX_BUF_SIZE)) { + if (len < ETH_ZLEN) + memset(tp->tx_buf[entry], 0, ETH_ZLEN); + skb_copy_and_csum_dev(skb, tp->tx_buf[entry]); + if (!tp->ecdev) { + dev_kfree_skb_any(skb); + } + } else { + if (!tp->ecdev) { + dev_kfree_skb_any(skb); + } + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + if (!tp->ecdev) { + spin_lock_irqsave(&tp->lock, flags); + } + /* + * Writing to TxStatus triggers a DMA transfer of the data + * copied to tp->tx_buf[entry] above. Use a memory barrier + * to make sure that the device sees the updated data. + */ + wmb(); + RTL_W32_F (TxStatus0 + (entry * sizeof (u32)), + tp->tx_flag | max(len, (unsigned int)ETH_ZLEN)); + + tp->cur_tx++; + + if (!tp->ecdev) { + if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) + netif_stop_queue (dev); + spin_unlock_irqrestore(&tp->lock, flags); + } + + netif_dbg(tp, tx_queued, dev, "Queued Tx packet size %u to slot %d\n", + len, entry); + + return NETDEV_TX_OK; +} + + +static void rtl8139_tx_interrupt (struct net_device *dev, + struct rtl8139_private *tp, + void __iomem *ioaddr) +{ + unsigned long dirty_tx, tx_left; + + assert (dev != NULL); + assert (ioaddr != NULL); + + dirty_tx = tp->dirty_tx; + tx_left = tp->cur_tx - dirty_tx; + while (tx_left > 0) { + int entry = dirty_tx % NUM_TX_DESC; + int txstatus; + + txstatus = RTL_R32 (TxStatus0 + (entry * sizeof (u32))); + + if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted))) + break; /* It still hasn't been Txed */ + + /* Note: TxCarrierLost is always asserted at 100mbps. */ + if (txstatus & (TxOutOfWindow | TxAborted)) { + /* There was an major error, log it. */ + netif_dbg(tp, tx_err, dev, "Transmit error, Tx status %08x\n", + txstatus); + dev->stats.tx_errors++; + if (txstatus & TxAborted) { + dev->stats.tx_aborted_errors++; + RTL_W32 (TxConfig, TxClearAbt); + RTL_W16 (IntrStatus, TxErr); + wmb(); + } + if (txstatus & TxCarrierLost) + dev->stats.tx_carrier_errors++; + if (txstatus & TxOutOfWindow) + dev->stats.tx_window_errors++; + } else { + if (txstatus & TxUnderrun) { + /* Add 64 to the Tx FIFO threshold. 
*/ + if (tp->tx_flag < 0x00300000) + tp->tx_flag += 0x00020000; + dev->stats.tx_fifo_errors++; + } + dev->stats.collisions += (txstatus >> 24) & 15; + u64_stats_update_begin(&tp->tx_stats.syncp); + tp->tx_stats.packets++; + tp->tx_stats.bytes += txstatus & 0x7ff; + u64_stats_update_end(&tp->tx_stats.syncp); + } + + dirty_tx++; + tx_left--; + } + +#ifndef RTL8139_NDEBUG + if (tp->cur_tx - dirty_tx > NUM_TX_DESC) { + pr_err("%s: Out-of-sync dirty pointer, %ld vs. %ld.\n", + dev->name, dirty_tx, tp->cur_tx); + dirty_tx += NUM_TX_DESC; + } +#endif /* RTL8139_NDEBUG */ + + /* only wake the queue if we did work, and the queue is stopped */ + if (tp->dirty_tx != dirty_tx) { + tp->dirty_tx = dirty_tx; + mb(); + if (!tp->ecdev) { + netif_wake_queue (dev); + } + } +} + + +/* TODO: clean this up! Rx reset need not be this intensive */ +static void rtl8139_rx_err (u32 rx_status, struct net_device *dev, + struct rtl8139_private *tp, void __iomem *ioaddr) +{ + u8 tmp8; +#ifdef CONFIG_8139_OLD_RX_RESET + int tmp_work; +#endif + + if (netif_msg_rx_err (tp)) + pr_debug("%s: Ethernet frame had errors, status %8.8x.\n", + dev->name, rx_status); + dev->stats.rx_errors++; + if (!(rx_status & RxStatusOK)) { + if (rx_status & RxTooLong) { + pr_debug("%s: Oversized Ethernet frame, status %4.4x!\n", + dev->name, rx_status); + /* A.C.: The chip hangs here. */ + } + if (rx_status & (RxBadSymbol | RxBadAlign)) + dev->stats.rx_frame_errors++; + if (rx_status & (RxRunt | RxTooLong)) + dev->stats.rx_length_errors++; + if (rx_status & RxCRCErr) + dev->stats.rx_crc_errors++; + } else { + tp->xstats.rx_lost_in_ring++; + } + +#ifndef CONFIG_8139_OLD_RX_RESET + tmp8 = RTL_R8 (ChipCmd); + RTL_W8 (ChipCmd, tmp8 & ~CmdRxEnb); + RTL_W8 (ChipCmd, tmp8); + RTL_W32 (RxConfig, tp->rx_config); + tp->cur_rx = 0; +#else + /* Reset the receiver, based on RealTek recommendation. (Bug?) */ + + /* disable receive */ + RTL_W8_F (ChipCmd, CmdTxEnb); + tmp_work = 200; + while (--tmp_work > 0) { + udelay(1); + tmp8 = RTL_R8 (ChipCmd); + if (!(tmp8 & CmdRxEnb)) + break; + } + if (tmp_work <= 0) + pr_warning(PFX "rx stop wait too long\n"); + /* restart receive */ + tmp_work = 200; + while (--tmp_work > 0) { + RTL_W8_F (ChipCmd, CmdRxEnb | CmdTxEnb); + udelay(1); + tmp8 = RTL_R8 (ChipCmd); + if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb)) + break; + } + if (tmp_work <= 0) + pr_warning(PFX "tx/rx enable wait too long\n"); + + /* and reinitialize all rx related registers */ + RTL_W8_F (Cfg9346, Cfg9346_Unlock); + /* Must enable Tx/Rx before setting transfer thresholds! */ + RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); + + tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys; + RTL_W32 (RxConfig, tp->rx_config); + tp->cur_rx = 0; + + pr_debug("init buffer addresses\n"); + + /* Lock Config[01234] and BMCR register writes */ + RTL_W8 (Cfg9346, Cfg9346_Lock); + + /* init Rx ring buffer DMA address */ + RTL_W32_F (RxBuf, tp->rx_ring_dma); + + /* A.C.: Reset the multicast list. 
*/ + __set_rx_mode (dev); +#endif +} + +#if RX_BUF_IDX == 3 +static inline void wrap_copy(struct sk_buff *skb, const unsigned char *ring, + u32 offset, unsigned int size) +{ + u32 left = RX_BUF_LEN - offset; + + if (size > left) { + skb_copy_to_linear_data(skb, ring + offset, left); + skb_copy_to_linear_data_offset(skb, left, ring, size - left); + } else + skb_copy_to_linear_data(skb, ring + offset, size); +} +#endif + +static void rtl8139_isr_ack(struct rtl8139_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + u16 status; + + status = RTL_R16 (IntrStatus) & RxAckBits; + + /* Clear out errors and receive interrupts */ + if (likely(status != 0)) { + if (unlikely(status & (RxFIFOOver | RxOverflow))) { + tp->dev->stats.rx_errors++; + if (status & RxFIFOOver) + tp->dev->stats.rx_fifo_errors++; + } + RTL_W16_F (IntrStatus, RxAckBits); + } +} + +static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp, + int budget) +{ + void __iomem *ioaddr = tp->mmio_addr; + int received = 0; + unsigned char *rx_ring = tp->rx_ring; + unsigned int cur_rx = tp->cur_rx; + unsigned int rx_size = 0; + + pr_debug("%s: In rtl8139_rx(), current %4.4x BufAddr %4.4x," + " free to %4.4x, Cmd %2.2x.\n", dev->name, (u16)cur_rx, + RTL_R16 (RxBufAddr), + RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd)); + + while ((tp->ecdev || netif_running(dev)) + && received < budget + && (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) { + u32 ring_offset = cur_rx % RX_BUF_LEN; + u32 rx_status; + unsigned int pkt_size; + struct sk_buff *skb; + + rmb(); + + /* read size+status of next frame from DMA ring buffer */ + rx_status = le32_to_cpu (*(__le32 *) (rx_ring + ring_offset)); + rx_size = rx_status >> 16; + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size = rx_size - 4; + else + pkt_size = rx_size; + + if (!tp->ecdev) { + if (netif_msg_rx_status(tp)) + pr_debug("%s: rtl8139_rx() status %4.4x, size %4.4x," + " cur %4.4x.\n", dev->name, rx_status, + rx_size, cur_rx); + } +#if RTL8139_DEBUG > 2 + { + int i; + pr_debug("%s: Frame contents ", dev->name); + for (i = 0; i < 70; i++) + pr_cont(" %2.2x", + rx_ring[ring_offset + i]); + pr_cont(".\n"); + } +#endif + + /* Packet copy from FIFO still in progress. + * Theoretically, this should never happen + * since EarlyRx is disabled. + */ + if (unlikely(rx_size == 0xfff0)) { + if (!tp->fifo_copy_timeout) + tp->fifo_copy_timeout = jiffies + 2; + else if (time_after(jiffies, tp->fifo_copy_timeout)) { + pr_debug("%s: hung FIFO. Reset.", dev->name); + rx_size = 0; + goto no_early_rx; + } + if (netif_msg_intr(tp)) { + pr_debug("%s: fifo copy in progress.", + dev->name); + } + tp->xstats.early_rx++; + break; + } + +no_early_rx: + tp->fifo_copy_timeout = 0; + + /* If Rx err or invalid rx_size/rx_status received + * (which happens if we get lost in the ring), + * Rx process gets reset, so we abort any further + * Rx processing. + */ + if (unlikely((rx_size > (MAX_ETH_FRAME_SIZE+4)) || + (rx_size < 8) || + (!(rx_status & RxStatusOK)))) { + if ((dev->features & NETIF_F_RXALL) && + (rx_size <= (MAX_ETH_FRAME_SIZE + 4)) && + (rx_size >= 8) && + (!(rx_status & RxStatusOK))) { + /* Length is at least mostly OK, but pkt has + * error. I'm hoping we can handle some of these + * errors without resetting the chip. 
--Ben + */ + dev->stats.rx_errors++; + if (rx_status & RxCRCErr) { + dev->stats.rx_crc_errors++; + goto keep_pkt; + } + if (rx_status & RxRunt) { + dev->stats.rx_length_errors++; + goto keep_pkt; + } + } + rtl8139_rx_err (rx_status, dev, tp, ioaddr); + received = -1; + goto out; + } + +keep_pkt: + if (tp->ecdev) { + ecdev_receive(tp->ecdev, &rx_ring[ring_offset + 4], pkt_size); + } + else { + /* Malloc up new buffer, compatible with net-2e. */ + /* Omit the four octet CRC from the length. */ + + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (likely(skb)) { +#if RX_BUF_IDX == 3 + wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); +#else + skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], + pkt_size); +#endif + skb_put (skb, pkt_size); + + skb->protocol = eth_type_trans (skb, dev); + + u64_stats_update_begin(&tp->rx_stats.syncp); + tp->rx_stats.packets++; + tp->rx_stats.bytes += pkt_size; + u64_stats_update_end(&tp->rx_stats.syncp); + + netif_receive_skb (skb); + } else { + dev->stats.rx_dropped++; + } + } + received++; + + cur_rx = (cur_rx + rx_size + 4 + 3) & ~3; + RTL_W16 (RxBufPtr, (u16) (cur_rx - 16)); + + rtl8139_isr_ack(tp); + } + + if (unlikely(!received || rx_size == 0xfff0)) + rtl8139_isr_ack(tp); + + pr_debug("%s: Done rtl8139_rx(), current %4.4x BufAddr %4.4x," + " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx, + RTL_R16 (RxBufAddr), + RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd)); + + tp->cur_rx = cur_rx; + + /* + * The receive buffer should be mostly empty. + * Tell NAPI to reenable the Rx irq. + */ + if (tp->fifo_copy_timeout) + received = budget; + +out: + return received; +} + + +static void rtl8139_weird_interrupt (struct net_device *dev, + struct rtl8139_private *tp, + void __iomem *ioaddr, + int status, int link_changed) +{ + pr_debug("%s: Abnormal interrupt, status %8.8x.\n", + dev->name, status); + + assert (dev != NULL); + assert (tp != NULL); + assert (ioaddr != NULL); + + /* Update the error count. */ + dev->stats.rx_missed_errors += RTL_R32 (RxMissed); + RTL_W32 (RxMissed, 0); + + if ((status & RxUnderrun) && link_changed && + (tp->drv_flags & HAS_LNK_CHNG)) { + rtl_check_media(dev, 0); + status &= ~RxUnderrun; + } + + if (status & (RxUnderrun | RxErr)) + dev->stats.rx_errors++; + + if (status & PCSTimeout) + dev->stats.rx_length_errors++; + if (status & RxUnderrun) + dev->stats.rx_fifo_errors++; + if (status & PCIErr) { + u16 pci_cmd_status; + pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status); + pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status); + + pr_err("%s: PCI Bus error %4.4x.\n", + dev->name, pci_cmd_status); + } +} + +static int rtl8139_poll(struct napi_struct *napi, int budget) +{ + struct rtl8139_private *tp = container_of(napi, struct rtl8139_private, napi); + struct net_device *dev = tp->dev; + void __iomem *ioaddr = tp->mmio_addr; + int work_done; + + spin_lock(&tp->rx_lock); + work_done = 0; + if (likely(RTL_R16(IntrStatus) & RxAckBits)) + work_done += rtl8139_rx(dev, tp, budget); + + if (work_done < budget) { + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + if (napi_complete_done(napi, work_done)) + RTL_W16_F(IntrMask, rtl8139_intr_mask); + spin_unlock_irqrestore(&tp->lock, flags); + } + spin_unlock(&tp->rx_lock); + + return work_done; +} + +void ec_poll(struct net_device *dev) +{ + rtl8139_interrupt(0, dev); +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. 
*/ +static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance) +{ + struct net_device *dev = (struct net_device *) dev_instance; + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u16 status, ackstat; + int link_changed = 0; /* avoid bogus "uninit" warning */ + int handled = 0; + + if (tp->ecdev) { + status = RTL_R16 (IntrStatus); + } + else { + spin_lock (&tp->lock); + status = RTL_R16 (IntrStatus); + + /* shared irq? */ + if (unlikely((status & rtl8139_intr_mask) == 0)) + goto out; + } + + handled = 1; + + /* h/w no longer present (hotplug?) or major error, bail */ + if (unlikely(status == 0xFFFF)) + goto out; + + if (!tp->ecdev) { + /* close possible race's with dev_close */ + if (unlikely(!netif_running(dev))) { + RTL_W16 (IntrMask, 0); + goto out; + } + } + + /* Acknowledge all of the current interrupt sources ASAP, but + an first get an additional status bit from CSCR. */ + if (unlikely(status & RxUnderrun)) + link_changed = RTL_R16 (CSCR) & CSCR_LinkChangeBit; + + ackstat = status & ~(RxAckBits | TxErr); + if (ackstat) + RTL_W16 (IntrStatus, ackstat); + + /* Receive packets are processed by poll routine. + If not running start it now. */ + if (status & RxAckBits){ + if (tp->ecdev) { + /* EtherCAT device: Just receive all frames */ + rtl8139_rx(dev, tp, 100); // FIXME + } else { + /* Mark for polling */ + if (napi_schedule_prep(&tp->napi)) { + RTL_W16_F (IntrMask, rtl8139_norx_intr_mask); + __napi_schedule(&tp->napi); + } + } + } + + /* Check uncommon events with one test. */ + if (unlikely(status & (PCIErr | PCSTimeout | RxUnderrun | RxErr))) + rtl8139_weird_interrupt (dev, tp, ioaddr, + status, link_changed); + + if (status & (TxOK | TxErr)) { + rtl8139_tx_interrupt (dev, tp, ioaddr); + if (status & TxErr) + RTL_W16 (IntrStatus, TxErr); + } +out: + if (!tp->ecdev) { + spin_unlock (&tp->lock); + } + + pr_debug("%s: exiting interrupt, intr_status=%#4.4x.\n", + dev->name, RTL_R16 (IntrStatus)); + return IRQ_RETVAL(handled); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling receive - used by netconsole and other diagnostic tools + * to allow network i/o with interrupts disabled. + */ +static void rtl8139_poll_controller(struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + const int irq = tp->pci_dev->irq; + + disable_irq_nosync(irq); + rtl8139_interrupt(irq, dev); + enable_irq(irq); +} +#endif + +static int rtl8139_set_mac_address(struct net_device *dev, void *p) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(dev, addr->sa_data); + + spin_lock_irq(&tp->lock); + + RTL_W8_F(Cfg9346, Cfg9346_Unlock); + RTL_W32_F(MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0))); + RTL_W32_F(MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4))); + RTL_W8_F(Cfg9346, Cfg9346_Lock); + + spin_unlock_irq(&tp->lock); + + return 0; +} + +static int rtl8139_close (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags = 0; + + if (!tp->ecdev) { + netif_stop_queue(dev); + napi_disable(&tp->napi); + + netif_dbg(tp, ifdown, dev, + "Shutting down ethercard, status was 0x%04x\n", + RTL_R16(IntrStatus)); + + spin_lock_irqsave (&tp->lock, flags); + } + + /* Stop the chip's Tx and Rx DMA processes. */ + RTL_W8 (ChipCmd, 0); + + /* Disable interrupts by clearing the interrupt mask. 
*/ + RTL_W16 (IntrMask, 0); + + /* Update the error counts. */ + dev->stats.rx_missed_errors += RTL_R32 (RxMissed); + RTL_W32 (RxMissed, 0); + + if (!tp->ecdev) { + spin_unlock_irqrestore (&tp->lock, flags); + + free_irq(tp->pci_dev->irq, dev); + } + + rtl8139_tx_clear (tp); + + dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, + tp->rx_ring, tp->rx_ring_dma); + dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, + tp->tx_bufs, tp->tx_bufs_dma); + tp->rx_ring = NULL; + tp->tx_bufs = NULL; + + /* Green! Put the chip in low-power mode. */ + RTL_W8 (Cfg9346, Cfg9346_Unlock); + + if (rtl_chip_info[tp->chipset].flags & HasHltClk) + RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */ + + return 0; +} + + +/* Get the ethtool Wake-on-LAN settings. Assumes that wol points to + kernel memory, *wol has been initialized as {ETHTOOL_GWOL}, and + other threads or interrupts aren't messing with the 8139. */ +static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + spin_lock_irq(&tp->lock); + if (rtl_chip_info[tp->chipset].flags & HasLWake) { + u8 cfg3 = RTL_R8 (Config3); + u8 cfg5 = RTL_R8 (Config5); + + wol->supported = WAKE_PHY | WAKE_MAGIC + | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; + + wol->wolopts = 0; + if (cfg3 & Cfg3_LinkUp) + wol->wolopts |= WAKE_PHY; + if (cfg3 & Cfg3_Magic) + wol->wolopts |= WAKE_MAGIC; + /* (KON)FIXME: See how netdev_set_wol() handles the + following constants. */ + if (cfg5 & Cfg5_UWF) + wol->wolopts |= WAKE_UCAST; + if (cfg5 & Cfg5_MWF) + wol->wolopts |= WAKE_MCAST; + if (cfg5 & Cfg5_BWF) + wol->wolopts |= WAKE_BCAST; + } + spin_unlock_irq(&tp->lock); +} + + +/* Set the ethtool Wake-on-LAN settings. Return 0 or -errno. Assumes + that wol points to kernel memory and other threads or interrupts + aren't messing with the 8139. */ +static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 support; + u8 cfg3, cfg5; + + support = ((rtl_chip_info[tp->chipset].flags & HasLWake) + ? (WAKE_PHY | WAKE_MAGIC + | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST) + : 0); + if (wol->wolopts & ~support) + return -EINVAL; + + spin_lock_irq(&tp->lock); + cfg3 = RTL_R8 (Config3) & ~(Cfg3_LinkUp | Cfg3_Magic); + if (wol->wolopts & WAKE_PHY) + cfg3 |= Cfg3_LinkUp; + if (wol->wolopts & WAKE_MAGIC) + cfg3 |= Cfg3_Magic; + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W8 (Config3, cfg3); + RTL_W8 (Cfg9346, Cfg9346_Lock); + + cfg5 = RTL_R8 (Config5) & ~(Cfg5_UWF | Cfg5_MWF | Cfg5_BWF); + /* (KON)FIXME: These are untested. We may have to set the + CRC0, Wakeup0 and LSBCRC0 registers too, but I have no + documentation. 
*/ + if (wol->wolopts & WAKE_UCAST) + cfg5 |= Cfg5_UWF; + if (wol->wolopts & WAKE_MCAST) + cfg5 |= Cfg5_MWF; + if (wol->wolopts & WAKE_BCAST) + cfg5 |= Cfg5_BWF; + RTL_W8 (Config5, cfg5); /* need not unlock via Cfg9346 */ + spin_unlock_irq(&tp->lock); + + return 0; +} + +static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct rtl8139_private *tp = netdev_priv(dev); + strscpy(info->driver, DRV_NAME, sizeof(info->driver)); + strscpy(info->version, DRV_VERSION, sizeof(info->version)); + strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); +} + +static int rtl8139_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct rtl8139_private *tp = netdev_priv(dev); + spin_lock_irq(&tp->lock); + mii_ethtool_get_link_ksettings(&tp->mii, cmd); + spin_unlock_irq(&tp->lock); + return 0; +} + +static int rtl8139_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct rtl8139_private *tp = netdev_priv(dev); + int rc; + spin_lock_irq(&tp->lock); + rc = mii_ethtool_set_link_ksettings(&tp->mii, cmd); + spin_unlock_irq(&tp->lock); + return rc; +} + +static int rtl8139_nway_reset(struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + return mii_nway_restart(&tp->mii); +} + +static u32 rtl8139_get_link(struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + return mii_link_ok(&tp->mii); +} + +static u32 rtl8139_get_msglevel(struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + return tp->msg_enable; +} + +static void rtl8139_set_msglevel(struct net_device *dev, u32 datum) +{ + struct rtl8139_private *tp = netdev_priv(dev); + tp->msg_enable = datum; +} + +static int rtl8139_get_regs_len(struct net_device *dev) +{ + struct rtl8139_private *tp; + /* TODO: we are too slack to do reg dumping for pio, for now */ + if (use_io) + return 0; + tp = netdev_priv(dev); + return tp->regs_len; +} + +static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) +{ + struct rtl8139_private *tp; + + /* TODO: we are too slack to do reg dumping for pio, for now */ + if (use_io) + return; + tp = netdev_priv(dev); + + regs->version = RTL_REGS_VER; + + spin_lock_irq(&tp->lock); + memcpy_fromio(regbuf, tp->mmio_addr, regs->len); + spin_unlock_irq(&tp->lock); +} + +static int rtl8139_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return RTL_NUM_STATS; + default: + return -EOPNOTSUPP; + } +} + +static void rtl8139_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) +{ + struct rtl8139_private *tp = netdev_priv(dev); + + data[0] = tp->xstats.early_rx; + data[1] = tp->xstats.tx_buf_mapped; + data[2] = tp->xstats.tx_timeouts; + data[3] = tp->xstats.rx_lost_in_ring; +} + +static void rtl8139_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys)); +} + +static const struct ethtool_ops rtl8139_ethtool_ops = { + .get_drvinfo = rtl8139_get_drvinfo, + .get_regs_len = rtl8139_get_regs_len, + .get_regs = rtl8139_get_regs, + .nway_reset = rtl8139_nway_reset, + .get_link = rtl8139_get_link, + .get_msglevel = rtl8139_get_msglevel, + .set_msglevel = rtl8139_set_msglevel, + .get_wol = rtl8139_get_wol, + .set_wol = rtl8139_set_wol, + .get_strings = rtl8139_get_strings, + .get_sset_count = rtl8139_get_sset_count, + .get_ethtool_stats = rtl8139_get_ethtool_stats, + 
.get_link_ksettings = rtl8139_get_link_ksettings, + .set_link_ksettings = rtl8139_set_link_ksettings, +}; + +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct rtl8139_private *tp = netdev_priv(dev); + int rc; + + if (tp->ecdev || !netif_running(dev)) + return -EINVAL; + + spin_lock_irq(&tp->lock); + rc = generic_mii_ioctl(&tp->mii, if_mii(rq), cmd, NULL); + spin_unlock_irq(&tp->lock); + + return rc; +} + + +static void +rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + unsigned int start; + + if (tp->ecdev || netif_running(dev)) { + spin_lock_irqsave (&tp->lock, flags); + dev->stats.rx_missed_errors += RTL_R32 (RxMissed); + RTL_W32 (RxMissed, 0); + spin_unlock_irqrestore (&tp->lock, flags); + } + + netdev_stats_to_stats64(stats, &dev->stats); + + do { + start = u64_stats_fetch_begin(&tp->rx_stats.syncp); + stats->rx_packets = tp->rx_stats.packets; + stats->rx_bytes = tp->rx_stats.bytes; + } while (u64_stats_fetch_retry(&tp->rx_stats.syncp, start)); + + do { + start = u64_stats_fetch_begin(&tp->tx_stats.syncp); + stats->tx_packets = tp->tx_stats.packets; + stats->tx_bytes = tp->tx_stats.bytes; + } while (u64_stats_fetch_retry(&tp->tx_stats.syncp, start)); +} + +/* Set or clear the multicast filter for this adaptor. + This routine is not state sensitive and need not be SMP locked. */ + +static void __set_rx_mode (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 mc_filter[2]; /* Multicast hash filter */ + int rx_mode; + u32 tmp; + + netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08x\n", + dev->flags, RTL_R32(RxConfig)); + + /* Note: do not reorder, GCC is clever about common statements. */ + if (dev->flags & IFF_PROMISC) { + rx_mode = + AcceptBroadcast | AcceptMulticast | AcceptMyPhys | + AcceptAllPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter perfectly -- accept all multicasts. */ + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else { + struct netdev_hw_addr *ha; + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + rx_mode |= AcceptMulticast; + } + } + + if (dev->features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + + /* We can safely update without stopping the chip. 
*/ + tmp = rtl8139_rx_config | rx_mode; + if (tp->rx_config != tmp) { + RTL_W32_F (RxConfig, tmp); + tp->rx_config = tmp; + } + RTL_W32_F (MAR0 + 0, mc_filter[0]); + RTL_W32_F (MAR0 + 4, mc_filter[1]); +} + +static void rtl8139_set_rx_mode (struct net_device *dev) +{ + unsigned long flags; + struct rtl8139_private *tp = netdev_priv(dev); + + spin_lock_irqsave (&tp->lock, flags); + __set_rx_mode(dev); + spin_unlock_irqrestore (&tp->lock, flags); +} + +static int __maybe_unused rtl8139_suspend(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + + if (tp->ecdev || !netif_running (dev)) + return 0; + + netif_device_detach (dev); + + spin_lock_irqsave (&tp->lock, flags); + + /* Disable interrupts, stop Tx and Rx. */ + RTL_W16 (IntrMask, 0); + RTL_W8 (ChipCmd, 0); + + /* Update the error counts. */ + dev->stats.rx_missed_errors += RTL_R32 (RxMissed); + RTL_W32 (RxMissed, 0); + + spin_unlock_irqrestore (&tp->lock, flags); + + return 0; +} + +static int __maybe_unused rtl8139_resume(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct rtl8139_private *tp = netdev_priv(dev); + + if (tp->ecdev || !netif_running (dev)) + return 0; + + rtl8139_init_ring (dev); + rtl8139_hw_start (dev); + netif_device_attach (dev); + return 0; +} + +static SIMPLE_DEV_PM_OPS(rtl8139_pm_ops, rtl8139_suspend, rtl8139_resume); + +static struct pci_driver rtl8139_pci_driver = { + .name = DRV_NAME, + .id_table = rtl8139_pci_tbl, + .probe = rtl8139_init_one, + .remove = rtl8139_remove_one, + .driver.pm = &rtl8139_pm_ops, +}; + + +static int __init rtl8139_init_module (void) +{ + /* when we're a module, we always print a version message, + * even if no 8139 board is found. + */ +#ifdef MODULE + pr_info(RTL8139_DRIVER_NAME "\n"); +#endif + + return pci_register_driver(&rtl8139_pci_driver); +} + + +static void __exit rtl8139_cleanup_module (void) +{ + pci_unregister_driver (&rtl8139_pci_driver); +} + + +module_init(rtl8139_init_module); +module_exit(rtl8139_cleanup_module); diff --git a/devices/8139too-6.1-orig.c b/devices/8139too-6.1-orig.c new file mode 100644 index 00000000..469e2e22 --- /dev/null +++ b/devices/8139too-6.1-orig.c @@ -0,0 +1,2678 @@ +/* + + 8139too.c: A RealTek RTL-8139 Fast Ethernet driver for Linux. + + Maintained by Jeff Garzik + Copyright 2000-2002 Jeff Garzik + + Much code comes from Donald Becker's rtl8139.c driver, + versions 1.13 and older. This driver was originally based + on rtl8139.c version 1.07. Header of rtl8139.c version 1.13: + + ---------- + + Written 1997-2001 by Donald Becker. + This software may be used and distributed according to the + terms of the GNU General Public License (GPL), incorporated + herein by reference. Drivers based on or derived from this + code fall under the GPL and must retain the authorship, + copyright and license notice. This file is not a complete + program and may only be used when the entire operating + system is licensed under the GPL. + + This driver is for boards based on the RTL8129 and RTL8139 + PCI ethernet chips. + + The author may be reached as becker@scyld.com, or C/O Scyld + Computing Corporation 410 Severn Ave., Suite 210 Annapolis + MD 21403 + + Support and updates available at + http://www.scyld.com/network/rtl8139.html + + Twister-tuning table provided by Kinston + . 
+
+  ----------
+
+  This software may be used and distributed according to the terms
+  of the GNU General Public License, incorporated herein by reference.
+
+  Contributors:
+
+    Donald Becker - he wrote the original driver, kudos to him!
+    (but please don't e-mail him for support, this isn't his driver)
+
+    Tigran Aivazian - bug fixes, skbuff free cleanup
+
+    Martin Mares - suggestions for PCI cleanup
+
+    David S. Miller - PCI DMA and softnet updates
+
+    Ernst Gill - fixes ported from BSD driver
+
+    Daniel Kobras - identified specific locations of
+      posted MMIO write bugginess
+
+    Gerard Sharp - bug fix, testing and feedback
+
+    David Ford - Rx ring wrap fix
+
+    Dan DeMaggio - swapped RTL8139 cards with me, and allowed me
+    to find and fix a crucial bug on older chipsets.
+
+    Donald Becker/Chris Butterworth/Marcus Westergren -
+    Noticed various Rx packet size-related buglets.
+
+    Santiago Garcia Mantinan - testing and feedback
+
+    Jens David - 2.2.x kernel backports
+
+    Martin Dennett - incredibly helpful insight on undocumented
+    features of the 8139 chips
+
+    Jean-Jacques Michel - bug fix
+
+    Tobias Ringström - Rx interrupt status checking suggestion
+
+    Andrew Morton - Clear blocked signals, avoid
+    buffer overrun setting current->comm.
+
+    Kalle Olavi Niemitalo - Wake-on-LAN ioctls
+
+    Robert Kuebel - Save kernel thread from dying on any signal.
+
+  Submitting bug reports:
+
+    "rtl8139-diag -mmmaaavvveefN" output
+    enable RTL8139_DEBUG below, and look at 'dmesg' or kernel log
+
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "8139too"
+#define DRV_VERSION "0.9.28"
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/completion.h>
+#include <linux/crc32.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/gfp.h>
+#include <linux/if_vlan.h>
+#include <asm/irq.h>
+
+#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION
+
+/* Default Message level */
+#define RTL8139_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
+                                NETIF_MSG_PROBE | \
+                                NETIF_MSG_LINK)
+
+
+/* define to 1, 2 or 3 to enable copious debugging info */
+#define RTL8139_DEBUG 0
+
+/* define to 1 to disable lightweight runtime debugging checks */
+#undef RTL8139_NDEBUG
+
+
+#ifdef RTL8139_NDEBUG
+# define assert(expr) do {} while (0)
+#else
+# define assert(expr) \
+        if (unlikely(!(expr))) { \
+                pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
+                       #expr, __FILE__, __func__, __LINE__); \
+        }
+#endif
+
+
+/* A few user-configurable values. */
+/* media options */
+#define MAX_UNITS 8
+static int media[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Whether to use MMIO or PIO. Default to MMIO. */
+#ifdef CONFIG_8139TOO_PIO
+static bool use_io = true;
+#else
+static bool use_io = false;
+#endif
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+/* bitmapped message enable number */
+static int debug = -1;
+
+/*
+ * Receive ring size
+ * Warning: 64K ring has hardware issues and may lock up.
+ */ +#if defined(CONFIG_SH_DREAMCAST) +#define RX_BUF_IDX 0 /* 8K ring */ +#else +#define RX_BUF_IDX 2 /* 32K ring */ +#endif +#define RX_BUF_LEN (8192 << RX_BUF_IDX) +#define RX_BUF_PAD 16 +#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */ + +#if RX_BUF_LEN == 65536 +#define RX_BUF_TOT_LEN RX_BUF_LEN +#else +#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD) +#endif + +/* Number of Tx descriptor registers. */ +#define NUM_TX_DESC 4 + +/* max supported ethernet frame size -- must be at least (dev->mtu+18+4).*/ +#define MAX_ETH_FRAME_SIZE 1792 + +/* max supported payload size */ +#define MAX_ETH_DATA_SIZE (MAX_ETH_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN) + +/* Size of the Tx bounce buffers -- must be at least (dev->mtu+18+4). */ +#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE +#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC) + +/* PCI Tuning Parameters + Threshold is bytes transferred to chip before transmission starts. */ +#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */ + +/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */ +#define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. */ +#define RX_DMA_BURST 7 /* Maximum PCI burst, '6' is 1024 */ +#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ +#define TX_RETRY 8 /* 0-15. retries = 16 + (TX_RETRY * 16) */ + +/* Operational parameters that usually are not changed. */ +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (6*HZ) + + +enum { + HAS_MII_XCVR = 0x010000, + HAS_CHIP_XCVR = 0x020000, + HAS_LNK_CHNG = 0x040000, +}; + +#define RTL_NUM_STATS 4 /* number of ETHTOOL_GSTATS u64's */ +#define RTL_REGS_VER 1 /* version of reg. data in ETHTOOL_GREGS */ +#define RTL_MIN_IO_SIZE 0x80 +#define RTL8139B_IO_SIZE 256 + +#define RTL8129_CAPS HAS_MII_XCVR +#define RTL8139_CAPS (HAS_CHIP_XCVR|HAS_LNK_CHNG) + +typedef enum { + RTL8139 = 0, + RTL8129, +} board_t; + + +/* indexed by board_t, above */ +static const struct { + const char *name; + u32 hw_flags; +} board_info[] = { + { "RealTek RTL8139", RTL8139_CAPS }, + { "RealTek RTL8129", RTL8129_CAPS }, +}; + + +static const struct pci_device_id rtl8139_pci_tbl[] = { + {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1186, 0x1300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1186, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x13d1, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1259, 0xa117, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1259, 0xa11e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x14ea, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x14ea, 0xab07, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x11db, 0x1234, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1432, 0x9130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x02ac, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x16ec, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + +#ifdef CONFIG_SH_SECUREEDGE5410 + /* Bogus 8139 silicon reports 8129 without external PROM :-( */ + 
{0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, +#endif +#ifdef CONFIG_8139TOO_8129 + {0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8129 }, +#endif + + /* some crazy cards report invalid vendor ids like + * 0x0001 here. The other ids are valid and constant, + * so we simply don't match on the main vendor id. + */ + {PCI_ANY_ID, 0x8139, 0x10ec, 0x8139, 0, 0, RTL8139 }, + {PCI_ANY_ID, 0x8139, 0x1186, 0x1300, 0, 0, RTL8139 }, + {PCI_ANY_ID, 0x8139, 0x13d1, 0xab06, 0, 0, RTL8139 }, + + {0,} +}; +MODULE_DEVICE_TABLE (pci, rtl8139_pci_tbl); + +static struct { + const char str[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "early_rx" }, + { "tx_buf_mapped" }, + { "tx_timeouts" }, + { "rx_lost_in_ring" }, +}; + +/* The rest of these values should never change. */ + +/* Symbolic offsets to registers. */ +enum RTL8139_registers { + MAC0 = 0, /* Ethernet hardware address. */ + MAR0 = 8, /* Multicast filter. */ + TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */ + TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */ + RxBuf = 0x30, + ChipCmd = 0x37, + RxBufPtr = 0x38, + RxBufAddr = 0x3A, + IntrMask = 0x3C, + IntrStatus = 0x3E, + TxConfig = 0x40, + RxConfig = 0x44, + Timer = 0x48, /* A general-purpose counter. */ + RxMissed = 0x4C, /* 24 bits valid, write clears. */ + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + TimerInt = 0x54, + MediaStatus = 0x58, + Config3 = 0x59, + Config4 = 0x5A, /* absent on RTL-8139A */ + HltClk = 0x5B, + MultiIntr = 0x5C, + TxSummary = 0x60, + BasicModeCtrl = 0x62, + BasicModeStatus = 0x64, + NWayAdvert = 0x66, + NWayLPAR = 0x68, + NWayExpansion = 0x6A, + /* Undocumented registers, but required for proper operation. */ + FIFOTMS = 0x70, /* FIFO Control and test. */ + CSCR = 0x74, /* Chip Status and Configuration Register. */ + PARA78 = 0x78, + FlashReg = 0xD4, /* Communication with Flash ROM, four bytes. */ + PARA7c = 0x7c, /* Magic transceiver parameter register. */ + Config5 = 0xD8, /* absent on RTL-8139A */ +}; + +enum ClearBitMasks { + MultiIntrClear = 0xF000, + ChipCmdClear = 0xE2, + Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1), +}; + +enum ChipCmdBits { + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, +}; + +/* Interrupt register bits, using my own meaningful names. */ +enum IntrStatusBits { + PCIErr = 0x8000, + PCSTimeout = 0x4000, + RxFIFOOver = 0x40, + RxUnderrun = 0x20, + RxOverflow = 0x10, + TxErr = 0x08, + TxOK = 0x04, + RxErr = 0x02, + RxOK = 0x01, + + RxAckBits = RxFIFOOver | RxOverflow | RxOK, +}; + +enum TxStatusBits { + TxHostOwns = 0x2000, + TxUnderrun = 0x4000, + TxStatOK = 0x8000, + TxOutOfWindow = 0x20000000, + TxAborted = 0x40000000, + TxCarrierLost = 0x80000000, +}; +enum RxStatusBits { + RxMulticast = 0x8000, + RxPhysical = 0x4000, + RxBroadcast = 0x2000, + RxBadSymbol = 0x0020, + RxRunt = 0x0010, + RxTooLong = 0x0008, + RxCRCErr = 0x0004, + RxBadAlign = 0x0002, + RxStatusOK = 0x0001, +}; + +/* Bits in RxConfig. */ +enum rx_mode_bits { + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +}; + +/* Bits in TxConfig. */ +enum tx_config_bits { + /* Interframe Gap Time. 
Only TxIFG96 doesn't violate IEEE 802.3 */ + TxIFGShift = 24, + TxIFG84 = (0 << TxIFGShift), /* 8.4us / 840ns (10 / 100Mbps) */ + TxIFG88 = (1 << TxIFGShift), /* 8.8us / 880ns (10 / 100Mbps) */ + TxIFG92 = (2 << TxIFGShift), /* 9.2us / 920ns (10 / 100Mbps) */ + TxIFG96 = (3 << TxIFGShift), /* 9.6us / 960ns (10 / 100Mbps) */ + + TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */ + TxCRC = (1 << 16), /* DISABLE Tx pkt CRC append */ + TxClearAbt = (1 << 0), /* Clear abort (WO) */ + TxDMAShift = 8, /* DMA burst value (0-7) is shifted X many bits */ + TxRetryShift = 4, /* TXRR value (0-15) is shifted X many bits */ + + TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */ +}; + +/* Bits in Config1 */ +enum Config1Bits { + Cfg1_PM_Enable = 0x01, + Cfg1_VPD_Enable = 0x02, + Cfg1_PIO = 0x04, + Cfg1_MMIO = 0x08, + LWAKE = 0x10, /* not on 8139, 8139A */ + Cfg1_Driver_Load = 0x20, + Cfg1_LED0 = 0x40, + Cfg1_LED1 = 0x80, + SLEEP = (1 << 1), /* only on 8139, 8139A */ + PWRDN = (1 << 0), /* only on 8139, 8139A */ +}; + +/* Bits in Config3 */ +enum Config3Bits { + Cfg3_FBtBEn = (1 << 0), /* 1 = Fast Back to Back */ + Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */ + Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */ + Cfg3_CardB_En = (1 << 3), /* 1 = enable CardBus registers */ + Cfg3_LinkUp = (1 << 4), /* 1 = wake up on link up */ + Cfg3_Magic = (1 << 5), /* 1 = wake up on Magic Packet (tm) */ + Cfg3_PARM_En = (1 << 6), /* 0 = software can set twister parameters */ + Cfg3_GNTSel = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */ +}; + +/* Bits in Config4 */ +enum Config4Bits { + LWPTN = (1 << 2), /* not on 8139, 8139A */ +}; + +/* Bits in Config5 */ +enum Config5Bits { + Cfg5_PME_STS = (1 << 0), /* 1 = PCI reset resets PME_Status */ + Cfg5_LANWake = (1 << 1), /* 1 = enable LANWake signal */ + Cfg5_LDPS = (1 << 2), /* 0 = save power when link is down */ + Cfg5_FIFOAddrPtr= (1 << 3), /* Realtek internal SRAM testing */ + Cfg5_UWF = (1 << 4), /* 1 = accept unicast wakeup frame */ + Cfg5_MWF = (1 << 5), /* 1 = accept multicast wakeup frame */ + Cfg5_BWF = (1 << 6), /* 1 = accept broadcast wakeup frame */ +}; + +enum RxConfigBits { + /* rx fifo threshold */ + RxCfgFIFOShift = 13, + RxCfgFIFONone = (7 << RxCfgFIFOShift), + + /* Max DMA burst */ + RxCfgDMAShift = 8, + RxCfgDMAUnlimited = (7 << RxCfgDMAShift), + + /* rx ring buffer length */ + RxCfgRcv8K = 0, + RxCfgRcv16K = (1 << 11), + RxCfgRcv32K = (1 << 12), + RxCfgRcv64K = (1 << 11) | (1 << 12), + + /* Disable packet wrap at end of Rx buffer. (not possible with 64k) */ + RxNoWrap = (1 << 7), +}; + +/* Twister tuning parameters from RealTek. + Completely undocumented, but required to tune bad links on some boards. 
*/ +enum CSCRBits { + CSCR_LinkOKBit = 0x0400, + CSCR_LinkChangeBit = 0x0800, + CSCR_LinkStatusBits = 0x0f000, + CSCR_LinkDownOffCmd = 0x003c0, + CSCR_LinkDownCmd = 0x0f3c0, +}; + +enum Cfg9346Bits { + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xC0, +}; + +typedef enum { + CH_8139 = 0, + CH_8139_K, + CH_8139A, + CH_8139A_G, + CH_8139B, + CH_8130, + CH_8139C, + CH_8100, + CH_8100B_8139D, + CH_8101, +} chip_t; + +enum chip_flags { + HasHltClk = (1 << 0), + HasLWake = (1 << 1), +}; + +#define HW_REVID(b30, b29, b28, b27, b26, b23, b22) \ + (b30<<30 | b29<<29 | b28<<28 | b27<<27 | b26<<26 | b23<<23 | b22<<22) +#define HW_REVID_MASK HW_REVID(1, 1, 1, 1, 1, 1, 1) + +/* directly indexed by chip_t, above */ +static const struct { + const char *name; + u32 version; /* from RTL8139C/RTL8139D docs */ + u32 flags; +} rtl_chip_info[] = { + { "RTL-8139", + HW_REVID(1, 0, 0, 0, 0, 0, 0), + HasHltClk, + }, + + { "RTL-8139 rev K", + HW_REVID(1, 1, 0, 0, 0, 0, 0), + HasHltClk, + }, + + { "RTL-8139A", + HW_REVID(1, 1, 1, 0, 0, 0, 0), + HasHltClk, /* XXX undocumented? */ + }, + + { "RTL-8139A rev G", + HW_REVID(1, 1, 1, 0, 0, 1, 0), + HasHltClk, /* XXX undocumented? */ + }, + + { "RTL-8139B", + HW_REVID(1, 1, 1, 1, 0, 0, 0), + HasLWake, + }, + + { "RTL-8130", + HW_REVID(1, 1, 1, 1, 1, 0, 0), + HasLWake, + }, + + { "RTL-8139C", + HW_REVID(1, 1, 1, 0, 1, 0, 0), + HasLWake, + }, + + { "RTL-8100", + HW_REVID(1, 1, 1, 1, 0, 1, 0), + HasLWake, + }, + + { "RTL-8100B/8139D", + HW_REVID(1, 1, 1, 0, 1, 0, 1), + HasHltClk /* XXX undocumented? */ + | HasLWake, + }, + + { "RTL-8101", + HW_REVID(1, 1, 1, 0, 1, 1, 1), + HasLWake, + }, +}; + +struct rtl_extra_stats { + unsigned long early_rx; + unsigned long tx_buf_mapped; + unsigned long tx_timeouts; + unsigned long rx_lost_in_ring; +}; + +struct rtl8139_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + +struct rtl8139_private { + void __iomem *mmio_addr; + int drv_flags; + struct pci_dev *pci_dev; + u32 msg_enable; + struct napi_struct napi; + struct net_device *dev; + + unsigned char *rx_ring; + unsigned int cur_rx; /* RX buf index of next pkt */ + struct rtl8139_stats rx_stats; + dma_addr_t rx_ring_dma; + + unsigned int tx_flag; + unsigned long cur_tx; + unsigned long dirty_tx; + struct rtl8139_stats tx_stats; + unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */ + unsigned char *tx_bufs; /* Tx bounce buffer region. */ + dma_addr_t tx_bufs_dma; + + signed char phys[4]; /* MII device addresses. */ + + /* Twister tune state. */ + char twistie, twist_row, twist_col; + + unsigned int watchdog_fired : 1; + unsigned int default_port : 4; /* Last dev->if_port value. */ + unsigned int have_thread : 1; + + spinlock_t lock; + spinlock_t rx_lock; + + chip_t chipset; + u32 rx_config; + struct rtl_extra_stats xstats; + + struct delayed_work thread; + + struct mii_if_info mii; + unsigned int regs_len; + unsigned long fifo_copy_timeout; +}; + +MODULE_AUTHOR ("Jeff Garzik "); +MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +module_param(use_io, bool, 0); +MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 
0=MMIO 1=PIO"); +module_param(multicast_filter_limit, int, 0); +module_param_array(media, int, NULL, 0); +module_param_array(full_duplex, int, NULL, 0); +module_param(debug, int, 0); +MODULE_PARM_DESC (debug, "8139too bitmapped message enable number"); +MODULE_PARM_DESC (multicast_filter_limit, "8139too maximum number of filtered multicast addresses"); +MODULE_PARM_DESC (media, "8139too: Bits 4+9: force full duplex, bit 5: 100Mbps"); +MODULE_PARM_DESC (full_duplex, "8139too: Force full duplex for board(s) (1)"); + +static int read_eeprom (void __iomem *ioaddr, int location, int addr_len); +static int rtl8139_open (struct net_device *dev); +static int mdio_read (struct net_device *dev, int phy_id, int location); +static void mdio_write (struct net_device *dev, int phy_id, int location, + int val); +static void rtl8139_start_thread(struct rtl8139_private *tp); +static void rtl8139_tx_timeout (struct net_device *dev, unsigned int txqueue); +static void rtl8139_init_ring (struct net_device *dev); +static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb, + struct net_device *dev); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8139_poll_controller(struct net_device *dev); +#endif +static int rtl8139_set_mac_address(struct net_device *dev, void *p); +static int rtl8139_poll(struct napi_struct *napi, int budget); +static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance); +static int rtl8139_close (struct net_device *dev); +static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); +static void rtl8139_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); +static void rtl8139_set_rx_mode (struct net_device *dev); +static void __set_rx_mode (struct net_device *dev); +static void rtl8139_hw_start (struct net_device *dev); +static void rtl8139_thread (struct work_struct *work); +static void rtl8139_tx_timeout_task(struct work_struct *work); +static const struct ethtool_ops rtl8139_ethtool_ops; + +/* write MMIO register, with flush */ +/* Flush avoids rtl8139 bug w/ posted MMIO writes */ +#define RTL_W8_F(reg, val8) do { iowrite8 ((val8), ioaddr + (reg)); ioread8 (ioaddr + (reg)); } while (0) +#define RTL_W16_F(reg, val16) do { iowrite16 ((val16), ioaddr + (reg)); ioread16 (ioaddr + (reg)); } while (0) +#define RTL_W32_F(reg, val32) do { iowrite32 ((val32), ioaddr + (reg)); ioread32 (ioaddr + (reg)); } while (0) + +/* write MMIO register */ +#define RTL_W8(reg, val8) iowrite8 ((val8), ioaddr + (reg)) +#define RTL_W16(reg, val16) iowrite16 ((val16), ioaddr + (reg)) +#define RTL_W32(reg, val32) iowrite32 ((val32), ioaddr + (reg)) + +/* read MMIO register */ +#define RTL_R8(reg) ioread8 (ioaddr + (reg)) +#define RTL_R16(reg) ioread16 (ioaddr + (reg)) +#define RTL_R32(reg) ioread32 (ioaddr + (reg)) + + +static const u16 rtl8139_intr_mask = + PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver | + TxErr | TxOK | RxErr | RxOK; + +static const u16 rtl8139_norx_intr_mask = + PCIErr | PCSTimeout | RxUnderrun | + TxErr | TxOK | RxErr ; + +#if RX_BUF_IDX == 0 +static const unsigned int rtl8139_rx_config = + RxCfgRcv8K | RxNoWrap | + (RX_FIFO_THRESH << RxCfgFIFOShift) | + (RX_DMA_BURST << RxCfgDMAShift); +#elif RX_BUF_IDX == 1 +static const unsigned int rtl8139_rx_config = + RxCfgRcv16K | RxNoWrap | + (RX_FIFO_THRESH << RxCfgFIFOShift) | + (RX_DMA_BURST << RxCfgDMAShift); +#elif RX_BUF_IDX == 2 +static const unsigned int rtl8139_rx_config = + RxCfgRcv32K | RxNoWrap | + (RX_FIFO_THRESH << RxCfgFIFOShift) | + (RX_DMA_BURST << RxCfgDMAShift); +#elif 
RX_BUF_IDX == 3 +static const unsigned int rtl8139_rx_config = + RxCfgRcv64K | + (RX_FIFO_THRESH << RxCfgFIFOShift) | + (RX_DMA_BURST << RxCfgDMAShift); +#else +#error "Invalid configuration for 8139_RXBUF_IDX" +#endif + +static const unsigned int rtl8139_tx_config = + TxIFG96 | (TX_DMA_BURST << TxDMAShift) | (TX_RETRY << TxRetryShift); + +static void __rtl8139_cleanup_dev (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + struct pci_dev *pdev; + + assert (dev != NULL); + assert (tp->pci_dev != NULL); + pdev = tp->pci_dev; + + if (tp->mmio_addr) + pci_iounmap (pdev, tp->mmio_addr); + + /* it's ok to call this even if we have no regions to free */ + pci_release_regions (pdev); + + free_netdev(dev); +} + + +static void rtl8139_chip_reset (void __iomem *ioaddr) +{ + int i; + + /* Soft reset the chip. */ + RTL_W8 (ChipCmd, CmdReset); + + /* Check that the chip has finished the reset. */ + for (i = 1000; i > 0; i--) { + barrier(); + if ((RTL_R8 (ChipCmd) & CmdReset) == 0) + break; + udelay (10); + } +} + + +static struct net_device *rtl8139_init_board(struct pci_dev *pdev) +{ + struct device *d = &pdev->dev; + void __iomem *ioaddr; + struct net_device *dev; + struct rtl8139_private *tp; + u8 tmp8; + int rc, disable_dev_on_err = 0; + unsigned int i, bar; + unsigned long io_len; + u32 version; + static const struct { + unsigned long mask; + char *type; + } res[] = { + { IORESOURCE_IO, "PIO" }, + { IORESOURCE_MEM, "MMIO" } + }; + + assert (pdev != NULL); + + /* dev and priv zeroed in alloc_etherdev */ + dev = alloc_etherdev (sizeof (*tp)); + if (dev == NULL) + return ERR_PTR(-ENOMEM); + + SET_NETDEV_DEV(dev, &pdev->dev); + + tp = netdev_priv(dev); + tp->pci_dev = pdev; + + /* enable device (incl. PCI PM wakeup and hotplug setup) */ + rc = pci_enable_device (pdev); + if (rc) + goto err_out; + + disable_dev_on_err = 1; + rc = pci_request_regions (pdev, DRV_NAME); + if (rc) + goto err_out; + + pci_set_master (pdev); + + u64_stats_init(&tp->rx_stats.syncp); + u64_stats_init(&tp->tx_stats.syncp); + +retry: + /* PIO bar register comes first. */ + bar = !use_io; + + io_len = pci_resource_len(pdev, bar); + + dev_dbg(d, "%s region size = 0x%02lX\n", res[bar].type, io_len); + + if (!(pci_resource_flags(pdev, bar) & res[bar].mask)) { + dev_err(d, "region #%d not a %s resource, aborting\n", bar, + res[bar].type); + rc = -ENODEV; + goto err_out; + } + if (io_len < RTL_MIN_IO_SIZE) { + dev_err(d, "Invalid PCI %s region size(s), aborting\n", + res[bar].type); + rc = -ENODEV; + goto err_out; + } + + ioaddr = pci_iomap(pdev, bar, 0); + if (!ioaddr) { + dev_err(d, "cannot map %s\n", res[bar].type); + if (!use_io) { + use_io = true; + goto retry; + } + rc = -ENODEV; + goto err_out; + } + tp->regs_len = io_len; + tp->mmio_addr = ioaddr; + + /* Bring old chips out of low-power mode. 
*/ + RTL_W8 (HltClk, 'R'); + + /* check for missing/broken hardware */ + if (RTL_R32 (TxConfig) == 0xFFFFFFFF) { + dev_err(&pdev->dev, "Chip not responding, ignoring board\n"); + rc = -EIO; + goto err_out; + } + + /* identify chip attached to board */ + version = RTL_R32 (TxConfig) & HW_REVID_MASK; + for (i = 0; i < ARRAY_SIZE (rtl_chip_info); i++) + if (version == rtl_chip_info[i].version) { + tp->chipset = i; + goto match; + } + + /* if unknown chip, assume array element #0, original RTL-8139 in this case */ + i = 0; + dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n"); + dev_dbg(&pdev->dev, "TxConfig = 0x%x\n", RTL_R32 (TxConfig)); + tp->chipset = 0; + +match: + pr_debug("chipset id (%d) == index %d, '%s'\n", + version, i, rtl_chip_info[i].name); + + if (tp->chipset >= CH_8139B) { + u8 new_tmp8 = tmp8 = RTL_R8 (Config1); + pr_debug("PCI PM wakeup\n"); + if ((rtl_chip_info[tp->chipset].flags & HasLWake) && + (tmp8 & LWAKE)) + new_tmp8 &= ~LWAKE; + new_tmp8 |= Cfg1_PM_Enable; + if (new_tmp8 != tmp8) { + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W8 (Config1, tmp8); + RTL_W8 (Cfg9346, Cfg9346_Lock); + } + if (rtl_chip_info[tp->chipset].flags & HasLWake) { + tmp8 = RTL_R8 (Config4); + if (tmp8 & LWPTN) { + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W8 (Config4, tmp8 & ~LWPTN); + RTL_W8 (Cfg9346, Cfg9346_Lock); + } + } + } else { + pr_debug("Old chip wakeup\n"); + tmp8 = RTL_R8 (Config1); + tmp8 &= ~(SLEEP | PWRDN); + RTL_W8 (Config1, tmp8); + } + + rtl8139_chip_reset (ioaddr); + + return dev; + +err_out: + __rtl8139_cleanup_dev (dev); + if (disable_dev_on_err) + pci_disable_device (pdev); + return ERR_PTR(rc); +} + +static int rtl8139_set_features(struct net_device *dev, netdev_features_t features) +{ + struct rtl8139_private *tp = netdev_priv(dev); + unsigned long flags; + netdev_features_t changed = features ^ dev->features; + void __iomem *ioaddr = tp->mmio_addr; + + if (!(changed & (NETIF_F_RXALL))) + return 0; + + spin_lock_irqsave(&tp->lock, flags); + + if (changed & NETIF_F_RXALL) { + int rx_mode = tp->rx_config; + if (features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + else + rx_mode &= ~(AcceptErr | AcceptRunt); + tp->rx_config = rtl8139_rx_config | rx_mode; + RTL_W32_F(RxConfig, tp->rx_config); + } + + spin_unlock_irqrestore(&tp->lock, flags); + + return 0; +} + +static const struct net_device_ops rtl8139_netdev_ops = { + .ndo_open = rtl8139_open, + .ndo_stop = rtl8139_close, + .ndo_get_stats64 = rtl8139_get_stats64, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = rtl8139_set_mac_address, + .ndo_start_xmit = rtl8139_start_xmit, + .ndo_set_rx_mode = rtl8139_set_rx_mode, + .ndo_eth_ioctl = netdev_ioctl, + .ndo_tx_timeout = rtl8139_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8139_poll_controller, +#endif + .ndo_set_features = rtl8139_set_features, +}; + +static int rtl8139_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev = NULL; + struct rtl8139_private *tp; + __le16 addr[ETH_ALEN / 2]; + int i, addr_len, option; + void __iomem *ioaddr; + static int board_idx = -1; + + assert (pdev != NULL); + assert (ent != NULL); + + board_idx++; + + /* when we're built into the kernel, the driver version message + * is only printed if at least one 8139 board has been found + */ +#ifndef MODULE + { + static int printed_version; + if (!printed_version++) + pr_info(RTL8139_DRIVER_NAME "\n"); + } +#endif + + if (pdev->vendor == PCI_VENDOR_ID_REALTEK && + pdev->device == 
PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) { + dev_info(&pdev->dev, + "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip, use 8139cp\n", + pdev->vendor, pdev->device, pdev->revision); + return -ENODEV; + } + + if (pdev->vendor == PCI_VENDOR_ID_REALTEK && + pdev->device == PCI_DEVICE_ID_REALTEK_8139 && + pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS && + pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) { + pr_info("OQO Model 2 detected. Forcing PIO\n"); + use_io = true; + } + + dev = rtl8139_init_board (pdev); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + assert (dev != NULL); + tp = netdev_priv(dev); + tp->dev = dev; + + ioaddr = tp->mmio_addr; + assert (ioaddr != NULL); + + addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6; + for (i = 0; i < 3; i++) + addr[i] = cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len)); + eth_hw_addr_set(dev, (u8 *)addr); + + /* The Rtl8139-specific entries in the device structure. */ + dev->netdev_ops = &rtl8139_netdev_ops; + dev->ethtool_ops = &rtl8139_ethtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + netif_napi_add(dev, &tp->napi, rtl8139_poll); + + /* note: the hardware is not capable of sg/csum/highdma, however + * through the use of skb_copy_and_csum_dev we enable these + * features + */ + dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA; + dev->vlan_features = dev->features; + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + /* MTU range: 68 - 1770 */ + dev->min_mtu = ETH_MIN_MTU; + dev->max_mtu = MAX_ETH_DATA_SIZE; + + /* tp zeroed and aligned in alloc_etherdev */ + tp = netdev_priv(dev); + + /* note: tp->chipset set in rtl8139_init_board */ + tp->drv_flags = board_info[ent->driver_data].hw_flags; + tp->mmio_addr = ioaddr; + tp->msg_enable = + (debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1)); + spin_lock_init (&tp->lock); + spin_lock_init (&tp->rx_lock); + INIT_DELAYED_WORK(&tp->thread, rtl8139_thread); + tp->mii.dev = dev; + tp->mii.mdio_read = mdio_read; + tp->mii.mdio_write = mdio_write; + tp->mii.phy_id_mask = 0x3f; + tp->mii.reg_num_mask = 0x1f; + + /* dev is fully set up and ready to use now */ + pr_debug("about to register device named %s (%p)...\n", + dev->name, dev); + i = register_netdev (dev); + if (i) goto err_out; + + pci_set_drvdata (pdev, dev); + + netdev_info(dev, "%s at 0x%p, %pM, IRQ %d\n", + board_info[ent->driver_data].name, + ioaddr, dev->dev_addr, pdev->irq); + + netdev_dbg(dev, "Identified 8139 chip type '%s'\n", + rtl_chip_info[tp->chipset].name); + + /* Find the connected MII xcvrs. + Doing this in open() would allow detecting external xcvrs later, but + takes too much time. */ +#ifdef CONFIG_8139TOO_8129 + if (tp->drv_flags & HAS_MII_XCVR) { + int phy, phy_idx = 0; + for (phy = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) { + int mii_status = mdio_read(dev, phy, 1); + if (mii_status != 0xffff && mii_status != 0x0000) { + u16 advertising = mdio_read(dev, phy, 4); + tp->phys[phy_idx++] = phy; + netdev_info(dev, "MII transceiver %d status 0x%04x advertising %04x\n", + phy, mii_status, advertising); + } + } + if (phy_idx == 0) { + netdev_info(dev, "No MII transceivers found! Assuming SYM transceiver\n"); + tp->phys[0] = 32; + } + } else +#endif + tp->phys[0] = 32; + tp->mii.phy_id = tp->phys[0]; + + /* The lower four bits are the media type. */ + option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx]; + if (option > 0) { + tp->mii.full_duplex = (option & 0x210) ? 
1 : 0; + tp->default_port = option & 0xFF; + if (tp->default_port) + tp->mii.force_media = 1; + } + if (board_idx < MAX_UNITS && full_duplex[board_idx] > 0) + tp->mii.full_duplex = full_duplex[board_idx]; + if (tp->mii.full_duplex) { + netdev_info(dev, "Media type forced to Full Duplex\n"); + /* Changing the MII-advertised media because might prevent + re-connection. */ + tp->mii.force_media = 1; + } + if (tp->default_port) { + netdev_info(dev, " Forcing %dMbps %s-duplex operation\n", + (option & 0x20 ? 100 : 10), + (option & 0x10 ? "full" : "half")); + mdio_write(dev, tp->phys[0], 0, + ((option & 0x20) ? 0x2000 : 0) | /* 100Mbps? */ + ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */ + } + + /* Put the chip into low-power mode. */ + if (rtl_chip_info[tp->chipset].flags & HasHltClk) + RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */ + + return 0; + +err_out: + __rtl8139_cleanup_dev (dev); + pci_disable_device (pdev); + return i; +} + + +static void rtl8139_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata (pdev); + struct rtl8139_private *tp = netdev_priv(dev); + + assert (dev != NULL); + + cancel_delayed_work_sync(&tp->thread); + + unregister_netdev (dev); + + __rtl8139_cleanup_dev (dev); + pci_disable_device (pdev); +} + + +/* Serial EEPROM section. */ + +/* EEPROM_Ctrl bits. */ +#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */ +#define EE_CS 0x08 /* EEPROM chip select. */ +#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */ +#define EE_WRITE_0 0x00 +#define EE_WRITE_1 0x02 +#define EE_DATA_READ 0x01 /* EEPROM chip data out. */ +#define EE_ENB (0x80 | EE_CS) + +/* Delay between EEPROM clock transitions. + No extra delay is needed with 33Mhz PCI, but 66Mhz may change this. + */ + +#define eeprom_delay() (void)RTL_R8(Cfg9346) + +/* The EEPROM commands include the alway-set leading bit. */ +#define EE_WRITE_CMD (5) +#define EE_READ_CMD (6) +#define EE_ERASE_CMD (7) + +static int read_eeprom(void __iomem *ioaddr, int location, int addr_len) +{ + int i; + unsigned retval = 0; + int read_cmd = location | (EE_READ_CMD << addr_len); + + RTL_W8 (Cfg9346, EE_ENB & ~EE_CS); + RTL_W8 (Cfg9346, EE_ENB); + eeprom_delay (); + + /* Shift the read command bits out. */ + for (i = 4 + addr_len; i >= 0; i--) { + int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0; + RTL_W8 (Cfg9346, EE_ENB | dataval); + eeprom_delay (); + RTL_W8 (Cfg9346, EE_ENB | dataval | EE_SHIFT_CLK); + eeprom_delay (); + } + RTL_W8 (Cfg9346, EE_ENB); + eeprom_delay (); + + for (i = 16; i > 0; i--) { + RTL_W8 (Cfg9346, EE_ENB | EE_SHIFT_CLK); + eeprom_delay (); + retval = + (retval << 1) | ((RTL_R8 (Cfg9346) & EE_DATA_READ) ? 1 : + 0); + RTL_W8 (Cfg9346, EE_ENB); + eeprom_delay (); + } + + /* Terminate the EEPROM access. */ + RTL_W8(Cfg9346, 0); + eeprom_delay (); + + return retval; +} + +/* MII serial management: mostly bogus for now. */ +/* Read and write the MII management registers using software-generated + serial MDIO protocol. + The maximum data clock rate is 2.5 Mhz. The minimum timing is usually + met by back-to-back PCI I/O cycles, but we insert a delay to avoid + "overclocking" issues. 
*/ +#define MDIO_DIR 0x80 +#define MDIO_DATA_OUT 0x04 +#define MDIO_DATA_IN 0x02 +#define MDIO_CLK 0x01 +#define MDIO_WRITE0 (MDIO_DIR) +#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT) + +#define mdio_delay() RTL_R8(Config4) + + +static const char mii_2_8139_map[8] = { + BasicModeCtrl, + BasicModeStatus, + 0, + 0, + NWayAdvert, + NWayLPAR, + NWayExpansion, + 0 +}; + + +#ifdef CONFIG_8139TOO_8129 +/* Syncronize the MII management interface by shifting 32 one bits out. */ +static void mdio_sync (void __iomem *ioaddr) +{ + int i; + + for (i = 32; i >= 0; i--) { + RTL_W8 (Config4, MDIO_WRITE1); + mdio_delay (); + RTL_W8 (Config4, MDIO_WRITE1 | MDIO_CLK); + mdio_delay (); + } +} +#endif + +static int mdio_read (struct net_device *dev, int phy_id, int location) +{ + struct rtl8139_private *tp = netdev_priv(dev); + int retval = 0; +#ifdef CONFIG_8139TOO_8129 + void __iomem *ioaddr = tp->mmio_addr; + int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; + int i; +#endif + + if (phy_id > 31) { /* Really a 8139. Use internal registers. */ + void __iomem *ioaddr = tp->mmio_addr; + return location < 8 && mii_2_8139_map[location] ? + RTL_R16 (mii_2_8139_map[location]) : 0; + } + +#ifdef CONFIG_8139TOO_8129 + mdio_sync (ioaddr); + /* Shift the read command bits out. */ + for (i = 15; i >= 0; i--) { + int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0; + + RTL_W8 (Config4, MDIO_DIR | dataval); + mdio_delay (); + RTL_W8 (Config4, MDIO_DIR | dataval | MDIO_CLK); + mdio_delay (); + } + + /* Read the two transition, 16 data, and wire-idle bits. */ + for (i = 19; i > 0; i--) { + RTL_W8 (Config4, 0); + mdio_delay (); + retval = (retval << 1) | ((RTL_R8 (Config4) & MDIO_DATA_IN) ? 1 : 0); + RTL_W8 (Config4, MDIO_CLK); + mdio_delay (); + } +#endif + + return (retval >> 1) & 0xffff; +} + + +static void mdio_write (struct net_device *dev, int phy_id, int location, + int value) +{ + struct rtl8139_private *tp = netdev_priv(dev); +#ifdef CONFIG_8139TOO_8129 + void __iomem *ioaddr = tp->mmio_addr; + int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value; + int i; +#endif + + if (phy_id > 31) { /* Really a 8139. Use internal registers. */ + void __iomem *ioaddr = tp->mmio_addr; + if (location == 0) { + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W16 (BasicModeCtrl, value); + RTL_W8 (Cfg9346, Cfg9346_Lock); + } else if (location < 8 && mii_2_8139_map[location]) + RTL_W16 (mii_2_8139_map[location], value); + return; + } + +#ifdef CONFIG_8139TOO_8129 + mdio_sync (ioaddr); + + /* Shift the command bits out. */ + for (i = 31; i >= 0; i--) { + int dataval = + (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; + RTL_W8 (Config4, dataval); + mdio_delay (); + RTL_W8 (Config4, dataval | MDIO_CLK); + mdio_delay (); + } + /* Clear out extra bits. 
*/ + for (i = 2; i > 0; i--) { + RTL_W8 (Config4, 0); + mdio_delay (); + RTL_W8 (Config4, MDIO_CLK); + mdio_delay (); + } +#endif +} + + +static int rtl8139_open (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + const int irq = tp->pci_dev->irq; + int retval; + + retval = request_irq(irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev); + if (retval) + return retval; + + tp->tx_bufs = dma_alloc_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, + &tp->tx_bufs_dma, GFP_KERNEL); + tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, + &tp->rx_ring_dma, GFP_KERNEL); + if (tp->tx_bufs == NULL || tp->rx_ring == NULL) { + free_irq(irq, dev); + + if (tp->tx_bufs) + dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, + tp->tx_bufs, tp->tx_bufs_dma); + if (tp->rx_ring) + dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, + tp->rx_ring, tp->rx_ring_dma); + + return -ENOMEM; + + } + + napi_enable(&tp->napi); + + tp->mii.full_duplex = tp->mii.force_media; + tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000; + + rtl8139_init_ring (dev); + rtl8139_hw_start (dev); + netif_start_queue (dev); + + netif_dbg(tp, ifup, dev, + "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n", + __func__, + (unsigned long long)pci_resource_start (tp->pci_dev, 1), + irq, RTL_R8 (MediaStatus), + tp->mii.full_duplex ? "full" : "half"); + + rtl8139_start_thread(tp); + + return 0; +} + + +static void rtl_check_media (struct net_device *dev, unsigned int init_media) +{ + struct rtl8139_private *tp = netdev_priv(dev); + + if (tp->phys[0] >= 0) { + mii_check_media(&tp->mii, netif_msg_link(tp), init_media); + } +} + +/* Start the hardware at open or resume. */ +static void rtl8139_hw_start (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 i; + u8 tmp; + + /* Bring old chips out of low-power mode. */ + if (rtl_chip_info[tp->chipset].flags & HasHltClk) + RTL_W8 (HltClk, 'R'); + + rtl8139_chip_reset (ioaddr); + + /* unlock Config[01234] and BMCR register writes */ + RTL_W8_F (Cfg9346, Cfg9346_Unlock); + /* Restore our idea of the MAC address. */ + RTL_W32_F (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); + RTL_W32_F (MAC0 + 4, le16_to_cpu (*(__le16 *) (dev->dev_addr + 4))); + + tp->cur_rx = 0; + + /* init Rx ring buffer DMA address */ + RTL_W32_F (RxBuf, tp->rx_ring_dma); + + /* Must enable Tx/Rx before setting transfer thresholds! */ + RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); + + tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys; + RTL_W32 (RxConfig, tp->rx_config); + RTL_W32 (TxConfig, rtl8139_tx_config); + + rtl_check_media (dev, 1); + + if (tp->chipset >= CH_8139B) { + /* Disable magic packet scanning, which is enabled + * when PM is enabled in Config1. It can be reenabled + * via ETHTOOL_SWOL if desired. 
*/ + RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic); + } + + netdev_dbg(dev, "init buffer addresses\n"); + + /* Lock Config[01234] and BMCR register writes */ + RTL_W8 (Cfg9346, Cfg9346_Lock); + + /* init Tx buffer DMA addresses */ + for (i = 0; i < NUM_TX_DESC; i++) + RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs)); + + RTL_W32 (RxMissed, 0); + + rtl8139_set_rx_mode (dev); + + /* no early-rx interrupts */ + RTL_W16 (MultiIntr, RTL_R16 (MultiIntr) & MultiIntrClear); + + /* make sure RxTx has started */ + tmp = RTL_R8 (ChipCmd); + if ((!(tmp & CmdRxEnb)) || (!(tmp & CmdTxEnb))) + RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); + + /* Enable all known interrupts by setting the interrupt mask. */ + RTL_W16 (IntrMask, rtl8139_intr_mask); +} + + +/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ +static void rtl8139_init_ring (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + int i; + + tp->cur_rx = 0; + tp->cur_tx = 0; + tp->dirty_tx = 0; + + for (i = 0; i < NUM_TX_DESC; i++) + tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE]; +} + + +/* This must be global for CONFIG_8139TOO_TUNE_TWISTER case */ +static int next_tick = 3 * HZ; + +#ifndef CONFIG_8139TOO_TUNE_TWISTER +static inline void rtl8139_tune_twister (struct net_device *dev, + struct rtl8139_private *tp) {} +#else +enum TwisterParamVals { + PARA78_default = 0x78fa8388, + PARA7c_default = 0xcb38de43, /* param[0][3] */ + PARA7c_xxx = 0xcb38de43, +}; + +static const unsigned long param[4][4] = { + {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43}, + {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83}, + {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83}, + {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83} +}; + +static void rtl8139_tune_twister (struct net_device *dev, + struct rtl8139_private *tp) +{ + int linkcase; + void __iomem *ioaddr = tp->mmio_addr; + + /* This is a complicated state machine to configure the "twister" for + impedance/echos based on the cable length. + All of this is magic and undocumented. + */ + switch (tp->twistie) { + case 1: + if (RTL_R16 (CSCR) & CSCR_LinkOKBit) { + /* We have link beat, let us tune the twister. */ + RTL_W16 (CSCR, CSCR_LinkDownOffCmd); + tp->twistie = 2; /* Change to state 2. */ + next_tick = HZ / 10; + } else { + /* Just put in some reasonable defaults for when beat returns. */ + RTL_W16 (CSCR, CSCR_LinkDownCmd); + RTL_W32 (FIFOTMS, 0x20); /* Turn on cable test mode. */ + RTL_W32 (PARA78, PARA78_default); + RTL_W32 (PARA7c, PARA7c_default); + tp->twistie = 0; /* Bail from future actions. */ + } + break; + case 2: + /* Read how long it took to hear the echo. */ + linkcase = RTL_R16 (CSCR) & CSCR_LinkStatusBits; + if (linkcase == 0x7000) + tp->twist_row = 3; + else if (linkcase == 0x3000) + tp->twist_row = 2; + else if (linkcase == 0x1000) + tp->twist_row = 1; + else + tp->twist_row = 0; + tp->twist_col = 0; + tp->twistie = 3; /* Change to state 2. */ + next_tick = HZ / 10; + break; + case 3: + /* Put out four tuning parameters, one per 100msec. */ + if (tp->twist_col == 0) + RTL_W16 (FIFOTMS, 0); + RTL_W32 (PARA7c, param[(int) tp->twist_row] + [(int) tp->twist_col]); + next_tick = HZ / 10; + if (++tp->twist_col >= 4) { + /* For short cables we are done. + For long cables (row == 3) check for mistune. */ + tp->twistie = + (tp->twist_row == 3) ? 4 : 0; + } + break; + case 4: + /* Special case for long cables: check for mistune. 
*/ + if ((RTL_R16 (CSCR) & + CSCR_LinkStatusBits) == 0x7000) { + tp->twistie = 0; + break; + } else { + RTL_W32 (PARA7c, 0xfb38de03); + tp->twistie = 5; + next_tick = HZ / 10; + } + break; + case 5: + /* Retune for shorter cable (column 2). */ + RTL_W32 (FIFOTMS, 0x20); + RTL_W32 (PARA78, PARA78_default); + RTL_W32 (PARA7c, PARA7c_default); + RTL_W32 (FIFOTMS, 0x00); + tp->twist_row = 2; + tp->twist_col = 0; + tp->twistie = 3; + next_tick = HZ / 10; + break; + + default: + /* do nothing */ + break; + } +} +#endif /* CONFIG_8139TOO_TUNE_TWISTER */ + +static inline void rtl8139_thread_iter (struct net_device *dev, + struct rtl8139_private *tp, + void __iomem *ioaddr) +{ + int mii_lpa; + + mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA); + + if (!tp->mii.force_media && mii_lpa != 0xffff) { + int duplex = ((mii_lpa & LPA_100FULL) || + (mii_lpa & 0x01C0) == 0x0040); + if (tp->mii.full_duplex != duplex) { + tp->mii.full_duplex = duplex; + + if (mii_lpa) { + netdev_info(dev, "Setting %s-duplex based on MII #%d link partner ability of %04x\n", + tp->mii.full_duplex ? "full" : "half", + tp->phys[0], mii_lpa); + } else { + netdev_info(dev, "media is unconnected, link down, or incompatible connection\n"); + } +#if 0 + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W8 (Config1, tp->mii.full_duplex ? 0x60 : 0x20); + RTL_W8 (Cfg9346, Cfg9346_Lock); +#endif + } + } + + next_tick = HZ * 60; + + rtl8139_tune_twister (dev, tp); + + netdev_dbg(dev, "Media selection tick, Link partner %04x\n", + RTL_R16(NWayLPAR)); + netdev_dbg(dev, "Other registers are IntMask %04x IntStatus %04x\n", + RTL_R16(IntrMask), RTL_R16(IntrStatus)); + netdev_dbg(dev, "Chip config %02x %02x\n", + RTL_R8(Config0), RTL_R8(Config1)); +} + +static void rtl8139_thread (struct work_struct *work) +{ + struct rtl8139_private *tp = + container_of(work, struct rtl8139_private, thread.work); + struct net_device *dev = tp->mii.dev; + unsigned long thr_delay = next_tick; + + rtnl_lock(); + + if (!netif_running(dev)) + goto out_unlock; + + if (tp->watchdog_fired) { + tp->watchdog_fired = 0; + rtl8139_tx_timeout_task(work); + } else + rtl8139_thread_iter(dev, tp, tp->mmio_addr); + + if (tp->have_thread) + schedule_delayed_work(&tp->thread, thr_delay); +out_unlock: + rtnl_unlock (); +} + +static void rtl8139_start_thread(struct rtl8139_private *tp) +{ + tp->twistie = 0; + if (tp->chipset == CH_8139_K) + tp->twistie = 1; + else if (tp->drv_flags & HAS_LNK_CHNG) + return; + + tp->have_thread = 1; + tp->watchdog_fired = 0; + + schedule_delayed_work(&tp->thread, next_tick); +} + +static inline void rtl8139_tx_clear (struct rtl8139_private *tp) +{ + tp->cur_tx = 0; + tp->dirty_tx = 0; + + /* XXX account for unsent Tx packets in tp->stats.tx_dropped */ +} + +static void rtl8139_tx_timeout_task (struct work_struct *work) +{ + struct rtl8139_private *tp = + container_of(work, struct rtl8139_private, thread.work); + struct net_device *dev = tp->mii.dev; + void __iomem *ioaddr = tp->mmio_addr; + int i; + u8 tmp8; + + napi_disable(&tp->napi); + netif_stop_queue(dev); + synchronize_rcu(); + + netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n", + RTL_R8(ChipCmd), RTL_R16(IntrStatus), + RTL_R16(IntrMask), RTL_R8(MediaStatus)); + /* Emit info to figure out what went wrong. */ + netdev_dbg(dev, "Tx queue start entry %ld dirty entry %ld\n", + tp->cur_tx, tp->dirty_tx); + for (i = 0; i < NUM_TX_DESC; i++) + netdev_dbg(dev, "Tx descriptor %d is %08x%s\n", + i, RTL_R32(TxStatus0 + (i * 4)), + i == tp->dirty_tx % NUM_TX_DESC ? 
+ " (queue head)" : ""); + + tp->xstats.tx_timeouts++; + + /* disable Tx ASAP, if not already */ + tmp8 = RTL_R8 (ChipCmd); + if (tmp8 & CmdTxEnb) + RTL_W8 (ChipCmd, CmdRxEnb); + + spin_lock_bh(&tp->rx_lock); + /* Disable interrupts by clearing the interrupt mask. */ + RTL_W16 (IntrMask, 0x0000); + + /* Stop a shared interrupt from scavenging while we are. */ + spin_lock_irq(&tp->lock); + rtl8139_tx_clear (tp); + spin_unlock_irq(&tp->lock); + + /* ...and finally, reset everything */ + napi_enable(&tp->napi); + rtl8139_hw_start(dev); + netif_wake_queue(dev); + + spin_unlock_bh(&tp->rx_lock); +} + +static void rtl8139_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct rtl8139_private *tp = netdev_priv(dev); + + tp->watchdog_fired = 1; + if (!tp->have_thread) { + INIT_DELAYED_WORK(&tp->thread, rtl8139_thread); + schedule_delayed_work(&tp->thread, next_tick); + } +} + +static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb, + struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned int entry; + unsigned int len = skb->len; + unsigned long flags; + + /* Calculate the next Tx descriptor entry. */ + entry = tp->cur_tx % NUM_TX_DESC; + + /* Note: the chip doesn't have auto-pad! */ + if (likely(len < TX_BUF_SIZE)) { + if (len < ETH_ZLEN) + memset(tp->tx_buf[entry], 0, ETH_ZLEN); + skb_copy_and_csum_dev(skb, tp->tx_buf[entry]); + dev_kfree_skb_any(skb); + } else { + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + spin_lock_irqsave(&tp->lock, flags); + /* + * Writing to TxStatus triggers a DMA transfer of the data + * copied to tp->tx_buf[entry] above. Use a memory barrier + * to make sure that the device sees the updated data. + */ + wmb(); + RTL_W32_F (TxStatus0 + (entry * sizeof (u32)), + tp->tx_flag | max(len, (unsigned int)ETH_ZLEN)); + + tp->cur_tx++; + + if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) + netif_stop_queue (dev); + spin_unlock_irqrestore(&tp->lock, flags); + + netif_dbg(tp, tx_queued, dev, "Queued Tx packet size %u to slot %d\n", + len, entry); + + return NETDEV_TX_OK; +} + + +static void rtl8139_tx_interrupt (struct net_device *dev, + struct rtl8139_private *tp, + void __iomem *ioaddr) +{ + unsigned long dirty_tx, tx_left; + + assert (dev != NULL); + assert (ioaddr != NULL); + + dirty_tx = tp->dirty_tx; + tx_left = tp->cur_tx - dirty_tx; + while (tx_left > 0) { + int entry = dirty_tx % NUM_TX_DESC; + int txstatus; + + txstatus = RTL_R32 (TxStatus0 + (entry * sizeof (u32))); + + if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted))) + break; /* It still hasn't been Txed */ + + /* Note: TxCarrierLost is always asserted at 100mbps. */ + if (txstatus & (TxOutOfWindow | TxAborted)) { + /* There was an major error, log it. */ + netif_dbg(tp, tx_err, dev, "Transmit error, Tx status %08x\n", + txstatus); + dev->stats.tx_errors++; + if (txstatus & TxAborted) { + dev->stats.tx_aborted_errors++; + RTL_W32 (TxConfig, TxClearAbt); + RTL_W16 (IntrStatus, TxErr); + wmb(); + } + if (txstatus & TxCarrierLost) + dev->stats.tx_carrier_errors++; + if (txstatus & TxOutOfWindow) + dev->stats.tx_window_errors++; + } else { + if (txstatus & TxUnderrun) { + /* Add 64 to the Tx FIFO threshold. 
*/ + if (tp->tx_flag < 0x00300000) + tp->tx_flag += 0x00020000; + dev->stats.tx_fifo_errors++; + } + dev->stats.collisions += (txstatus >> 24) & 15; + u64_stats_update_begin(&tp->tx_stats.syncp); + tp->tx_stats.packets++; + tp->tx_stats.bytes += txstatus & 0x7ff; + u64_stats_update_end(&tp->tx_stats.syncp); + } + + dirty_tx++; + tx_left--; + } + +#ifndef RTL8139_NDEBUG + if (tp->cur_tx - dirty_tx > NUM_TX_DESC) { + netdev_err(dev, "Out-of-sync dirty pointer, %ld vs. %ld\n", + dirty_tx, tp->cur_tx); + dirty_tx += NUM_TX_DESC; + } +#endif /* RTL8139_NDEBUG */ + + /* only wake the queue if we did work, and the queue is stopped */ + if (tp->dirty_tx != dirty_tx) { + tp->dirty_tx = dirty_tx; + mb(); + netif_wake_queue (dev); + } +} + + +/* TODO: clean this up! Rx reset need not be this intensive */ +static void rtl8139_rx_err (u32 rx_status, struct net_device *dev, + struct rtl8139_private *tp, void __iomem *ioaddr) +{ + u8 tmp8; +#ifdef CONFIG_8139_OLD_RX_RESET + int tmp_work; +#endif + + netif_dbg(tp, rx_err, dev, "Ethernet frame had errors, status %08x\n", + rx_status); + dev->stats.rx_errors++; + if (!(rx_status & RxStatusOK)) { + if (rx_status & RxTooLong) { + netdev_dbg(dev, "Oversized Ethernet frame, status %04x!\n", + rx_status); + /* A.C.: The chip hangs here. */ + } + if (rx_status & (RxBadSymbol | RxBadAlign)) + dev->stats.rx_frame_errors++; + if (rx_status & (RxRunt | RxTooLong)) + dev->stats.rx_length_errors++; + if (rx_status & RxCRCErr) + dev->stats.rx_crc_errors++; + } else { + tp->xstats.rx_lost_in_ring++; + } + +#ifndef CONFIG_8139_OLD_RX_RESET + tmp8 = RTL_R8 (ChipCmd); + RTL_W8 (ChipCmd, tmp8 & ~CmdRxEnb); + RTL_W8 (ChipCmd, tmp8); + RTL_W32 (RxConfig, tp->rx_config); + tp->cur_rx = 0; +#else + /* Reset the receiver, based on RealTek recommendation. (Bug?) */ + + /* disable receive */ + RTL_W8_F (ChipCmd, CmdTxEnb); + tmp_work = 200; + while (--tmp_work > 0) { + udelay(1); + tmp8 = RTL_R8 (ChipCmd); + if (!(tmp8 & CmdRxEnb)) + break; + } + if (tmp_work <= 0) + netdev_warn(dev, "rx stop wait too long\n"); + /* restart receive */ + tmp_work = 200; + while (--tmp_work > 0) { + RTL_W8_F (ChipCmd, CmdRxEnb | CmdTxEnb); + udelay(1); + tmp8 = RTL_R8 (ChipCmd); + if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb)) + break; + } + if (tmp_work <= 0) + netdev_warn(dev, "tx/rx enable wait too long\n"); + + /* and reinitialize all rx related registers */ + RTL_W8_F (Cfg9346, Cfg9346_Unlock); + /* Must enable Tx/Rx before setting transfer thresholds! */ + RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); + + tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys; + RTL_W32 (RxConfig, tp->rx_config); + tp->cur_rx = 0; + + netdev_dbg(dev, "init buffer addresses\n"); + + /* Lock Config[01234] and BMCR register writes */ + RTL_W8 (Cfg9346, Cfg9346_Lock); + + /* init Rx ring buffer DMA address */ + RTL_W32_F (RxBuf, tp->rx_ring_dma); + + /* A.C.: Reset the multicast list. 
*/ + __set_rx_mode (dev); +#endif +} + +#if RX_BUF_IDX == 3 +static inline void wrap_copy(struct sk_buff *skb, const unsigned char *ring, + u32 offset, unsigned int size) +{ + u32 left = RX_BUF_LEN - offset; + + if (size > left) { + skb_copy_to_linear_data(skb, ring + offset, left); + skb_copy_to_linear_data_offset(skb, left, ring, size - left); + } else + skb_copy_to_linear_data(skb, ring + offset, size); +} +#endif + +static void rtl8139_isr_ack(struct rtl8139_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + u16 status; + + status = RTL_R16 (IntrStatus) & RxAckBits; + + /* Clear out errors and receive interrupts */ + if (likely(status != 0)) { + if (unlikely(status & (RxFIFOOver | RxOverflow))) { + tp->dev->stats.rx_errors++; + if (status & RxFIFOOver) + tp->dev->stats.rx_fifo_errors++; + } + RTL_W16_F (IntrStatus, RxAckBits); + } +} + +static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp, + int budget) +{ + void __iomem *ioaddr = tp->mmio_addr; + int received = 0; + unsigned char *rx_ring = tp->rx_ring; + unsigned int cur_rx = tp->cur_rx; + unsigned int rx_size = 0; + + netdev_dbg(dev, "In %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n", + __func__, (u16)cur_rx, + RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd)); + + while (netif_running(dev) && received < budget && + (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) { + u32 ring_offset = cur_rx % RX_BUF_LEN; + u32 rx_status; + unsigned int pkt_size; + struct sk_buff *skb; + + rmb(); + + /* read size+status of next frame from DMA ring buffer */ + rx_status = le32_to_cpu (*(__le32 *) (rx_ring + ring_offset)); + rx_size = rx_status >> 16; + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size = rx_size - 4; + else + pkt_size = rx_size; + + netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n", + __func__, rx_status, rx_size, cur_rx); +#if RTL8139_DEBUG > 2 + print_hex_dump(KERN_DEBUG, "Frame contents: ", + DUMP_PREFIX_OFFSET, 16, 1, + &rx_ring[ring_offset], 70, true); +#endif + + /* Packet copy from FIFO still in progress. + * Theoretically, this should never happen + * since EarlyRx is disabled. + */ + if (unlikely(rx_size == 0xfff0)) { + if (!tp->fifo_copy_timeout) + tp->fifo_copy_timeout = jiffies + 2; + else if (time_after(jiffies, tp->fifo_copy_timeout)) { + netdev_dbg(dev, "hung FIFO. Reset\n"); + rx_size = 0; + goto no_early_rx; + } + netif_dbg(tp, intr, dev, "fifo copy in progress\n"); + tp->xstats.early_rx++; + break; + } + +no_early_rx: + tp->fifo_copy_timeout = 0; + + /* If Rx err or invalid rx_size/rx_status received + * (which happens if we get lost in the ring), + * Rx process gets reset, so we abort any further + * Rx processing. + */ + if (unlikely((rx_size > (MAX_ETH_FRAME_SIZE+4)) || + (rx_size < 8) || + (!(rx_status & RxStatusOK)))) { + if ((dev->features & NETIF_F_RXALL) && + (rx_size <= (MAX_ETH_FRAME_SIZE + 4)) && + (rx_size >= 8) && + (!(rx_status & RxStatusOK))) { + /* Length is at least mostly OK, but pkt has + * error. I'm hoping we can handle some of these + * errors without resetting the chip. --Ben + */ + dev->stats.rx_errors++; + if (rx_status & RxCRCErr) { + dev->stats.rx_crc_errors++; + goto keep_pkt; + } + if (rx_status & RxRunt) { + dev->stats.rx_length_errors++; + goto keep_pkt; + } + } + rtl8139_rx_err (rx_status, dev, tp, ioaddr); + received = -1; + goto out; + } + +keep_pkt: + /* Malloc up new buffer, compatible with net-2e. */ + /* Omit the four octet CRC from the length. 
*/ + + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (likely(skb)) { +#if RX_BUF_IDX == 3 + wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); +#else + skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size); +#endif + skb_put (skb, pkt_size); + + skb->protocol = eth_type_trans (skb, dev); + + u64_stats_update_begin(&tp->rx_stats.syncp); + tp->rx_stats.packets++; + tp->rx_stats.bytes += pkt_size; + u64_stats_update_end(&tp->rx_stats.syncp); + + netif_receive_skb (skb); + } else { + dev->stats.rx_dropped++; + } + received++; + + cur_rx = (cur_rx + rx_size + 4 + 3) & ~3; + RTL_W16 (RxBufPtr, (u16) (cur_rx - 16)); + + rtl8139_isr_ack(tp); + } + + if (unlikely(!received || rx_size == 0xfff0)) + rtl8139_isr_ack(tp); + + netdev_dbg(dev, "Done %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n", + __func__, cur_rx, + RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd)); + + tp->cur_rx = cur_rx; + + /* + * The receive buffer should be mostly empty. + * Tell NAPI to reenable the Rx irq. + */ + if (tp->fifo_copy_timeout) + received = budget; + +out: + return received; +} + + +static void rtl8139_weird_interrupt (struct net_device *dev, + struct rtl8139_private *tp, + void __iomem *ioaddr, + int status, int link_changed) +{ + netdev_dbg(dev, "Abnormal interrupt, status %08x\n", status); + + assert (dev != NULL); + assert (tp != NULL); + assert (ioaddr != NULL); + + /* Update the error count. */ + dev->stats.rx_missed_errors += RTL_R32 (RxMissed); + RTL_W32 (RxMissed, 0); + + if ((status & RxUnderrun) && link_changed && + (tp->drv_flags & HAS_LNK_CHNG)) { + rtl_check_media(dev, 0); + status &= ~RxUnderrun; + } + + if (status & (RxUnderrun | RxErr)) + dev->stats.rx_errors++; + + if (status & PCSTimeout) + dev->stats.rx_length_errors++; + if (status & RxUnderrun) + dev->stats.rx_fifo_errors++; + if (status & PCIErr) { + u16 pci_cmd_status; + pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status); + pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status); + + netdev_err(dev, "PCI Bus error %04x\n", pci_cmd_status); + } +} + +static int rtl8139_poll(struct napi_struct *napi, int budget) +{ + struct rtl8139_private *tp = container_of(napi, struct rtl8139_private, napi); + struct net_device *dev = tp->dev; + void __iomem *ioaddr = tp->mmio_addr; + int work_done; + + spin_lock(&tp->rx_lock); + work_done = 0; + if (likely(RTL_R16(IntrStatus) & RxAckBits)) + work_done += rtl8139_rx(dev, tp, budget); + + if (work_done < budget) { + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + if (napi_complete_done(napi, work_done)) + RTL_W16_F(IntrMask, rtl8139_intr_mask); + spin_unlock_irqrestore(&tp->lock, flags); + } + spin_unlock(&tp->rx_lock); + + return work_done; +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ +static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance) +{ + struct net_device *dev = (struct net_device *) dev_instance; + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u16 status, ackstat; + int link_changed = 0; /* avoid bogus "uninit" warning */ + int handled = 0; + + spin_lock (&tp->lock); + status = RTL_R16 (IntrStatus); + + /* shared irq? */ + if (unlikely((status & rtl8139_intr_mask) == 0)) + goto out; + + handled = 1; + + /* h/w no longer present (hotplug?) 
or major error, bail */ + if (unlikely(status == 0xFFFF)) + goto out; + + /* close possible race's with dev_close */ + if (unlikely(!netif_running(dev))) { + RTL_W16 (IntrMask, 0); + goto out; + } + + /* Acknowledge all of the current interrupt sources ASAP, but + an first get an additional status bit from CSCR. */ + if (unlikely(status & RxUnderrun)) + link_changed = RTL_R16 (CSCR) & CSCR_LinkChangeBit; + + ackstat = status & ~(RxAckBits | TxErr); + if (ackstat) + RTL_W16 (IntrStatus, ackstat); + + /* Receive packets are processed by poll routine. + If not running start it now. */ + if (status & RxAckBits){ + if (napi_schedule_prep(&tp->napi)) { + RTL_W16_F (IntrMask, rtl8139_norx_intr_mask); + __napi_schedule(&tp->napi); + } + } + + /* Check uncommon events with one test. */ + if (unlikely(status & (PCIErr | PCSTimeout | RxUnderrun | RxErr))) + rtl8139_weird_interrupt (dev, tp, ioaddr, + status, link_changed); + + if (status & (TxOK | TxErr)) { + rtl8139_tx_interrupt (dev, tp, ioaddr); + if (status & TxErr) + RTL_W16 (IntrStatus, TxErr); + } + out: + spin_unlock (&tp->lock); + + netdev_dbg(dev, "exiting interrupt, intr_status=%#4.4x\n", + RTL_R16(IntrStatus)); + return IRQ_RETVAL(handled); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling receive - used by netconsole and other diagnostic tools + * to allow network i/o with interrupts disabled. + */ +static void rtl8139_poll_controller(struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + const int irq = tp->pci_dev->irq; + + disable_irq_nosync(irq); + rtl8139_interrupt(irq, dev); + enable_irq(irq); +} +#endif + +static int rtl8139_set_mac_address(struct net_device *dev, void *p) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(dev, addr->sa_data); + + spin_lock_irq(&tp->lock); + + RTL_W8_F(Cfg9346, Cfg9346_Unlock); + RTL_W32_F(MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0))); + RTL_W32_F(MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4))); + RTL_W8_F(Cfg9346, Cfg9346_Lock); + + spin_unlock_irq(&tp->lock); + + return 0; +} + +static int rtl8139_close (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + + netif_stop_queue(dev); + napi_disable(&tp->napi); + + netif_dbg(tp, ifdown, dev, "Shutting down ethercard, status was 0x%04x\n", + RTL_R16(IntrStatus)); + + spin_lock_irqsave (&tp->lock, flags); + + /* Stop the chip's Tx and Rx DMA processes. */ + RTL_W8 (ChipCmd, 0); + + /* Disable interrupts by clearing the interrupt mask. */ + RTL_W16 (IntrMask, 0); + + /* Update the error counts. */ + dev->stats.rx_missed_errors += RTL_R32 (RxMissed); + RTL_W32 (RxMissed, 0); + + spin_unlock_irqrestore (&tp->lock, flags); + + free_irq(tp->pci_dev->irq, dev); + + rtl8139_tx_clear (tp); + + dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, + tp->rx_ring, tp->rx_ring_dma); + dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, + tp->tx_bufs, tp->tx_bufs_dma); + tp->rx_ring = NULL; + tp->tx_bufs = NULL; + + /* Green! Put the chip in low-power mode. */ + RTL_W8 (Cfg9346, Cfg9346_Unlock); + + if (rtl_chip_info[tp->chipset].flags & HasHltClk) + RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */ + + return 0; +} + + +/* Get the ethtool Wake-on-LAN settings. 
Assumes that wol points to + kernel memory, *wol has been initialized as {ETHTOOL_GWOL}, and + other threads or interrupts aren't messing with the 8139. */ +static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + spin_lock_irq(&tp->lock); + if (rtl_chip_info[tp->chipset].flags & HasLWake) { + u8 cfg3 = RTL_R8 (Config3); + u8 cfg5 = RTL_R8 (Config5); + + wol->supported = WAKE_PHY | WAKE_MAGIC + | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; + + wol->wolopts = 0; + if (cfg3 & Cfg3_LinkUp) + wol->wolopts |= WAKE_PHY; + if (cfg3 & Cfg3_Magic) + wol->wolopts |= WAKE_MAGIC; + /* (KON)FIXME: See how netdev_set_wol() handles the + following constants. */ + if (cfg5 & Cfg5_UWF) + wol->wolopts |= WAKE_UCAST; + if (cfg5 & Cfg5_MWF) + wol->wolopts |= WAKE_MCAST; + if (cfg5 & Cfg5_BWF) + wol->wolopts |= WAKE_BCAST; + } + spin_unlock_irq(&tp->lock); +} + + +/* Set the ethtool Wake-on-LAN settings. Return 0 or -errno. Assumes + that wol points to kernel memory and other threads or interrupts + aren't messing with the 8139. */ +static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 support; + u8 cfg3, cfg5; + + support = ((rtl_chip_info[tp->chipset].flags & HasLWake) + ? (WAKE_PHY | WAKE_MAGIC + | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST) + : 0); + if (wol->wolopts & ~support) + return -EINVAL; + + spin_lock_irq(&tp->lock); + cfg3 = RTL_R8 (Config3) & ~(Cfg3_LinkUp | Cfg3_Magic); + if (wol->wolopts & WAKE_PHY) + cfg3 |= Cfg3_LinkUp; + if (wol->wolopts & WAKE_MAGIC) + cfg3 |= Cfg3_Magic; + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W8 (Config3, cfg3); + RTL_W8 (Cfg9346, Cfg9346_Lock); + + cfg5 = RTL_R8 (Config5) & ~(Cfg5_UWF | Cfg5_MWF | Cfg5_BWF); + /* (KON)FIXME: These are untested. We may have to set the + CRC0, Wakeup0 and LSBCRC0 registers too, but I have no + documentation. 
*/ + if (wol->wolopts & WAKE_UCAST) + cfg5 |= Cfg5_UWF; + if (wol->wolopts & WAKE_MCAST) + cfg5 |= Cfg5_MWF; + if (wol->wolopts & WAKE_BCAST) + cfg5 |= Cfg5_BWF; + RTL_W8 (Config5, cfg5); /* need not unlock via Cfg9346 */ + spin_unlock_irq(&tp->lock); + + return 0; +} + +static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct rtl8139_private *tp = netdev_priv(dev); + strscpy(info->driver, DRV_NAME, sizeof(info->driver)); + strscpy(info->version, DRV_VERSION, sizeof(info->version)); + strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); +} + +static int rtl8139_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct rtl8139_private *tp = netdev_priv(dev); + spin_lock_irq(&tp->lock); + mii_ethtool_get_link_ksettings(&tp->mii, cmd); + spin_unlock_irq(&tp->lock); + return 0; +} + +static int rtl8139_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct rtl8139_private *tp = netdev_priv(dev); + int rc; + spin_lock_irq(&tp->lock); + rc = mii_ethtool_set_link_ksettings(&tp->mii, cmd); + spin_unlock_irq(&tp->lock); + return rc; +} + +static int rtl8139_nway_reset(struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + return mii_nway_restart(&tp->mii); +} + +static u32 rtl8139_get_link(struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + return mii_link_ok(&tp->mii); +} + +static u32 rtl8139_get_msglevel(struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + return tp->msg_enable; +} + +static void rtl8139_set_msglevel(struct net_device *dev, u32 datum) +{ + struct rtl8139_private *tp = netdev_priv(dev); + tp->msg_enable = datum; +} + +static int rtl8139_get_regs_len(struct net_device *dev) +{ + struct rtl8139_private *tp; + /* TODO: we are too slack to do reg dumping for pio, for now */ + if (use_io) + return 0; + tp = netdev_priv(dev); + return tp->regs_len; +} + +static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) +{ + struct rtl8139_private *tp; + + /* TODO: we are too slack to do reg dumping for pio, for now */ + if (use_io) + return; + tp = netdev_priv(dev); + + regs->version = RTL_REGS_VER; + + spin_lock_irq(&tp->lock); + memcpy_fromio(regbuf, tp->mmio_addr, regs->len); + spin_unlock_irq(&tp->lock); +} + +static int rtl8139_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return RTL_NUM_STATS; + default: + return -EOPNOTSUPP; + } +} + +static void rtl8139_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) +{ + struct rtl8139_private *tp = netdev_priv(dev); + + data[0] = tp->xstats.early_rx; + data[1] = tp->xstats.tx_buf_mapped; + data[2] = tp->xstats.tx_timeouts; + data[3] = tp->xstats.rx_lost_in_ring; +} + +static void rtl8139_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys)); +} + +static const struct ethtool_ops rtl8139_ethtool_ops = { + .get_drvinfo = rtl8139_get_drvinfo, + .get_regs_len = rtl8139_get_regs_len, + .get_regs = rtl8139_get_regs, + .nway_reset = rtl8139_nway_reset, + .get_link = rtl8139_get_link, + .get_msglevel = rtl8139_get_msglevel, + .set_msglevel = rtl8139_set_msglevel, + .get_wol = rtl8139_get_wol, + .set_wol = rtl8139_set_wol, + .get_strings = rtl8139_get_strings, + .get_sset_count = rtl8139_get_sset_count, + .get_ethtool_stats = rtl8139_get_ethtool_stats, + 
.get_link_ksettings = rtl8139_get_link_ksettings, + .set_link_ksettings = rtl8139_set_link_ksettings, +}; + +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct rtl8139_private *tp = netdev_priv(dev); + int rc; + + if (!netif_running(dev)) + return -EINVAL; + + spin_lock_irq(&tp->lock); + rc = generic_mii_ioctl(&tp->mii, if_mii(rq), cmd, NULL); + spin_unlock_irq(&tp->lock); + + return rc; +} + + +static void +rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + unsigned int start; + + if (netif_running(dev)) { + spin_lock_irqsave (&tp->lock, flags); + dev->stats.rx_missed_errors += RTL_R32 (RxMissed); + RTL_W32 (RxMissed, 0); + spin_unlock_irqrestore (&tp->lock, flags); + } + + netdev_stats_to_stats64(stats, &dev->stats); + + do { + start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); + stats->rx_packets = tp->rx_stats.packets; + stats->rx_bytes = tp->rx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); + + do { + start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); + stats->tx_packets = tp->tx_stats.packets; + stats->tx_bytes = tp->tx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); +} + +/* Set or clear the multicast filter for this adaptor. + This routine is not state sensitive and need not be SMP locked. */ + +static void __set_rx_mode (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 mc_filter[2]; /* Multicast hash filter */ + int rx_mode; + u32 tmp; + + netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08x\n", + dev->flags, RTL_R32(RxConfig)); + + /* Note: do not reorder, GCC is clever about common statements. */ + if (dev->flags & IFF_PROMISC) { + rx_mode = + AcceptBroadcast | AcceptMulticast | AcceptMyPhys | + AcceptAllPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter perfectly -- accept all multicasts. */ + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else { + struct netdev_hw_addr *ha; + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + rx_mode |= AcceptMulticast; + } + } + + if (dev->features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + + /* We can safely update without stopping the chip. 
*/ + tmp = rtl8139_rx_config | rx_mode; + if (tp->rx_config != tmp) { + RTL_W32_F (RxConfig, tmp); + tp->rx_config = tmp; + } + RTL_W32_F (MAR0 + 0, mc_filter[0]); + RTL_W32_F (MAR0 + 4, mc_filter[1]); +} + +static void rtl8139_set_rx_mode (struct net_device *dev) +{ + unsigned long flags; + struct rtl8139_private *tp = netdev_priv(dev); + + spin_lock_irqsave (&tp->lock, flags); + __set_rx_mode(dev); + spin_unlock_irqrestore (&tp->lock, flags); +} + +static int __maybe_unused rtl8139_suspend(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + + if (!netif_running (dev)) + return 0; + + netif_device_detach (dev); + + spin_lock_irqsave (&tp->lock, flags); + + /* Disable interrupts, stop Tx and Rx. */ + RTL_W16 (IntrMask, 0); + RTL_W8 (ChipCmd, 0); + + /* Update the error counts. */ + dev->stats.rx_missed_errors += RTL_R32 (RxMissed); + RTL_W32 (RxMissed, 0); + + spin_unlock_irqrestore (&tp->lock, flags); + + return 0; +} + +static int __maybe_unused rtl8139_resume(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + + if (!netif_running (dev)) + return 0; + + rtl8139_init_ring (dev); + rtl8139_hw_start (dev); + netif_device_attach (dev); + return 0; +} + +static SIMPLE_DEV_PM_OPS(rtl8139_pm_ops, rtl8139_suspend, rtl8139_resume); + +static struct pci_driver rtl8139_pci_driver = { + .name = DRV_NAME, + .id_table = rtl8139_pci_tbl, + .probe = rtl8139_init_one, + .remove = rtl8139_remove_one, + .driver.pm = &rtl8139_pm_ops, +}; + + +static int __init rtl8139_init_module (void) +{ + /* when we're a module, we always print a version message, + * even if no 8139 board is found. + */ +#ifdef MODULE + pr_info(RTL8139_DRIVER_NAME "\n"); +#endif + + return pci_register_driver(&rtl8139_pci_driver); +} + + +static void __exit rtl8139_cleanup_module (void) +{ + pci_unregister_driver (&rtl8139_pci_driver); +} + + +module_init(rtl8139_init_module); +module_exit(rtl8139_cleanup_module); diff --git a/devices/Kbuild.in b/devices/Kbuild.in index d3a28b68..cf5705dd 100644 --- a/devices/Kbuild.in +++ b/devices/Kbuild.in @@ -70,16 +70,28 @@ ifeq (@ENABLE_E1000E@,1) obj-m += e1000e/ endif +ifeq (@ENABLE_GENET@,1) + obj-m += genet/ +endif + ifeq (@ENABLE_IGB@,1) obj-m += igb/ endif +ifeq (@ENABLE_IGC@,1) + obj-m += igc/ +endif + ifeq (@ENABLE_R8169@,1) +ifeq (@R8169_IN_SUBDIR@,1) + obj-m += r8169/ +else EC_R8169_OBJ := r8169-@KERNEL_R8169@-ethercat.o obj-m += ec_r8169.o ec_r8169-objs := $(EC_R8169_OBJ) CFLAGS_$(EC_R8169_OBJ) = -DREV=$(REV) endif +endif KBUILD_EXTRA_SYMBOLS := \ @abs_top_builddir@/$(LINUX_SYMVERS) \ diff --git a/devices/Makefile.am b/devices/Makefile.am index f862e011..238f060c 100644 --- a/devices/Makefile.am +++ b/devices/Makefile.am @@ -31,7 +31,10 @@ SUBDIRS = \ ccat \ e1000 \ e1000e \ - igb + genet \ + igb \ + igc \ + r8169 # using HEADERS to enable tags target noinst_HEADERS = \ @@ -95,6 +98,8 @@ noinst_HEADERS = \ 8139too-4.4-orig.c \ 8139too-5.10-ethercat.c \ 8139too-5.10-orig.c \ + 8139too-6.1-ethercat.c \ + 8139too-6.1-orig.c \ create_driver_table.py \ device_drivers_template.md \ e100-2.6.20-ethercat.c \ @@ -137,6 +142,18 @@ noinst_HEADERS = \ e100-3.8-orig.c \ e100-4.4-ethercat.c \ e100-4.4-orig.c \ + e100-5.4-ethercat.c \ + e100-5.4-orig.c \ + e100-5.10-ethercat.c \ + e100-5.10-orig.c \ + e100-5.14-ethercat.c \ + e100-5.14-orig.c \ + e100-5.15-ethercat.c \ + e100-5.15-orig.c \ + e100-6.1-ethercat.c \ + e100-6.1-orig.c \ + 
e100-6.4-ethercat.c \ + e100-6.4-orig.c \ ecdev.h \ generic.c \ r8169-2.6.24-ethercat.c \ diff --git a/devices/ccat/netdev.c b/devices/ccat/netdev.c index 44d83607..44e7f669 100644 --- a/devices/ccat/netdev.c +++ b/devices/ccat/netdev.c @@ -249,9 +249,11 @@ struct ccat_mac_register { u8 mii_connected; }; +static void ccat_eth_fifo_reset(struct ccat_eth_fifo *const fifo); static void fifo_set_end(struct ccat_eth_fifo *const fifo, size_t size) { fifo->end = fifo->mem.start + size - sizeof(struct ccat_eth_frame); + ccat_eth_fifo_reset(fifo); } static void ccat_dma_free(struct ccat_eth_priv *const priv) diff --git a/devices/create_driver_table.py b/devices/create_driver_table.py index d492474b..1a3e5cc9 100644 --- a/devices/create_driver_table.py +++ b/devices/create_driver_table.py @@ -37,11 +37,16 @@ DRIVER_MAP=( (".", "e100", "e100"), ("e1000", "e1000", "e1000_main"), ("e1000e", "e1000e", "netdev"), + ("genet", "bcmgenet", "bcmgenet"), ("igb", "igb", "igb_main"), + ("igc", "igc", "igc_main"), (".", "r8169", "r8169"), + ("r8169", "r8169", "r8169_main"), ) -DRIVERS = tuple([x[1] for x in DRIVER_MAP]) + +DRIVERS = sorted(set([x[1] for x in DRIVER_MAP])) + def compile_regex(prefix, file_extension): """ diff --git a/devices/device_drivers_template.md b/devices/device_drivers_template.md index 9ada925a..63ee5b62 100644 --- a/devices/device_drivers_template.md +++ b/devices/device_drivers_template.md @@ -5,3 +5,17 @@ This table contains a list of all available native drivers, depending on the kernel version. The `generic` and the `ccat` driver are independent of the kernel version. + +To find out which native driver is required for your network card, +you can use `lspci -vv` or look in `/sys`: +```sh +admin@ipc:~> ls -l /sys/class/net/eth5/device/driver +lrwxrwxrwx 1 root root 0 6. Nov 15:35 /sys/class/net/eth5/device/driver -> ../../../../bus/pci/drivers/e1000 +``` +or +```sh +admin@ipc:~> basename $(readlink /sys/class/net/eth5/device/driver) +e1000 +``` + +In this example, `eth5` uses the `e1000` driver. diff --git a/devices/e100-5.10-ethercat.c b/devices/e100-5.10-ethercat.c new file mode 100644 index 00000000..9b95ca8d --- /dev/null +++ b/devices/e100-5.10-ethercat.c @@ -0,0 +1,3375 @@ +/****************************************************************************** + * + * $Id$ + * + * Copyright (C) 2007-2012 Florian Pose, Ingenieurgemeinschaft IgH + * + * This file is part of the IgH EtherCAT Master. + * + * The IgH EtherCAT Master is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + * The IgH EtherCAT Master is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with the IgH EtherCAT Master; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * --- + * + * The license mentioned above concerns the source code only. Using the + * EtherCAT technology and brand is only permitted in compliance with the + * industrial property and similar rights of Beckhoff Automation GmbH. + * + * --- + * + * vim: noexpandtab + * + *****************************************************************************/ + +/** + \file + EtherCAT driver for e100-compatible NICs. 
+*/ + +/* Former documentation: */ + +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* + * e100.c: Intel(R) PRO/100 ethernet driver + * + * (Re)written 2003 by scott.feldman@intel.com. Based loosely on + * original e100 driver, but better described as a munging of + * e100, e1000, eepro100, tg3, 8139cp, and other drivers. + * + * References: + * Intel 8255x 10/100 Mbps Ethernet Controller Family, + * Open Source Software Developers Manual, + * http://sourceforge.net/projects/e1000 + * + * + * Theory of Operation + * + * I. General + * + * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet + * controller family, which includes the 82557, 82558, 82559, 82550, + * 82551, and 82562 devices. 82558 and greater controllers + * integrate the Intel 82555 PHY. The controllers are used in + * server and client network interface cards, as well as in + * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx + * configurations. 8255x supports a 32-bit linear addressing + * mode and operates at 33MHz PCI clock rate. + * + * II. Driver Operation + * + * Memory-mapped mode is used exclusively to access the device's + * shared-memory structure, the Control/Status Registers (CSR). All + * setup, configuration, and control of the device, including queuing + * of Tx, Rx, and configuration commands is through the CSR. + * cmd_lock serializes accesses to the CSR command register. cb_lock + * protects the shared Command Block List (CBL). + * + * 8255x is highly MII-compliant and all access to the PHY goes + * through the Management Data Interface (MDI). Consequently, the + * driver leverages the mii.c library shared with other MII-compliant + * devices. + * + * Big- and Little-Endian byte order as well as 32- and 64-bit + * archs are supported. Weak-ordered memory and non-cache-coherent + * archs are supported. + * + * III. Transmit + * + * A Tx skb is mapped and hangs off of a TCB. TCBs are linked + * together in a fixed-size ring (CBL) thus forming the flexible mode + * memory structure. A TCB marked with the suspend-bit indicates + * the end of the ring. The last TCB processed suspends the + * controller, and the controller can be restarted by issuing a CU + * resume command to continue from the suspend point, or a CU start + * command to start at a given position in the ring. + * + * Non-Tx commands (config, multicast setup, etc) are linked + * into the CBL ring along with Tx commands. The common structure + * used for both Tx and non-Tx commands is the Command Block (CB). + * + * cb_to_use is the next CB to use for queuing a command; cb_to_clean + * is the next CB to check for completion; cb_to_send is the first + * CB to start on in case of a previous failure to resume. CB clean + * up happens in interrupt context in response to a CU interrupt. + * cbs_avail keeps track of the number of free CB resources available. + * + * Hardware padding of short packets to minimum packet size is + * enabled. 82557 pads with 7Eh, while the later controllers pad + * with 00h. + * + * IV. Receive + * + * The Receive Frame Area (RFA) comprises a ring of Receive Frame + * Descriptors (RFD) + data buffer, thus forming the simplified mode + * memory structure. Rx skbs are allocated to contain both the RFD + * and the data buffer, but the RFD is pulled off before the skb is + * indicated. The data buffer is aligned such that encapsulated + * protocol headers are u32-aligned.
Since the RFD is part of the + * mapped shared memory, and completion status is contained within + * the RFD, the RFD must be dma_sync'ed to maintain a consistent + * view from software and hardware. + * + * In order to keep updates to the RFD link field from colliding with + * hardware writes to mark packets complete, we use the feature that + * hardware will not write to a size 0 descriptor and mark the previous + * packet as end-of-list (EL). After updating the link, we remove EL + * and only then restore the size such that hardware may use the + * previous-to-end RFD. + * + * Under typical operation, the receive unit (RU) is start once, + * and the controller happily fills RFDs as frames arrive. If + * replacement RFDs cannot be allocated, or the RU goes non-active, + * the RU must be restarted. Frame arrival generates an interrupt, + * and Rx indication and re-allocation happen in the same context, + * therefore no locking is required. A software-generated interrupt + * is generated from the watchdog to recover from a failed allocation + * scenario where all Rx resources have been indicated and none re- + * placed. + * + * V. Miscellaneous + * + * VLAN offloading of tagging, stripping and filtering is not + * supported, but driver will accommodate the extra 4-byte VLAN tag + * for processing by upper layers. Tx/Rx Checksum offloading is not + * supported. Tx Scatter/Gather is not supported. Jumbo Frames is + * not supported (hardware limitation). + * + * MagicPacket(tm) WoL support is enabled/disabled via ethtool. + * + * Thanks to JC (jchapman@katalix.com) for helping with + * testing/troubleshooting the development driver. + * + * TODO: + * o several entry points race with dev->close + * o check for tx-no-resources/stop Q races with tx clean/wake Q + * + * FIXES: + * 2005/12/02 - Michael O'Donnell + * - Stratus87247: protect MDI control register manipulations + * 2009/06/01 - Andreas Mohr + * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// EtherCAT includes +#include "../globals.h" +#include "ecdev.h" + +#define DRV_NAME "ec_e100" +#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver, EtherCAT enabled" +#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" + +#define E100_WATCHDOG_PERIOD (2 * HZ) +#define E100_NAPI_WEIGHT 16 + +#define FIRMWARE_D101M "e100/d101m_ucode.bin" +#define FIRMWARE_D101S "e100/d101s_ucode.bin" +#define FIRMWARE_D102E "e100/d102e_ucode.bin" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE(FIRMWARE_D101M); +MODULE_FIRMWARE(FIRMWARE_D101S); +MODULE_FIRMWARE(FIRMWARE_D102E); + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR("Florian Pose "); +MODULE_LICENSE("GPL"); + +void e100_ec_poll(struct net_device *); + +static int debug = 3; +static int eeprom_bad_csum_allow = 0; +static int use_io = 0; +module_param(debug, int, 0); +module_param(eeprom_bad_csum_allow, int, 0); +module_param(use_io, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); +MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); + +#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ + PCI_VENDOR_ID_INTEL, 
device_id, PCI_ANY_ID, PCI_ANY_ID, \ + PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } +static const struct pci_device_id e100_id_table[] = { + INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1032, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1033, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1034, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1038, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1039, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103A, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103B, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103C, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103D, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103E, 4), + INTEL_8255X_ETHERNET_DEVICE(0x1050, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1051, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1052, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1053, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1054, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1055, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1056, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1057, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1059, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1064, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1065, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1066, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1067, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1068, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), + INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), + INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), + INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), + INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), + INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), + { 0, } +}; + +// prevent from being loaded automatically +//MODULE_DEVICE_TABLE(pci, e100_id_table); + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 0xFF, +}; + +enum phy { + phy_100a = 0x000003E0, + phy_100c = 0x035002A8, + phy_82555_tx = 0x015002A8, + phy_nsc_tx = 0x5C002000, + phy_82562_et = 0x033002A8, + phy_82562_em = 0x032002A8, + phy_82562_ek = 0x031002A8, + phy_82562_eh = 0x017002A8, + phy_82552_v = 0xd061004d, + phy_unknown = 0xFFFFFFFF, +}; + +/* CSR (Control/Status Registers) */ +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 0x08, + rus_ready = 0x10, + rus_mask = 0x3C, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0x00, + stat_ack_sw_gen = 0x04, + stat_ack_rnr = 0x10, + stat_ack_cu_idle = 0x20, + stat_ack_frame_rx = 0x40, + stat_ack_cu_cmd_done = 0x80, + stat_ack_not_present = 0xFF, + stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), + stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), +}; + +enum scb_cmd_hi { + irq_mask_none = 0x00, + irq_mask_all = 0x01, + irq_sw_gen = 0x02, +}; + +enum scb_cmd_lo { + cuc_nop = 0x00, + ruc_start = 0x01, + 
ruc_load_base = 0x06, + cuc_start = 0x10, + cuc_resume = 0x20, + cuc_dump_addr = 0x40, + cuc_dump_stats = 0x50, + cuc_load_base = 0x60, + cuc_dump_reset = 0x70, +}; + +enum cuc_dump { + cuc_dump_complete = 0x0000A005, + cuc_dump_reset_complete = 0x0000A007, +}; + +enum port { + software_reset = 0x0000, + selftest = 0x0001, + selective_reset = 0x0002, +}; + +enum eeprom_ctrl_lo { + eesk = 0x01, + eecs = 0x02, + eedi = 0x04, + eedo = 0x08, +}; + +enum mdi_ctrl { + mdi_write = 0x04000000, + mdi_read = 0x08000000, + mdi_ready = 0x10000000, +}; + +enum eeprom_op { + op_write = 0x05, + op_read = 0x06, + op_ewds = 0x10, + op_ewen = 0x13, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 0x03, + eeprom_phy_iface = 0x06, + eeprom_id = 0x0A, + eeprom_config_asf = 0x0D, + eeprom_smbus_addr = 0x90, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 0x0080, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB, + I82553C, + I82503, + DP83840, + S80C240, + S80C24, + I82555, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 0x0020, +}; + +enum eeprom_config_asf { + eeprom_asf = 0x8000, + eeprom_gcl = 0x4000, +}; + +enum cb_status { + cb_complete = 0x8000, + cb_ok = 0x2000, +}; + +/* + * cb_command - Command Block flags + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory + */ +enum cb_command { + cb_nop = 0x0000, + cb_iaaddr = 0x0001, + cb_config = 0x0002, + cb_multi = 0x0003, + cb_tx = 0x0004, + cb_ucode = 0x0005, + cb_dump = 0x0006, + cb_tx_sf = 0x0008, + cb_tx_nc = 0x0010, + cb_cid = 0x1f00, + cb_i = 0x2000, + cb_s = 0x4000, + cb_el = 0x8000, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next, *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +#if defined(__BIG_ENDIAN_BITFIELD) +#define X(a,b) b,a +#else +#define X(a,b) a,b +#endif +struct config { +/*0*/ u8 X(byte_count:6, pad0:2); +/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); +/*2*/ u8 adaptive_ifs; +/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), + term_write_cache_line:1), pad3:4); +/*4*/ u8 X(rx_dma_max_count:7, pad4:1); +/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); +/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), + tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), + rx_save_overruns : 1), rx_save_bad_frames : 1); +/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), + pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), + tx_dynamic_tbd:1); +/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); +/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), + link_status_wake:1), arp_wake:1), mcmatch_wake:1); +/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), + loopback:2); +/*11*/ u8 X(linear_priority:3, pad11:5); +/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); +/*13*/ u8 ip_addr_lo; +/*14*/ u8 ip_addr_hi; +/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), + wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), + pad15_2:1), crs_or_cdt:1); +/*16*/ u8 fc_delay_lo; +/*17*/ u8 fc_delay_hi; +/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), + rx_long_ok:1), fc_priority_threshold:3), pad18:1); +/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), + fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), + full_duplex_force:1), full_duplex_pin:1); +/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), 
pad20_2:1); +/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); +/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); + u8 pad_d102[9]; +}; + +#define E100_MAX_MULTICAST_ADDRS 64 +struct multi { + __le16 count; + u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; +}; + +/* Important: keep total struct u32-aligned */ +#define UCODE_SIZE 134 +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[ETH_ALEN]; + __le32 ucode[UCODE_SIZE]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next, *prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, lb_mac = 1, lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames, tx_max_collisions, tx_late_collisions, + tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, + tx_multiple_collisions, tx_total_collisions; + __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors, + rx_resource_errors, rx_overrun_errors, rx_cdt_errors, + rx_short_frame_errors; + __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; + __le16 xmt_tco_frames, rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + /* Begin: frequently used values: keep adjacent for cache effect */ + u32 msg_enable ____cacheline_aligned; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data); + + struct rx *rxs ____cacheline_aligned; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + + spinlock_t cb_lock ____cacheline_aligned; + spinlock_t cmd_lock; + struct csr __iomem *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + /* End: frequently used values: keep adjacent for cache effect */ + + enum { + ich = (1 << 0), + promiscuous = (1 << 1), + multicast_all = (1 << 2), + wol_magic = (1 << 3), + ich_10h_workaround = (1 << 4), + } flags ____cacheline_aligned; + + enum mac mac; + enum phy phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + + struct mem *mem; + dma_addr_t dma_addr; + + struct dma_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + + u16 eeprom_wc; + + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +static inline void e100_write_flush(struct nic *nic) +{ + /* Flush previous PCI writes through intermediate bridges + * by doing a benign read */ + (void)ioread8(&nic->csr->scb.status); +} + +static void e100_enable_irq(struct nic *nic) +{ + unsigned long flags; + + if (nic->ecdev) + return; + + 
spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_disable_irq(struct nic *nic) +{ + unsigned long flags = 0; + + if (!nic->ecdev) + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_hw_reset(struct nic *nic) +{ + /* Put CU and RU into idle with a selective reset to get + * device off of PCI bus */ + iowrite32(selective_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Now fully reset device */ + iowrite32(software_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Mask off our interrupt line - it's unmasked after reset */ + e100_disable_irq(nic); +} + +static int e100_self_test(struct nic *nic) +{ + u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); + + /* Passing the self-test is a pretty good indication + * that the device can DMA to/from host memory */ + + nic->mem->selftest.signature = 0; + nic->mem->selftest.result = 0xFFFFFFFF; + + iowrite32(selftest | dma_addr, &nic->csr->port); + e100_write_flush(nic); + /* Wait 10 msec for self-test to complete */ + msleep(10); + + /* Interrupts are enabled after self-test */ + e100_disable_irq(nic); + + /* Check results of self-test */ + if (nic->mem->selftest.result != 0) { + netif_err(nic, hw, nic->netdev, + "Self-test failed: result=0x%08X\n", + nic->mem->selftest.result); + return -ETIMEDOUT; + } + if (nic->mem->selftest.signature == 0) { + netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data) +{ + u32 cmd_addr_data[3]; + u8 ctrl; + int i, j; + + /* Three cmds: write/erase enable, write data, write/erase disable */ + cmd_addr_data[0] = op_ewen << (addr_len - 2); + cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | + le16_to_cpu(data); + cmd_addr_data[2] = op_ewds << (addr_len - 2); + + /* Bit-bang cmds to write word to eeprom */ + for (j = 0; j < 3; j++) { + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data[j] & (1 << i)) ? + eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } + /* Wait 10 msec for cmd to complete */ + msleep(10); + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } +}; + +/* General technique stolen from the eepro100 driver - very clever */ +static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) +{ + u32 cmd_addr_data; + u16 data = 0; + u8 ctrl; + int i; + + cmd_addr_data = ((op_read << *addr_len) | addr) << 16; + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Bit-bang to read word from eeprom */ + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Eeprom drives a dummy zero to EEDO after receiving + * complete address. 
Use this to adjust addr_len. */ + ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); + if (!(ctrl & eedo) && i > 16) { + *addr_len -= (i - 16); + i = 17; + } + + data = (data << 1) | (ctrl & eedo ? 1 : 0); + } + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + return cpu_to_le16(data); +}; + +/* Load entire EEPROM image into driver cache and validate checksum */ +static int e100_eeprom_load(struct nic *nic) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + for (addr = 0; addr < nic->eeprom_wc; addr++) { + nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); + if (addr < nic->eeprom_wc - 1) + checksum += le16_to_cpu(nic->eeprom[addr]); + } + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { + netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); + if (!eeprom_bad_csum_allow) + return -EAGAIN; + } + + return 0; +} + +/* Save (portion of) driver EEPROM cache to device and update checksum */ +static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + if (start + count >= nic->eeprom_wc) + return -EINVAL; + + for (addr = start; addr < start + count; addr++) + e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + for (addr = 0; addr < nic->eeprom_wc - 1; addr++) + checksum += le16_to_cpu(nic->eeprom[addr]); + nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); + e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, + nic->eeprom[nic->eeprom_wc - 1]); + + return 0; +} + +#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! 
*/ +#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ +static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) +{ + unsigned long flags = 0; + unsigned int i; + int err = 0; + + if (!nic->ecdev) + spin_lock_irqsave(&nic->cmd_lock, flags); + + /* Previous command is accepted when SCB clears */ + for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { + if (likely(!ioread8(&nic->csr->scb.cmd_lo))) + break; + cpu_relax(); + if (unlikely(i > E100_WAIT_SCB_FAST)) + udelay(5); + } + if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) { + err = -EAGAIN; + goto err_unlock; + } + + if (unlikely(cmd != cuc_resume)) + iowrite32(dma_addr, &nic->csr->scb.gen_ptr); + iowrite8(cmd, &nic->csr->scb.cmd_lo); + +err_unlock: + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->cmd_lock, flags); + + return err; +} + +static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, + int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) +{ + struct cb *cb; + unsigned long flags; + int err; + + if (!nic->ecdev) { + spin_lock_irqsave(&nic->cb_lock, flags); + } + + if (unlikely(!nic->cbs_avail)) { + err = -ENOMEM; + goto err_unlock; + } + + cb = nic->cb_to_use; + nic->cb_to_use = cb->next; + nic->cbs_avail--; + cb->skb = skb; + + err = cb_prepare(nic, cb, skb); + if (err) + goto err_unlock; + + if (unlikely(!nic->cbs_avail)) + err = -ENOSPC; + + + /* Order is important otherwise we'll be in a race with h/w: + * set S-bit in current first, then clear S-bit in previous. */ + cb->command |= cpu_to_le16(cb_s); + dma_wmb(); + cb->prev->command &= cpu_to_le16(~cb_s); + + while (nic->cb_to_send != nic->cb_to_use) { + if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, + nic->cb_to_send->dma_addr))) { + /* Ok, here's where things get sticky. It's + * possible that we can't schedule the command + * because the controller is too busy, so + * let's just queue the command and try again + * when another command is scheduled. */ + if (err == -ENOSPC) { + //request a reset + schedule_work(&nic->tx_timeout_task); + } + break; + } else { + nic->cuc_cmd = cuc_resume; + nic->cb_to_send = nic->cb_to_send->next; + } + } + +err_unlock: + if (!nic->ecdev) { + spin_unlock_irqrestore(&nic->cb_lock, flags); + } + + return err; +} + +static int mdio_read(struct net_device *netdev, int addr, int reg) +{ + struct nic *nic = netdev_priv(netdev); + return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); +} + +static void mdio_write(struct net_device *netdev, int addr, int reg, int data) +{ + struct nic *nic = netdev_priv(netdev); + + nic->mdio_ctrl(nic, addr, mdi_write, reg, data); +} + +/* the standard mdio_ctrl() function for usual MII-compliant hardware */ +static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) +{ + u32 data_out = 0; + unsigned int i; + unsigned long flags = 0; + + + /* + * Stratus87247: we shouldn't be writing the MDI control + * register until the Ready bit shows True. Also, since + * manipulation of the MDI control registers is a multi-step + * procedure it should be done under lock. 
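+	 *
+	 * EtherCAT note: when the port is claimed as an EtherCAT device
+	 * (nic->ecdev is set) the lock below is skipped. In that mode the
+	 * controller's interrupts are left masked (e100_enable_irq()
+	 * returns early for ecdev) and the device is serviced through
+	 * e100_ec_poll() from the master's context, so the master is
+	 * assumed to be the only caller here.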
+ */ + if (!nic->ecdev) + spin_lock_irqsave(&nic->mdio_lock, flags); + for (i = 100; i; --i) { + if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) + break; + udelay(20); + } + if (unlikely(!i)) { + netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->mdio_lock, flags); + return 0; /* No way to indicate timeout error */ + } + iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); + + for (i = 0; i < 100; i++) { + udelay(20); + if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) + break; + } + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->mdio_lock, flags); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data, data_out); + return (u16)data_out; +} + +/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */ +static u16 mdio_ctrl_phy_82552_v(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + if ((reg == MII_BMCR) && (dir == mdi_write)) { + if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) { + u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, + MII_ADVERTISE); + + /* + * Workaround Si issue where sometimes the part will not + * autoneg to 100Mbps even when advertised. + */ + if (advert & ADVERTISE_100FULL) + data |= BMCR_SPEED100 | BMCR_FULLDPLX; + else if (advert & ADVERTISE_100HALF) + data |= BMCR_SPEED100; + } + } + return mdio_ctrl_hw(nic, addr, dir, reg, data); +} + +/* Fully software-emulated mdio_ctrl() function for cards without + * MII-compliant PHYs. + * For now, this is mainly geared towards 80c24 support; in case of further + * requirements for other types (i82503, ...?) either extend this mechanism + * or split it, whichever is cleaner. + */ +static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + /* might need to allocate a netdev_priv'ed register array eventually + * to be able to record state changes, but for now + * some fully hardcoded register handling ought to be ok I guess. */ + + if (dir == mdi_read) { + switch (reg) { + case MII_BMCR: + /* Auto-negotiation, right? */ + return BMCR_ANENABLE | + BMCR_FULLDPLX; + case MII_BMSR: + return BMSR_LSTATUS /* for mii_link_ok() */ | + BMSR_ANEGCAPABLE | + BMSR_10FULL; + case MII_ADVERTISE: + /* 80c24 is a "combo card" PHY, right? */ + return ADVERTISE_10HALF | + ADVERTISE_10FULL; + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } else { + switch (reg) { + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } +} +static inline int e100_phy_supports_mii(struct nic *nic) +{ + /* for now, just check it by comparing whether we + are using MII software emulation. + */ + return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); +} + +static void e100_get_defaults(struct nic *nic) +{ + struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; + struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; + + /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ + nic->mac = (nic->flags & ich) ? 
mac_82559_D101M : nic->pdev->revision; + if (nic->mac == mac_unknown) + nic->mac = mac_82557_D100_A; + + nic->params.rfds = rfds; + nic->params.cbs = cbs; + + /* Quadwords to DMA into FIFO before starting frame transmit */ + nic->tx_threshold = 0xE0; + + /* no interrupt for every tx completion, delay = 256us if not 557 */ + nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | + ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); + + /* Template for a freshly allocated RFD */ + nic->blank_rfd.command = 0; + nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); + nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); + + /* MII setup */ + nic->mii.phy_id_mask = 0x1F; + nic->mii.reg_num_mask = 0x1F; + nic->mii.dev = nic->netdev; + nic->mii.mdio_read = mdio_read; + nic->mii.mdio_write = mdio_write; +} + +static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct config *config = &cb->u.config; + u8 *c = (u8 *)config; + struct net_device *netdev = nic->netdev; + + cb->command = cpu_to_le16(cb_config); + + memset(config, 0, sizeof(struct config)); + + config->byte_count = 0x16; /* bytes in this struct */ + config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ + config->direct_rx_dma = 0x1; /* reserved */ + config->standard_tcb = 0x1; /* 1=standard, 0=extended */ + config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ + config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ + config->tx_underrun_retry = 0x3; /* # of underrun retries */ + if (e100_phy_supports_mii(nic)) + config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ + config->pad10 = 0x6; + config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ + config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ + config->ifs = 0x6; /* x16 = inter frame spacing */ + config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ + config->pad15_1 = 0x1; + config->pad15_2 = 0x1; + config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ + config->fc_delay_hi = 0x40; /* time delay for fc frame */ + config->tx_padding = 0x1; /* 1=pad short frames */ + config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ + config->pad18 = 0x1; + config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ + config->pad20_1 = 0x1F; + config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ + config->pad21_1 = 0x5; + + config->adaptive_ifs = nic->adaptive_ifs; + config->loopback = nic->loopback; + + if (nic->mii.force_media && nic->mii.full_duplex) + config->full_duplex_force = 0x1; /* 1=force, 0=auto */ + + if (nic->flags & promiscuous || nic->loopback) { + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + config->promiscuous_mode = 0x1; /* 1=on, 0=off */ + } + + if (unlikely(netdev->features & NETIF_F_RXFCS)) + config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ + + if (nic->flags & multicast_all) + config->multicast_all = 0x1; /* 1=accept, 0=no */ + + /* disable WoL when up */ + if (nic->ecdev || + (netif_running(nic->netdev) || !(nic->flags & wol_magic))) + config->magic_packet_disable = 0x1; /* 1=off, 0=on */ + + if (nic->mac >= mac_82558_D101_A4) { + config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ + config->mwi_enable = 0x1; /* 1=enable, 0=disable */ + config->standard_tcb = 0x0; /* 1=standard, 0=extended */ + config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ + if (nic->mac >= mac_82559_D101M) { + config->tno_intr = 0x1; /* TCO stats enable */ + /* Enable TCO in extended config */ + if (nic->mac >= 
mac_82551_10) { + config->byte_count = 0x20; /* extended bytes */ + config->rx_d102_mode = 0x1; /* GMRC for TCO */ + } + } else { + config->standard_stat_counter = 0x0; + } + } + + if (netdev->features & NETIF_F_RXALL) { + config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + } + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", + c + 0); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", + c + 8); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", + c + 16); + return 0; +} + +/************************************************************************* +* CPUSaver parameters +* +* All CPUSaver parameters are 16-bit literals that are part of a +* "move immediate value" instruction. By changing the value of +* the literal in the instruction before the code is loaded, the +* driver can change the algorithm. +* +* INTDELAY - This loads the dead-man timer with its initial value. +* When this timer expires the interrupt is asserted, and the +* timer is reset each time a new packet is received. (see +* BUNDLEMAX below to set the limit on number of chained packets) +* The current default is 0x600 or 1536. Experiments show that +* the value should probably stay within the 0x200 - 0x1000. +* +* BUNDLEMAX - +* This sets the maximum number of frames that will be bundled. In +* some situations, such as the TCP windowing algorithm, it may be +* better to limit the growth of the bundle size than let it go as +* high as it can, because that could cause too much added latency. +* The default is six, because this is the number of packets in the +* default TCP window size. A value of 1 would make CPUSaver indicate +* an interrupt for every frame received. If you do not want to put +* a limit on the bundle size, set this value to xFFFF. +* +* BUNDLESMALL - +* This contains a bit-mask describing the minimum size frame that +* will be bundled. The default masks the lower 7 bits, which means +* that any frame less than 128 bytes in length will not be bundled, +* but will instead immediately generate an interrupt. This does +* not affect the current bundle in any way. Any frame that is 128 +* bytes or large will be bundled normally. This feature is meant +* to provide immediate indication of ACK frames in a TCP environment. +* Customers were seeing poor performance when a machine with CPUSaver +* enabled was sending but not receiving. The delay introduced when +* the ACKs were received was enough to reduce total throughput, because +* the sender would sit idle until the ACK was finally seen. +* +* The current default is 0xFF80, which masks out the lower 7 bits. +* This means that any frame which is x7F (127) bytes or smaller +* will cause an immediate interrupt. Because this value must be a +* bit mask, there are only a few valid values that can be used. To +* turn this feature off, the driver can write the value xFFFF to the +* lower word of this instruction (in the same way that the other +* parameters are used). Likewise, a value of 0xF800 (2047) would +* cause an interrupt to be generated for every frame, because all +* standard Ethernet frames are <= 2047 bytes in length. 
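+*
+* Defaults used below: INTDELAY stays at 0x600 (1536), BUNDLEMAX at 6,
+* and BUNDLESMALL is defined as 1, which e100_setup_ucode() turns into
+* the mask 0xFFFF rather than 0xFF80. In other words, the small-frame
+* early interrupt is switched off and short frames are bundled like any
+* other frame.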
+*************************************************************************/ + +/* if you wish to disable the ucode functionality, while maintaining the + * workarounds it provides, set the following defines to: + * BUNDLESMALL 0 + * BUNDLEMAX 1 + * INTDELAY 1 + */ +#define BUNDLESMALL 1 +#define BUNDLEMAX (u16)6 +#define INTDELAY (u16)1536 /* 0x600 */ + +/* Initialize firmware */ +static const struct firmware *e100_request_firmware(struct nic *nic) +{ + const char *fw_name; + const struct firmware *fw = nic->fw; + u8 timer, bundle, min_size; + int err = 0; + bool required = false; + + /* do not load u-code for ICH devices */ + if (nic->flags & ich) + return NULL; + + /* Search for ucode match against h/w revision + * + * Based on comments in the source code for the FreeBSD fxp + * driver, the FIRMWARE_D102E ucode includes both CPUSaver and + * + * "fixes for bugs in the B-step hardware (specifically, bugs + * with Inline Receive)." + * + * So we must fail if it cannot be loaded. + * + * The other microcode files are only required for the optional + * CPUSaver feature. Nice to have, but no reason to fail. + */ + if (nic->mac == mac_82559_D101M) { + fw_name = FIRMWARE_D101M; + } else if (nic->mac == mac_82559_D101S) { + fw_name = FIRMWARE_D101S; + } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { + fw_name = FIRMWARE_D102E; + required = true; + } else { /* No ucode on other devices */ + return NULL; + } + + /* If the firmware has not previously been loaded, request a pointer + * to it. If it was previously loaded, we are reinitializing the + * adapter, possibly in a resume from hibernate, in which case + * request_firmware() cannot be used. + */ + if (!fw) + err = request_firmware(&fw, fw_name, &nic->pdev->dev); + + if (err) { + if (required) { + netif_err(nic, probe, nic->netdev, + "Failed to load firmware \"%s\": %d\n", + fw_name, err); + return ERR_PTR(err); + } else { + netif_info(nic, probe, nic->netdev, + "CPUSaver disabled. Needs \"%s\": %d\n", + fw_name, err); + return NULL; + } + } + + /* Firmware should be precisely UCODE_SIZE (words) plus three bytes + indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ + if (fw->size != UCODE_SIZE * 4 + 3) { + netif_err(nic, probe, nic->netdev, + "Firmware \"%s\" has wrong size %zu\n", + fw_name, fw->size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || + min_size >= UCODE_SIZE) { + netif_err(nic, probe, nic->netdev, + "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", + fw_name, timer, bundle, min_size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* OK, firmware is validated and ready to use. Save a pointer + * to it in the nic */ + nic->fw = fw; + return fw; +} + +static int e100_setup_ucode(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + const struct firmware *fw = (void *)skb; + u8 timer, bundle, min_size; + + /* It's not a real skb; we just abused the fact that e100_exec_cb + will pass it through to here... 
*/ + cb->skb = NULL; + + /* firmware is stored as little endian already */ + memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + /* Insert user-tunable settings in cb->u.ucode */ + cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); + cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); + cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); + + cb->command = cpu_to_le16(cb_ucode | cb_el); + return 0; +} + +static inline int e100_load_ucode_wait(struct nic *nic) +{ + const struct firmware *fw; + int err = 0, counter = 50; + struct cb *cb = nic->cb_to_clean; + + fw = e100_request_firmware(nic); + /* If it's NULL, then no ucode is required */ + if (IS_ERR_OR_NULL(fw)) + return PTR_ERR_OR_ZERO(fw); + + if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) + netif_err(nic, probe, nic->netdev, + "ucode cmd failed with error %d\n", err); + + /* must restart cuc */ + nic->cuc_cmd = cuc_start; + + /* wait for completion */ + e100_write_flush(nic); + udelay(10); + + /* wait for possibly (ouch) 500ms */ + while (!(cb->status & cpu_to_le16(cb_complete))) { + msleep(10); + if (!--counter) break; + } + + /* ack any interrupts, something could have been set */ + iowrite8(~0, &nic->csr->scb.stat_ack); + + /* if the command failed, or is not OK, notify and return */ + if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { + netif_err(nic, probe, nic->netdev, "ucode load failed\n"); + err = -EPERM; + } + + return err; +} + +static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_iaaddr); + memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); + return 0; +} + +static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_dump); + cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + + offsetof(struct mem, dump_buf)); + return 0; +} + +static int e100_phy_check_without_mii(struct nic *nic) +{ + u8 phy_type; + int without_mii; + + phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; + + switch (phy_type) { + case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ + case I82503: /* Non-MII PHY; UNTESTED! */ + case S80C24: /* Non-MII PHY; tested and working */ + /* paragraph from the FreeBSD driver, "FXP_PHY_80C24": + * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter + * doesn't have a programming interface of any sort. The + * media is sensed automatically based on how the link partner + * is configured. This is, in essence, manual configuration. + */ + netif_info(nic, probe, nic->netdev, + "found MII-less i82503 or 80c24 or other PHY\n"); + + nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; + nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ + + /* these might be needed for certain MII-less cards... 
+ * nic->flags |= ich; + * nic->flags |= ich_10h_workaround; */ + + without_mii = 1; + break; + default: + without_mii = 0; + break; + } + return without_mii; +} + +#define NCONFIG_AUTO_SWITCH 0x0080 +#define MII_NSC_CONG MII_RESV1 +#define NSC_CONG_ENABLE 0x0100 +#define NSC_CONG_TXREADY 0x0400 +#define ADVERTISE_FC_SUPPORTED 0x0400 +static int e100_phy_init(struct nic *nic) +{ + struct net_device *netdev = nic->netdev; + u32 addr; + u16 bmcr, stat, id_lo, id_hi, cong; + + /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ + for (addr = 0; addr < 32; addr++) { + nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) + break; + } + if (addr == 32) { + /* uhoh, no PHY detected: check whether we seem to be some + * weird, rare variant which is *known* to not have any MII. + * But do this AFTER MII checking only, since this does + * lookup of EEPROM values which may easily be unreliable. */ + if (e100_phy_check_without_mii(nic)) + return 0; /* simply return and hope for the best */ + else { + /* for unknown cases log a fatal error */ + netif_err(nic, hw, nic->netdev, + "Failed to locate any known PHY, aborting\n"); + return -EAGAIN; + } + } else + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy_addr = %d\n", nic->mii.phy_id); + + /* Get phy ID */ + id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); + id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); + nic->phy = (u32)id_hi << 16 | (u32)id_lo; + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy ID = 0x%08X\n", nic->phy); + + /* Select the phy and isolate the rest */ + for (addr = 0; addr < 32; addr++) { + if (addr != nic->mii.phy_id) { + mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); + } else if (nic->phy != phy_82552_v) { + bmcr = mdio_read(netdev, addr, MII_BMCR); + mdio_write(netdev, addr, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } + } + /* + * Workaround for 82552: + * Clear the ISOLATE bit on selected phy_id last (mirrored on all + * other phy_id's) using bmcr value from addr discovery loop above. + */ + if (nic->phy == phy_82552_v) + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* Handle National tx phys */ +#define NCS_PHY_MODEL_MASK 0xFFF0FFFF + if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { + /* Disable congestion control */ + cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); + cong |= NSC_CONG_TXREADY; + cong &= ~NSC_CONG_ENABLE; + mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); + } + + if (nic->phy == phy_82552_v) { + u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); + + /* assign special tweaked mdio_ctrl() function */ + nic->mdio_ctrl = mdio_ctrl_phy_82552_v; + + /* Workaround Si not advertising flow-control during autoneg */ + advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); + + /* Reset for the above changes to take effect */ + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + bmcr |= BMCR_RESET; + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); + } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && + (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && + (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { + /* enable/disable MDI/MDI-X auto-switching. 
*/ + mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, + nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); + } + + return 0; +} + +static int e100_hw_init(struct nic *nic) +{ + int err = 0; + + e100_hw_reset(nic); + + netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); + if ((err = e100_self_test(nic))) + return err; + + if ((err = e100_phy_init(nic))) + return err; + if ((err = e100_exec_cmd(nic, cuc_load_base, 0))) + return err; + if ((err = e100_exec_cmd(nic, ruc_load_base, 0))) + return err; + if ((err = e100_load_ucode_wait(nic))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_configure))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_addr, + nic->dma_addr + offsetof(struct mem, stats)))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0))) + return err; + + e100_disable_irq(nic); + + return 0; +} + +static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct net_device *netdev = nic->netdev; + struct netdev_hw_addr *ha; + u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); + + cb->command = cpu_to_le16(cb_multi); + cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == count) + break; + memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, + ETH_ALEN); + } + return 0; +} + +static void e100_set_multicast_list(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "mc_count=%d, flags=0x%04X\n", + netdev_mc_count(netdev), netdev->flags); + + if (netdev->flags & IFF_PROMISC) + nic->flags |= promiscuous; + else + nic->flags &= ~promiscuous; + + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS) + nic->flags |= multicast_all; + else + nic->flags &= ~multicast_all; + + e100_exec_cb(nic, NULL, e100_configure); + e100_exec_cb(nic, NULL, e100_multi); +} + +static void e100_update_stats(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct net_device_stats *ns = &dev->stats; + struct stats *s = &nic->mem->stats; + __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : + (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : + &s->complete; + + /* Device's stats reporting may take several microseconds to + * complete, so we're always waiting for results of the + * previous command. 
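+	 *
+	 * In EtherCAT operation (nic->ecdev set) e100_watchdog_impl()
+	 * returns before reaching this function, so this dump/reset cycle
+	 * is not triggered by the cyclic e100_ec_poll() path.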
*/ + + if (*complete == cpu_to_le32(cuc_dump_reset_complete)) { + *complete = 0; + nic->tx_frames = le32_to_cpu(s->tx_good_frames); + nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); + ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); + ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); + ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); + ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); + ns->collisions += nic->tx_collisions; + ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + + le32_to_cpu(s->tx_lost_crs); + nic->rx_short_frame_errors += + le32_to_cpu(s->rx_short_frame_errors); + ns->rx_length_errors = nic->rx_short_frame_errors + + nic->rx_over_length_errors; + ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); + ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); + ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); + ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + + le32_to_cpu(s->rx_alignment_errors) + + le32_to_cpu(s->rx_short_frame_errors) + + le32_to_cpu(s->rx_cdt_errors); + nic->tx_deferred += le32_to_cpu(s->tx_deferred); + nic->tx_single_collisions += + le32_to_cpu(s->tx_single_collisions); + nic->tx_multiple_collisions += + le32_to_cpu(s->tx_multiple_collisions); + if (nic->mac >= mac_82558_D101_A4) { + nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); + nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); + nic->rx_fc_unsupported += + le32_to_cpu(s->fc_rcv_unsupported); + if (nic->mac >= mac_82559_D101M) { + nic->tx_tco_frames += + le16_to_cpu(s->xmt_tco_frames); + nic->rx_tco_frames += + le16_to_cpu(s->rcv_tco_frames); + } + } + } + + + if (e100_exec_cmd(nic, cuc_dump_reset, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_dump_reset failed\n"); +} + +static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) +{ + /* Adjust inter-frame-spacing (IFS) between two transmits if + * we're getting collisions on a half-duplex connection. */ + + if (duplex == DUPLEX_HALF) { + u32 prev = nic->adaptive_ifs; + u32 min_frames = (speed == SPEED_100) ? 1000 : 100; + + if ((nic->tx_frames / 32 < nic->tx_collisions) && + (nic->tx_frames > min_frames)) { + if (nic->adaptive_ifs < 60) + nic->adaptive_ifs += 5; + } else if (nic->tx_frames < min_frames) { + if (nic->adaptive_ifs >= 5) + nic->adaptive_ifs -= 5; + } + if (nic->adaptive_ifs != prev) + e100_exec_cb(nic, NULL, e100_configure); + } +} + +static void e100_watchdog_impl(struct nic *nic) +{ + struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; + u32 speed; + + if (nic->ecdev) { + ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0); + return; + } + + netif_printk(nic, timer, KERN_DEBUG, nic->netdev, + "right now = %ld\n", jiffies); + + /* mii library handles link maintenance tasks */ + + mii_ethtool_gset(&nic->mii, &cmd); + speed = ethtool_cmd_speed(&cmd); + + if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", + speed == SPEED_100 ? 100 : 10, + cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); + } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Down\n"); + } + + mii_check_link(&nic->mii); + + /* Software generated interrupt to recover from (rare) Rx + * allocation failure. 
+ * Unfortunately have to use a spinlock to not re-enable interrupts + * accidentally, due to hardware that shares a register between the + * interrupt mask bit and the SW Interrupt generation bit */ + spin_lock_irq(&nic->cmd_lock); + iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irq(&nic->cmd_lock); + + e100_update_stats(nic); + e100_adjust_adaptive_ifs(nic, speed, cmd.duplex); + + if (nic->mac <= mac_82557_D100_C) + /* Issue a multicast command to workaround a 557 lock up */ + e100_set_multicast_list(nic->netdev); + + if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) + /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ + nic->flags |= ich_10h_workaround; + else + nic->flags &= ~ich_10h_workaround; +} + +static void e100_watchdog(struct timer_list *t) +{ + struct nic *nic = from_timer(nic, t, watchdog); + + e100_watchdog_impl(nic); + + mod_timer(&nic->watchdog, + round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); +} + +static int e100_xmit_prepare(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + dma_addr_t dma_addr; + cb->command = nic->tx_command; + + dma_addr = pci_map_single(nic->pdev, + skb->data, skb->len, PCI_DMA_TODEVICE); + /* If we can't map the skb, have the upper layer try later */ + if (pci_dma_mapping_error(nic->pdev, dma_addr)) { + dev_kfree_skb_any(skb); + skb = NULL; + return -ENOMEM; + } + + /* + * Use the last 4 bytes of the SKB payload packet as the CRC, used for + * testing, ie sending frames with bad CRC. + */ + if (unlikely(skb->no_fcs)) + cb->command |= cpu_to_le16(cb_tx_nc); + else + cb->command &= ~cpu_to_le16(cb_tx_nc); + + /* interrupt every 16 packets regardless of delay */ + if ((nic->cbs_avail & ~15) == nic->cbs_avail) + cb->command |= cpu_to_le16(cb_i); + cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); + cb->u.tcb.tcb_byte_count = 0; + cb->u.tcb.threshold = nic->tx_threshold; + cb->u.tcb.tbd_count = 1; + cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); + cb->u.tcb.tbd.size = cpu_to_le16(skb->len); + skb_tx_timestamp(skb); + return 0; +} + +static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + if (nic->flags & ich_10h_workaround) { + /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. + Issue a NOP command followed by a 1us delay before + issuing the Tx command. */ + if (e100_exec_cmd(nic, cuc_nop, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_nop failed\n"); + udelay(1); + } + + err = e100_exec_cb(nic, skb, e100_xmit_prepare); + + switch (err) { + case -ENOSPC: + /* We queued the skb, but now we're out of space. */ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "No space for CB\n"); + if (!nic->ecdev) + netif_stop_queue(netdev); + break; + case -ENOMEM: + /* This is a hard error - log it. 
*/ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "Out of Tx resources, returning skb\n"); + if (!nic->ecdev) + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +static int e100_tx_clean(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct cb *cb; + int tx_cleaned = 0; + + if (!nic->ecdev) + spin_lock(&nic->cb_lock); + + /* Clean CBs marked complete */ + for (cb = nic->cb_to_clean; + cb->status & cpu_to_le16(cb_complete); + cb = nic->cb_to_clean = cb->next) { + dma_rmb(); /* read skb after status */ + netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, + "cb[%d]->status = 0x%04X\n", + (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), + cb->status); + + if (likely(cb->skb != NULL)) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += cb->skb->len; + + pci_unmap_single(nic->pdev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + PCI_DMA_TODEVICE); + if (!nic->ecdev) + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + tx_cleaned = 1; + } + cb->status = 0; + nic->cbs_avail++; + } + + if (!nic->ecdev) { + spin_unlock(&nic->cb_lock); + + /* Recover from running out of Tx resources in xmit_frame */ + if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) + netif_wake_queue(nic->netdev); + } + + return tx_cleaned; +} + +static void e100_clean_cbs(struct nic *nic) +{ + if (nic->cbs) { + while (nic->cbs_avail != nic->params.cbs.count) { + struct cb *cb = nic->cb_to_clean; + if (cb->skb) { + pci_unmap_single(nic->pdev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + PCI_DMA_TODEVICE); + if (!nic->ecdev) + dev_kfree_skb(cb->skb); + } + nic->cb_to_clean = nic->cb_to_clean->next; + nic->cbs_avail++; + } + dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); + nic->cbs = NULL; + nic->cbs_avail = 0; + } + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = + nic->cbs; +} + +static int e100_alloc_cbs(struct nic *nic) +{ + struct cb *cb; + unsigned int i, count = nic->params.cbs.count; + + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; + nic->cbs_avail = 0; + + nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL, + &nic->cbs_dma_addr); + if (!nic->cbs) + return -ENOMEM; + + for (cb = nic->cbs, i = 0; i < count; cb++, i++) { + cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; + cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; + + cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); + cb->link = cpu_to_le32(nic->cbs_dma_addr + + ((i+1) % count) * sizeof(struct cb)); + } + + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; + nic->cbs_avail = count; + + return 0; +} + +static inline void e100_start_receiver(struct nic *nic, struct rx *rx) +{ + if (!nic->rxs) return; + if (RU_SUSPENDED != nic->ru_running) return; + + /* handle init time starts */ + if (!rx) rx = nic->rxs; + + /* (Re)start RU if suspended or idle and RFA is non-NULL */ + if (rx->skb) { + e100_exec_cmd(nic, ruc_start, rx->dma_addr); + nic->ru_running = RU_RUNNING; + } +} + +#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) +{ + if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) + return -ENOMEM; + + /* Init, and map the RFD. 
*/ + skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) { + dev_kfree_skb_any(rx->skb); + rx->skb = NULL; + rx->dma_addr = 0; + return -ENOMEM; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + } + + return 0; +} + +static int e100_rx_indicate(struct nic *nic, struct rx *rx, + unsigned int *work_done, unsigned int work_to_do) +{ + struct net_device *dev = nic->netdev; + struct sk_buff *skb = rx->skb; + struct rfd *rfd = (struct rfd *)skb->data; + u16 rfd_status, actual_size; + u16 fcs_pad = 0; + + if (unlikely(work_done && *work_done >= work_to_do)) + return -EAGAIN; + + /* Need to sync before taking a peek at cb_complete bit */ + pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + rfd_status = le16_to_cpu(rfd->status); + + netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, + "status=0x%04X\n", rfd_status); + dma_rmb(); /* read size after status bit */ + + /* If data isn't ready, nothing to indicate */ + if (unlikely(!(rfd_status & cb_complete))) { + /* If the next buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling + * interrupts */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, + sizeof(struct rfd), + PCI_DMA_FROMDEVICE); + return -ENODATA; + } + + /* Get actual data size */ + if (unlikely(dev->features & NETIF_F_RXFCS)) + fcs_pad = 4; + actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; + if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) + actual_size = RFD_BUF_LEN - sizeof(struct rfd); + + /* Get data */ + pci_unmap_single(nic->pdev, rx->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + /* If this buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling interrupts. + * This can happen when the RU sees the size change but also sees + * the el bit set. */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) { + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + } + + if (!nic->ecdev) { + /* Pull off the RFD and put the actual data (minus eth hdr) */ + skb_reserve(skb, sizeof(struct rfd)); + skb_put(skb, actual_size); + skb->protocol = eth_type_trans(skb, nic->netdev); + } + + /* If we are receiving all frames, then don't bother + * checking for errors. + */ + if (unlikely(dev->features & NETIF_F_RXALL)) { + if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) + /* Received oversized frame, but keep it. 
*/ + nic->rx_over_length_errors++; + goto process_skb; + } + + if (unlikely(!(rfd_status & cb_ok))) { + if (!nic->ecdev) { + /* Don't indicate if hardware indicates errors */ + dev_kfree_skb_any(skb); + } + } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) { + /* Don't indicate oversized frames */ + nic->rx_over_length_errors++; + if (!nic->ecdev) { + dev_kfree_skb_any(skb); + } + } else { +process_skb: + dev->stats.rx_packets++; + dev->stats.rx_bytes += (actual_size - fcs_pad); + if (nic->ecdev) { + ecdev_receive(nic->ecdev, + skb->data + sizeof(struct rfd), actual_size - fcs_pad); + + // No need to detect link status as + // long as frames are received: Reset watchdog. + if (ecdev_get_link(nic->ecdev)) { + nic->ec_watchdog_jiffies = jiffies; + } + } else { + netif_receive_skb(skb); + } + if (work_done) + (*work_done)++; + } + + if (nic->ecdev) { + // make receive frame descriptior usable again + memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = dma_map_single(&nic->pdev->dev, skb->data, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { + rx->dma_addr = 0; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *) rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, + sizeof(struct rfd), PCI_DMA_TODEVICE); + } + } else { + rx->skb = NULL; + } + + return 0; +} + +static void e100_rx_clean(struct nic *nic, unsigned int *work_done, + unsigned int work_to_do) +{ + struct rx *rx; + int restart_required = 0, err = 0; + struct rx *old_before_last_rx, *new_before_last_rx; + struct rfd *old_before_last_rfd, *new_before_last_rfd; + + /* Indicate newly arrived packets */ + for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { + err = e100_rx_indicate(nic, rx, work_done, work_to_do); + /* Hit quota or no more to clean */ + if (-EAGAIN == err || -ENODATA == err) + break; + } + + + /* On EAGAIN, hit quota so have more work to do, restart once + * cleanup is complete. + * Else, are we already rnr? then pay attention!!! this ensures that + * the state machine progression never allows a start with a + * partially cleaned list, avoiding a race between hardware + * and rx_to_clean when in NAPI mode */ + if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) + restart_required = 1; + + old_before_last_rx = nic->rx_to_use->prev->prev; + old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; + + if (!nic->ecdev) { + /* Alloc new skbs to refill list */ + for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { + if(unlikely(e100_rx_alloc_skb(nic, rx))) + break; /* Better luck next time (see watchdog) */ + } + } + + new_before_last_rx = nic->rx_to_use->prev->prev; + if (new_before_last_rx != old_before_last_rx) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer + * without worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this + * buffer. + * When the hardware hits the before last buffer with el-bit + * and size of 0, it will RNR interrupt, the RUS will go into + * the No Resources state. It will not complete nor write to + * this buffer. 
*/ + new_before_last_rfd = + (struct rfd *)new_before_last_rx->skb->data; + new_before_last_rfd->size = 0; + new_before_last_rfd->command |= cpu_to_le16(cb_el); + pci_dma_sync_single_for_device(nic->pdev, + new_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + + /* Now that we have a new stopping point, we can clear the old + * stopping point. We must sync twice to get the proper + * ordering on the hardware side of things. */ + old_before_last_rfd->command &= ~cpu_to_le16(cb_el); + pci_dma_sync_single_for_device(nic->pdev, + old_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + + ETH_FCS_LEN); + pci_dma_sync_single_for_device(nic->pdev, + old_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + } + + if (restart_required) { + // ack the rnr? + iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); + e100_start_receiver(nic, nic->rx_to_clean); + if (work_done) + (*work_done)++; + } +} + +static void e100_rx_clean_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + + nic->ru_running = RU_UNINITIALIZED; + + if (nic->rxs) { + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + if (rx->skb) { + pci_unmap_single(nic->pdev, rx->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + dev_kfree_skb(rx->skb); + } + } + kfree(nic->rxs); + nic->rxs = NULL; + } + + nic->rx_to_use = nic->rx_to_clean = NULL; +} + +static int e100_rx_alloc_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + struct rfd *before_last; + + nic->rx_to_use = nic->rx_to_clean = NULL; + nic->ru_running = RU_UNINITIALIZED; + + if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL))) + return -ENOMEM; + + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; + rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; + if (e100_rx_alloc_skb(nic, rx)) { + e100_rx_clean_list(nic); + return -ENOMEM; + } + } + + if (!nic->ecdev) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer without + * worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this buffer. + * When the hardware hits the before last buffer with el-bit and size + * of 0, it will RNR interrupt, the RU will go into the No Resources + * state. It will not complete nor write to this buffer. 
*/ + rx = nic->rxs->prev->prev; + before_last = (struct rfd *)rx->skb->data; + before_last->command |= cpu_to_le16(cb_el); + before_last->size = 0; + pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + } + + nic->rx_to_use = nic->rx_to_clean = nic->rxs; + nic->ru_running = RU_SUSPENDED; + + return 0; +} + +static irqreturn_t e100_intr(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct nic *nic = netdev_priv(netdev); + u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); + + netif_printk(nic, intr, KERN_DEBUG, nic->netdev, + "stat_ack = 0x%02X\n", stat_ack); + + if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ + stat_ack == stat_ack_not_present) /* Hardware is ejected */ + return IRQ_NONE; + + /* Ack interrupt(s) */ + iowrite8(stat_ack, &nic->csr->scb.stat_ack); + + /* We hit Receive No Resource (RNR); restart RU after cleaning */ + if (stat_ack & stat_ack_rnr) + nic->ru_running = RU_SUSPENDED; + + if (!nic->ecdev && likely(napi_schedule_prep(&nic->napi))) { + e100_disable_irq(nic); + __napi_schedule(&nic->napi); + } + + return IRQ_HANDLED; +} + +void e100_ec_poll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_rx_clean(nic, NULL, 100); + e100_tx_clean(nic); + + if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) { + e100_watchdog_impl(nic); + nic->ec_watchdog_jiffies = jiffies; + } +} + + +static int e100_poll(struct napi_struct *napi, int budget) +{ + struct nic *nic = container_of(napi, struct nic, napi); + unsigned int work_done = 0; + + e100_rx_clean(nic, &work_done, budget); + e100_tx_clean(nic); + + /* If budget fully consumed, continue polling */ + if (work_done == budget) + return budget; + + /* only re-enable interrupt if stack agrees polling is really done */ + if (likely(napi_complete_done(napi, work_done))) + e100_enable_irq(nic); + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void e100_netpoll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_disable_irq(nic); + e100_intr(nic->pdev->irq, netdev); + e100_tx_clean(nic); + e100_enable_irq(nic); +} +#endif + +static int e100_set_mac_address(struct net_device *netdev, void *p) +{ + struct nic *nic = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + e100_exec_cb(nic, NULL, e100_setup_iaaddr); + + return 0; +} + +static int e100_asf(struct nic *nic) +{ + /* ASF can be enabled from eeprom */ + return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && + (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && + !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && + ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); +} + +static int e100_up(struct nic *nic) +{ + int err; + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_rx_clean_list; + if ((err = e100_hw_init(nic))) + goto err_clean_cbs; + e100_set_multicast_list(nic->netdev); + e100_start_receiver(nic, NULL); + if (!nic->ecdev) { + mod_timer(&nic->watchdog, jiffies); + } + if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, + nic->netdev->name, nic->netdev))) + goto err_no_irq; + if (!nic->ecdev) { + netif_wake_queue(nic->netdev); + napi_enable(&nic->napi); + /* enable ints _after_ enabling poll, preventing a race between + * disable ints+schedule */ + e100_enable_irq(nic); + } + return 
0; + +err_no_irq: + if (!nic->ecdev) + del_timer_sync(&nic->watchdog); +err_clean_cbs: + e100_clean_cbs(nic); +err_rx_clean_list: + e100_rx_clean_list(nic); + return err; +} + +static void e100_down(struct nic *nic) +{ + if (!nic->ecdev) { + /* wait here for poll to complete */ + napi_disable(&nic->napi); + netif_stop_queue(nic->netdev); + } + e100_hw_reset(nic); + free_irq(nic->pdev->irq, nic->netdev); + if (!nic->ecdev) { + del_timer_sync(&nic->watchdog); + netif_carrier_off(nic->netdev); + } + e100_clean_cbs(nic); + e100_rx_clean_list(nic); +} + +static void e100_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct nic *nic = netdev_priv(netdev); + + /* Reset outside of interrupt context, to avoid request_irq + * in interrupt context */ + schedule_work(&nic->tx_timeout_task); +} + +static void e100_tx_timeout_task(struct work_struct *work) +{ + struct nic *nic = container_of(work, struct nic, tx_timeout_task); + struct net_device *netdev = nic->netdev; + + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); + + rtnl_lock(); + if (netif_running(netdev)) { + e100_down(netdev_priv(netdev)); + e100_up(netdev_priv(netdev)); + } + rtnl_unlock(); +} + +static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) +{ + int err; + struct sk_buff *skb; + + /* Use driver resources to perform internal MAC or PHY + * loopback test. A single packet is prepared and transmitted + * in loopback mode, and the test passes if the received + * packet compares byte-for-byte to the transmitted packet. */ + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_clean_rx; + + /* ICH PHY loopback is broken so do MAC loopback instead */ + if (nic->flags & ich && loopback_mode == lb_phy) + loopback_mode = lb_mac; + + nic->loopback = loopback_mode; + if ((err = e100_hw_init(nic))) + goto err_loopback_none; + + if (loopback_mode == lb_phy) + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, + BMCR_LOOPBACK); + + e100_start_receiver(nic, NULL); + + if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { + err = -ENOMEM; + goto err_loopback_none; + } + skb_put(skb, ETH_DATA_LEN); + memset(skb->data, 0xFF, ETH_DATA_LEN); + e100_xmit_frame(skb, nic->netdev); + + msleep(10); + + pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), + skb->data, ETH_DATA_LEN)) + err = -EAGAIN; + +err_loopback_none: + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); + nic->loopback = lb_none; + e100_clean_cbs(nic); + e100_hw_reset(nic); +err_clean_rx: + e100_rx_clean_list(nic); + return err; +} + +#define MII_LED_CONTROL 0x1B +#define E100_82552_LED_OVERRIDE 0x19 +#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ +#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ + +static int e100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + + mii_ethtool_get_link_ksettings(&nic->mii, cmd); + + return 0; +} + +static int e100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); + err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); + e100_exec_cb(nic, NULL, e100_configure); + + return err; +} + +static void e100_get_drvinfo(struct 
net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct nic *nic = netdev_priv(netdev); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(nic->pdev), + sizeof(info->bus_info)); +} + +#define E100_PHY_REGS 0x1D +static int e100_get_regs_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + /* We know the number of registers, and the size of the dump buffer. + * Calculate the total size in bytes. + */ + return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); +} + +static void e100_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nic *nic = netdev_priv(netdev); + u32 *buff = p; + int i; + + regs->version = (1 << 24) | nic->pdev->revision; + buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | + ioread8(&nic->csr->scb.cmd_lo) << 16 | + ioread16(&nic->csr->scb.status); + for (i = 0; i < E100_PHY_REGS; i++) + /* Note that we read the registers in reverse order. This + * ordering is the ABI apparently used by ethtool and other + * applications. + */ + buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, + E100_PHY_REGS - 1 - i); + memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); + e100_exec_cb(nic, NULL, e100_dump); + msleep(10); + memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, + sizeof(nic->mem->dump_buf)); +} + +static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; + wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0; +} + +static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + + if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || + !device_can_wakeup(&nic->pdev->dev)) + return -EOPNOTSUPP; + + if (wol->wolopts) + nic->flags |= wol_magic; + else + nic->flags &= ~wol_magic; + + device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); + + e100_exec_cb(nic, NULL, e100_configure); + + return 0; +} + +static u32 e100_get_msglevel(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->msg_enable; +} + +static void e100_set_msglevel(struct net_device *netdev, u32 value) +{ + struct nic *nic = netdev_priv(netdev); + nic->msg_enable = value; +} + +static int e100_nway_reset(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_nway_restart(&nic->mii); +} + +static u32 e100_get_link(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_link_ok(&nic->mii); +} + +static int e100_get_eeprom_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->eeprom_wc << 1; +} + +#define E100_EEPROM_MAGIC 0x1234 +static int e100_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + eeprom->magic = E100_EEPROM_MAGIC; + memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); + + return 0; +} + +static int e100_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + if (eeprom->magic != E100_EEPROM_MAGIC) + return -EINVAL; + + memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); + + return e100_eeprom_save(nic, eeprom->offset >> 1, + (eeprom->len >> 1) + 1); +} + +static void e100_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = 
netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + ring->rx_max_pending = rfds->max; + ring->tx_max_pending = cbs->max; + ring->rx_pending = rfds->count; + ring->tx_pending = cbs->count; +} + +static int e100_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(netdev)) + e100_down(nic); + rfds->count = max(ring->rx_pending, rfds->min); + rfds->count = min(rfds->count, rfds->max); + cbs->count = max(ring->tx_pending, cbs->min); + cbs->count = min(cbs->count, cbs->max); + netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", + rfds->count, cbs->count); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} + +static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)", + "Eeprom test (on/offline)", + "Self test (offline)", + "Mac loopback (offline)", + "Phy loopback (offline)", +}; +#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test) + +static void e100_diag_test(struct net_device *netdev, + struct ethtool_test *test, u64 *data) +{ + struct ethtool_cmd cmd; + struct nic *nic = netdev_priv(netdev); + int i; + + memset(data, 0, E100_TEST_LEN * sizeof(u64)); + data[0] = !mii_link_ok(&nic->mii); + data[1] = e100_eeprom_load(nic); + if (test->flags & ETH_TEST_FL_OFFLINE) { + + /* save speed, duplex & autoneg settings */ + mii_ethtool_gset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_down(nic); + data[2] = e100_self_test(nic); + data[3] = e100_loopback_test(nic, lb_mac); + data[4] = e100_loopback_test(nic, lb_phy); + + /* restore speed, duplex & autoneg settings */ + mii_ethtool_sset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_up(nic); + } + for (i = 0; i < E100_TEST_LEN; i++) + test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; + + msleep_interruptible(4 * 1000); +} + +static int e100_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct nic *nic = netdev_priv(netdev); + enum led_state { + led_on = 0x01, + led_off = 0x04, + led_on_559 = 0x05, + led_on_557 = 0x07, + }; + u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : + MII_LED_CONTROL; + u16 leds = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; + + case ETHTOOL_ID_ON: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : + (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; + break; + + case ETHTOOL_ID_OFF: + leds = (nic->phy == phy_82552_v) ? 
E100_82552_LED_OFF : led_off; + break; + + case ETHTOOL_ID_INACTIVE: + break; + } + + mdio_write(netdev, nic->mii.phy_id, led_reg, leds); + return 0; +} + +static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + /* device-specific stats */ + "tx_deferred", "tx_single_collisions", "tx_multi_collisions", + "tx_flow_control_pause", "rx_flow_control_pause", + "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", + "rx_short_frame_errors", "rx_over_length_errors", +}; +#define E100_NET_STATS_LEN 21 +#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) + +static int e100_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E100_TEST_LEN; + case ETH_SS_STATS: + return E100_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e100_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nic *nic = netdev_priv(netdev); + int i; + + for (i = 0; i < E100_NET_STATS_LEN; i++) + data[i] = ((unsigned long *)&netdev->stats)[i]; + + data[i++] = nic->tx_deferred; + data[i++] = nic->tx_single_collisions; + data[i++] = nic->tx_multiple_collisions; + data[i++] = nic->tx_fc_pause; + data[i++] = nic->rx_fc_pause; + data[i++] = nic->rx_fc_unsupported; + data[i++] = nic->tx_tco_frames; + data[i++] = nic->rx_tco_frames; + data[i++] = nic->rx_short_frame_errors; + data[i++] = nic->rx_over_length_errors; +} + +static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test)); + break; + case ETH_SS_STATS: + memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats)); + break; + } +} + +static const struct ethtool_ops e100_ethtool_ops = { + .get_drvinfo = e100_get_drvinfo, + .get_regs_len = e100_get_regs_len, + .get_regs = e100_get_regs, + .get_wol = e100_get_wol, + .set_wol = e100_set_wol, + .get_msglevel = e100_get_msglevel, + .set_msglevel = e100_set_msglevel, + .nway_reset = e100_nway_reset, + .get_link = e100_get_link, + .get_eeprom_len = e100_get_eeprom_len, + .get_eeprom = e100_get_eeprom, + .set_eeprom = e100_set_eeprom, + .get_ringparam = e100_get_ringparam, + .set_ringparam = e100_set_ringparam, + .self_test = e100_diag_test, + .get_strings = e100_get_strings, + .set_phys_id = e100_set_phys_id, + .get_ethtool_stats = e100_get_ethtool_stats, + .get_sset_count = e100_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e100_get_link_ksettings, + .set_link_ksettings = e100_set_link_ksettings, +}; + +static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct nic *nic = netdev_priv(netdev); + + return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); +} + +static int e100_alloc(struct nic *nic) +{ + nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem), + &nic->dma_addr); + return nic->mem ? 
0 : -ENOMEM; +} + +static void e100_free(struct nic *nic) +{ + if (nic->mem) { + pci_free_consistent(nic->pdev, sizeof(struct mem), + nic->mem, nic->dma_addr); + nic->mem = NULL; + } +} + +static int e100_open(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err = 0; + + if (!nic->ecdev) + netif_carrier_off(netdev); + if ((err = e100_up(nic))) + netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); + return err; +} + +static int e100_close(struct net_device *netdev) +{ + e100_down(netdev_priv(netdev)); + return 0; +} + +static int e100_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct nic *nic = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + e100_exec_cb(nic, NULL, e100_configure); + return 1; +} + +static const struct net_device_ops e100_netdev_ops = { + .ndo_open = e100_open, + .ndo_stop = e100_close, + .ndo_start_xmit = e100_xmit_frame, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = e100_set_multicast_list, + .ndo_set_mac_address = e100_set_mac_address, + .ndo_do_ioctl = e100_do_ioctl, + .ndo_tx_timeout = e100_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e100_netpoll, +#endif + .ndo_set_features = e100_set_features, +}; + +static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct nic *nic; + int err; + + if (!(netdev = alloc_etherdev(sizeof(struct nic)))) + return -ENOMEM; + + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; + + netdev->netdev_ops = &e100_netdev_ops; + netdev->ethtool_ops = &e100_ethtool_ops; + netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + nic = netdev_priv(netdev); + netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); + nic->netdev = netdev; + nic->pdev = pdev; + nic->msg_enable = (1 << debug) - 1; + nic->mdio_ctrl = mdio_ctrl_hw; + pci_set_drvdata(pdev, netdev); + + if ((err = pci_enable_device(pdev))) { + netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); + goto err_out_free_dev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + if ((err = pci_request_regions(pdev, DRV_NAME))) { + netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable_pdev; + } + + if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + if (use_io) + netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); + + nic->csr = pci_iomap(pdev, (use_io ? 
1 : 0), sizeof(struct csr)); + if (!nic->csr) { + netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + if (ent->driver_data) + nic->flags |= ich; + else + nic->flags &= ~ich; + + e100_get_defaults(nic); + + /* D100 MAC doesn't allow rx of vlan packets with normal MTU */ + if (nic->mac < mac_82558_D101_A4) + netdev->features |= NETIF_F_VLAN_CHALLENGED; + + /* locks must be initialized before calling hw_reset */ + spin_lock_init(&nic->cb_lock); + spin_lock_init(&nic->cmd_lock); + spin_lock_init(&nic->mdio_lock); + + /* Reset the device before pci_set_master() in case device is in some + * funky state and has an interrupt pending - hint: we don't have the + * interrupt handler registered yet. */ + e100_hw_reset(nic); + + pci_set_master(pdev); + + timer_setup(&nic->watchdog, e100_watchdog, 0); + + INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); + + if ((err = e100_alloc(nic))) { + netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); + goto err_out_iounmap; + } + + if ((err = e100_eeprom_load(nic))) + goto err_out_free; + + e100_phy_init(nic); + + memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!eeprom_bad_csum_allow) { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); + err = -EAGAIN; + goto err_out_free; + } else { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); + } + } + + /* Wol magic packet can be enabled from eeprom */ + if ((nic->mac >= mac_82558_D101_A4) && + (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { + nic->flags |= wol_magic; + device_set_wakeup_enable(&pdev->dev, true); + } + + /* ack any pending wake events, disable PME */ + pci_pme_active(pdev, false); + + // offer device to EtherCAT master module + nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE); + + if (!nic->ecdev) { + strcpy(netdev->name, "eth%d"); + if ((err = register_netdev(netdev))) { + netif_err(nic, probe, nic->netdev, + "Cannot register net device, aborting\n"); + goto err_out_free; + } + } + + nic->cbs_pool = dma_pool_create(netdev->name, + &nic->pdev->dev, + nic->params.cbs.max * sizeof(struct cb), + sizeof(u32), + 0); + if (!nic->cbs_pool) { + netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); + err = -ENOMEM; + goto err_out_pool; + } + netif_info(nic, probe, nic->netdev, + "addr 0x%llx, irq %d, MAC addr %pM\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), + pdev->irq, netdev->dev_addr); + + if (nic->ecdev) { + err = ecdev_open(nic->ecdev); + if (err) { + ecdev_withdraw(nic->ecdev); + goto err_out_free; + } + } + + return 0; + +err_out_pool: + unregister_netdev(netdev); +err_out_free: + e100_free(nic); +err_out_iounmap: + pci_iounmap(pdev, nic->csr); +err_out_free_res: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out_free_dev: + free_netdev(netdev); + return err; +} + +static void e100_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netdev) { + struct nic *nic = netdev_priv(netdev); + if (nic->ecdev) { + ecdev_close(nic->ecdev); + ecdev_withdraw(nic->ecdev); + } else { + unregister_netdev(netdev); + } + + e100_free(nic); + pci_iounmap(pdev, nic->csr); + dma_pool_destroy(nic->cbs_pool); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ +#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ +#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ +static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + e100_down(nic); + + if ((nic->flags & wol_magic) | e100_asf(nic)) { + /* enable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, smartspeed | + E100_82552_REV_ANEG | E100_82552_ANEG_NOW); + } + *enable_wake = true; + } else { + *enable_wake = false; + } + + pci_disable_device(pdev); +} + +static int __e100_power_off(struct pci_dev *pdev, bool wake) +{ + if (wake) + return pci_prepare_to_sleep(pdev); + + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int __maybe_unused e100_suspend(struct device *dev_d) +{ + bool wake; + + __e100_shutdown(to_pci_dev(dev_d), &wake); + + return 0; +} + +static int __maybe_unused e100_resume(struct device *dev_d) +{ + struct net_device *netdev = dev_get_drvdata(dev_d); + struct nic *nic = netdev_priv(netdev); + int err; + + err = pci_enable_device(to_pci_dev(dev_d)); + if (err) { + netdev_err(netdev, "Resume cannot enable PCI device, aborting\n"); + return err; + } + pci_set_master(to_pci_dev(dev_d)); + + /* disable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, + smartspeed & ~(E100_82552_REV_ANEG)); + } + + if (netif_running(netdev)) + e100_up(nic); + + netif_device_attach(netdev); + + return 0; +} + +static void e100_shutdown(struct pci_dev *pdev) +{ + bool wake; + __e100_shutdown(pdev, &wake); + if (system_state == SYSTEM_POWER_OFF) + __e100_power_off(pdev, wake); +} + +/* ------------------ PCI Error Recovery infrastructure -------------- */ +/** + * e100_io_error_detected - called when PCI error is detected. 
+ * @pdev: Pointer to PCI device + * @state: The current pci connection state + */ +static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (nic->ecdev) + return -EBUSY; + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e100_down(nic); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e100_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch. + */ +static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (nic->ecdev) + return -EBUSY; + + if (pci_enable_device(pdev)) { + pr_err("Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + /* Only one device per card can do a reset */ + if (0 != PCI_FUNC(pdev->devfn)) + return PCI_ERS_RESULT_RECOVERED; + e100_hw_reset(nic); + e100_phy_init(nic); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e100_io_resume - resume normal operations + * @pdev: Pointer to PCI device + * + * Resume normal operations after an error recovery + * sequence has been completed. + */ +static void e100_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, PCI_D0, 0); + + if (!nic->ecdev) + netif_device_attach(netdev); + if (nic->ecdev || netif_running(netdev)) { + e100_open(netdev); + if (!nic->ecdev) + mod_timer(&nic->watchdog, jiffies); + } +} + +static const struct pci_error_handlers e100_err_handler = { + .error_detected = e100_io_error_detected, + .slot_reset = e100_io_slot_reset, + .resume = e100_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume); + +static struct pci_driver e100_driver = { + .name = DRV_NAME, + .id_table = e100_id_table, + .probe = e100_probe, + .remove = e100_remove, + + /* Power Management hooks */ + .driver.pm = &e100_pm_ops, + + .shutdown = e100_shutdown, + .err_handler = &e100_err_handler, +}; + +static int __init e100_init_module(void) +{ + if (((1 << debug) - 1) & NETIF_MSG_DRV) { + pr_info("%s\n", DRV_DESCRIPTION); + pr_info("%s\n", DRV_COPYRIGHT); + } + return pci_register_driver(&e100_driver); +} + +static void __exit e100_cleanup_module(void) +{ + printk(KERN_INFO DRV_NAME " cleaning up module...\n"); + pci_unregister_driver(&e100_driver); + printk(KERN_INFO DRV_NAME " module cleaned up.\n"); +} + +module_init(e100_init_module); +module_exit(e100_cleanup_module); diff --git a/devices/e100-5.10-orig.c b/devices/e100-5.10-orig.c new file mode 100644 index 00000000..9295a9a1 --- /dev/null +++ b/devices/e100-5.10-orig.c @@ -0,0 +1,3190 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* + * e100.c: Intel(R) PRO/100 ethernet driver + * + * (Re)written 2003 by scott.feldman@intel.com. Based loosely on + * original e100 driver, but better described as a munging of + * e100, e1000, eepro100, tg3, 8139cp, and other drivers. 
+ * + * References: + * Intel 8255x 10/100 Mbps Ethernet Controller Family, + * Open Source Software Developers Manual, + * http://sourceforge.net/projects/e1000 + * + * + * Theory of Operation + * + * I. General + * + * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet + * controller family, which includes the 82557, 82558, 82559, 82550, + * 82551, and 82562 devices. 82558 and greater controllers + * integrate the Intel 82555 PHY. The controllers are used in + * server and client network interface cards, as well as in + * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx + * configurations. 8255x supports a 32-bit linear addressing + * mode and operates at 33Mhz PCI clock rate. + * + * II. Driver Operation + * + * Memory-mapped mode is used exclusively to access the device's + * shared-memory structure, the Control/Status Registers (CSR). All + * setup, configuration, and control of the device, including queuing + * of Tx, Rx, and configuration commands is through the CSR. + * cmd_lock serializes accesses to the CSR command register. cb_lock + * protects the shared Command Block List (CBL). + * + * 8255x is highly MII-compliant and all access to the PHY go + * through the Management Data Interface (MDI). Consequently, the + * driver leverages the mii.c library shared with other MII-compliant + * devices. + * + * Big- and Little-Endian byte order as well as 32- and 64-bit + * archs are supported. Weak-ordered memory and non-cache-coherent + * archs are supported. + * + * III. Transmit + * + * A Tx skb is mapped and hangs off of a TCB. TCBs are linked + * together in a fixed-size ring (CBL) thus forming the flexible mode + * memory structure. A TCB marked with the suspend-bit indicates + * the end of the ring. The last TCB processed suspends the + * controller, and the controller can be restarted by issue a CU + * resume command to continue from the suspend point, or a CU start + * command to start at a given position in the ring. + * + * Non-Tx commands (config, multicast setup, etc) are linked + * into the CBL ring along with Tx commands. The common structure + * used for both Tx and non-Tx commands is the Command Block (CB). + * + * cb_to_use is the next CB to use for queuing a command; cb_to_clean + * is the next CB to check for completion; cb_to_send is the first + * CB to start on in case of a previous failure to resume. CB clean + * up happens in interrupt context in response to a CU interrupt. + * cbs_avail keeps track of number of free CB resources available. + * + * Hardware padding of short packets to minimum packet size is + * enabled. 82557 pads with 7Eh, while the later controllers pad + * with 00h. + * + * IV. Receive + * + * The Receive Frame Area (RFA) comprises a ring of Receive Frame + * Descriptors (RFD) + data buffer, thus forming the simplified mode + * memory structure. Rx skbs are allocated to contain both the RFD + * and the data buffer, but the RFD is pulled off before the skb is + * indicated. The data buffer is aligned such that encapsulated + * protocol headers are u32-aligned. Since the RFD is part of the + * mapped shared memory, and completion status is contained within + * the RFD, the RFD must be dma_sync'ed to maintain a consistent + * view from software and hardware. + * + * In order to keep updates to the RFD link field from colliding with + * hardware writes to mark packets complete, we use the feature that + * hardware will not write to a size 0 descriptor and mark the previous + * packet as end-of-list (EL). 
After updating the link, we remove EL + * and only then restore the size such that hardware may use the + * previous-to-end RFD. + * + * Under typical operation, the receive unit (RU) is start once, + * and the controller happily fills RFDs as frames arrive. If + * replacement RFDs cannot be allocated, or the RU goes non-active, + * the RU must be restarted. Frame arrival generates an interrupt, + * and Rx indication and re-allocation happen in the same context, + * therefore no locking is required. A software-generated interrupt + * is generated from the watchdog to recover from a failed allocation + * scenario where all Rx resources have been indicated and none re- + * placed. + * + * V. Miscellaneous + * + * VLAN offloading of tagging, stripping and filtering is not + * supported, but driver will accommodate the extra 4-byte VLAN tag + * for processing by upper layers. Tx/Rx Checksum offloading is not + * supported. Tx Scatter/Gather is not supported. Jumbo Frames is + * not supported (hardware limitation). + * + * MagicPacket(tm) WoL support is enabled/disabled via ethtool. + * + * Thanks to JC (jchapman@katalix.com) for helping with + * testing/troubleshooting the development driver. + * + * TODO: + * o several entry points race with dev->close + * o check for tx-no-resources/stop Q races with tx clean/wake Q + * + * FIXES: + * 2005/12/02 - Michael O'Donnell + * - Stratus87247: protect MDI control register manipulations + * 2009/06/01 - Andreas Mohr + * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define DRV_NAME "e100" +#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" +#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" + +#define E100_WATCHDOG_PERIOD (2 * HZ) +#define E100_NAPI_WEIGHT 16 + +#define FIRMWARE_D101M "e100/d101m_ucode.bin" +#define FIRMWARE_D101S "e100/d101s_ucode.bin" +#define FIRMWARE_D102E "e100/d102e_ucode.bin" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE(FIRMWARE_D101M); +MODULE_FIRMWARE(FIRMWARE_D101S); +MODULE_FIRMWARE(FIRMWARE_D102E); + +static int debug = 3; +static int eeprom_bad_csum_allow = 0; +static int use_io = 0; +module_param(debug, int, 0); +module_param(eeprom_bad_csum_allow, int, 0); +module_param(use_io, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); +MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); + +#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ + PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ + PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } +static const struct pci_device_id e100_id_table[] = { + INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1032, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1033, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1034, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1038, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1039, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103A, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103B, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103C, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103D, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103E, 
4), + INTEL_8255X_ETHERNET_DEVICE(0x1050, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1051, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1052, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1053, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1054, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1055, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1056, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1057, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1059, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1064, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1065, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1066, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1067, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1068, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), + INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), + INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), + INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), + INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), + INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), + { 0, } +}; +MODULE_DEVICE_TABLE(pci, e100_id_table); + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 0xFF, +}; + +enum phy { + phy_100a = 0x000003E0, + phy_100c = 0x035002A8, + phy_82555_tx = 0x015002A8, + phy_nsc_tx = 0x5C002000, + phy_82562_et = 0x033002A8, + phy_82562_em = 0x032002A8, + phy_82562_ek = 0x031002A8, + phy_82562_eh = 0x017002A8, + phy_82552_v = 0xd061004d, + phy_unknown = 0xFFFFFFFF, +}; + +/* CSR (Control/Status Registers) */ +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 0x08, + rus_ready = 0x10, + rus_mask = 0x3C, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0x00, + stat_ack_sw_gen = 0x04, + stat_ack_rnr = 0x10, + stat_ack_cu_idle = 0x20, + stat_ack_frame_rx = 0x40, + stat_ack_cu_cmd_done = 0x80, + stat_ack_not_present = 0xFF, + stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), + stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), +}; + +enum scb_cmd_hi { + irq_mask_none = 0x00, + irq_mask_all = 0x01, + irq_sw_gen = 0x02, +}; + +enum scb_cmd_lo { + cuc_nop = 0x00, + ruc_start = 0x01, + ruc_load_base = 0x06, + cuc_start = 0x10, + cuc_resume = 0x20, + cuc_dump_addr = 0x40, + cuc_dump_stats = 0x50, + cuc_load_base = 0x60, + cuc_dump_reset = 0x70, +}; + +enum cuc_dump { + cuc_dump_complete = 0x0000A005, + cuc_dump_reset_complete = 0x0000A007, +}; + +enum port { + software_reset = 0x0000, + selftest = 0x0001, + selective_reset = 0x0002, +}; + +enum eeprom_ctrl_lo { + eesk = 0x01, + eecs = 0x02, + eedi = 0x04, + eedo = 0x08, +}; + +enum mdi_ctrl { + mdi_write = 0x04000000, + mdi_read = 0x08000000, + mdi_ready = 0x10000000, +}; + +enum eeprom_op { + op_write = 0x05, + op_read = 0x06, + op_ewds = 0x10, + op_ewen = 0x13, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 0x03, + eeprom_phy_iface = 0x06, + eeprom_id 
= 0x0A, + eeprom_config_asf = 0x0D, + eeprom_smbus_addr = 0x90, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 0x0080, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB, + I82553C, + I82503, + DP83840, + S80C240, + S80C24, + I82555, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 0x0020, +}; + +enum eeprom_config_asf { + eeprom_asf = 0x8000, + eeprom_gcl = 0x4000, +}; + +enum cb_status { + cb_complete = 0x8000, + cb_ok = 0x2000, +}; + +/* + * cb_command - Command Block flags + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory + */ +enum cb_command { + cb_nop = 0x0000, + cb_iaaddr = 0x0001, + cb_config = 0x0002, + cb_multi = 0x0003, + cb_tx = 0x0004, + cb_ucode = 0x0005, + cb_dump = 0x0006, + cb_tx_sf = 0x0008, + cb_tx_nc = 0x0010, + cb_cid = 0x1f00, + cb_i = 0x2000, + cb_s = 0x4000, + cb_el = 0x8000, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next, *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +#if defined(__BIG_ENDIAN_BITFIELD) +#define X(a,b) b,a +#else +#define X(a,b) a,b +#endif +struct config { +/*0*/ u8 X(byte_count:6, pad0:2); +/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); +/*2*/ u8 adaptive_ifs; +/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), + term_write_cache_line:1), pad3:4); +/*4*/ u8 X(rx_dma_max_count:7, pad4:1); +/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); +/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), + tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), + rx_save_overruns : 1), rx_save_bad_frames : 1); +/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), + pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), + tx_dynamic_tbd:1); +/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); +/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), + link_status_wake:1), arp_wake:1), mcmatch_wake:1); +/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), + loopback:2); +/*11*/ u8 X(linear_priority:3, pad11:5); +/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); +/*13*/ u8 ip_addr_lo; +/*14*/ u8 ip_addr_hi; +/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), + wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), + pad15_2:1), crs_or_cdt:1); +/*16*/ u8 fc_delay_lo; +/*17*/ u8 fc_delay_hi; +/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), + rx_long_ok:1), fc_priority_threshold:3), pad18:1); +/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), + fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), + full_duplex_force:1), full_duplex_pin:1); +/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1); +/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); +/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); + u8 pad_d102[9]; +}; + +#define E100_MAX_MULTICAST_ADDRS 64 +struct multi { + __le16 count; + u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; +}; + +/* Important: keep total struct u32-aligned */ +#define UCODE_SIZE 134 +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[ETH_ALEN]; + __le32 ucode[UCODE_SIZE]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next, 
*prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, lb_mac = 1, lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames, tx_max_collisions, tx_late_collisions, + tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, + tx_multiple_collisions, tx_total_collisions; + __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors, + rx_resource_errors, rx_overrun_errors, rx_cdt_errors, + rx_short_frame_errors; + __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; + __le16 xmt_tco_frames, rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + /* Begin: frequently used values: keep adjacent for cache effect */ + u32 msg_enable ____cacheline_aligned; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data); + + struct rx *rxs ____cacheline_aligned; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + + spinlock_t cb_lock ____cacheline_aligned; + spinlock_t cmd_lock; + struct csr __iomem *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + /* End: frequently used values: keep adjacent for cache effect */ + + enum { + ich = (1 << 0), + promiscuous = (1 << 1), + multicast_all = (1 << 2), + wol_magic = (1 << 3), + ich_10h_workaround = (1 << 4), + } flags ____cacheline_aligned; + + enum mac mac; + enum phy phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + + struct mem *mem; + dma_addr_t dma_addr; + + struct dma_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + + u16 eeprom_wc; + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; +}; + +static inline void e100_write_flush(struct nic *nic) +{ + /* Flush previous PCI writes through intermediate bridges + * by doing a benign read */ + (void)ioread8(&nic->csr->scb.status); +} + +static void e100_enable_irq(struct nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_disable_irq(struct nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_hw_reset(struct nic *nic) +{ + /* Put CU and RU into idle with a selective reset to get + * device off of PCI bus */ + iowrite32(selective_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Now fully reset device */ + iowrite32(software_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Mask off our interrupt line - it's unmasked after reset */ + 
e100_disable_irq(nic); +} + +static int e100_self_test(struct nic *nic) +{ + u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); + + /* Passing the self-test is a pretty good indication + * that the device can DMA to/from host memory */ + + nic->mem->selftest.signature = 0; + nic->mem->selftest.result = 0xFFFFFFFF; + + iowrite32(selftest | dma_addr, &nic->csr->port); + e100_write_flush(nic); + /* Wait 10 msec for self-test to complete */ + msleep(10); + + /* Interrupts are enabled after self-test */ + e100_disable_irq(nic); + + /* Check results of self-test */ + if (nic->mem->selftest.result != 0) { + netif_err(nic, hw, nic->netdev, + "Self-test failed: result=0x%08X\n", + nic->mem->selftest.result); + return -ETIMEDOUT; + } + if (nic->mem->selftest.signature == 0) { + netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data) +{ + u32 cmd_addr_data[3]; + u8 ctrl; + int i, j; + + /* Three cmds: write/erase enable, write data, write/erase disable */ + cmd_addr_data[0] = op_ewen << (addr_len - 2); + cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | + le16_to_cpu(data); + cmd_addr_data[2] = op_ewds << (addr_len - 2); + + /* Bit-bang cmds to write word to eeprom */ + for (j = 0; j < 3; j++) { + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data[j] & (1 << i)) ? + eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } + /* Wait 10 msec for cmd to complete */ + msleep(10); + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } +}; + +/* General technique stolen from the eepro100 driver - very clever */ +static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) +{ + u32 cmd_addr_data; + u16 data = 0; + u8 ctrl; + int i; + + cmd_addr_data = ((op_read << *addr_len) | addr) << 16; + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Bit-bang to read word from eeprom */ + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Eeprom drives a dummy zero to EEDO after receiving + * complete address. Use this to adjust addr_len. */ + ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); + if (!(ctrl & eedo) && i > 16) { + *addr_len -= (i - 16); + i = 17; + } + + data = (data << 1) | (ctrl & eedo ? 
1 : 0); + } + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + return cpu_to_le16(data); +}; + +/* Load entire EEPROM image into driver cache and validate checksum */ +static int e100_eeprom_load(struct nic *nic) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + for (addr = 0; addr < nic->eeprom_wc; addr++) { + nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); + if (addr < nic->eeprom_wc - 1) + checksum += le16_to_cpu(nic->eeprom[addr]); + } + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { + netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); + if (!eeprom_bad_csum_allow) + return -EAGAIN; + } + + return 0; +} + +/* Save (portion of) driver EEPROM cache to device and update checksum */ +static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + if (start + count >= nic->eeprom_wc) + return -EINVAL; + + for (addr = start; addr < start + count; addr++) + e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + for (addr = 0; addr < nic->eeprom_wc - 1; addr++) + checksum += le16_to_cpu(nic->eeprom[addr]); + nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); + e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, + nic->eeprom[nic->eeprom_wc - 1]); + + return 0; +} + +#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ +#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ +static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) +{ + unsigned long flags; + unsigned int i; + int err = 0; + + spin_lock_irqsave(&nic->cmd_lock, flags); + + /* Previous command is accepted when SCB clears */ + for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { + if (likely(!ioread8(&nic->csr->scb.cmd_lo))) + break; + cpu_relax(); + if (unlikely(i > E100_WAIT_SCB_FAST)) + udelay(5); + } + if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) { + err = -EAGAIN; + goto err_unlock; + } + + if (unlikely(cmd != cuc_resume)) + iowrite32(dma_addr, &nic->csr->scb.gen_ptr); + iowrite8(cmd, &nic->csr->scb.cmd_lo); + +err_unlock: + spin_unlock_irqrestore(&nic->cmd_lock, flags); + + return err; +} + +static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, + int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) +{ + struct cb *cb; + unsigned long flags; + int err; + + spin_lock_irqsave(&nic->cb_lock, flags); + + if (unlikely(!nic->cbs_avail)) { + err = -ENOMEM; + goto err_unlock; + } + + cb = nic->cb_to_use; + nic->cb_to_use = cb->next; + nic->cbs_avail--; + cb->skb = skb; + + err = cb_prepare(nic, cb, skb); + if (err) + goto err_unlock; + + if (unlikely(!nic->cbs_avail)) + err = -ENOSPC; + + + /* Order is important otherwise we'll be in a race with h/w: + * set S-bit in current first, then clear S-bit in previous. 
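+	 * That is, if the previous S-bit were cleared first, the CU could
+	 * run straight through the freshly queued block before its own
+	 * suspend bit is visible, and continue into command blocks that
+	 * have not been prepared yet.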
*/ + cb->command |= cpu_to_le16(cb_s); + dma_wmb(); + cb->prev->command &= cpu_to_le16(~cb_s); + + while (nic->cb_to_send != nic->cb_to_use) { + if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, + nic->cb_to_send->dma_addr))) { + /* Ok, here's where things get sticky. It's + * possible that we can't schedule the command + * because the controller is too busy, so + * let's just queue the command and try again + * when another command is scheduled. */ + if (err == -ENOSPC) { + //request a reset + schedule_work(&nic->tx_timeout_task); + } + break; + } else { + nic->cuc_cmd = cuc_resume; + nic->cb_to_send = nic->cb_to_send->next; + } + } + +err_unlock: + spin_unlock_irqrestore(&nic->cb_lock, flags); + + return err; +} + +static int mdio_read(struct net_device *netdev, int addr, int reg) +{ + struct nic *nic = netdev_priv(netdev); + return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); +} + +static void mdio_write(struct net_device *netdev, int addr, int reg, int data) +{ + struct nic *nic = netdev_priv(netdev); + + nic->mdio_ctrl(nic, addr, mdi_write, reg, data); +} + +/* the standard mdio_ctrl() function for usual MII-compliant hardware */ +static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) +{ + u32 data_out = 0; + unsigned int i; + unsigned long flags; + + + /* + * Stratus87247: we shouldn't be writing the MDI control + * register until the Ready bit shows True. Also, since + * manipulation of the MDI control registers is a multi-step + * procedure it should be done under lock. + */ + spin_lock_irqsave(&nic->mdio_lock, flags); + for (i = 100; i; --i) { + if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) + break; + udelay(20); + } + if (unlikely(!i)) { + netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); + spin_unlock_irqrestore(&nic->mdio_lock, flags); + return 0; /* No way to indicate timeout error */ + } + iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); + + for (i = 0; i < 100; i++) { + udelay(20); + if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) + break; + } + spin_unlock_irqrestore(&nic->mdio_lock, flags); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data, data_out); + return (u16)data_out; +} + +/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */ +static u16 mdio_ctrl_phy_82552_v(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + if ((reg == MII_BMCR) && (dir == mdi_write)) { + if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) { + u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, + MII_ADVERTISE); + + /* + * Workaround Si issue where sometimes the part will not + * autoneg to 100Mbps even when advertised. + */ + if (advert & ADVERTISE_100FULL) + data |= BMCR_SPEED100 | BMCR_FULLDPLX; + else if (advert & ADVERTISE_100HALF) + data |= BMCR_SPEED100; + } + } + return mdio_ctrl_hw(nic, addr, dir, reg, data); +} + +/* Fully software-emulated mdio_ctrl() function for cards without + * MII-compliant PHYs. + * For now, this is mainly geared towards 80c24 support; in case of further + * requirements for other types (i82503, ...?) either extend this mechanism + * or split it, whichever is cleaner. 
+ */ +static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + /* might need to allocate a netdev_priv'ed register array eventually + * to be able to record state changes, but for now + * some fully hardcoded register handling ought to be ok I guess. */ + + if (dir == mdi_read) { + switch (reg) { + case MII_BMCR: + /* Auto-negotiation, right? */ + return BMCR_ANENABLE | + BMCR_FULLDPLX; + case MII_BMSR: + return BMSR_LSTATUS /* for mii_link_ok() */ | + BMSR_ANEGCAPABLE | + BMSR_10FULL; + case MII_ADVERTISE: + /* 80c24 is a "combo card" PHY, right? */ + return ADVERTISE_10HALF | + ADVERTISE_10FULL; + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } else { + switch (reg) { + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } +} +static inline int e100_phy_supports_mii(struct nic *nic) +{ + /* for now, just check it by comparing whether we + are using MII software emulation. + */ + return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); +} + +static void e100_get_defaults(struct nic *nic) +{ + struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; + struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; + + /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ + nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision; + if (nic->mac == mac_unknown) + nic->mac = mac_82557_D100_A; + + nic->params.rfds = rfds; + nic->params.cbs = cbs; + + /* Quadwords to DMA into FIFO before starting frame transmit */ + nic->tx_threshold = 0xE0; + + /* no interrupt for every tx completion, delay = 256us if not 557 */ + nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | + ((nic->mac >= mac_82558_D101_A4) ? 
cb_cid : cb_i)); + + /* Template for a freshly allocated RFD */ + nic->blank_rfd.command = 0; + nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); + nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); + + /* MII setup */ + nic->mii.phy_id_mask = 0x1F; + nic->mii.reg_num_mask = 0x1F; + nic->mii.dev = nic->netdev; + nic->mii.mdio_read = mdio_read; + nic->mii.mdio_write = mdio_write; +} + +static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct config *config = &cb->u.config; + u8 *c = (u8 *)config; + struct net_device *netdev = nic->netdev; + + cb->command = cpu_to_le16(cb_config); + + memset(config, 0, sizeof(struct config)); + + config->byte_count = 0x16; /* bytes in this struct */ + config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ + config->direct_rx_dma = 0x1; /* reserved */ + config->standard_tcb = 0x1; /* 1=standard, 0=extended */ + config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ + config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ + config->tx_underrun_retry = 0x3; /* # of underrun retries */ + if (e100_phy_supports_mii(nic)) + config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ + config->pad10 = 0x6; + config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ + config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ + config->ifs = 0x6; /* x16 = inter frame spacing */ + config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ + config->pad15_1 = 0x1; + config->pad15_2 = 0x1; + config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ + config->fc_delay_hi = 0x40; /* time delay for fc frame */ + config->tx_padding = 0x1; /* 1=pad short frames */ + config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ + config->pad18 = 0x1; + config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ + config->pad20_1 = 0x1F; + config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ + config->pad21_1 = 0x5; + + config->adaptive_ifs = nic->adaptive_ifs; + config->loopback = nic->loopback; + + if (nic->mii.force_media && nic->mii.full_duplex) + config->full_duplex_force = 0x1; /* 1=force, 0=auto */ + + if (nic->flags & promiscuous || nic->loopback) { + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + config->promiscuous_mode = 0x1; /* 1=on, 0=off */ + } + + if (unlikely(netdev->features & NETIF_F_RXFCS)) + config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ + + if (nic->flags & multicast_all) + config->multicast_all = 0x1; /* 1=accept, 0=no */ + + /* disable WoL when up */ + if (netif_running(nic->netdev) || !(nic->flags & wol_magic)) + config->magic_packet_disable = 0x1; /* 1=off, 0=on */ + + if (nic->mac >= mac_82558_D101_A4) { + config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ + config->mwi_enable = 0x1; /* 1=enable, 0=disable */ + config->standard_tcb = 0x0; /* 1=standard, 0=extended */ + config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ + if (nic->mac >= mac_82559_D101M) { + config->tno_intr = 0x1; /* TCO stats enable */ + /* Enable TCO in extended config */ + if (nic->mac >= mac_82551_10) { + config->byte_count = 0x20; /* extended bytes */ + config->rx_d102_mode = 0x1; /* GMRC for TCO */ + } + } else { + config->standard_stat_counter = 0x0; + } + } + + if (netdev->features & NETIF_F_RXALL) { + config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + } + + netif_printk(nic, 
hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", + c + 0); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", + c + 8); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", + c + 16); + return 0; +} + +/************************************************************************* +* CPUSaver parameters +* +* All CPUSaver parameters are 16-bit literals that are part of a +* "move immediate value" instruction. By changing the value of +* the literal in the instruction before the code is loaded, the +* driver can change the algorithm. +* +* INTDELAY - This loads the dead-man timer with its initial value. +* When this timer expires the interrupt is asserted, and the +* timer is reset each time a new packet is received. (see +* BUNDLEMAX below to set the limit on number of chained packets) +* The current default is 0x600 or 1536. Experiments show that +* the value should probably stay within the 0x200 - 0x1000. +* +* BUNDLEMAX - +* This sets the maximum number of frames that will be bundled. In +* some situations, such as the TCP windowing algorithm, it may be +* better to limit the growth of the bundle size than let it go as +* high as it can, because that could cause too much added latency. +* The default is six, because this is the number of packets in the +* default TCP window size. A value of 1 would make CPUSaver indicate +* an interrupt for every frame received. If you do not want to put +* a limit on the bundle size, set this value to xFFFF. +* +* BUNDLESMALL - +* This contains a bit-mask describing the minimum size frame that +* will be bundled. The default masks the lower 7 bits, which means +* that any frame less than 128 bytes in length will not be bundled, +* but will instead immediately generate an interrupt. This does +* not affect the current bundle in any way. Any frame that is 128 +* bytes or large will be bundled normally. This feature is meant +* to provide immediate indication of ACK frames in a TCP environment. +* Customers were seeing poor performance when a machine with CPUSaver +* enabled was sending but not receiving. The delay introduced when +* the ACKs were received was enough to reduce total throughput, because +* the sender would sit idle until the ACK was finally seen. +* +* The current default is 0xFF80, which masks out the lower 7 bits. +* This means that any frame which is x7F (127) bytes or smaller +* will cause an immediate interrupt. Because this value must be a +* bit mask, there are only a few valid values that can be used. To +* turn this feature off, the driver can write the value xFFFF to the +* lower word of this instruction (in the same way that the other +* parameters are used). Likewise, a value of 0xF800 (2047) would +* cause an interrupt to be generated for every frame, because all +* standard Ethernet frames are <= 2047 bytes in length. 
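+*
+* As a concrete illustration of the BUNDLESMALL semantics described
+* above (the actual test happens inside the CPUSaver microcode; the
+* helper below is hypothetical and only mirrors that description):
+* with a mask of 0xFF80, a 64-byte ACK gives (64 & 0xFF80) == 0 and
+* is indicated immediately, while a 1514-byte frame gives 0x580 and
+* is bundled until BUNDLEMAX frames have accumulated or the INTDELAY
+* timer expires.
+*
+*	static inline bool cpusaver_would_bundle(u16 frame_len)
+*	{
+*		return (frame_len & 0xFF80) != 0;
+*	}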
+*************************************************************************/ + +/* if you wish to disable the ucode functionality, while maintaining the + * workarounds it provides, set the following defines to: + * BUNDLESMALL 0 + * BUNDLEMAX 1 + * INTDELAY 1 + */ +#define BUNDLESMALL 1 +#define BUNDLEMAX (u16)6 +#define INTDELAY (u16)1536 /* 0x600 */ + +/* Initialize firmware */ +static const struct firmware *e100_request_firmware(struct nic *nic) +{ + const char *fw_name; + const struct firmware *fw = nic->fw; + u8 timer, bundle, min_size; + int err = 0; + bool required = false; + + /* do not load u-code for ICH devices */ + if (nic->flags & ich) + return NULL; + + /* Search for ucode match against h/w revision + * + * Based on comments in the source code for the FreeBSD fxp + * driver, the FIRMWARE_D102E ucode includes both CPUSaver and + * + * "fixes for bugs in the B-step hardware (specifically, bugs + * with Inline Receive)." + * + * So we must fail if it cannot be loaded. + * + * The other microcode files are only required for the optional + * CPUSaver feature. Nice to have, but no reason to fail. + */ + if (nic->mac == mac_82559_D101M) { + fw_name = FIRMWARE_D101M; + } else if (nic->mac == mac_82559_D101S) { + fw_name = FIRMWARE_D101S; + } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { + fw_name = FIRMWARE_D102E; + required = true; + } else { /* No ucode on other devices */ + return NULL; + } + + /* If the firmware has not previously been loaded, request a pointer + * to it. If it was previously loaded, we are reinitializing the + * adapter, possibly in a resume from hibernate, in which case + * request_firmware() cannot be used. + */ + if (!fw) + err = request_firmware(&fw, fw_name, &nic->pdev->dev); + + if (err) { + if (required) { + netif_err(nic, probe, nic->netdev, + "Failed to load firmware \"%s\": %d\n", + fw_name, err); + return ERR_PTR(err); + } else { + netif_info(nic, probe, nic->netdev, + "CPUSaver disabled. Needs \"%s\": %d\n", + fw_name, err); + return NULL; + } + } + + /* Firmware should be precisely UCODE_SIZE (words) plus three bytes + indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ + if (fw->size != UCODE_SIZE * 4 + 3) { + netif_err(nic, probe, nic->netdev, + "Firmware \"%s\" has wrong size %zu\n", + fw_name, fw->size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || + min_size >= UCODE_SIZE) { + netif_err(nic, probe, nic->netdev, + "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", + fw_name, timer, bundle, min_size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* OK, firmware is validated and ready to use. Save a pointer + * to it in the nic */ + nic->fw = fw; + return fw; +} + +static int e100_setup_ucode(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + const struct firmware *fw = (void *)skb; + u8 timer, bundle, min_size; + + /* It's not a real skb; we just abused the fact that e100_exec_cb + will pass it through to here... 
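+	 *
+	 * For reference, the blob handed to us here (and size-checked in
+	 * e100_request_firmware() above) is UCODE_SIZE little-endian 32-bit
+	 * microcode words followed by three offset bytes:
+	 *
+	 *	fw->data: [ word 0 ... word UCODE_SIZE-1 ][timer][bundle][min_size]
+	 *	bytes:      UCODE_SIZE * 4                   1      1        1
+	 *
+	 * Each offset selects the ucode word whose low 16-bit literal is
+	 * patched below; the upper half (the instruction bits) is preserved.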
*/ + cb->skb = NULL; + + /* firmware is stored as little endian already */ + memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + /* Insert user-tunable settings in cb->u.ucode */ + cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); + cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); + cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); + + cb->command = cpu_to_le16(cb_ucode | cb_el); + return 0; +} + +static inline int e100_load_ucode_wait(struct nic *nic) +{ + const struct firmware *fw; + int err = 0, counter = 50; + struct cb *cb = nic->cb_to_clean; + + fw = e100_request_firmware(nic); + /* If it's NULL, then no ucode is required */ + if (IS_ERR_OR_NULL(fw)) + return PTR_ERR_OR_ZERO(fw); + + if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) + netif_err(nic, probe, nic->netdev, + "ucode cmd failed with error %d\n", err); + + /* must restart cuc */ + nic->cuc_cmd = cuc_start; + + /* wait for completion */ + e100_write_flush(nic); + udelay(10); + + /* wait for possibly (ouch) 500ms */ + while (!(cb->status & cpu_to_le16(cb_complete))) { + msleep(10); + if (!--counter) break; + } + + /* ack any interrupts, something could have been set */ + iowrite8(~0, &nic->csr->scb.stat_ack); + + /* if the command failed, or is not OK, notify and return */ + if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { + netif_err(nic, probe, nic->netdev, "ucode load failed\n"); + err = -EPERM; + } + + return err; +} + +static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_iaaddr); + memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); + return 0; +} + +static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_dump); + cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + + offsetof(struct mem, dump_buf)); + return 0; +} + +static int e100_phy_check_without_mii(struct nic *nic) +{ + u8 phy_type; + int without_mii; + + phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; + + switch (phy_type) { + case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ + case I82503: /* Non-MII PHY; UNTESTED! */ + case S80C24: /* Non-MII PHY; tested and working */ + /* paragraph from the FreeBSD driver, "FXP_PHY_80C24": + * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter + * doesn't have a programming interface of any sort. The + * media is sensed automatically based on how the link partner + * is configured. This is, in essence, manual configuration. + */ + netif_info(nic, probe, nic->netdev, + "found MII-less i82503 or 80c24 or other PHY\n"); + + nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; + nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ + + /* these might be needed for certain MII-less cards... 
+ * nic->flags |= ich; + * nic->flags |= ich_10h_workaround; */ + + without_mii = 1; + break; + default: + without_mii = 0; + break; + } + return without_mii; +} + +#define NCONFIG_AUTO_SWITCH 0x0080 +#define MII_NSC_CONG MII_RESV1 +#define NSC_CONG_ENABLE 0x0100 +#define NSC_CONG_TXREADY 0x0400 +#define ADVERTISE_FC_SUPPORTED 0x0400 +static int e100_phy_init(struct nic *nic) +{ + struct net_device *netdev = nic->netdev; + u32 addr; + u16 bmcr, stat, id_lo, id_hi, cong; + + /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ + for (addr = 0; addr < 32; addr++) { + nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) + break; + } + if (addr == 32) { + /* uhoh, no PHY detected: check whether we seem to be some + * weird, rare variant which is *known* to not have any MII. + * But do this AFTER MII checking only, since this does + * lookup of EEPROM values which may easily be unreliable. */ + if (e100_phy_check_without_mii(nic)) + return 0; /* simply return and hope for the best */ + else { + /* for unknown cases log a fatal error */ + netif_err(nic, hw, nic->netdev, + "Failed to locate any known PHY, aborting\n"); + return -EAGAIN; + } + } else + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy_addr = %d\n", nic->mii.phy_id); + + /* Get phy ID */ + id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); + id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); + nic->phy = (u32)id_hi << 16 | (u32)id_lo; + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy ID = 0x%08X\n", nic->phy); + + /* Select the phy and isolate the rest */ + for (addr = 0; addr < 32; addr++) { + if (addr != nic->mii.phy_id) { + mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); + } else if (nic->phy != phy_82552_v) { + bmcr = mdio_read(netdev, addr, MII_BMCR); + mdio_write(netdev, addr, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } + } + /* + * Workaround for 82552: + * Clear the ISOLATE bit on selected phy_id last (mirrored on all + * other phy_id's) using bmcr value from addr discovery loop above. + */ + if (nic->phy == phy_82552_v) + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* Handle National tx phys */ +#define NCS_PHY_MODEL_MASK 0xFFF0FFFF + if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { + /* Disable congestion control */ + cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); + cong |= NSC_CONG_TXREADY; + cong &= ~NSC_CONG_ENABLE; + mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); + } + + if (nic->phy == phy_82552_v) { + u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); + + /* assign special tweaked mdio_ctrl() function */ + nic->mdio_ctrl = mdio_ctrl_phy_82552_v; + + /* Workaround Si not advertising flow-control during autoneg */ + advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); + + /* Reset for the above changes to take effect */ + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + bmcr |= BMCR_RESET; + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); + } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && + (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && + (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { + /* enable/disable MDI/MDI-X auto-switching. 
*/ + mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, + nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); + } + + return 0; +} + +static int e100_hw_init(struct nic *nic) +{ + int err = 0; + + e100_hw_reset(nic); + + netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); + if ((err = e100_self_test(nic))) + return err; + + if ((err = e100_phy_init(nic))) + return err; + if ((err = e100_exec_cmd(nic, cuc_load_base, 0))) + return err; + if ((err = e100_exec_cmd(nic, ruc_load_base, 0))) + return err; + if ((err = e100_load_ucode_wait(nic))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_configure))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_addr, + nic->dma_addr + offsetof(struct mem, stats)))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0))) + return err; + + e100_disable_irq(nic); + + return 0; +} + +static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct net_device *netdev = nic->netdev; + struct netdev_hw_addr *ha; + u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); + + cb->command = cpu_to_le16(cb_multi); + cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == count) + break; + memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, + ETH_ALEN); + } + return 0; +} + +static void e100_set_multicast_list(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "mc_count=%d, flags=0x%04X\n", + netdev_mc_count(netdev), netdev->flags); + + if (netdev->flags & IFF_PROMISC) + nic->flags |= promiscuous; + else + nic->flags &= ~promiscuous; + + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS) + nic->flags |= multicast_all; + else + nic->flags &= ~multicast_all; + + e100_exec_cb(nic, NULL, e100_configure); + e100_exec_cb(nic, NULL, e100_multi); +} + +static void e100_update_stats(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct net_device_stats *ns = &dev->stats; + struct stats *s = &nic->mem->stats; + __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : + (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : + &s->complete; + + /* Device's stats reporting may take several microseconds to + * complete, so we're always waiting for results of the + * previous command. 
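+	 *
+	 * The location of the completion marker depends on the MAC
+	 * generation (see the pointer selection above): older parts dump
+	 * fewer counters, so the completion code lands earlier in
+	 * struct stats, roughly:
+	 *
+	 *	mac < 82558 D101 A4   marker at &s->fc_xmt_pause
+	 *	mac < 82559 D101M     marker at &s->xmt_tco_frames
+	 *	otherwise             marker at &s->complete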
*/ + + if (*complete == cpu_to_le32(cuc_dump_reset_complete)) { + *complete = 0; + nic->tx_frames = le32_to_cpu(s->tx_good_frames); + nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); + ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); + ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); + ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); + ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); + ns->collisions += nic->tx_collisions; + ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + + le32_to_cpu(s->tx_lost_crs); + nic->rx_short_frame_errors += + le32_to_cpu(s->rx_short_frame_errors); + ns->rx_length_errors = nic->rx_short_frame_errors + + nic->rx_over_length_errors; + ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); + ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); + ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); + ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + + le32_to_cpu(s->rx_alignment_errors) + + le32_to_cpu(s->rx_short_frame_errors) + + le32_to_cpu(s->rx_cdt_errors); + nic->tx_deferred += le32_to_cpu(s->tx_deferred); + nic->tx_single_collisions += + le32_to_cpu(s->tx_single_collisions); + nic->tx_multiple_collisions += + le32_to_cpu(s->tx_multiple_collisions); + if (nic->mac >= mac_82558_D101_A4) { + nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); + nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); + nic->rx_fc_unsupported += + le32_to_cpu(s->fc_rcv_unsupported); + if (nic->mac >= mac_82559_D101M) { + nic->tx_tco_frames += + le16_to_cpu(s->xmt_tco_frames); + nic->rx_tco_frames += + le16_to_cpu(s->rcv_tco_frames); + } + } + } + + + if (e100_exec_cmd(nic, cuc_dump_reset, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_dump_reset failed\n"); +} + +static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) +{ + /* Adjust inter-frame-spacing (IFS) between two transmits if + * we're getting collisions on a half-duplex connection. */ + + if (duplex == DUPLEX_HALF) { + u32 prev = nic->adaptive_ifs; + u32 min_frames = (speed == SPEED_100) ? 1000 : 100; + + if ((nic->tx_frames / 32 < nic->tx_collisions) && + (nic->tx_frames > min_frames)) { + if (nic->adaptive_ifs < 60) + nic->adaptive_ifs += 5; + } else if (nic->tx_frames < min_frames) { + if (nic->adaptive_ifs >= 5) + nic->adaptive_ifs -= 5; + } + if (nic->adaptive_ifs != prev) + e100_exec_cb(nic, NULL, e100_configure); + } +} + +static void e100_watchdog(struct timer_list *t) +{ + struct nic *nic = from_timer(nic, t, watchdog); + struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; + u32 speed; + + netif_printk(nic, timer, KERN_DEBUG, nic->netdev, + "right now = %ld\n", jiffies); + + /* mii library handles link maintenance tasks */ + + mii_ethtool_gset(&nic->mii, &cmd); + speed = ethtool_cmd_speed(&cmd); + + if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", + speed == SPEED_100 ? 100 : 10, + cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); + } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Down\n"); + } + + mii_check_link(&nic->mii); + + /* Software generated interrupt to recover from (rare) Rx + * allocation failure. 
+ * Unfortunately have to use a spinlock to not re-enable interrupts + * accidentally, due to hardware that shares a register between the + * interrupt mask bit and the SW Interrupt generation bit */ + spin_lock_irq(&nic->cmd_lock); + iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irq(&nic->cmd_lock); + + e100_update_stats(nic); + e100_adjust_adaptive_ifs(nic, speed, cmd.duplex); + + if (nic->mac <= mac_82557_D100_C) + /* Issue a multicast command to workaround a 557 lock up */ + e100_set_multicast_list(nic->netdev); + + if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) + /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ + nic->flags |= ich_10h_workaround; + else + nic->flags &= ~ich_10h_workaround; + + mod_timer(&nic->watchdog, + round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); +} + +static int e100_xmit_prepare(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + dma_addr_t dma_addr; + cb->command = nic->tx_command; + + dma_addr = pci_map_single(nic->pdev, + skb->data, skb->len, PCI_DMA_TODEVICE); + /* If we can't map the skb, have the upper layer try later */ + if (pci_dma_mapping_error(nic->pdev, dma_addr)) { + dev_kfree_skb_any(skb); + skb = NULL; + return -ENOMEM; + } + + /* + * Use the last 4 bytes of the SKB payload packet as the CRC, used for + * testing, ie sending frames with bad CRC. + */ + if (unlikely(skb->no_fcs)) + cb->command |= cpu_to_le16(cb_tx_nc); + else + cb->command &= ~cpu_to_le16(cb_tx_nc); + + /* interrupt every 16 packets regardless of delay */ + if ((nic->cbs_avail & ~15) == nic->cbs_avail) + cb->command |= cpu_to_le16(cb_i); + cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); + cb->u.tcb.tcb_byte_count = 0; + cb->u.tcb.threshold = nic->tx_threshold; + cb->u.tcb.tbd_count = 1; + cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); + cb->u.tcb.tbd.size = cpu_to_le16(skb->len); + skb_tx_timestamp(skb); + return 0; +} + +static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + if (nic->flags & ich_10h_workaround) { + /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. + Issue a NOP command followed by a 1us delay before + issuing the Tx command. */ + if (e100_exec_cmd(nic, cuc_nop, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_nop failed\n"); + udelay(1); + } + + err = e100_exec_cb(nic, skb, e100_xmit_prepare); + + switch (err) { + case -ENOSPC: + /* We queued the skb, but now we're out of space. */ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "No space for CB\n"); + netif_stop_queue(netdev); + break; + case -ENOMEM: + /* This is a hard error - log it. 
*/ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "Out of Tx resources, returning skb\n"); + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +static int e100_tx_clean(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct cb *cb; + int tx_cleaned = 0; + + spin_lock(&nic->cb_lock); + + /* Clean CBs marked complete */ + for (cb = nic->cb_to_clean; + cb->status & cpu_to_le16(cb_complete); + cb = nic->cb_to_clean = cb->next) { + dma_rmb(); /* read skb after status */ + netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, + "cb[%d]->status = 0x%04X\n", + (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), + cb->status); + + if (likely(cb->skb != NULL)) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += cb->skb->len; + + pci_unmap_single(nic->pdev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + PCI_DMA_TODEVICE); + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + tx_cleaned = 1; + } + cb->status = 0; + nic->cbs_avail++; + } + + spin_unlock(&nic->cb_lock); + + /* Recover from running out of Tx resources in xmit_frame */ + if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) + netif_wake_queue(nic->netdev); + + return tx_cleaned; +} + +static void e100_clean_cbs(struct nic *nic) +{ + if (nic->cbs) { + while (nic->cbs_avail != nic->params.cbs.count) { + struct cb *cb = nic->cb_to_clean; + if (cb->skb) { + pci_unmap_single(nic->pdev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + PCI_DMA_TODEVICE); + dev_kfree_skb(cb->skb); + } + nic->cb_to_clean = nic->cb_to_clean->next; + nic->cbs_avail++; + } + dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); + nic->cbs = NULL; + nic->cbs_avail = 0; + } + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = + nic->cbs; +} + +static int e100_alloc_cbs(struct nic *nic) +{ + struct cb *cb; + unsigned int i, count = nic->params.cbs.count; + + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; + nic->cbs_avail = 0; + + nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL, + &nic->cbs_dma_addr); + if (!nic->cbs) + return -ENOMEM; + + for (cb = nic->cbs, i = 0; i < count; cb++, i++) { + cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; + cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; + + cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); + cb->link = cpu_to_le32(nic->cbs_dma_addr + + ((i+1) % count) * sizeof(struct cb)); + } + + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; + nic->cbs_avail = count; + + return 0; +} + +static inline void e100_start_receiver(struct nic *nic, struct rx *rx) +{ + if (!nic->rxs) return; + if (RU_SUSPENDED != nic->ru_running) return; + + /* handle init time starts */ + if (!rx) rx = nic->rxs; + + /* (Re)start RU if suspended or idle and RFA is non-NULL */ + if (rx->skb) { + e100_exec_cmd(nic, ruc_start, rx->dma_addr); + nic->ru_running = RU_RUNNING; + } +} + +#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) +{ + if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) + return -ENOMEM; + + /* Init, and map the RFD. 
*/ + skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) { + dev_kfree_skb_any(rx->skb); + rx->skb = NULL; + rx->dma_addr = 0; + return -ENOMEM; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + } + + return 0; +} + +static int e100_rx_indicate(struct nic *nic, struct rx *rx, + unsigned int *work_done, unsigned int work_to_do) +{ + struct net_device *dev = nic->netdev; + struct sk_buff *skb = rx->skb; + struct rfd *rfd = (struct rfd *)skb->data; + u16 rfd_status, actual_size; + u16 fcs_pad = 0; + + if (unlikely(work_done && *work_done >= work_to_do)) + return -EAGAIN; + + /* Need to sync before taking a peek at cb_complete bit */ + pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + rfd_status = le16_to_cpu(rfd->status); + + netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, + "status=0x%04X\n", rfd_status); + dma_rmb(); /* read size after status bit */ + + /* If data isn't ready, nothing to indicate */ + if (unlikely(!(rfd_status & cb_complete))) { + /* If the next buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling + * interrupts */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, + sizeof(struct rfd), + PCI_DMA_FROMDEVICE); + return -ENODATA; + } + + /* Get actual data size */ + if (unlikely(dev->features & NETIF_F_RXFCS)) + fcs_pad = 4; + actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; + if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) + actual_size = RFD_BUF_LEN - sizeof(struct rfd); + + /* Get data */ + pci_unmap_single(nic->pdev, rx->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + /* If this buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling interrupts. + * This can happen when the RU sees the size change but also sees + * the el bit set. */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) { + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + } + + /* Pull off the RFD and put the actual data (minus eth hdr) */ + skb_reserve(skb, sizeof(struct rfd)); + skb_put(skb, actual_size); + skb->protocol = eth_type_trans(skb, nic->netdev); + + /* If we are receiving all frames, then don't bother + * checking for errors. + */ + if (unlikely(dev->features & NETIF_F_RXALL)) { + if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) + /* Received oversized frame, but keep it. 
*/ + nic->rx_over_length_errors++; + goto process_skb; + } + + if (unlikely(!(rfd_status & cb_ok))) { + /* Don't indicate if hardware indicates errors */ + dev_kfree_skb_any(skb); + } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) { + /* Don't indicate oversized frames */ + nic->rx_over_length_errors++; + dev_kfree_skb_any(skb); + } else { +process_skb: + dev->stats.rx_packets++; + dev->stats.rx_bytes += (actual_size - fcs_pad); + netif_receive_skb(skb); + if (work_done) + (*work_done)++; + } + + rx->skb = NULL; + + return 0; +} + +static void e100_rx_clean(struct nic *nic, unsigned int *work_done, + unsigned int work_to_do) +{ + struct rx *rx; + int restart_required = 0, err = 0; + struct rx *old_before_last_rx, *new_before_last_rx; + struct rfd *old_before_last_rfd, *new_before_last_rfd; + + /* Indicate newly arrived packets */ + for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { + err = e100_rx_indicate(nic, rx, work_done, work_to_do); + /* Hit quota or no more to clean */ + if (-EAGAIN == err || -ENODATA == err) + break; + } + + + /* On EAGAIN, hit quota so have more work to do, restart once + * cleanup is complete. + * Else, are we already rnr? then pay attention!!! this ensures that + * the state machine progression never allows a start with a + * partially cleaned list, avoiding a race between hardware + * and rx_to_clean when in NAPI mode */ + if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) + restart_required = 1; + + old_before_last_rx = nic->rx_to_use->prev->prev; + old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; + + /* Alloc new skbs to refill list */ + for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { + if (unlikely(e100_rx_alloc_skb(nic, rx))) + break; /* Better luck next time (see watchdog) */ + } + + new_before_last_rx = nic->rx_to_use->prev->prev; + if (new_before_last_rx != old_before_last_rx) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer + * without worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this + * buffer. + * When the hardware hits the before last buffer with el-bit + * and size of 0, it will RNR interrupt, the RUS will go into + * the No Resources state. It will not complete nor write to + * this buffer. */ + new_before_last_rfd = + (struct rfd *)new_before_last_rx->skb->data; + new_before_last_rfd->size = 0; + new_before_last_rfd->command |= cpu_to_le16(cb_el); + pci_dma_sync_single_for_device(nic->pdev, + new_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + + /* Now that we have a new stopping point, we can clear the old + * stopping point. We must sync twice to get the proper + * ordering on the hardware side of things. */ + old_before_last_rfd->command &= ~cpu_to_le16(cb_el); + pci_dma_sync_single_for_device(nic->pdev, + old_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + + ETH_FCS_LEN); + pci_dma_sync_single_for_device(nic->pdev, + old_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + } + + if (restart_required) { + // ack the rnr? 
+ iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); + e100_start_receiver(nic, nic->rx_to_clean); + if (work_done) + (*work_done)++; + } +} + +static void e100_rx_clean_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + + nic->ru_running = RU_UNINITIALIZED; + + if (nic->rxs) { + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + if (rx->skb) { + pci_unmap_single(nic->pdev, rx->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + dev_kfree_skb(rx->skb); + } + } + kfree(nic->rxs); + nic->rxs = NULL; + } + + nic->rx_to_use = nic->rx_to_clean = NULL; +} + +static int e100_rx_alloc_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + struct rfd *before_last; + + nic->rx_to_use = nic->rx_to_clean = NULL; + nic->ru_running = RU_UNINITIALIZED; + + if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL))) + return -ENOMEM; + + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; + rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; + if (e100_rx_alloc_skb(nic, rx)) { + e100_rx_clean_list(nic); + return -ENOMEM; + } + } + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer without + * worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this buffer. + * When the hardware hits the before last buffer with el-bit and size + * of 0, it will RNR interrupt, the RU will go into the No Resources + * state. It will not complete nor write to this buffer. */ + rx = nic->rxs->prev->prev; + before_last = (struct rfd *)rx->skb->data; + before_last->command |= cpu_to_le16(cb_el); + before_last->size = 0; + pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + + nic->rx_to_use = nic->rx_to_clean = nic->rxs; + nic->ru_running = RU_SUSPENDED; + + return 0; +} + +static irqreturn_t e100_intr(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct nic *nic = netdev_priv(netdev); + u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); + + netif_printk(nic, intr, KERN_DEBUG, nic->netdev, + "stat_ack = 0x%02X\n", stat_ack); + + if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ + stat_ack == stat_ack_not_present) /* Hardware is ejected */ + return IRQ_NONE; + + /* Ack interrupt(s) */ + iowrite8(stat_ack, &nic->csr->scb.stat_ack); + + /* We hit Receive No Resource (RNR); restart RU after cleaning */ + if (stat_ack & stat_ack_rnr) + nic->ru_running = RU_SUSPENDED; + + if (likely(napi_schedule_prep(&nic->napi))) { + e100_disable_irq(nic); + __napi_schedule(&nic->napi); + } + + return IRQ_HANDLED; +} + +static int e100_poll(struct napi_struct *napi, int budget) +{ + struct nic *nic = container_of(napi, struct nic, napi); + unsigned int work_done = 0; + + e100_rx_clean(nic, &work_done, budget); + e100_tx_clean(nic); + + /* If budget fully consumed, continue polling */ + if (work_done == budget) + return budget; + + /* only re-enable interrupt if stack agrees polling is really done */ + if (likely(napi_complete_done(napi, work_done))) + e100_enable_irq(nic); + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void e100_netpoll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_disable_irq(nic); + e100_intr(nic->pdev->irq, netdev); + e100_tx_clean(nic); + e100_enable_irq(nic); +} +#endif + +static int e100_set_mac_address(struct net_device *netdev, void *p) +{ + 
struct nic *nic = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + e100_exec_cb(nic, NULL, e100_setup_iaaddr); + + return 0; +} + +static int e100_asf(struct nic *nic) +{ + /* ASF can be enabled from eeprom */ + return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && + (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && + !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && + ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); +} + +static int e100_up(struct nic *nic) +{ + int err; + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_rx_clean_list; + if ((err = e100_hw_init(nic))) + goto err_clean_cbs; + e100_set_multicast_list(nic->netdev); + e100_start_receiver(nic, NULL); + mod_timer(&nic->watchdog, jiffies); + if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, + nic->netdev->name, nic->netdev))) + goto err_no_irq; + netif_wake_queue(nic->netdev); + napi_enable(&nic->napi); + /* enable ints _after_ enabling poll, preventing a race between + * disable ints+schedule */ + e100_enable_irq(nic); + return 0; + +err_no_irq: + del_timer_sync(&nic->watchdog); +err_clean_cbs: + e100_clean_cbs(nic); +err_rx_clean_list: + e100_rx_clean_list(nic); + return err; +} + +static void e100_down(struct nic *nic) +{ + /* wait here for poll to complete */ + napi_disable(&nic->napi); + netif_stop_queue(nic->netdev); + e100_hw_reset(nic); + free_irq(nic->pdev->irq, nic->netdev); + del_timer_sync(&nic->watchdog); + netif_carrier_off(nic->netdev); + e100_clean_cbs(nic); + e100_rx_clean_list(nic); +} + +static void e100_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct nic *nic = netdev_priv(netdev); + + /* Reset outside of interrupt context, to avoid request_irq + * in interrupt context */ + schedule_work(&nic->tx_timeout_task); +} + +static void e100_tx_timeout_task(struct work_struct *work) +{ + struct nic *nic = container_of(work, struct nic, tx_timeout_task); + struct net_device *netdev = nic->netdev; + + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); + + rtnl_lock(); + if (netif_running(netdev)) { + e100_down(netdev_priv(netdev)); + e100_up(netdev_priv(netdev)); + } + rtnl_unlock(); +} + +static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) +{ + int err; + struct sk_buff *skb; + + /* Use driver resources to perform internal MAC or PHY + * loopback test. A single packet is prepared and transmitted + * in loopback mode, and the test passes if the received + * packet compares byte-for-byte to the transmitted packet. 
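+	 *
+	 * Note that the receive side of the comparison below starts at
+	 * skb->data + sizeof(struct rfd): in the simplified memory mode
+	 * used here, the chip stores the frame data directly behind the
+	 * RFD in the same buffer (see e100_rx_alloc_skb()).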
*/ + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_clean_rx; + + /* ICH PHY loopback is broken so do MAC loopback instead */ + if (nic->flags & ich && loopback_mode == lb_phy) + loopback_mode = lb_mac; + + nic->loopback = loopback_mode; + if ((err = e100_hw_init(nic))) + goto err_loopback_none; + + if (loopback_mode == lb_phy) + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, + BMCR_LOOPBACK); + + e100_start_receiver(nic, NULL); + + if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { + err = -ENOMEM; + goto err_loopback_none; + } + skb_put(skb, ETH_DATA_LEN); + memset(skb->data, 0xFF, ETH_DATA_LEN); + e100_xmit_frame(skb, nic->netdev); + + msleep(10); + + pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), + skb->data, ETH_DATA_LEN)) + err = -EAGAIN; + +err_loopback_none: + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); + nic->loopback = lb_none; + e100_clean_cbs(nic); + e100_hw_reset(nic); +err_clean_rx: + e100_rx_clean_list(nic); + return err; +} + +#define MII_LED_CONTROL 0x1B +#define E100_82552_LED_OVERRIDE 0x19 +#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ +#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ + +static int e100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + + mii_ethtool_get_link_ksettings(&nic->mii, cmd); + + return 0; +} + +static int e100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); + err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); + e100_exec_cb(nic, NULL, e100_configure); + + return err; +} + +static void e100_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct nic *nic = netdev_priv(netdev); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(nic->pdev), + sizeof(info->bus_info)); +} + +#define E100_PHY_REGS 0x1D +static int e100_get_regs_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + /* We know the number of registers, and the size of the dump buffer. + * Calculate the total size in bytes. + */ + return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); +} + +static void e100_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nic *nic = netdev_priv(netdev); + u32 *buff = p; + int i; + + regs->version = (1 << 24) | nic->pdev->revision; + buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | + ioread8(&nic->csr->scb.cmd_lo) << 16 | + ioread16(&nic->csr->scb.status); + for (i = 0; i < E100_PHY_REGS; i++) + /* Note that we read the registers in reverse order. This + * ordering is the ABI apparently used by ethtool and other + * applications. + */ + buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, + E100_PHY_REGS - 1 - i); + memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); + e100_exec_cb(nic, NULL, e100_dump); + msleep(10); + memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, + sizeof(nic->mem->dump_buf)); +} + +static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + wol->supported = (nic->mac >= mac_82558_D101_A4) ? 
WAKE_MAGIC : 0; + wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0; +} + +static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + + if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || + !device_can_wakeup(&nic->pdev->dev)) + return -EOPNOTSUPP; + + if (wol->wolopts) + nic->flags |= wol_magic; + else + nic->flags &= ~wol_magic; + + device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); + + e100_exec_cb(nic, NULL, e100_configure); + + return 0; +} + +static u32 e100_get_msglevel(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->msg_enable; +} + +static void e100_set_msglevel(struct net_device *netdev, u32 value) +{ + struct nic *nic = netdev_priv(netdev); + nic->msg_enable = value; +} + +static int e100_nway_reset(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_nway_restart(&nic->mii); +} + +static u32 e100_get_link(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_link_ok(&nic->mii); +} + +static int e100_get_eeprom_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->eeprom_wc << 1; +} + +#define E100_EEPROM_MAGIC 0x1234 +static int e100_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + eeprom->magic = E100_EEPROM_MAGIC; + memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); + + return 0; +} + +static int e100_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + if (eeprom->magic != E100_EEPROM_MAGIC) + return -EINVAL; + + memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); + + return e100_eeprom_save(nic, eeprom->offset >> 1, + (eeprom->len >> 1) + 1); +} + +static void e100_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + ring->rx_max_pending = rfds->max; + ring->tx_max_pending = cbs->max; + ring->rx_pending = rfds->count; + ring->tx_pending = cbs->count; +} + +static int e100_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(netdev)) + e100_down(nic); + rfds->count = max(ring->rx_pending, rfds->min); + rfds->count = min(rfds->count, rfds->max); + cbs->count = max(ring->tx_pending, cbs->min); + cbs->count = min(cbs->count, cbs->max); + netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", + rfds->count, cbs->count); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} + +static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)", + "Eeprom test (on/offline)", + "Self test (offline)", + "Mac loopback (offline)", + "Phy loopback (offline)", +}; +#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test) + +static void e100_diag_test(struct net_device *netdev, + struct ethtool_test *test, u64 *data) +{ + struct ethtool_cmd cmd; + struct nic *nic = netdev_priv(netdev); + int i; + + memset(data, 0, E100_TEST_LEN * sizeof(u64)); + data[0] = !mii_link_ok(&nic->mii); + data[1] = e100_eeprom_load(nic); + if (test->flags & 
ETH_TEST_FL_OFFLINE) { + + /* save speed, duplex & autoneg settings */ + mii_ethtool_gset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_down(nic); + data[2] = e100_self_test(nic); + data[3] = e100_loopback_test(nic, lb_mac); + data[4] = e100_loopback_test(nic, lb_phy); + + /* restore speed, duplex & autoneg settings */ + mii_ethtool_sset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_up(nic); + } + for (i = 0; i < E100_TEST_LEN; i++) + test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; + + msleep_interruptible(4 * 1000); +} + +static int e100_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct nic *nic = netdev_priv(netdev); + enum led_state { + led_on = 0x01, + led_off = 0x04, + led_on_559 = 0x05, + led_on_557 = 0x07, + }; + u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : + MII_LED_CONTROL; + u16 leds = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; + + case ETHTOOL_ID_ON: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : + (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; + break; + + case ETHTOOL_ID_OFF: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off; + break; + + case ETHTOOL_ID_INACTIVE: + break; + } + + mdio_write(netdev, nic->mii.phy_id, led_reg, leds); + return 0; +} + +static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + /* device-specific stats */ + "tx_deferred", "tx_single_collisions", "tx_multi_collisions", + "tx_flow_control_pause", "rx_flow_control_pause", + "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", + "rx_short_frame_errors", "rx_over_length_errors", +}; +#define E100_NET_STATS_LEN 21 +#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) + +static int e100_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E100_TEST_LEN; + case ETH_SS_STATS: + return E100_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e100_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nic *nic = netdev_priv(netdev); + int i; + + for (i = 0; i < E100_NET_STATS_LEN; i++) + data[i] = ((unsigned long *)&netdev->stats)[i]; + + data[i++] = nic->tx_deferred; + data[i++] = nic->tx_single_collisions; + data[i++] = nic->tx_multiple_collisions; + data[i++] = nic->tx_fc_pause; + data[i++] = nic->rx_fc_pause; + data[i++] = nic->rx_fc_unsupported; + data[i++] = nic->tx_tco_frames; + data[i++] = nic->rx_tco_frames; + data[i++] = nic->rx_short_frame_errors; + data[i++] = nic->rx_over_length_errors; +} + +static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test)); + break; + case ETH_SS_STATS: + memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats)); + break; + } +} + +static const struct ethtool_ops e100_ethtool_ops = { + .get_drvinfo = e100_get_drvinfo, + .get_regs_len = e100_get_regs_len, + .get_regs = e100_get_regs, + .get_wol = e100_get_wol, + .set_wol = e100_set_wol, + .get_msglevel = e100_get_msglevel, + .set_msglevel = e100_set_msglevel, + .nway_reset 
= e100_nway_reset, + .get_link = e100_get_link, + .get_eeprom_len = e100_get_eeprom_len, + .get_eeprom = e100_get_eeprom, + .set_eeprom = e100_set_eeprom, + .get_ringparam = e100_get_ringparam, + .set_ringparam = e100_set_ringparam, + .self_test = e100_diag_test, + .get_strings = e100_get_strings, + .set_phys_id = e100_set_phys_id, + .get_ethtool_stats = e100_get_ethtool_stats, + .get_sset_count = e100_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e100_get_link_ksettings, + .set_link_ksettings = e100_set_link_ksettings, +}; + +static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct nic *nic = netdev_priv(netdev); + + return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); +} + +static int e100_alloc(struct nic *nic) +{ + nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem), + &nic->dma_addr); + return nic->mem ? 0 : -ENOMEM; +} + +static void e100_free(struct nic *nic) +{ + if (nic->mem) { + pci_free_consistent(nic->pdev, sizeof(struct mem), + nic->mem, nic->dma_addr); + nic->mem = NULL; + } +} + +static int e100_open(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err = 0; + + netif_carrier_off(netdev); + if ((err = e100_up(nic))) + netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); + return err; +} + +static int e100_close(struct net_device *netdev) +{ + e100_down(netdev_priv(netdev)); + return 0; +} + +static int e100_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct nic *nic = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + e100_exec_cb(nic, NULL, e100_configure); + return 1; +} + +static const struct net_device_ops e100_netdev_ops = { + .ndo_open = e100_open, + .ndo_stop = e100_close, + .ndo_start_xmit = e100_xmit_frame, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = e100_set_multicast_list, + .ndo_set_mac_address = e100_set_mac_address, + .ndo_do_ioctl = e100_do_ioctl, + .ndo_tx_timeout = e100_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e100_netpoll, +#endif + .ndo_set_features = e100_set_features, +}; + +static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct nic *nic; + int err; + + if (!(netdev = alloc_etherdev(sizeof(struct nic)))) + return -ENOMEM; + + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; + + netdev->netdev_ops = &e100_netdev_ops; + netdev->ethtool_ops = &e100_ethtool_ops; + netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + nic = netdev_priv(netdev); + netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); + nic->netdev = netdev; + nic->pdev = pdev; + nic->msg_enable = (1 << debug) - 1; + nic->mdio_ctrl = mdio_ctrl_hw; + pci_set_drvdata(pdev, netdev); + + if ((err = pci_enable_device(pdev))) { + netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); + goto err_out_free_dev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + if ((err = pci_request_regions(pdev, DRV_NAME))) { + netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, 
aborting\n"); + goto err_out_disable_pdev; + } + + if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + if (use_io) + netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); + + nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr)); + if (!nic->csr) { + netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + if (ent->driver_data) + nic->flags |= ich; + else + nic->flags &= ~ich; + + e100_get_defaults(nic); + + /* D100 MAC doesn't allow rx of vlan packets with normal MTU */ + if (nic->mac < mac_82558_D101_A4) + netdev->features |= NETIF_F_VLAN_CHALLENGED; + + /* locks must be initialized before calling hw_reset */ + spin_lock_init(&nic->cb_lock); + spin_lock_init(&nic->cmd_lock); + spin_lock_init(&nic->mdio_lock); + + /* Reset the device before pci_set_master() in case device is in some + * funky state and has an interrupt pending - hint: we don't have the + * interrupt handler registered yet. */ + e100_hw_reset(nic); + + pci_set_master(pdev); + + timer_setup(&nic->watchdog, e100_watchdog, 0); + + INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); + + if ((err = e100_alloc(nic))) { + netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); + goto err_out_iounmap; + } + + if ((err = e100_eeprom_load(nic))) + goto err_out_free; + + e100_phy_init(nic); + + memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!eeprom_bad_csum_allow) { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); + err = -EAGAIN; + goto err_out_free; + } else { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); + } + } + + /* Wol magic packet can be enabled from eeprom */ + if ((nic->mac >= mac_82558_D101_A4) && + (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { + nic->flags |= wol_magic; + device_set_wakeup_enable(&pdev->dev, true); + } + + /* ack any pending wake events, disable PME */ + pci_pme_active(pdev, false); + + strcpy(netdev->name, "eth%d"); + if ((err = register_netdev(netdev))) { + netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n"); + goto err_out_free; + } + nic->cbs_pool = dma_pool_create(netdev->name, + &nic->pdev->dev, + nic->params.cbs.max * sizeof(struct cb), + sizeof(u32), + 0); + if (!nic->cbs_pool) { + netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); + err = -ENOMEM; + goto err_out_pool; + } + netif_info(nic, probe, nic->netdev, + "addr 0x%llx, irq %d, MAC addr %pM\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), + pdev->irq, netdev->dev_addr); + + return 0; + +err_out_pool: + unregister_netdev(netdev); +err_out_free: + e100_free(nic); +err_out_iounmap: + pci_iounmap(pdev, nic->csr); +err_out_free_res: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out_free_dev: + free_netdev(netdev); + return err; +} + +static void e100_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netdev) { + struct nic *nic = netdev_priv(netdev); + unregister_netdev(netdev); + e100_free(nic); + pci_iounmap(pdev, nic->csr); + dma_pool_destroy(nic->cbs_pool); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ +#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ +#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ +static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + e100_down(nic); + + if ((nic->flags & wol_magic) | e100_asf(nic)) { + /* enable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, smartspeed | + E100_82552_REV_ANEG | E100_82552_ANEG_NOW); + } + *enable_wake = true; + } else { + *enable_wake = false; + } + + pci_disable_device(pdev); +} + +static int __e100_power_off(struct pci_dev *pdev, bool wake) +{ + if (wake) + return pci_prepare_to_sleep(pdev); + + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int __maybe_unused e100_suspend(struct device *dev_d) +{ + bool wake; + + __e100_shutdown(to_pci_dev(dev_d), &wake); + + return 0; +} + +static int __maybe_unused e100_resume(struct device *dev_d) +{ + struct net_device *netdev = dev_get_drvdata(dev_d); + struct nic *nic = netdev_priv(netdev); + int err; + + err = pci_enable_device(to_pci_dev(dev_d)); + if (err) { + netdev_err(netdev, "Resume cannot enable PCI device, aborting\n"); + return err; + } + pci_set_master(to_pci_dev(dev_d)); + + /* disable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, + smartspeed & ~(E100_82552_REV_ANEG)); + } + + if (netif_running(netdev)) + e100_up(nic); + + netif_device_attach(netdev); + + return 0; +} + +static void e100_shutdown(struct pci_dev *pdev) +{ + bool wake; + __e100_shutdown(pdev, &wake); + if (system_state == SYSTEM_POWER_OFF) + __e100_power_off(pdev, wake); +} + +/* ------------------ PCI Error Recovery infrastructure -------------- */ +/** + * e100_io_error_detected - called when PCI error is detected. + * @pdev: Pointer to PCI device + * @state: The current pci connection state + */ +static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e100_down(nic); + pci_disable_device(pdev); + + /* Request a slot reset. 
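+	 *
+	 * Returning PCI_ERS_RESULT_NEED_RESET asks the PCI error-recovery
+	 * core to reset the slot and then walk the callbacks registered in
+	 * e100_err_handler below, i.e. error_detected() -> slot_reset() ->
+	 * resume().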
*/ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e100_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch. + */ +static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + pr_err("Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + /* Only one device per card can do a reset */ + if (0 != PCI_FUNC(pdev->devfn)) + return PCI_ERS_RESULT_RECOVERED; + e100_hw_reset(nic); + e100_phy_init(nic); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e100_io_resume - resume normal operations + * @pdev: Pointer to PCI device + * + * Resume normal operations after an error recovery + * sequence has been completed. + */ +static void e100_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, PCI_D0, 0); + + netif_device_attach(netdev); + if (netif_running(netdev)) { + e100_open(netdev); + mod_timer(&nic->watchdog, jiffies); + } +} + +static const struct pci_error_handlers e100_err_handler = { + .error_detected = e100_io_error_detected, + .slot_reset = e100_io_slot_reset, + .resume = e100_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume); + +static struct pci_driver e100_driver = { + .name = DRV_NAME, + .id_table = e100_id_table, + .probe = e100_probe, + .remove = e100_remove, + + /* Power Management hooks */ + .driver.pm = &e100_pm_ops, + + .shutdown = e100_shutdown, + .err_handler = &e100_err_handler, +}; + +static int __init e100_init_module(void) +{ + if (((1 << debug) - 1) & NETIF_MSG_DRV) { + pr_info("%s\n", DRV_DESCRIPTION); + pr_info("%s\n", DRV_COPYRIGHT); + } + return pci_register_driver(&e100_driver); +} + +static void __exit e100_cleanup_module(void) +{ + pci_unregister_driver(&e100_driver); +} + +module_init(e100_init_module); +module_exit(e100_cleanup_module); diff --git a/devices/e100-5.14-ethercat.c b/devices/e100-5.14-ethercat.c new file mode 100644 index 00000000..8c16719f --- /dev/null +++ b/devices/e100-5.14-ethercat.c @@ -0,0 +1,3397 @@ +/****************************************************************************** + * + * $Id$ + * + * Copyright (C) 2007-2012 Florian Pose, Ingenieurgemeinschaft IgH + * + * This file is part of the IgH EtherCAT Master. + * + * The IgH EtherCAT Master is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + * The IgH EtherCAT Master is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with the IgH EtherCAT Master; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * --- + * + * The license mentioned above concerns the source code only. Using the + * EtherCAT technology and brand is only permitted in compliance with the + * industrial property and similar rights of Beckhoff Automation GmbH. 
+ * + * --- + * + * vim: noexpandtab + * + *****************************************************************************/ + +/** + \file + EtherCAT driver for e100-compatible NICs. +*/ + +/* Former documentation: */ + +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* + * e100.c: Intel(R) PRO/100 ethernet driver + * + * (Re)written 2003 by scott.feldman@intel.com. Based loosely on + * original e100 driver, but better described as a munging of + * e100, e1000, eepro100, tg3, 8139cp, and other drivers. + * + * References: + * Intel 8255x 10/100 Mbps Ethernet Controller Family, + * Open Source Software Developers Manual, + * http://sourceforge.net/projects/e1000 + * + * + * Theory of Operation + * + * I. General + * + * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet + * controller family, which includes the 82557, 82558, 82559, 82550, + * 82551, and 82562 devices. 82558 and greater controllers + * integrate the Intel 82555 PHY. The controllers are used in + * server and client network interface cards, as well as in + * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx + * configurations. 8255x supports a 32-bit linear addressing + * mode and operates at 33Mhz PCI clock rate. + * + * II. Driver Operation + * + * Memory-mapped mode is used exclusively to access the device's + * shared-memory structure, the Control/Status Registers (CSR). All + * setup, configuration, and control of the device, including queuing + * of Tx, Rx, and configuration commands is through the CSR. + * cmd_lock serializes accesses to the CSR command register. cb_lock + * protects the shared Command Block List (CBL). + * + * 8255x is highly MII-compliant and all access to the PHY go + * through the Management Data Interface (MDI). Consequently, the + * driver leverages the mii.c library shared with other MII-compliant + * devices. + * + * Big- and Little-Endian byte order as well as 32- and 64-bit + * archs are supported. Weak-ordered memory and non-cache-coherent + * archs are supported. + * + * III. Transmit + * + * A Tx skb is mapped and hangs off of a TCB. TCBs are linked + * together in a fixed-size ring (CBL) thus forming the flexible mode + * memory structure. A TCB marked with the suspend-bit indicates + * the end of the ring. The last TCB processed suspends the + * controller, and the controller can be restarted by issue a CU + * resume command to continue from the suspend point, or a CU start + * command to start at a given position in the ring. + * + * Non-Tx commands (config, multicast setup, etc) are linked + * into the CBL ring along with Tx commands. The common structure + * used for both Tx and non-Tx commands is the Command Block (CB). + * + * cb_to_use is the next CB to use for queuing a command; cb_to_clean + * is the next CB to check for completion; cb_to_send is the first + * CB to start on in case of a previous failure to resume. CB clean + * up happens in interrupt context in response to a CU interrupt. + * cbs_avail keeps track of number of free CB resources available. + * + * Hardware padding of short packets to minimum packet size is + * enabled. 82557 pads with 7Eh, while the later controllers pad + * with 00h. + * + * IV. Receive + * + * The Receive Frame Area (RFA) comprises a ring of Receive Frame + * Descriptors (RFD) + data buffer, thus forming the simplified mode + * memory structure. Rx skbs are allocated to contain both the RFD + * and the data buffer, but the RFD is pulled off before the skb is + * indicated. 
The data buffer is aligned such that encapsulated
+ * protocol headers are u32-aligned. Since the RFD is part of the
+ * mapped shared memory, and completion status is contained within
+ * the RFD, the RFD must be dma_sync'ed to maintain a consistent
+ * view from software and hardware.
+ *
+ * In order to keep updates to the RFD link field from colliding with
+ * hardware writes to mark packets complete, we use the feature that
+ * hardware will not write to a size 0 descriptor and mark the previous
+ * packet as end-of-list (EL). After updating the link, we remove EL
+ * and only then restore the size such that hardware may use the
+ * previous-to-end RFD.
+ *
+ * Under typical operation, the receive unit (RU) is start once,
+ * and the controller happily fills RFDs as frames arrive. If
+ * replacement RFDs cannot be allocated, or the RU goes non-active,
+ * the RU must be restarted. Frame arrival generates an interrupt,
+ * and Rx indication and re-allocation happen in the same context,
+ * therefore no locking is required. A software-generated interrupt
+ * is generated from the watchdog to recover from a failed allocation
+ * scenario where all Rx resources have been indicated and none re-
+ * placed.
+ *
+ * V. Miscellaneous
+ *
+ * VLAN offloading of tagging, stripping and filtering is not
+ * supported, but driver will accommodate the extra 4-byte VLAN tag
+ * for processing by upper layers. Tx/Rx Checksum offloading is not
+ * supported. Tx Scatter/Gather is not supported. Jumbo Frames is
+ * not supported (hardware limitation).
+ *
+ * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
+ *
+ * Thanks to JC (jchapman@katalix.com) for helping with
+ * testing/troubleshooting the development driver.
+ *
+ * TODO:
+ * o several entry points race with dev->close
+ * o check for tx-no-resources/stop Q races with tx clean/wake Q
+ *
+ * FIXES:
+ * 2005/12/02 - Michael O'Donnell
+ * - Stratus87247: protect MDI control register manipulations
+ * 2009/06/01 - Andreas Mohr
+ * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/string.h>
+#include <linux/firmware.h>
+#include <linux/rtnetlink.h>
+#include <net/ip6_checksum.h>
+
+// EtherCAT includes
+#include "../globals.h"
+#include "ecdev.h"
+
+#ifdef CONFIG_SUSE_KERNEL
+#include <linux/suse_version.h>
+#else
+# ifndef SUSE_VERSION
+# define SUSE_VERSION 0
+# endif
+# ifndef SUSE_PATCHLEVEL
+# define SUSE_PATCHLEVEL 0
+# endif
+#endif
+
+
+#define DRV_NAME "ec_e100"
+#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver, EtherCAT enabled"
+#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
+
+#define E100_WATCHDOG_PERIOD (2 * HZ)
+#define E100_NAPI_WEIGHT 16
+
+#define FIRMWARE_D101M "e100/d101m_ucode.bin"
+#define FIRMWARE_D101S "e100/d101s_ucode.bin"
+#define FIRMWARE_D102E "e100/d102e_ucode.bin"
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR(DRV_COPYRIGHT);
+MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(FIRMWARE_D101M);
+MODULE_FIRMWARE(FIRMWARE_D101S);
+MODULE_FIRMWARE(FIRMWARE_D102E);
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Florian Pose ");
+MODULE_LICENSE("GPL");
+
+void e100_ec_poll(struct net_device *);
+
+static int debug = 3;
+static int eeprom_bad_csum_allow = 0;
+static int use_io = 0;
+module_param(debug, int, 0);
+module_param(eeprom_bad_csum_allow, int, 0);
+module_param(use_io, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); +MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); + +#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ + PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ + PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } +static const struct pci_device_id e100_id_table[] = { + INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1032, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1033, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1034, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1038, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1039, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103A, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103B, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103C, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103D, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103E, 4), + INTEL_8255X_ETHERNET_DEVICE(0x1050, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1051, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1052, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1053, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1054, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1055, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1056, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1057, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1059, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1064, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1065, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1066, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1067, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1068, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), + INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), + INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), + INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), + INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), + INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), + { 0, } +}; + +// prevent from being loaded automatically +//MODULE_DEVICE_TABLE(pci, e100_id_table); + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 0xFF, +}; + +enum phy { + phy_100a = 0x000003E0, + phy_100c = 0x035002A8, + phy_82555_tx = 0x015002A8, + phy_nsc_tx = 0x5C002000, + phy_82562_et = 0x033002A8, + phy_82562_em = 0x032002A8, + phy_82562_ek = 0x031002A8, + phy_82562_eh = 0x017002A8, + phy_82552_v = 0xd061004d, + phy_unknown = 0xFFFFFFFF, +}; + +/* CSR (Control/Status Registers) */ +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 0x08, + rus_ready = 0x10, + rus_mask = 0x3C, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0x00, + stat_ack_sw_gen = 0x04, + stat_ack_rnr = 0x10, + stat_ack_cu_idle = 0x20, + stat_ack_frame_rx = 0x40, + stat_ack_cu_cmd_done = 0x80, + stat_ack_not_present = 0xFF, + stat_ack_rx = 
(stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), + stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), +}; + +enum scb_cmd_hi { + irq_mask_none = 0x00, + irq_mask_all = 0x01, + irq_sw_gen = 0x02, +}; + +enum scb_cmd_lo { + cuc_nop = 0x00, + ruc_start = 0x01, + ruc_load_base = 0x06, + cuc_start = 0x10, + cuc_resume = 0x20, + cuc_dump_addr = 0x40, + cuc_dump_stats = 0x50, + cuc_load_base = 0x60, + cuc_dump_reset = 0x70, +}; + +enum cuc_dump { + cuc_dump_complete = 0x0000A005, + cuc_dump_reset_complete = 0x0000A007, +}; + +enum port { + software_reset = 0x0000, + selftest = 0x0001, + selective_reset = 0x0002, +}; + +enum eeprom_ctrl_lo { + eesk = 0x01, + eecs = 0x02, + eedi = 0x04, + eedo = 0x08, +}; + +enum mdi_ctrl { + mdi_write = 0x04000000, + mdi_read = 0x08000000, + mdi_ready = 0x10000000, +}; + +enum eeprom_op { + op_write = 0x05, + op_read = 0x06, + op_ewds = 0x10, + op_ewen = 0x13, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 0x03, + eeprom_phy_iface = 0x06, + eeprom_id = 0x0A, + eeprom_config_asf = 0x0D, + eeprom_smbus_addr = 0x90, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 0x0080, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB, + I82553C, + I82503, + DP83840, + S80C240, + S80C24, + I82555, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 0x0020, +}; + +enum eeprom_config_asf { + eeprom_asf = 0x8000, + eeprom_gcl = 0x4000, +}; + +enum cb_status { + cb_complete = 0x8000, + cb_ok = 0x2000, +}; + +/* + * cb_command - Command Block flags + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory + */ +enum cb_command { + cb_nop = 0x0000, + cb_iaaddr = 0x0001, + cb_config = 0x0002, + cb_multi = 0x0003, + cb_tx = 0x0004, + cb_ucode = 0x0005, + cb_dump = 0x0006, + cb_tx_sf = 0x0008, + cb_tx_nc = 0x0010, + cb_cid = 0x1f00, + cb_i = 0x2000, + cb_s = 0x4000, + cb_el = 0x8000, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next, *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +#if defined(__BIG_ENDIAN_BITFIELD) +#define X(a,b) b,a +#else +#define X(a,b) a,b +#endif +struct config { +/*0*/ u8 X(byte_count:6, pad0:2); +/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); +/*2*/ u8 adaptive_ifs; +/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), + term_write_cache_line:1), pad3:4); +/*4*/ u8 X(rx_dma_max_count:7, pad4:1); +/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); +/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), + tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), + rx_save_overruns : 1), rx_save_bad_frames : 1); +/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), + pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), + tx_dynamic_tbd:1); +/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); +/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), + link_status_wake:1), arp_wake:1), mcmatch_wake:1); +/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), + loopback:2); +/*11*/ u8 X(linear_priority:3, pad11:5); +/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); +/*13*/ u8 ip_addr_lo; +/*14*/ u8 ip_addr_hi; +/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), + wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), + pad15_2:1), crs_or_cdt:1); +/*16*/ u8 fc_delay_lo; +/*17*/ u8 fc_delay_hi; +/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), + rx_long_ok:1), 
fc_priority_threshold:3), pad18:1); +/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), + fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), + full_duplex_force:1), full_duplex_pin:1); +/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1); +/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); +/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); + u8 pad_d102[9]; +}; + +#define E100_MAX_MULTICAST_ADDRS 64 +struct multi { + __le16 count; + u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; +}; + +/* Important: keep total struct u32-aligned */ +#define UCODE_SIZE 134 +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[ETH_ALEN]; + __le32 ucode[UCODE_SIZE]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next, *prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, lb_mac = 1, lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames, tx_max_collisions, tx_late_collisions, + tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, + tx_multiple_collisions, tx_total_collisions; + __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors, + rx_resource_errors, rx_overrun_errors, rx_cdt_errors, + rx_short_frame_errors; + __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; + __le16 xmt_tco_frames, rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + /* Begin: frequently used values: keep adjacent for cache effect */ + u32 msg_enable ____cacheline_aligned; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data); + + struct rx *rxs ____cacheline_aligned; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + + spinlock_t cb_lock ____cacheline_aligned; + spinlock_t cmd_lock; + struct csr __iomem *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + /* End: frequently used values: keep adjacent for cache effect */ + + enum { + ich = (1 << 0), + promiscuous = (1 << 1), + multicast_all = (1 << 2), + wol_magic = (1 << 3), + ich_10h_workaround = (1 << 4), + } flags ____cacheline_aligned; + + enum mac mac; + enum phy phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + + struct mem *mem; + dma_addr_t dma_addr; + + struct dma_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + + u16 eeprom_wc; + + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +static inline void 
e100_write_flush(struct nic *nic) +{ + /* Flush previous PCI writes through intermediate bridges + * by doing a benign read */ + (void)ioread8(&nic->csr->scb.status); +} + +static void e100_enable_irq(struct nic *nic) +{ + unsigned long flags; + + if (nic->ecdev) + return; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_disable_irq(struct nic *nic) +{ + unsigned long flags = 0; + + if (!nic->ecdev) + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_hw_reset(struct nic *nic) +{ + /* Put CU and RU into idle with a selective reset to get + * device off of PCI bus */ + iowrite32(selective_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Now fully reset device */ + iowrite32(software_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Mask off our interrupt line - it's unmasked after reset */ + e100_disable_irq(nic); +} + +static int e100_self_test(struct nic *nic) +{ + u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); + + /* Passing the self-test is a pretty good indication + * that the device can DMA to/from host memory */ + + nic->mem->selftest.signature = 0; + nic->mem->selftest.result = 0xFFFFFFFF; + + iowrite32(selftest | dma_addr, &nic->csr->port); + e100_write_flush(nic); + /* Wait 10 msec for self-test to complete */ + msleep(10); + + /* Interrupts are enabled after self-test */ + e100_disable_irq(nic); + + /* Check results of self-test */ + if (nic->mem->selftest.result != 0) { + netif_err(nic, hw, nic->netdev, + "Self-test failed: result=0x%08X\n", + nic->mem->selftest.result); + return -ETIMEDOUT; + } + if (nic->mem->selftest.signature == 0) { + netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data) +{ + u32 cmd_addr_data[3]; + u8 ctrl; + int i, j; + + /* Three cmds: write/erase enable, write data, write/erase disable */ + cmd_addr_data[0] = op_ewen << (addr_len - 2); + cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | + le16_to_cpu(data); + cmd_addr_data[2] = op_ewds << (addr_len - 2); + + /* Bit-bang cmds to write word to eeprom */ + for (j = 0; j < 3; j++) { + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data[j] & (1 << i)) ? + eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } + /* Wait 10 msec for cmd to complete */ + msleep(10); + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } +}; + +/* General technique stolen from the eepro100 driver - very clever */ +static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) +{ + u32 cmd_addr_data; + u16 data = 0; + u8 ctrl; + int i; + + cmd_addr_data = ((op_read << *addr_len) | addr) << 16; + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Bit-bang to read word from eeprom */ + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data & (1 << i)) ? 
eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Eeprom drives a dummy zero to EEDO after receiving + * complete address. Use this to adjust addr_len. */ + ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); + if (!(ctrl & eedo) && i > 16) { + *addr_len -= (i - 16); + i = 17; + } + + data = (data << 1) | (ctrl & eedo ? 1 : 0); + } + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + return cpu_to_le16(data); +}; + +/* Load entire EEPROM image into driver cache and validate checksum */ +static int e100_eeprom_load(struct nic *nic) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + for (addr = 0; addr < nic->eeprom_wc; addr++) { + nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); + if (addr < nic->eeprom_wc - 1) + checksum += le16_to_cpu(nic->eeprom[addr]); + } + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { + netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); + if (!eeprom_bad_csum_allow) + return -EAGAIN; + } + + return 0; +} + +/* Save (portion of) driver EEPROM cache to device and update checksum */ +static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + if (start + count >= nic->eeprom_wc) + return -EINVAL; + + for (addr = start; addr < start + count; addr++) + e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + for (addr = 0; addr < nic->eeprom_wc - 1; addr++) + checksum += le16_to_cpu(nic->eeprom[addr]); + nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); + e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, + nic->eeprom[nic->eeprom_wc - 1]); + + return 0; +} + +#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! 
*/ +#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ +static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) +{ + unsigned long flags = 0; + unsigned int i; + int err = 0; + + if (!nic->ecdev) + spin_lock_irqsave(&nic->cmd_lock, flags); + + /* Previous command is accepted when SCB clears */ + for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { + if (likely(!ioread8(&nic->csr->scb.cmd_lo))) + break; + cpu_relax(); + if (unlikely(i > E100_WAIT_SCB_FAST)) + udelay(5); + } + if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) { + err = -EAGAIN; + goto err_unlock; + } + + if (unlikely(cmd != cuc_resume)) + iowrite32(dma_addr, &nic->csr->scb.gen_ptr); + iowrite8(cmd, &nic->csr->scb.cmd_lo); + +err_unlock: + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->cmd_lock, flags); + + return err; +} + +static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, + int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) +{ + struct cb *cb; + unsigned long flags; + int err; + + if (!nic->ecdev) { + spin_lock_irqsave(&nic->cb_lock, flags); + } + + if (unlikely(!nic->cbs_avail)) { + err = -ENOMEM; + goto err_unlock; + } + + cb = nic->cb_to_use; + nic->cb_to_use = cb->next; + nic->cbs_avail--; + cb->skb = skb; + + err = cb_prepare(nic, cb, skb); + if (err) + goto err_unlock; + + if (unlikely(!nic->cbs_avail)) + err = -ENOSPC; + + + /* Order is important otherwise we'll be in a race with h/w: + * set S-bit in current first, then clear S-bit in previous. */ + cb->command |= cpu_to_le16(cb_s); + dma_wmb(); + cb->prev->command &= cpu_to_le16(~cb_s); + + while (nic->cb_to_send != nic->cb_to_use) { + if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, + nic->cb_to_send->dma_addr))) { + /* Ok, here's where things get sticky. It's + * possible that we can't schedule the command + * because the controller is too busy, so + * let's just queue the command and try again + * when another command is scheduled. */ + if (err == -ENOSPC) { + //request a reset + schedule_work(&nic->tx_timeout_task); + } + break; + } else { + nic->cuc_cmd = cuc_resume; + nic->cb_to_send = nic->cb_to_send->next; + } + } + +err_unlock: + if (!nic->ecdev) { + spin_unlock_irqrestore(&nic->cb_lock, flags); + } + + return err; +} + +static int mdio_read(struct net_device *netdev, int addr, int reg) +{ + struct nic *nic = netdev_priv(netdev); + return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); +} + +static void mdio_write(struct net_device *netdev, int addr, int reg, int data) +{ + struct nic *nic = netdev_priv(netdev); + + nic->mdio_ctrl(nic, addr, mdi_write, reg, data); +} + +/* the standard mdio_ctrl() function for usual MII-compliant hardware */ +static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) +{ + u32 data_out = 0; + unsigned int i; + unsigned long flags = 0; + + + /* + * Stratus87247: we shouldn't be writing the MDI control + * register until the Ready bit shows True. Also, since + * manipulation of the MDI control registers is a multi-step + * procedure it should be done under lock. 
+ */ + if (!nic->ecdev) + spin_lock_irqsave(&nic->mdio_lock, flags); + for (i = 100; i; --i) { + if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) + break; + udelay(20); + } + if (unlikely(!i)) { + netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->mdio_lock, flags); + return 0; /* No way to indicate timeout error */ + } + iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); + + for (i = 0; i < 100; i++) { + udelay(20); + if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) + break; + } + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->mdio_lock, flags); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data, data_out); + return (u16)data_out; +} + +/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */ +static u16 mdio_ctrl_phy_82552_v(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + if ((reg == MII_BMCR) && (dir == mdi_write)) { + if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) { + u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, + MII_ADVERTISE); + + /* + * Workaround Si issue where sometimes the part will not + * autoneg to 100Mbps even when advertised. + */ + if (advert & ADVERTISE_100FULL) + data |= BMCR_SPEED100 | BMCR_FULLDPLX; + else if (advert & ADVERTISE_100HALF) + data |= BMCR_SPEED100; + } + } + return mdio_ctrl_hw(nic, addr, dir, reg, data); +} + +/* Fully software-emulated mdio_ctrl() function for cards without + * MII-compliant PHYs. + * For now, this is mainly geared towards 80c24 support; in case of further + * requirements for other types (i82503, ...?) either extend this mechanism + * or split it, whichever is cleaner. + */ +static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + /* might need to allocate a netdev_priv'ed register array eventually + * to be able to record state changes, but for now + * some fully hardcoded register handling ought to be ok I guess. */ + + if (dir == mdi_read) { + switch (reg) { + case MII_BMCR: + /* Auto-negotiation, right? */ + return BMCR_ANENABLE | + BMCR_FULLDPLX; + case MII_BMSR: + return BMSR_LSTATUS /* for mii_link_ok() */ | + BMSR_ANEGCAPABLE | + BMSR_10FULL; + case MII_ADVERTISE: + /* 80c24 is a "combo card" PHY, right? */ + return ADVERTISE_10HALF | + ADVERTISE_10FULL; + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } else { + switch (reg) { + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } +} +static inline int e100_phy_supports_mii(struct nic *nic) +{ + /* for now, just check it by comparing whether we + are using MII software emulation. + */ + return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); +} + +static void e100_get_defaults(struct nic *nic) +{ + struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; + struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; + + /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ + nic->mac = (nic->flags & ich) ? 
mac_82559_D101M : nic->pdev->revision; + if (nic->mac == mac_unknown) + nic->mac = mac_82557_D100_A; + + nic->params.rfds = rfds; + nic->params.cbs = cbs; + + /* Quadwords to DMA into FIFO before starting frame transmit */ + nic->tx_threshold = 0xE0; + + /* no interrupt for every tx completion, delay = 256us if not 557 */ + nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | + ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); + + /* Template for a freshly allocated RFD */ + nic->blank_rfd.command = 0; + nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); + nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); + + /* MII setup */ + nic->mii.phy_id_mask = 0x1F; + nic->mii.reg_num_mask = 0x1F; + nic->mii.dev = nic->netdev; + nic->mii.mdio_read = mdio_read; + nic->mii.mdio_write = mdio_write; +} + +static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct config *config = &cb->u.config; + u8 *c = (u8 *)config; + struct net_device *netdev = nic->netdev; + + cb->command = cpu_to_le16(cb_config); + + memset(config, 0, sizeof(struct config)); + + config->byte_count = 0x16; /* bytes in this struct */ + config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ + config->direct_rx_dma = 0x1; /* reserved */ + config->standard_tcb = 0x1; /* 1=standard, 0=extended */ + config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ + config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ + config->tx_underrun_retry = 0x3; /* # of underrun retries */ + if (e100_phy_supports_mii(nic)) + config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ + config->pad10 = 0x6; + config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ + config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ + config->ifs = 0x6; /* x16 = inter frame spacing */ + config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ + config->pad15_1 = 0x1; + config->pad15_2 = 0x1; + config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ + config->fc_delay_hi = 0x40; /* time delay for fc frame */ + config->tx_padding = 0x1; /* 1=pad short frames */ + config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ + config->pad18 = 0x1; + config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ + config->pad20_1 = 0x1F; + config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ + config->pad21_1 = 0x5; + + config->adaptive_ifs = nic->adaptive_ifs; + config->loopback = nic->loopback; + + if (nic->mii.force_media && nic->mii.full_duplex) + config->full_duplex_force = 0x1; /* 1=force, 0=auto */ + + if (nic->flags & promiscuous || nic->loopback) { + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + config->promiscuous_mode = 0x1; /* 1=on, 0=off */ + } + + if (unlikely(netdev->features & NETIF_F_RXFCS)) + config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ + + if (nic->flags & multicast_all) + config->multicast_all = 0x1; /* 1=accept, 0=no */ + + /* disable WoL when up */ + if (nic->ecdev || + (netif_running(nic->netdev) || !(nic->flags & wol_magic))) + config->magic_packet_disable = 0x1; /* 1=off, 0=on */ + + if (nic->mac >= mac_82558_D101_A4) { + config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ + config->mwi_enable = 0x1; /* 1=enable, 0=disable */ + config->standard_tcb = 0x0; /* 1=standard, 0=extended */ + config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ + if (nic->mac >= mac_82559_D101M) { + config->tno_intr = 0x1; /* TCO stats enable */ + /* Enable TCO in extended config */ + if (nic->mac >= 
mac_82551_10) { + config->byte_count = 0x20; /* extended bytes */ + config->rx_d102_mode = 0x1; /* GMRC for TCO */ + } + } else { + config->standard_stat_counter = 0x0; + } + } + + if (netdev->features & NETIF_F_RXALL) { + config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + } + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", + c + 0); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", + c + 8); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", + c + 16); + return 0; +} + +/************************************************************************* +* CPUSaver parameters +* +* All CPUSaver parameters are 16-bit literals that are part of a +* "move immediate value" instruction. By changing the value of +* the literal in the instruction before the code is loaded, the +* driver can change the algorithm. +* +* INTDELAY - This loads the dead-man timer with its initial value. +* When this timer expires the interrupt is asserted, and the +* timer is reset each time a new packet is received. (see +* BUNDLEMAX below to set the limit on number of chained packets) +* The current default is 0x600 or 1536. Experiments show that +* the value should probably stay within the 0x200 - 0x1000. +* +* BUNDLEMAX - +* This sets the maximum number of frames that will be bundled. In +* some situations, such as the TCP windowing algorithm, it may be +* better to limit the growth of the bundle size than let it go as +* high as it can, because that could cause too much added latency. +* The default is six, because this is the number of packets in the +* default TCP window size. A value of 1 would make CPUSaver indicate +* an interrupt for every frame received. If you do not want to put +* a limit on the bundle size, set this value to xFFFF. +* +* BUNDLESMALL - +* This contains a bit-mask describing the minimum size frame that +* will be bundled. The default masks the lower 7 bits, which means +* that any frame less than 128 bytes in length will not be bundled, +* but will instead immediately generate an interrupt. This does +* not affect the current bundle in any way. Any frame that is 128 +* bytes or large will be bundled normally. This feature is meant +* to provide immediate indication of ACK frames in a TCP environment. +* Customers were seeing poor performance when a machine with CPUSaver +* enabled was sending but not receiving. The delay introduced when +* the ACKs were received was enough to reduce total throughput, because +* the sender would sit idle until the ACK was finally seen. +* +* The current default is 0xFF80, which masks out the lower 7 bits. +* This means that any frame which is x7F (127) bytes or smaller +* will cause an immediate interrupt. Because this value must be a +* bit mask, there are only a few valid values that can be used. To +* turn this feature off, the driver can write the value xFFFF to the +* lower word of this instruction (in the same way that the other +* parameters are used). Likewise, a value of 0xF800 (2047) would +* cause an interrupt to be generated for every frame, because all +* standard Ethernet frames are <= 2047 bytes in length. 
+*************************************************************************/ + +/* if you wish to disable the ucode functionality, while maintaining the + * workarounds it provides, set the following defines to: + * BUNDLESMALL 0 + * BUNDLEMAX 1 + * INTDELAY 1 + */ +#define BUNDLESMALL 1 +#define BUNDLEMAX (u16)6 +#define INTDELAY (u16)1536 /* 0x600 */ + +/* Initialize firmware */ +static const struct firmware *e100_request_firmware(struct nic *nic) +{ + const char *fw_name; + const struct firmware *fw = nic->fw; + u8 timer, bundle, min_size; + int err = 0; + bool required = false; + + /* do not load u-code for ICH devices */ + if (nic->flags & ich) + return NULL; + + /* Search for ucode match against h/w revision + * + * Based on comments in the source code for the FreeBSD fxp + * driver, the FIRMWARE_D102E ucode includes both CPUSaver and + * + * "fixes for bugs in the B-step hardware (specifically, bugs + * with Inline Receive)." + * + * So we must fail if it cannot be loaded. + * + * The other microcode files are only required for the optional + * CPUSaver feature. Nice to have, but no reason to fail. + */ + if (nic->mac == mac_82559_D101M) { + fw_name = FIRMWARE_D101M; + } else if (nic->mac == mac_82559_D101S) { + fw_name = FIRMWARE_D101S; + } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { + fw_name = FIRMWARE_D102E; + required = true; + } else { /* No ucode on other devices */ + return NULL; + } + + /* If the firmware has not previously been loaded, request a pointer + * to it. If it was previously loaded, we are reinitializing the + * adapter, possibly in a resume from hibernate, in which case + * request_firmware() cannot be used. + */ + if (!fw) + err = request_firmware(&fw, fw_name, &nic->pdev->dev); + + if (err) { + if (required) { + netif_err(nic, probe, nic->netdev, + "Failed to load firmware \"%s\": %d\n", + fw_name, err); + return ERR_PTR(err); + } else { + netif_info(nic, probe, nic->netdev, + "CPUSaver disabled. Needs \"%s\": %d\n", + fw_name, err); + return NULL; + } + } + + /* Firmware should be precisely UCODE_SIZE (words) plus three bytes + indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ + if (fw->size != UCODE_SIZE * 4 + 3) { + netif_err(nic, probe, nic->netdev, + "Firmware \"%s\" has wrong size %zu\n", + fw_name, fw->size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || + min_size >= UCODE_SIZE) { + netif_err(nic, probe, nic->netdev, + "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", + fw_name, timer, bundle, min_size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* OK, firmware is validated and ready to use. Save a pointer + * to it in the nic */ + nic->fw = fw; + return fw; +} + +static int e100_setup_ucode(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + const struct firmware *fw = (void *)skb; + u8 timer, bundle, min_size; + + /* It's not a real skb; we just abused the fact that e100_exec_cb + will pass it through to here... 
*/ + cb->skb = NULL; + + /* firmware is stored as little endian already */ + memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + /* Insert user-tunable settings in cb->u.ucode */ + cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); + cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); + cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); + + cb->command = cpu_to_le16(cb_ucode | cb_el); + return 0; +} + +static inline int e100_load_ucode_wait(struct nic *nic) +{ + const struct firmware *fw; + int err = 0, counter = 50; + struct cb *cb = nic->cb_to_clean; + + fw = e100_request_firmware(nic); + /* If it's NULL, then no ucode is required */ + if (IS_ERR_OR_NULL(fw)) + return PTR_ERR_OR_ZERO(fw); + + if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) + netif_err(nic, probe, nic->netdev, + "ucode cmd failed with error %d\n", err); + + /* must restart cuc */ + nic->cuc_cmd = cuc_start; + + /* wait for completion */ + e100_write_flush(nic); + udelay(10); + + /* wait for possibly (ouch) 500ms */ + while (!(cb->status & cpu_to_le16(cb_complete))) { + msleep(10); + if (!--counter) break; + } + + /* ack any interrupts, something could have been set */ + iowrite8(~0, &nic->csr->scb.stat_ack); + + /* if the command failed, or is not OK, notify and return */ + if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { + netif_err(nic, probe, nic->netdev, "ucode load failed\n"); + err = -EPERM; + } + + return err; +} + +static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_iaaddr); + memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); + return 0; +} + +static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_dump); + cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + + offsetof(struct mem, dump_buf)); + return 0; +} + +static int e100_phy_check_without_mii(struct nic *nic) +{ + u8 phy_type; + int without_mii; + + phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; + + switch (phy_type) { + case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ + case I82503: /* Non-MII PHY; UNTESTED! */ + case S80C24: /* Non-MII PHY; tested and working */ + /* paragraph from the FreeBSD driver, "FXP_PHY_80C24": + * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter + * doesn't have a programming interface of any sort. The + * media is sensed automatically based on how the link partner + * is configured. This is, in essence, manual configuration. + */ + netif_info(nic, probe, nic->netdev, + "found MII-less i82503 or 80c24 or other PHY\n"); + + nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; + nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ + + /* these might be needed for certain MII-less cards... 
+ * nic->flags |= ich; + * nic->flags |= ich_10h_workaround; */ + + without_mii = 1; + break; + default: + without_mii = 0; + break; + } + return without_mii; +} + +#define NCONFIG_AUTO_SWITCH 0x0080 +#define MII_NSC_CONG MII_RESV1 +#define NSC_CONG_ENABLE 0x0100 +#define NSC_CONG_TXREADY 0x0400 +#define ADVERTISE_FC_SUPPORTED 0x0400 +static int e100_phy_init(struct nic *nic) +{ + struct net_device *netdev = nic->netdev; + u32 addr; + u16 bmcr, stat, id_lo, id_hi, cong; + + /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ + for (addr = 0; addr < 32; addr++) { + nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) + break; + } + if (addr == 32) { + /* uhoh, no PHY detected: check whether we seem to be some + * weird, rare variant which is *known* to not have any MII. + * But do this AFTER MII checking only, since this does + * lookup of EEPROM values which may easily be unreliable. */ + if (e100_phy_check_without_mii(nic)) + return 0; /* simply return and hope for the best */ + else { + /* for unknown cases log a fatal error */ + netif_err(nic, hw, nic->netdev, + "Failed to locate any known PHY, aborting\n"); + return -EAGAIN; + } + } else + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy_addr = %d\n", nic->mii.phy_id); + + /* Get phy ID */ + id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); + id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); + nic->phy = (u32)id_hi << 16 | (u32)id_lo; + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy ID = 0x%08X\n", nic->phy); + + /* Select the phy and isolate the rest */ + for (addr = 0; addr < 32; addr++) { + if (addr != nic->mii.phy_id) { + mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); + } else if (nic->phy != phy_82552_v) { + bmcr = mdio_read(netdev, addr, MII_BMCR); + mdio_write(netdev, addr, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } + } + /* + * Workaround for 82552: + * Clear the ISOLATE bit on selected phy_id last (mirrored on all + * other phy_id's) using bmcr value from addr discovery loop above. + */ + if (nic->phy == phy_82552_v) + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* Handle National tx phys */ +#define NCS_PHY_MODEL_MASK 0xFFF0FFFF + if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { + /* Disable congestion control */ + cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); + cong |= NSC_CONG_TXREADY; + cong &= ~NSC_CONG_ENABLE; + mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); + } + + if (nic->phy == phy_82552_v) { + u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); + + /* assign special tweaked mdio_ctrl() function */ + nic->mdio_ctrl = mdio_ctrl_phy_82552_v; + + /* Workaround Si not advertising flow-control during autoneg */ + advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); + + /* Reset for the above changes to take effect */ + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + bmcr |= BMCR_RESET; + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); + } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && + (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && + (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { + /* enable/disable MDI/MDI-X auto-switching. 
*/ + mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, + nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); + } + + return 0; +} + +static int e100_hw_init(struct nic *nic) +{ + int err = 0; + + e100_hw_reset(nic); + + netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); + if ((err = e100_self_test(nic))) + return err; + + if ((err = e100_phy_init(nic))) + return err; + if ((err = e100_exec_cmd(nic, cuc_load_base, 0))) + return err; + if ((err = e100_exec_cmd(nic, ruc_load_base, 0))) + return err; + if ((err = e100_load_ucode_wait(nic))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_configure))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_addr, + nic->dma_addr + offsetof(struct mem, stats)))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0))) + return err; + + e100_disable_irq(nic); + + return 0; +} + +static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct net_device *netdev = nic->netdev; + struct netdev_hw_addr *ha; + u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); + + cb->command = cpu_to_le16(cb_multi); + cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == count) + break; + memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, + ETH_ALEN); + } + return 0; +} + +static void e100_set_multicast_list(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "mc_count=%d, flags=0x%04X\n", + netdev_mc_count(netdev), netdev->flags); + + if (netdev->flags & IFF_PROMISC) + nic->flags |= promiscuous; + else + nic->flags &= ~promiscuous; + + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS) + nic->flags |= multicast_all; + else + nic->flags &= ~multicast_all; + + e100_exec_cb(nic, NULL, e100_configure); + e100_exec_cb(nic, NULL, e100_multi); +} + +static void e100_update_stats(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct net_device_stats *ns = &dev->stats; + struct stats *s = &nic->mem->stats; + __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : + (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : + &s->complete; + + /* Device's stats reporting may take several microseconds to + * complete, so we're always waiting for results of the + * previous command. 
*/ + + if (*complete == cpu_to_le32(cuc_dump_reset_complete)) { + *complete = 0; + nic->tx_frames = le32_to_cpu(s->tx_good_frames); + nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); + ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); + ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); + ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); + ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); + ns->collisions += nic->tx_collisions; + ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + + le32_to_cpu(s->tx_lost_crs); + nic->rx_short_frame_errors += + le32_to_cpu(s->rx_short_frame_errors); + ns->rx_length_errors = nic->rx_short_frame_errors + + nic->rx_over_length_errors; + ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); + ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); + ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); + ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + + le32_to_cpu(s->rx_alignment_errors) + + le32_to_cpu(s->rx_short_frame_errors) + + le32_to_cpu(s->rx_cdt_errors); + nic->tx_deferred += le32_to_cpu(s->tx_deferred); + nic->tx_single_collisions += + le32_to_cpu(s->tx_single_collisions); + nic->tx_multiple_collisions += + le32_to_cpu(s->tx_multiple_collisions); + if (nic->mac >= mac_82558_D101_A4) { + nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); + nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); + nic->rx_fc_unsupported += + le32_to_cpu(s->fc_rcv_unsupported); + if (nic->mac >= mac_82559_D101M) { + nic->tx_tco_frames += + le16_to_cpu(s->xmt_tco_frames); + nic->rx_tco_frames += + le16_to_cpu(s->rcv_tco_frames); + } + } + } + + + if (e100_exec_cmd(nic, cuc_dump_reset, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_dump_reset failed\n"); +} + +static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) +{ + /* Adjust inter-frame-spacing (IFS) between two transmits if + * we're getting collisions on a half-duplex connection. */ + + if (duplex == DUPLEX_HALF) { + u32 prev = nic->adaptive_ifs; + u32 min_frames = (speed == SPEED_100) ? 1000 : 100; + + if ((nic->tx_frames / 32 < nic->tx_collisions) && + (nic->tx_frames > min_frames)) { + if (nic->adaptive_ifs < 60) + nic->adaptive_ifs += 5; + } else if (nic->tx_frames < min_frames) { + if (nic->adaptive_ifs >= 5) + nic->adaptive_ifs -= 5; + } + if (nic->adaptive_ifs != prev) + e100_exec_cb(nic, NULL, e100_configure); + } +} + +static void e100_watchdog_impl(struct nic *nic) +{ + struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; + u32 speed; + + if (nic->ecdev) { + ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0); + return; + } + + netif_printk(nic, timer, KERN_DEBUG, nic->netdev, + "right now = %ld\n", jiffies); + + /* mii library handles link maintenance tasks */ + + mii_ethtool_gset(&nic->mii, &cmd); + speed = ethtool_cmd_speed(&cmd); + + if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", + speed == SPEED_100 ? 100 : 10, + cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); + } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Down\n"); + } + + mii_check_link(&nic->mii); + + /* Software generated interrupt to recover from (rare) Rx + * allocation failure. 
+ * Unfortunately have to use a spinlock to not re-enable interrupts + * accidentally, due to hardware that shares a register between the + * interrupt mask bit and the SW Interrupt generation bit */ + spin_lock_irq(&nic->cmd_lock); + iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irq(&nic->cmd_lock); + + e100_update_stats(nic); + e100_adjust_adaptive_ifs(nic, speed, cmd.duplex); + + if (nic->mac <= mac_82557_D100_C) + /* Issue a multicast command to workaround a 557 lock up */ + e100_set_multicast_list(nic->netdev); + + if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) + /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ + nic->flags |= ich_10h_workaround; + else + nic->flags &= ~ich_10h_workaround; +} + +static void e100_watchdog(struct timer_list *t) +{ + struct nic *nic = from_timer(nic, t, watchdog); + + e100_watchdog_impl(nic); + + mod_timer(&nic->watchdog, + round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); +} + +static int e100_xmit_prepare(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + dma_addr_t dma_addr; + cb->command = nic->tx_command; + + dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + /* If we can't map the skb, have the upper layer try later */ + if (dma_mapping_error(&nic->pdev->dev, dma_addr)) { + dev_kfree_skb_any(skb); + skb = NULL; + return -ENOMEM; + } + + /* + * Use the last 4 bytes of the SKB payload packet as the CRC, used for + * testing, ie sending frames with bad CRC. + */ + if (unlikely(skb->no_fcs)) + cb->command |= cpu_to_le16(cb_tx_nc); + else + cb->command &= ~cpu_to_le16(cb_tx_nc); + + /* interrupt every 16 packets regardless of delay */ + if ((nic->cbs_avail & ~15) == nic->cbs_avail) + cb->command |= cpu_to_le16(cb_i); + cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); + cb->u.tcb.tcb_byte_count = 0; + cb->u.tcb.threshold = nic->tx_threshold; + cb->u.tcb.tbd_count = 1; + cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); + cb->u.tcb.tbd.size = cpu_to_le16(skb->len); + skb_tx_timestamp(skb); + return 0; +} + +static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + if (nic->flags & ich_10h_workaround) { + /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. + Issue a NOP command followed by a 1us delay before + issuing the Tx command. */ + if (e100_exec_cmd(nic, cuc_nop, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_nop failed\n"); + udelay(1); + } + + err = e100_exec_cb(nic, skb, e100_xmit_prepare); + + switch (err) { + case -ENOSPC: + /* We queued the skb, but now we're out of space. */ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "No space for CB\n"); + if (!nic->ecdev) + netif_stop_queue(netdev); + break; + case -ENOMEM: + /* This is a hard error - log it. 
*/ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "Out of Tx resources, returning skb\n"); + if (!nic->ecdev) + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +static int e100_tx_clean(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct cb *cb; + int tx_cleaned = 0; + + if (!nic->ecdev) + spin_lock(&nic->cb_lock); + + /* Clean CBs marked complete */ + for (cb = nic->cb_to_clean; + cb->status & cpu_to_le16(cb_complete); + cb = nic->cb_to_clean = cb->next) { + dma_rmb(); /* read skb after status */ + netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, + "cb[%d]->status = 0x%04X\n", + (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), + cb->status); + + if (likely(cb->skb != NULL)) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += cb->skb->len; + + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); + if (!nic->ecdev) + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + tx_cleaned = 1; + } + cb->status = 0; + nic->cbs_avail++; + } + + if (!nic->ecdev) { + spin_unlock(&nic->cb_lock); + + /* Recover from running out of Tx resources in xmit_frame */ + if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) + netif_wake_queue(nic->netdev); + } + + return tx_cleaned; +} + +static void e100_clean_cbs(struct nic *nic) +{ + if (nic->cbs) { + while (nic->cbs_avail != nic->params.cbs.count) { + struct cb *cb = nic->cb_to_clean; + if (cb->skb) { + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); + if (!nic->ecdev) + dev_kfree_skb(cb->skb); + } + nic->cb_to_clean = nic->cb_to_clean->next; + nic->cbs_avail++; + } + dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); + nic->cbs = NULL; + nic->cbs_avail = 0; + } + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = + nic->cbs; +} + +static int e100_alloc_cbs(struct nic *nic) +{ + struct cb *cb; + unsigned int i, count = nic->params.cbs.count; + + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; + nic->cbs_avail = 0; + + nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL, + &nic->cbs_dma_addr); + if (!nic->cbs) + return -ENOMEM; + + for (cb = nic->cbs, i = 0; i < count; cb++, i++) { + cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; + cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; + + cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); + cb->link = cpu_to_le32(nic->cbs_dma_addr + + ((i+1) % count) * sizeof(struct cb)); + } + + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; + nic->cbs_avail = count; + + return 0; +} + +static inline void e100_start_receiver(struct nic *nic, struct rx *rx) +{ + if (!nic->rxs) return; + if (RU_SUSPENDED != nic->ru_running) return; + + /* handle init time starts */ + if (!rx) rx = nic->rxs; + + /* (Re)start RU if suspended or idle and RFA is non-NULL */ + if (rx->skb) { + e100_exec_cmd(nic, ruc_start, rx->dma_addr); + nic->ru_running = RU_RUNNING; + } +} + +#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) +{ + if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) + return -ENOMEM; + + /* Init, and map the RFD. 
*/ + skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { + dev_kfree_skb_any(rx->skb); + rx->skb = NULL; + rx->dma_addr = 0; + return -ENOMEM; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + dma_sync_single_for_device(&nic->pdev->dev, + rx->prev->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + } + + return 0; +} + +static int e100_rx_indicate(struct nic *nic, struct rx *rx, + unsigned int *work_done, unsigned int work_to_do) +{ + struct net_device *dev = nic->netdev; + struct sk_buff *skb = rx->skb; + struct rfd *rfd = (struct rfd *)skb->data; + u16 rfd_status, actual_size; + u16 fcs_pad = 0; + + if (unlikely(work_done && *work_done >= work_to_do)) + return -EAGAIN; + + /* Need to sync before taking a peek at cb_complete bit */ + dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); + rfd_status = le16_to_cpu(rfd->status); + + netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, + "status=0x%04X\n", rfd_status); + dma_rmb(); /* read size after status bit */ + + /* If data isn't ready, nothing to indicate */ + if (unlikely(!(rfd_status & cb_complete))) { + /* If the next buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling + * interrupts */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), + DMA_FROM_DEVICE); + return -ENODATA; + } + + /* Get actual data size */ + if (unlikely(dev->features & NETIF_F_RXFCS)) + fcs_pad = 4; + actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; + if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) + actual_size = RFD_BUF_LEN - sizeof(struct rfd); + + /* Get data */ + dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); + + /* If this buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling interrupts. + * This can happen when the RU sees the size change but also sees + * the el bit set. */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) { + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + } + + if (!nic->ecdev) { + /* Pull off the RFD and put the actual data (minus eth hdr) */ + skb_reserve(skb, sizeof(struct rfd)); + skb_put(skb, actual_size); + skb->protocol = eth_type_trans(skb, nic->netdev); + } + + /* If we are receiving all frames, then don't bother + * checking for errors. + */ + if (unlikely(dev->features & NETIF_F_RXALL)) { + if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) + /* Received oversized frame, but keep it. 
*/ + nic->rx_over_length_errors++; + goto process_skb; + } + + if (unlikely(!(rfd_status & cb_ok))) { + if (!nic->ecdev) { + /* Don't indicate if hardware indicates errors */ + dev_kfree_skb_any(skb); + } + } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) { + /* Don't indicate oversized frames */ + nic->rx_over_length_errors++; + if (!nic->ecdev) { + dev_kfree_skb_any(skb); + } + } else { +process_skb: + dev->stats.rx_packets++; + dev->stats.rx_bytes += (actual_size - fcs_pad); + if (nic->ecdev) { + ecdev_receive(nic->ecdev, + skb->data + sizeof(struct rfd), actual_size - fcs_pad); + + // No need to detect link status as + // long as frames are received: Reset watchdog. + if (ecdev_get_link(nic->ecdev)) { + nic->ec_watchdog_jiffies = jiffies; + } + } else { + netif_receive_skb(skb); + } + if (work_done) + (*work_done)++; + } + + if (nic->ecdev) { + // make receive frame descriptor usable again + memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = dma_map_single(&nic->pdev->dev, skb->data, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { + rx->dma_addr = 0; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *) rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + dma_sync_single_for_device(&nic->pdev->dev, rx->prev->dma_addr, + sizeof(struct rfd), DMA_TO_DEVICE); + } + } else { + rx->skb = NULL; + } + + return 0; +} + +static void e100_rx_clean(struct nic *nic, unsigned int *work_done, + unsigned int work_to_do) +{ + struct rx *rx; + int restart_required = 0, err = 0; + struct rx *old_before_last_rx, *new_before_last_rx; + struct rfd *old_before_last_rfd, *new_before_last_rfd; + + /* Indicate newly arrived packets */ + for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { + err = e100_rx_indicate(nic, rx, work_done, work_to_do); + /* Hit quota or no more to clean */ + if (-EAGAIN == err || -ENODATA == err) + break; + } + + + /* On EAGAIN, hit quota so have more work to do, restart once + * cleanup is complete. + * Else, are we already rnr? then pay attention!!! this ensures that + * the state machine progression never allows a start with a + * partially cleaned list, avoiding a race between hardware + * and rx_to_clean when in NAPI mode */ + if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) + restart_required = 1; + + old_before_last_rx = nic->rx_to_use->prev->prev; + old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; + + if (!nic->ecdev) { + /* Alloc new skbs to refill list */ + for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { + if(unlikely(e100_rx_alloc_skb(nic, rx))) + break; /* Better luck next time (see watchdog) */ + } + } + + new_before_last_rx = nic->rx_to_use->prev->prev; + if (new_before_last_rx != old_before_last_rx) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer + * without worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this + * buffer. + * When the hardware hits the before last buffer with el-bit + * and size of 0, it will RNR interrupt, the RUS will go into + * the No Resources state. It will not complete nor write to + * this buffer.
*/ + new_before_last_rfd = + (struct rfd *)new_before_last_rx->skb->data; + new_before_last_rfd->size = 0; + new_before_last_rfd->command |= cpu_to_le16(cb_el); + dma_sync_single_for_device(&nic->pdev->dev, + new_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + + /* Now that we have a new stopping point, we can clear the old + * stopping point. We must sync twice to get the proper + * ordering on the hardware side of things. */ + old_before_last_rfd->command &= ~cpu_to_le16(cb_el); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + + ETH_FCS_LEN); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + } + + if (restart_required) { + // ack the rnr? + iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); + e100_start_receiver(nic, nic->rx_to_clean); + if (work_done) + (*work_done)++; + } +} + +static void e100_rx_clean_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + + nic->ru_running = RU_UNINITIALIZED; + + if (nic->rxs) { + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + if (rx->skb) { + dma_unmap_single(&nic->pdev->dev, + rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); + dev_kfree_skb(rx->skb); + } + } + kfree(nic->rxs); + nic->rxs = NULL; + } + + nic->rx_to_use = nic->rx_to_clean = NULL; +} + +static int e100_rx_alloc_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + struct rfd *before_last; + + nic->rx_to_use = nic->rx_to_clean = NULL; + nic->ru_running = RU_UNINITIALIZED; + + if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL))) + return -ENOMEM; + + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; + rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; + if (e100_rx_alloc_skb(nic, rx)) { + e100_rx_clean_list(nic); + return -ENOMEM; + } + } + + if (!nic->ecdev) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer without + * worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this buffer. + * When the hardware hits the before last buffer with el-bit and size + * of 0, it will RNR interrupt, the RU will go into the No Resources + * state. It will not complete nor write to this buffer. 
*/ + rx = nic->rxs->prev->prev; + before_last = (struct rfd *)rx->skb->data; + before_last->command |= cpu_to_le16(cb_el); + before_last->size = 0; + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); + } + + nic->rx_to_use = nic->rx_to_clean = nic->rxs; + nic->ru_running = RU_SUSPENDED; + + return 0; +} + +static irqreturn_t e100_intr(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct nic *nic = netdev_priv(netdev); + u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); + + netif_printk(nic, intr, KERN_DEBUG, nic->netdev, + "stat_ack = 0x%02X\n", stat_ack); + + if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ + stat_ack == stat_ack_not_present) /* Hardware is ejected */ + return IRQ_NONE; + + /* Ack interrupt(s) */ + iowrite8(stat_ack, &nic->csr->scb.stat_ack); + + /* We hit Receive No Resource (RNR); restart RU after cleaning */ + if (stat_ack & stat_ack_rnr) + nic->ru_running = RU_SUSPENDED; + + if (!nic->ecdev && likely(napi_schedule_prep(&nic->napi))) { + e100_disable_irq(nic); + __napi_schedule(&nic->napi); + } + + return IRQ_HANDLED; +} + +void e100_ec_poll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_rx_clean(nic, NULL, 100); + e100_tx_clean(nic); + + if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) { + e100_watchdog_impl(nic); + nic->ec_watchdog_jiffies = jiffies; + } +} + + +static int e100_poll(struct napi_struct *napi, int budget) +{ + struct nic *nic = container_of(napi, struct nic, napi); + unsigned int work_done = 0; + + e100_rx_clean(nic, &work_done, budget); + e100_tx_clean(nic); + + /* If budget fully consumed, continue polling */ + if (work_done == budget) + return budget; + + /* only re-enable interrupt if stack agrees polling is really done */ + if (likely(napi_complete_done(napi, work_done))) + e100_enable_irq(nic); + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void e100_netpoll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_disable_irq(nic); + e100_intr(nic->pdev->irq, netdev); + e100_tx_clean(nic); + e100_enable_irq(nic); +} +#endif + +static int e100_set_mac_address(struct net_device *netdev, void *p) +{ + struct nic *nic = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + e100_exec_cb(nic, NULL, e100_setup_iaaddr); + + return 0; +} + +static int e100_asf(struct nic *nic) +{ + /* ASF can be enabled from eeprom */ + return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && + (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && + !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && + ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); +} + +static int e100_up(struct nic *nic) +{ + int err; + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_rx_clean_list; + if ((err = e100_hw_init(nic))) + goto err_clean_cbs; + e100_set_multicast_list(nic->netdev); + e100_start_receiver(nic, NULL); + if (!nic->ecdev) { + mod_timer(&nic->watchdog, jiffies); + } + if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, + nic->netdev->name, nic->netdev))) + goto err_no_irq; + if (!nic->ecdev) { + netif_wake_queue(nic->netdev); + napi_enable(&nic->napi); + /* enable ints _after_ enabling poll, preventing a race between + * disable ints+schedule */ + e100_enable_irq(nic); + } + return 0; 
+ +err_no_irq: + if (!nic->ecdev) + del_timer_sync(&nic->watchdog); +err_clean_cbs: + e100_clean_cbs(nic); +err_rx_clean_list: + e100_rx_clean_list(nic); + return err; +} + +static void e100_down(struct nic *nic) +{ + if (!nic->ecdev) { + /* wait here for poll to complete */ + napi_disable(&nic->napi); + netif_stop_queue(nic->netdev); + } + e100_hw_reset(nic); + free_irq(nic->pdev->irq, nic->netdev); + if (!nic->ecdev) { + del_timer_sync(&nic->watchdog); + netif_carrier_off(nic->netdev); + } + e100_clean_cbs(nic); + e100_rx_clean_list(nic); +} + +static void e100_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct nic *nic = netdev_priv(netdev); + + /* Reset outside of interrupt context, to avoid request_irq + * in interrupt context */ + schedule_work(&nic->tx_timeout_task); +} + +static void e100_tx_timeout_task(struct work_struct *work) +{ + struct nic *nic = container_of(work, struct nic, tx_timeout_task); + struct net_device *netdev = nic->netdev; + + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); + + rtnl_lock(); + if (netif_running(netdev)) { + e100_down(netdev_priv(netdev)); + e100_up(netdev_priv(netdev)); + } + rtnl_unlock(); +} + +static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) +{ + int err; + struct sk_buff *skb; + + /* Use driver resources to perform internal MAC or PHY + * loopback test. A single packet is prepared and transmitted + * in loopback mode, and the test passes if the received + * packet compares byte-for-byte to the transmitted packet. */ + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_clean_rx; + + /* ICH PHY loopback is broken so do MAC loopback instead */ + if (nic->flags & ich && loopback_mode == lb_phy) + loopback_mode = lb_mac; + + nic->loopback = loopback_mode; + if ((err = e100_hw_init(nic))) + goto err_loopback_none; + + if (loopback_mode == lb_phy) + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, + BMCR_LOOPBACK); + + e100_start_receiver(nic, NULL); + + if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { + err = -ENOMEM; + goto err_loopback_none; + } + skb_put(skb, ETH_DATA_LEN); + memset(skb->data, 0xFF, ETH_DATA_LEN); + e100_xmit_frame(skb, nic->netdev); + + msleep(10); + + dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + + if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), + skb->data, ETH_DATA_LEN)) + err = -EAGAIN; + +err_loopback_none: + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); + nic->loopback = lb_none; + e100_clean_cbs(nic); + e100_hw_reset(nic); +err_clean_rx: + e100_rx_clean_list(nic); + return err; +} + +#define MII_LED_CONTROL 0x1B +#define E100_82552_LED_OVERRIDE 0x19 +#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ +#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ + +static int e100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + + mii_ethtool_get_link_ksettings(&nic->mii, cmd); + + return 0; +} + +static int e100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); + err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); + e100_exec_cb(nic, NULL, e100_configure); + + return err; +} + +static void e100_get_drvinfo(struct 
net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct nic *nic = netdev_priv(netdev); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(nic->pdev), + sizeof(info->bus_info)); +} + +#define E100_PHY_REGS 0x1D +static int e100_get_regs_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + /* We know the number of registers, and the size of the dump buffer. + * Calculate the total size in bytes. + */ + return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); +} + +static void e100_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nic *nic = netdev_priv(netdev); + u32 *buff = p; + int i; + + regs->version = (1 << 24) | nic->pdev->revision; + buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | + ioread8(&nic->csr->scb.cmd_lo) << 16 | + ioread16(&nic->csr->scb.status); + for (i = 0; i < E100_PHY_REGS; i++) + /* Note that we read the registers in reverse order. This + * ordering is the ABI apparently used by ethtool and other + * applications. + */ + buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, + E100_PHY_REGS - 1 - i); + memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); + e100_exec_cb(nic, NULL, e100_dump); + msleep(10); + memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, + sizeof(nic->mem->dump_buf)); +} + +static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; + wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0; +} + +static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + + if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || + !device_can_wakeup(&nic->pdev->dev)) + return -EOPNOTSUPP; + + if (wol->wolopts) + nic->flags |= wol_magic; + else + nic->flags &= ~wol_magic; + + device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); + + e100_exec_cb(nic, NULL, e100_configure); + + return 0; +} + +static u32 e100_get_msglevel(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->msg_enable; +} + +static void e100_set_msglevel(struct net_device *netdev, u32 value) +{ + struct nic *nic = netdev_priv(netdev); + nic->msg_enable = value; +} + +static int e100_nway_reset(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_nway_restart(&nic->mii); +} + +static u32 e100_get_link(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_link_ok(&nic->mii); +} + +static int e100_get_eeprom_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->eeprom_wc << 1; +} + +#define E100_EEPROM_MAGIC 0x1234 +static int e100_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + eeprom->magic = E100_EEPROM_MAGIC; + memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); + + return 0; +} + +static int e100_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + if (eeprom->magic != E100_EEPROM_MAGIC) + return -EINVAL; + + memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); + + return e100_eeprom_save(nic, eeprom->offset >> 1, + (eeprom->len >> 1) + 1); +} + +static void e100_get_ringparam(struct net_device *netdev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL == 5 + struct 
ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + ring->rx_max_pending = rfds->max; + ring->tx_max_pending = cbs->max; + ring->rx_pending = rfds->count; + ring->tx_pending = cbs->count; +} + +static int e100_set_ringparam(struct net_device *netdev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL == 5 + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(netdev)) + e100_down(nic); + rfds->count = max(ring->rx_pending, rfds->min); + rfds->count = min(rfds->count, rfds->max); + cbs->count = max(ring->tx_pending, cbs->min); + cbs->count = min(cbs->count, cbs->max); + netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", + rfds->count, cbs->count); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} + +static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)", + "Eeprom test (on/offline)", + "Self test (offline)", + "Mac loopback (offline)", + "Phy loopback (offline)", +}; +#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test) + +static void e100_diag_test(struct net_device *netdev, + struct ethtool_test *test, u64 *data) +{ + struct ethtool_cmd cmd; + struct nic *nic = netdev_priv(netdev); + int i; + + memset(data, 0, E100_TEST_LEN * sizeof(u64)); + data[0] = !mii_link_ok(&nic->mii); + data[1] = e100_eeprom_load(nic); + if (test->flags & ETH_TEST_FL_OFFLINE) { + + /* save speed, duplex & autoneg settings */ + mii_ethtool_gset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_down(nic); + data[2] = e100_self_test(nic); + data[3] = e100_loopback_test(nic, lb_mac); + data[4] = e100_loopback_test(nic, lb_phy); + + /* restore speed, duplex & autoneg settings */ + mii_ethtool_sset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_up(nic); + } + for (i = 0; i < E100_TEST_LEN; i++) + test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; + + msleep_interruptible(4 * 1000); +} + +static int e100_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct nic *nic = netdev_priv(netdev); + enum led_state { + led_on = 0x01, + led_off = 0x04, + led_on_559 = 0x05, + led_on_557 = 0x07, + }; + u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : + MII_LED_CONTROL; + u16 leds = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; + + case ETHTOOL_ID_ON: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : + (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; + break; + + case ETHTOOL_ID_OFF: + leds = (nic->phy == phy_82552_v) ? 
E100_82552_LED_OFF : led_off; + break; + + case ETHTOOL_ID_INACTIVE: + break; + } + + mdio_write(netdev, nic->mii.phy_id, led_reg, leds); + return 0; +} + +static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + /* device-specific stats */ + "tx_deferred", "tx_single_collisions", "tx_multi_collisions", + "tx_flow_control_pause", "rx_flow_control_pause", + "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", + "rx_short_frame_errors", "rx_over_length_errors", +}; +#define E100_NET_STATS_LEN 21 +#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) + +static int e100_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E100_TEST_LEN; + case ETH_SS_STATS: + return E100_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e100_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nic *nic = netdev_priv(netdev); + int i; + + for (i = 0; i < E100_NET_STATS_LEN; i++) + data[i] = ((unsigned long *)&netdev->stats)[i]; + + data[i++] = nic->tx_deferred; + data[i++] = nic->tx_single_collisions; + data[i++] = nic->tx_multiple_collisions; + data[i++] = nic->tx_fc_pause; + data[i++] = nic->rx_fc_pause; + data[i++] = nic->rx_fc_unsupported; + data[i++] = nic->tx_tco_frames; + data[i++] = nic->rx_tco_frames; + data[i++] = nic->rx_short_frame_errors; + data[i++] = nic->rx_over_length_errors; +} + +static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test)); + break; + case ETH_SS_STATS: + memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats)); + break; + } +} + +static const struct ethtool_ops e100_ethtool_ops = { + .get_drvinfo = e100_get_drvinfo, + .get_regs_len = e100_get_regs_len, + .get_regs = e100_get_regs, + .get_wol = e100_get_wol, + .set_wol = e100_set_wol, + .get_msglevel = e100_get_msglevel, + .set_msglevel = e100_set_msglevel, + .nway_reset = e100_nway_reset, + .get_link = e100_get_link, + .get_eeprom_len = e100_get_eeprom_len, + .get_eeprom = e100_get_eeprom, + .set_eeprom = e100_set_eeprom, + .get_ringparam = e100_get_ringparam, + .set_ringparam = e100_set_ringparam, + .self_test = e100_diag_test, + .get_strings = e100_get_strings, + .set_phys_id = e100_set_phys_id, + .get_ethtool_stats = e100_get_ethtool_stats, + .get_sset_count = e100_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e100_get_link_ksettings, + .set_link_ksettings = e100_set_link_ksettings, +}; + +static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct nic *nic = netdev_priv(netdev); + + return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); +} + +static int e100_alloc(struct nic *nic) +{ + nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem), + &nic->dma_addr, GFP_KERNEL); + return nic->mem ? 
0 : -ENOMEM; +} + +static void e100_free(struct nic *nic) +{ + if (nic->mem) { + dma_free_coherent(&nic->pdev->dev, sizeof(struct mem), + nic->mem, nic->dma_addr); + nic->mem = NULL; + } +} + +static int e100_open(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err = 0; + + if (!nic->ecdev) + netif_carrier_off(netdev); + if ((err = e100_up(nic))) + netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); + return err; +} + +static int e100_close(struct net_device *netdev) +{ + e100_down(netdev_priv(netdev)); + return 0; +} + +static int e100_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct nic *nic = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + e100_exec_cb(nic, NULL, e100_configure); + return 1; +} + +static const struct net_device_ops e100_netdev_ops = { + .ndo_open = e100_open, + .ndo_stop = e100_close, + .ndo_start_xmit = e100_xmit_frame, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = e100_set_multicast_list, + .ndo_set_mac_address = e100_set_mac_address, + .ndo_do_ioctl = e100_do_ioctl, + .ndo_tx_timeout = e100_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e100_netpoll, +#endif + .ndo_set_features = e100_set_features, +}; + +static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct nic *nic; + int err; + + if (!(netdev = alloc_etherdev(sizeof(struct nic)))) + return -ENOMEM; + + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; + + netdev->netdev_ops = &e100_netdev_ops; + netdev->ethtool_ops = &e100_ethtool_ops; + netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + nic = netdev_priv(netdev); + netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); + nic->netdev = netdev; + nic->pdev = pdev; + nic->msg_enable = (1 << debug) - 1; + nic->mdio_ctrl = mdio_ctrl_hw; + pci_set_drvdata(pdev, netdev); + + if ((err = pci_enable_device(pdev))) { + netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); + goto err_out_free_dev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + if ((err = pci_request_regions(pdev, DRV_NAME))) { + netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable_pdev; + } + + if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { + netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + if (use_io) + netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); + + nic->csr = pci_iomap(pdev, (use_io ? 
1 : 0), sizeof(struct csr)); + if (!nic->csr) { + netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + if (ent->driver_data) + nic->flags |= ich; + else + nic->flags &= ~ich; + + e100_get_defaults(nic); + + /* D100 MAC doesn't allow rx of vlan packets with normal MTU */ + if (nic->mac < mac_82558_D101_A4) + netdev->features |= NETIF_F_VLAN_CHALLENGED; + + /* locks must be initialized before calling hw_reset */ + spin_lock_init(&nic->cb_lock); + spin_lock_init(&nic->cmd_lock); + spin_lock_init(&nic->mdio_lock); + + /* Reset the device before pci_set_master() in case device is in some + * funky state and has an interrupt pending - hint: we don't have the + * interrupt handler registered yet. */ + e100_hw_reset(nic); + + pci_set_master(pdev); + + timer_setup(&nic->watchdog, e100_watchdog, 0); + + INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); + + if ((err = e100_alloc(nic))) { + netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); + goto err_out_iounmap; + } + + if ((err = e100_eeprom_load(nic))) + goto err_out_free; + + e100_phy_init(nic); + + memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!eeprom_bad_csum_allow) { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); + err = -EAGAIN; + goto err_out_free; + } else { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); + } + } + + /* Wol magic packet can be enabled from eeprom */ + if ((nic->mac >= mac_82558_D101_A4) && + (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { + nic->flags |= wol_magic; + device_set_wakeup_enable(&pdev->dev, true); + } + + /* ack any pending wake events, disable PME */ + pci_pme_active(pdev, false); + + // offer device to EtherCAT master module + nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE); + + if (!nic->ecdev) { + strcpy(netdev->name, "eth%d"); + if ((err = register_netdev(netdev))) { + netif_err(nic, probe, nic->netdev, + "Cannot register net device, aborting\n"); + goto err_out_free; + } + } + + nic->cbs_pool = dma_pool_create(netdev->name, + &nic->pdev->dev, + nic->params.cbs.max * sizeof(struct cb), + sizeof(u32), + 0); + if (!nic->cbs_pool) { + netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); + err = -ENOMEM; + goto err_out_pool; + } + netif_info(nic, probe, nic->netdev, + "addr 0x%llx, irq %d, MAC addr %pM\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), + pdev->irq, netdev->dev_addr); + + if (nic->ecdev) { + err = ecdev_open(nic->ecdev); + if (err) { + ecdev_withdraw(nic->ecdev); + goto err_out_free; + } + } + + return 0; + +err_out_pool: + unregister_netdev(netdev); +err_out_free: + e100_free(nic); +err_out_iounmap: + pci_iounmap(pdev, nic->csr); +err_out_free_res: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out_free_dev: + free_netdev(netdev); + return err; +} + +static void e100_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netdev) { + struct nic *nic = netdev_priv(netdev); + if (nic->ecdev) { + ecdev_close(nic->ecdev); + ecdev_withdraw(nic->ecdev); + } else { + unregister_netdev(netdev); + } + + e100_free(nic); + pci_iounmap(pdev, nic->csr); + dma_pool_destroy(nic->cbs_pool); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ +#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ +#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ +static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (netif_running(netdev)) + e100_down(nic); + netif_device_detach(netdev); + + if ((nic->flags & wol_magic) | e100_asf(nic)) { + /* enable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, smartspeed | + E100_82552_REV_ANEG | E100_82552_ANEG_NOW); + } + *enable_wake = true; + } else { + *enable_wake = false; + } + + pci_clear_master(pdev); +} + +static int __e100_power_off(struct pci_dev *pdev, bool wake) +{ + if (wake) + return pci_prepare_to_sleep(pdev); + + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int __maybe_unused e100_suspend(struct device *dev_d) +{ + bool wake; + + __e100_shutdown(to_pci_dev(dev_d), &wake); + + device_wakeup_disable(dev_d); + + return 0; +} + +static int __maybe_unused e100_resume(struct device *dev_d) +{ + struct net_device *netdev = dev_get_drvdata(dev_d); + struct nic *nic = netdev_priv(netdev); + + /* disable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, + smartspeed & ~(E100_82552_REV_ANEG)); + } + + netif_device_attach(netdev); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} + +static void e100_shutdown(struct pci_dev *pdev) +{ + bool wake; + __e100_shutdown(pdev, &wake); + if (system_state == SYSTEM_POWER_OFF) + __e100_power_off(pdev, wake); +} + +/* ------------------ PCI Error Recovery infrastructure -------------- */ +/** + * e100_io_error_detected - called when PCI error is detected. 
+ * @pdev: Pointer to PCI device + * @state: The current pci connection state + */ +static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (nic->ecdev) + return -EBUSY; + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e100_down(nic); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e100_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch. + */ +static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (nic->ecdev) + return -EBUSY; + + if (pci_enable_device(pdev)) { + pr_err("Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + /* Only one device per card can do a reset */ + if (0 != PCI_FUNC(pdev->devfn)) + return PCI_ERS_RESULT_RECOVERED; + e100_hw_reset(nic); + e100_phy_init(nic); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e100_io_resume - resume normal operations + * @pdev: Pointer to PCI device + * + * Resume normal operations after an error recovery + * sequence has been completed. + */ +static void e100_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, PCI_D0, 0); + + if (!nic->ecdev) + netif_device_attach(netdev); + if (nic->ecdev || netif_running(netdev)) { + e100_open(netdev); + if (!nic->ecdev) + mod_timer(&nic->watchdog, jiffies); + } +} + +static const struct pci_error_handlers e100_err_handler = { + .error_detected = e100_io_error_detected, + .slot_reset = e100_io_slot_reset, + .resume = e100_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume); + +static struct pci_driver e100_driver = { + .name = DRV_NAME, + .id_table = e100_id_table, + .probe = e100_probe, + .remove = e100_remove, + + /* Power Management hooks */ + .driver.pm = &e100_pm_ops, + + .shutdown = e100_shutdown, + .err_handler = &e100_err_handler, +}; + +static int __init e100_init_module(void) +{ + if (((1 << debug) - 1) & NETIF_MSG_DRV) { + pr_info("%s\n", DRV_DESCRIPTION); + pr_info("%s\n", DRV_COPYRIGHT); + } + return pci_register_driver(&e100_driver); +} + +static void __exit e100_cleanup_module(void) +{ + printk(KERN_INFO DRV_NAME " cleaning up module...\n"); + pci_unregister_driver(&e100_driver); + printk(KERN_INFO DRV_NAME " module cleaned up.\n"); +} + +module_init(e100_init_module); +module_exit(e100_cleanup_module); diff --git a/devices/e100-5.14-orig.c b/devices/e100-5.14-orig.c new file mode 100644 index 00000000..1fa68ebe --- /dev/null +++ b/devices/e100-5.14-orig.c @@ -0,0 +1,3188 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* + * e100.c: Intel(R) PRO/100 ethernet driver + * + * (Re)written 2003 by scott.feldman@intel.com. Based loosely on + * original e100 driver, but better described as a munging of + * e100, e1000, eepro100, tg3, 8139cp, and other drivers. 
+ * + * References: + * Intel 8255x 10/100 Mbps Ethernet Controller Family, + * Open Source Software Developers Manual, + * http://sourceforge.net/projects/e1000 + * + * + * Theory of Operation + * + * I. General + * + * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet + * controller family, which includes the 82557, 82558, 82559, 82550, + * 82551, and 82562 devices. 82558 and greater controllers + * integrate the Intel 82555 PHY. The controllers are used in + * server and client network interface cards, as well as in + * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx + * configurations. 8255x supports a 32-bit linear addressing + * mode and operates at 33Mhz PCI clock rate. + * + * II. Driver Operation + * + * Memory-mapped mode is used exclusively to access the device's + * shared-memory structure, the Control/Status Registers (CSR). All + * setup, configuration, and control of the device, including queuing + * of Tx, Rx, and configuration commands is through the CSR. + * cmd_lock serializes accesses to the CSR command register. cb_lock + * protects the shared Command Block List (CBL). + * + * 8255x is highly MII-compliant and all access to the PHY go + * through the Management Data Interface (MDI). Consequently, the + * driver leverages the mii.c library shared with other MII-compliant + * devices. + * + * Big- and Little-Endian byte order as well as 32- and 64-bit + * archs are supported. Weak-ordered memory and non-cache-coherent + * archs are supported. + * + * III. Transmit + * + * A Tx skb is mapped and hangs off of a TCB. TCBs are linked + * together in a fixed-size ring (CBL) thus forming the flexible mode + * memory structure. A TCB marked with the suspend-bit indicates + * the end of the ring. The last TCB processed suspends the + * controller, and the controller can be restarted by issue a CU + * resume command to continue from the suspend point, or a CU start + * command to start at a given position in the ring. + * + * Non-Tx commands (config, multicast setup, etc) are linked + * into the CBL ring along with Tx commands. The common structure + * used for both Tx and non-Tx commands is the Command Block (CB). + * + * cb_to_use is the next CB to use for queuing a command; cb_to_clean + * is the next CB to check for completion; cb_to_send is the first + * CB to start on in case of a previous failure to resume. CB clean + * up happens in interrupt context in response to a CU interrupt. + * cbs_avail keeps track of number of free CB resources available. + * + * Hardware padding of short packets to minimum packet size is + * enabled. 82557 pads with 7Eh, while the later controllers pad + * with 00h. + * + * IV. Receive + * + * The Receive Frame Area (RFA) comprises a ring of Receive Frame + * Descriptors (RFD) + data buffer, thus forming the simplified mode + * memory structure. Rx skbs are allocated to contain both the RFD + * and the data buffer, but the RFD is pulled off before the skb is + * indicated. The data buffer is aligned such that encapsulated + * protocol headers are u32-aligned. Since the RFD is part of the + * mapped shared memory, and completion status is contained within + * the RFD, the RFD must be dma_sync'ed to maintain a consistent + * view from software and hardware. + * + * In order to keep updates to the RFD link field from colliding with + * hardware writes to mark packets complete, we use the feature that + * hardware will not write to a size 0 descriptor and mark the previous + * packet as end-of-list (EL). 
After updating the link, we remove EL + * and only then restore the size such that hardware may use the + * previous-to-end RFD. + * + * Under typical operation, the receive unit (RU) is start once, + * and the controller happily fills RFDs as frames arrive. If + * replacement RFDs cannot be allocated, or the RU goes non-active, + * the RU must be restarted. Frame arrival generates an interrupt, + * and Rx indication and re-allocation happen in the same context, + * therefore no locking is required. A software-generated interrupt + * is generated from the watchdog to recover from a failed allocation + * scenario where all Rx resources have been indicated and none re- + * placed. + * + * V. Miscellaneous + * + * VLAN offloading of tagging, stripping and filtering is not + * supported, but driver will accommodate the extra 4-byte VLAN tag + * for processing by upper layers. Tx/Rx Checksum offloading is not + * supported. Tx Scatter/Gather is not supported. Jumbo Frames is + * not supported (hardware limitation). + * + * MagicPacket(tm) WoL support is enabled/disabled via ethtool. + * + * Thanks to JC (jchapman@katalix.com) for helping with + * testing/troubleshooting the development driver. + * + * TODO: + * o several entry points race with dev->close + * o check for tx-no-resources/stop Q races with tx clean/wake Q + * + * FIXES: + * 2005/12/02 - Michael O'Donnell + * - Stratus87247: protect MDI control register manipulations + * 2009/06/01 - Andreas Mohr + * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define DRV_NAME "e100" +#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" +#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" + +#define E100_WATCHDOG_PERIOD (2 * HZ) +#define E100_NAPI_WEIGHT 16 + +#define FIRMWARE_D101M "e100/d101m_ucode.bin" +#define FIRMWARE_D101S "e100/d101s_ucode.bin" +#define FIRMWARE_D102E "e100/d102e_ucode.bin" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE(FIRMWARE_D101M); +MODULE_FIRMWARE(FIRMWARE_D101S); +MODULE_FIRMWARE(FIRMWARE_D102E); + +static int debug = 3; +static int eeprom_bad_csum_allow = 0; +static int use_io = 0; +module_param(debug, int, 0); +module_param(eeprom_bad_csum_allow, int, 0); +module_param(use_io, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); +MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); + +#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ + PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ + PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } +static const struct pci_device_id e100_id_table[] = { + INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1032, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1033, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1034, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1038, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1039, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103A, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103B, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103C, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103D, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103E, 
4), + INTEL_8255X_ETHERNET_DEVICE(0x1050, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1051, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1052, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1053, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1054, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1055, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1056, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1057, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1059, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1064, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1065, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1066, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1067, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1068, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), + INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), + INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), + INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), + INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), + INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), + { 0, } +}; +MODULE_DEVICE_TABLE(pci, e100_id_table); + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 0xFF, +}; + +enum phy { + phy_100a = 0x000003E0, + phy_100c = 0x035002A8, + phy_82555_tx = 0x015002A8, + phy_nsc_tx = 0x5C002000, + phy_82562_et = 0x033002A8, + phy_82562_em = 0x032002A8, + phy_82562_ek = 0x031002A8, + phy_82562_eh = 0x017002A8, + phy_82552_v = 0xd061004d, + phy_unknown = 0xFFFFFFFF, +}; + +/* CSR (Control/Status Registers) */ +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 0x08, + rus_ready = 0x10, + rus_mask = 0x3C, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0x00, + stat_ack_sw_gen = 0x04, + stat_ack_rnr = 0x10, + stat_ack_cu_idle = 0x20, + stat_ack_frame_rx = 0x40, + stat_ack_cu_cmd_done = 0x80, + stat_ack_not_present = 0xFF, + stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), + stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), +}; + +enum scb_cmd_hi { + irq_mask_none = 0x00, + irq_mask_all = 0x01, + irq_sw_gen = 0x02, +}; + +enum scb_cmd_lo { + cuc_nop = 0x00, + ruc_start = 0x01, + ruc_load_base = 0x06, + cuc_start = 0x10, + cuc_resume = 0x20, + cuc_dump_addr = 0x40, + cuc_dump_stats = 0x50, + cuc_load_base = 0x60, + cuc_dump_reset = 0x70, +}; + +enum cuc_dump { + cuc_dump_complete = 0x0000A005, + cuc_dump_reset_complete = 0x0000A007, +}; + +enum port { + software_reset = 0x0000, + selftest = 0x0001, + selective_reset = 0x0002, +}; + +enum eeprom_ctrl_lo { + eesk = 0x01, + eecs = 0x02, + eedi = 0x04, + eedo = 0x08, +}; + +enum mdi_ctrl { + mdi_write = 0x04000000, + mdi_read = 0x08000000, + mdi_ready = 0x10000000, +}; + +enum eeprom_op { + op_write = 0x05, + op_read = 0x06, + op_ewds = 0x10, + op_ewen = 0x13, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 0x03, + eeprom_phy_iface = 0x06, + eeprom_id 
= 0x0A, + eeprom_config_asf = 0x0D, + eeprom_smbus_addr = 0x90, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 0x0080, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB, + I82553C, + I82503, + DP83840, + S80C240, + S80C24, + I82555, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 0x0020, +}; + +enum eeprom_config_asf { + eeprom_asf = 0x8000, + eeprom_gcl = 0x4000, +}; + +enum cb_status { + cb_complete = 0x8000, + cb_ok = 0x2000, +}; + +/* + * cb_command - Command Block flags + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory + */ +enum cb_command { + cb_nop = 0x0000, + cb_iaaddr = 0x0001, + cb_config = 0x0002, + cb_multi = 0x0003, + cb_tx = 0x0004, + cb_ucode = 0x0005, + cb_dump = 0x0006, + cb_tx_sf = 0x0008, + cb_tx_nc = 0x0010, + cb_cid = 0x1f00, + cb_i = 0x2000, + cb_s = 0x4000, + cb_el = 0x8000, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next, *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +#if defined(__BIG_ENDIAN_BITFIELD) +#define X(a,b) b,a +#else +#define X(a,b) a,b +#endif +struct config { +/*0*/ u8 X(byte_count:6, pad0:2); +/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); +/*2*/ u8 adaptive_ifs; +/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), + term_write_cache_line:1), pad3:4); +/*4*/ u8 X(rx_dma_max_count:7, pad4:1); +/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); +/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), + tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), + rx_save_overruns : 1), rx_save_bad_frames : 1); +/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), + pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), + tx_dynamic_tbd:1); +/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); +/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), + link_status_wake:1), arp_wake:1), mcmatch_wake:1); +/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), + loopback:2); +/*11*/ u8 X(linear_priority:3, pad11:5); +/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); +/*13*/ u8 ip_addr_lo; +/*14*/ u8 ip_addr_hi; +/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), + wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), + pad15_2:1), crs_or_cdt:1); +/*16*/ u8 fc_delay_lo; +/*17*/ u8 fc_delay_hi; +/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), + rx_long_ok:1), fc_priority_threshold:3), pad18:1); +/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), + fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), + full_duplex_force:1), full_duplex_pin:1); +/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1); +/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); +/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); + u8 pad_d102[9]; +}; + +#define E100_MAX_MULTICAST_ADDRS 64 +struct multi { + __le16 count; + u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; +}; + +/* Important: keep total struct u32-aligned */ +#define UCODE_SIZE 134 +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[ETH_ALEN]; + __le32 ucode[UCODE_SIZE]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next, 
*prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, lb_mac = 1, lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames, tx_max_collisions, tx_late_collisions, + tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, + tx_multiple_collisions, tx_total_collisions; + __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors, + rx_resource_errors, rx_overrun_errors, rx_cdt_errors, + rx_short_frame_errors; + __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; + __le16 xmt_tco_frames, rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + /* Begin: frequently used values: keep adjacent for cache effect */ + u32 msg_enable ____cacheline_aligned; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data); + + struct rx *rxs ____cacheline_aligned; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + + spinlock_t cb_lock ____cacheline_aligned; + spinlock_t cmd_lock; + struct csr __iomem *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + /* End: frequently used values: keep adjacent for cache effect */ + + enum { + ich = (1 << 0), + promiscuous = (1 << 1), + multicast_all = (1 << 2), + wol_magic = (1 << 3), + ich_10h_workaround = (1 << 4), + } flags ____cacheline_aligned; + + enum mac mac; + enum phy phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + + struct mem *mem; + dma_addr_t dma_addr; + + struct dma_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + + u16 eeprom_wc; + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; +}; + +static inline void e100_write_flush(struct nic *nic) +{ + /* Flush previous PCI writes through intermediate bridges + * by doing a benign read */ + (void)ioread8(&nic->csr->scb.status); +} + +static void e100_enable_irq(struct nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_disable_irq(struct nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_hw_reset(struct nic *nic) +{ + /* Put CU and RU into idle with a selective reset to get + * device off of PCI bus */ + iowrite32(selective_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Now fully reset device */ + iowrite32(software_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Mask off our interrupt line - it's unmasked after reset */ + 
e100_disable_irq(nic); +} + +static int e100_self_test(struct nic *nic) +{ + u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); + + /* Passing the self-test is a pretty good indication + * that the device can DMA to/from host memory */ + + nic->mem->selftest.signature = 0; + nic->mem->selftest.result = 0xFFFFFFFF; + + iowrite32(selftest | dma_addr, &nic->csr->port); + e100_write_flush(nic); + /* Wait 10 msec for self-test to complete */ + msleep(10); + + /* Interrupts are enabled after self-test */ + e100_disable_irq(nic); + + /* Check results of self-test */ + if (nic->mem->selftest.result != 0) { + netif_err(nic, hw, nic->netdev, + "Self-test failed: result=0x%08X\n", + nic->mem->selftest.result); + return -ETIMEDOUT; + } + if (nic->mem->selftest.signature == 0) { + netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data) +{ + u32 cmd_addr_data[3]; + u8 ctrl; + int i, j; + + /* Three cmds: write/erase enable, write data, write/erase disable */ + cmd_addr_data[0] = op_ewen << (addr_len - 2); + cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | + le16_to_cpu(data); + cmd_addr_data[2] = op_ewds << (addr_len - 2); + + /* Bit-bang cmds to write word to eeprom */ + for (j = 0; j < 3; j++) { + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data[j] & (1 << i)) ? + eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } + /* Wait 10 msec for cmd to complete */ + msleep(10); + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } +}; + +/* General technique stolen from the eepro100 driver - very clever */ +static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) +{ + u32 cmd_addr_data; + u16 data = 0; + u8 ctrl; + int i; + + cmd_addr_data = ((op_read << *addr_len) | addr) << 16; + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Bit-bang to read word from eeprom */ + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Eeprom drives a dummy zero to EEDO after receiving + * complete address. Use this to adjust addr_len. */ + ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); + if (!(ctrl & eedo) && i > 16) { + *addr_len -= (i - 16); + i = 17; + } + + data = (data << 1) | (ctrl & eedo ? 
1 : 0); + } + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + return cpu_to_le16(data); +}; + +/* Load entire EEPROM image into driver cache and validate checksum */ +static int e100_eeprom_load(struct nic *nic) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + for (addr = 0; addr < nic->eeprom_wc; addr++) { + nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); + if (addr < nic->eeprom_wc - 1) + checksum += le16_to_cpu(nic->eeprom[addr]); + } + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { + netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); + if (!eeprom_bad_csum_allow) + return -EAGAIN; + } + + return 0; +} + +/* Save (portion of) driver EEPROM cache to device and update checksum */ +static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + if (start + count >= nic->eeprom_wc) + return -EINVAL; + + for (addr = start; addr < start + count; addr++) + e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + for (addr = 0; addr < nic->eeprom_wc - 1; addr++) + checksum += le16_to_cpu(nic->eeprom[addr]); + nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); + e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, + nic->eeprom[nic->eeprom_wc - 1]); + + return 0; +} + +#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ +#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ +static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) +{ + unsigned long flags; + unsigned int i; + int err = 0; + + spin_lock_irqsave(&nic->cmd_lock, flags); + + /* Previous command is accepted when SCB clears */ + for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { + if (likely(!ioread8(&nic->csr->scb.cmd_lo))) + break; + cpu_relax(); + if (unlikely(i > E100_WAIT_SCB_FAST)) + udelay(5); + } + if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) { + err = -EAGAIN; + goto err_unlock; + } + + if (unlikely(cmd != cuc_resume)) + iowrite32(dma_addr, &nic->csr->scb.gen_ptr); + iowrite8(cmd, &nic->csr->scb.cmd_lo); + +err_unlock: + spin_unlock_irqrestore(&nic->cmd_lock, flags); + + return err; +} + +static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, + int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) +{ + struct cb *cb; + unsigned long flags; + int err; + + spin_lock_irqsave(&nic->cb_lock, flags); + + if (unlikely(!nic->cbs_avail)) { + err = -ENOMEM; + goto err_unlock; + } + + cb = nic->cb_to_use; + nic->cb_to_use = cb->next; + nic->cbs_avail--; + cb->skb = skb; + + err = cb_prepare(nic, cb, skb); + if (err) + goto err_unlock; + + if (unlikely(!nic->cbs_avail)) + err = -ENOSPC; + + + /* Order is important otherwise we'll be in a race with h/w: + * set S-bit in current first, then clear S-bit in previous. 
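+ * Setting the S-bit on the new CB first guarantees the CU suspends after
+ * it before the previous CB is released; the dma_wmb() below keeps the
+ * two command-word updates ordered as seen by the device.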
*/ + cb->command |= cpu_to_le16(cb_s); + dma_wmb(); + cb->prev->command &= cpu_to_le16(~cb_s); + + while (nic->cb_to_send != nic->cb_to_use) { + if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, + nic->cb_to_send->dma_addr))) { + /* Ok, here's where things get sticky. It's + * possible that we can't schedule the command + * because the controller is too busy, so + * let's just queue the command and try again + * when another command is scheduled. */ + if (err == -ENOSPC) { + //request a reset + schedule_work(&nic->tx_timeout_task); + } + break; + } else { + nic->cuc_cmd = cuc_resume; + nic->cb_to_send = nic->cb_to_send->next; + } + } + +err_unlock: + spin_unlock_irqrestore(&nic->cb_lock, flags); + + return err; +} + +static int mdio_read(struct net_device *netdev, int addr, int reg) +{ + struct nic *nic = netdev_priv(netdev); + return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); +} + +static void mdio_write(struct net_device *netdev, int addr, int reg, int data) +{ + struct nic *nic = netdev_priv(netdev); + + nic->mdio_ctrl(nic, addr, mdi_write, reg, data); +} + +/* the standard mdio_ctrl() function for usual MII-compliant hardware */ +static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) +{ + u32 data_out = 0; + unsigned int i; + unsigned long flags; + + + /* + * Stratus87247: we shouldn't be writing the MDI control + * register until the Ready bit shows True. Also, since + * manipulation of the MDI control registers is a multi-step + * procedure it should be done under lock. + */ + spin_lock_irqsave(&nic->mdio_lock, flags); + for (i = 100; i; --i) { + if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) + break; + udelay(20); + } + if (unlikely(!i)) { + netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); + spin_unlock_irqrestore(&nic->mdio_lock, flags); + return 0; /* No way to indicate timeout error */ + } + iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); + + for (i = 0; i < 100; i++) { + udelay(20); + if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) + break; + } + spin_unlock_irqrestore(&nic->mdio_lock, flags); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data, data_out); + return (u16)data_out; +} + +/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */ +static u16 mdio_ctrl_phy_82552_v(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + if ((reg == MII_BMCR) && (dir == mdi_write)) { + if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) { + u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, + MII_ADVERTISE); + + /* + * Workaround Si issue where sometimes the part will not + * autoneg to 100Mbps even when advertised. + */ + if (advert & ADVERTISE_100FULL) + data |= BMCR_SPEED100 | BMCR_FULLDPLX; + else if (advert & ADVERTISE_100HALF) + data |= BMCR_SPEED100; + } + } + return mdio_ctrl_hw(nic, addr, dir, reg, data); +} + +/* Fully software-emulated mdio_ctrl() function for cards without + * MII-compliant PHYs. + * For now, this is mainly geared towards 80c24 support; in case of further + * requirements for other types (i82503, ...?) either extend this mechanism + * or split it, whichever is cleaner. 
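+ * The read path below reports a link-up, autoneg-capable 10 Mb/s PHY so
+ * that mii_link_ok() and the other generic MII helpers keep working
+ * without real MDIO access.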
+ */ +static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + /* might need to allocate a netdev_priv'ed register array eventually + * to be able to record state changes, but for now + * some fully hardcoded register handling ought to be ok I guess. */ + + if (dir == mdi_read) { + switch (reg) { + case MII_BMCR: + /* Auto-negotiation, right? */ + return BMCR_ANENABLE | + BMCR_FULLDPLX; + case MII_BMSR: + return BMSR_LSTATUS /* for mii_link_ok() */ | + BMSR_ANEGCAPABLE | + BMSR_10FULL; + case MII_ADVERTISE: + /* 80c24 is a "combo card" PHY, right? */ + return ADVERTISE_10HALF | + ADVERTISE_10FULL; + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } else { + switch (reg) { + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } +} +static inline int e100_phy_supports_mii(struct nic *nic) +{ + /* for now, just check it by comparing whether we + are using MII software emulation. + */ + return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); +} + +static void e100_get_defaults(struct nic *nic) +{ + struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; + struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; + + /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ + nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision; + if (nic->mac == mac_unknown) + nic->mac = mac_82557_D100_A; + + nic->params.rfds = rfds; + nic->params.cbs = cbs; + + /* Quadwords to DMA into FIFO before starting frame transmit */ + nic->tx_threshold = 0xE0; + + /* no interrupt for every tx completion, delay = 256us if not 557 */ + nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | + ((nic->mac >= mac_82558_D101_A4) ? 
cb_cid : cb_i)); + + /* Template for a freshly allocated RFD */ + nic->blank_rfd.command = 0; + nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); + nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); + + /* MII setup */ + nic->mii.phy_id_mask = 0x1F; + nic->mii.reg_num_mask = 0x1F; + nic->mii.dev = nic->netdev; + nic->mii.mdio_read = mdio_read; + nic->mii.mdio_write = mdio_write; +} + +static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct config *config = &cb->u.config; + u8 *c = (u8 *)config; + struct net_device *netdev = nic->netdev; + + cb->command = cpu_to_le16(cb_config); + + memset(config, 0, sizeof(struct config)); + + config->byte_count = 0x16; /* bytes in this struct */ + config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ + config->direct_rx_dma = 0x1; /* reserved */ + config->standard_tcb = 0x1; /* 1=standard, 0=extended */ + config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ + config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ + config->tx_underrun_retry = 0x3; /* # of underrun retries */ + if (e100_phy_supports_mii(nic)) + config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ + config->pad10 = 0x6; + config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ + config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ + config->ifs = 0x6; /* x16 = inter frame spacing */ + config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ + config->pad15_1 = 0x1; + config->pad15_2 = 0x1; + config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ + config->fc_delay_hi = 0x40; /* time delay for fc frame */ + config->tx_padding = 0x1; /* 1=pad short frames */ + config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ + config->pad18 = 0x1; + config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ + config->pad20_1 = 0x1F; + config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ + config->pad21_1 = 0x5; + + config->adaptive_ifs = nic->adaptive_ifs; + config->loopback = nic->loopback; + + if (nic->mii.force_media && nic->mii.full_duplex) + config->full_duplex_force = 0x1; /* 1=force, 0=auto */ + + if (nic->flags & promiscuous || nic->loopback) { + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + config->promiscuous_mode = 0x1; /* 1=on, 0=off */ + } + + if (unlikely(netdev->features & NETIF_F_RXFCS)) + config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ + + if (nic->flags & multicast_all) + config->multicast_all = 0x1; /* 1=accept, 0=no */ + + /* disable WoL when up */ + if (netif_running(nic->netdev) || !(nic->flags & wol_magic)) + config->magic_packet_disable = 0x1; /* 1=off, 0=on */ + + if (nic->mac >= mac_82558_D101_A4) { + config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ + config->mwi_enable = 0x1; /* 1=enable, 0=disable */ + config->standard_tcb = 0x0; /* 1=standard, 0=extended */ + config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ + if (nic->mac >= mac_82559_D101M) { + config->tno_intr = 0x1; /* TCO stats enable */ + /* Enable TCO in extended config */ + if (nic->mac >= mac_82551_10) { + config->byte_count = 0x20; /* extended bytes */ + config->rx_d102_mode = 0x1; /* GMRC for TCO */ + } + } else { + config->standard_stat_counter = 0x0; + } + } + + if (netdev->features & NETIF_F_RXALL) { + config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + } + + netif_printk(nic, 
hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", + c + 0); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", + c + 8); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", + c + 16); + return 0; +} + +/************************************************************************* +* CPUSaver parameters +* +* All CPUSaver parameters are 16-bit literals that are part of a +* "move immediate value" instruction. By changing the value of +* the literal in the instruction before the code is loaded, the +* driver can change the algorithm. +* +* INTDELAY - This loads the dead-man timer with its initial value. +* When this timer expires the interrupt is asserted, and the +* timer is reset each time a new packet is received. (see +* BUNDLEMAX below to set the limit on number of chained packets) +* The current default is 0x600 or 1536. Experiments show that +* the value should probably stay within the 0x200 - 0x1000. +* +* BUNDLEMAX - +* This sets the maximum number of frames that will be bundled. In +* some situations, such as the TCP windowing algorithm, it may be +* better to limit the growth of the bundle size than let it go as +* high as it can, because that could cause too much added latency. +* The default is six, because this is the number of packets in the +* default TCP window size. A value of 1 would make CPUSaver indicate +* an interrupt for every frame received. If you do not want to put +* a limit on the bundle size, set this value to xFFFF. +* +* BUNDLESMALL - +* This contains a bit-mask describing the minimum size frame that +* will be bundled. The default masks the lower 7 bits, which means +* that any frame less than 128 bytes in length will not be bundled, +* but will instead immediately generate an interrupt. This does +* not affect the current bundle in any way. Any frame that is 128 +* bytes or large will be bundled normally. This feature is meant +* to provide immediate indication of ACK frames in a TCP environment. +* Customers were seeing poor performance when a machine with CPUSaver +* enabled was sending but not receiving. The delay introduced when +* the ACKs were received was enough to reduce total throughput, because +* the sender would sit idle until the ACK was finally seen. +* +* The current default is 0xFF80, which masks out the lower 7 bits. +* This means that any frame which is x7F (127) bytes or smaller +* will cause an immediate interrupt. Because this value must be a +* bit mask, there are only a few valid values that can be used. To +* turn this feature off, the driver can write the value xFFFF to the +* lower word of this instruction (in the same way that the other +* parameters are used). Likewise, a value of 0xF800 (2047) would +* cause an interrupt to be generated for every frame, because all +* standard Ethernet frames are <= 2047 bytes in length. 
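+*
+* In the code that follows, these literals are patched into the microcode
+* image at load time: INTDELAY arms the dead-man timer with 0x600 (1536),
+* BUNDLEMAX caps a bundle at six frames, and BUNDLESMALL selects the mask
+* written for the minimum-size check (0xFFFF when set, so small frames are
+* bundled too; 0xFF80 when zero, so frames under 128 bytes interrupt
+* immediately).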
+*************************************************************************/ + +/* if you wish to disable the ucode functionality, while maintaining the + * workarounds it provides, set the following defines to: + * BUNDLESMALL 0 + * BUNDLEMAX 1 + * INTDELAY 1 + */ +#define BUNDLESMALL 1 +#define BUNDLEMAX (u16)6 +#define INTDELAY (u16)1536 /* 0x600 */ + +/* Initialize firmware */ +static const struct firmware *e100_request_firmware(struct nic *nic) +{ + const char *fw_name; + const struct firmware *fw = nic->fw; + u8 timer, bundle, min_size; + int err = 0; + bool required = false; + + /* do not load u-code for ICH devices */ + if (nic->flags & ich) + return NULL; + + /* Search for ucode match against h/w revision + * + * Based on comments in the source code for the FreeBSD fxp + * driver, the FIRMWARE_D102E ucode includes both CPUSaver and + * + * "fixes for bugs in the B-step hardware (specifically, bugs + * with Inline Receive)." + * + * So we must fail if it cannot be loaded. + * + * The other microcode files are only required for the optional + * CPUSaver feature. Nice to have, but no reason to fail. + */ + if (nic->mac == mac_82559_D101M) { + fw_name = FIRMWARE_D101M; + } else if (nic->mac == mac_82559_D101S) { + fw_name = FIRMWARE_D101S; + } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { + fw_name = FIRMWARE_D102E; + required = true; + } else { /* No ucode on other devices */ + return NULL; + } + + /* If the firmware has not previously been loaded, request a pointer + * to it. If it was previously loaded, we are reinitializing the + * adapter, possibly in a resume from hibernate, in which case + * request_firmware() cannot be used. + */ + if (!fw) + err = request_firmware(&fw, fw_name, &nic->pdev->dev); + + if (err) { + if (required) { + netif_err(nic, probe, nic->netdev, + "Failed to load firmware \"%s\": %d\n", + fw_name, err); + return ERR_PTR(err); + } else { + netif_info(nic, probe, nic->netdev, + "CPUSaver disabled. Needs \"%s\": %d\n", + fw_name, err); + return NULL; + } + } + + /* Firmware should be precisely UCODE_SIZE (words) plus three bytes + indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ + if (fw->size != UCODE_SIZE * 4 + 3) { + netif_err(nic, probe, nic->netdev, + "Firmware \"%s\" has wrong size %zu\n", + fw_name, fw->size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || + min_size >= UCODE_SIZE) { + netif_err(nic, probe, nic->netdev, + "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", + fw_name, timer, bundle, min_size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* OK, firmware is validated and ready to use. Save a pointer + * to it in the nic */ + nic->fw = fw; + return fw; +} + +static int e100_setup_ucode(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + const struct firmware *fw = (void *)skb; + u8 timer, bundle, min_size; + + /* It's not a real skb; we just abused the fact that e100_exec_cb + will pass it through to here... 
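+ The timer, bundle and min_size bytes at the tail of the blob are word
+ indices into the ucode image; the masking below preserves the upper 16
+ bits of each indexed word and ORs in INTDELAY, BUNDLEMAX or the
+ BUNDLESMALL mask.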
*/ + cb->skb = NULL; + + /* firmware is stored as little endian already */ + memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + /* Insert user-tunable settings in cb->u.ucode */ + cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); + cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); + cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); + + cb->command = cpu_to_le16(cb_ucode | cb_el); + return 0; +} + +static inline int e100_load_ucode_wait(struct nic *nic) +{ + const struct firmware *fw; + int err = 0, counter = 50; + struct cb *cb = nic->cb_to_clean; + + fw = e100_request_firmware(nic); + /* If it's NULL, then no ucode is required */ + if (IS_ERR_OR_NULL(fw)) + return PTR_ERR_OR_ZERO(fw); + + if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) + netif_err(nic, probe, nic->netdev, + "ucode cmd failed with error %d\n", err); + + /* must restart cuc */ + nic->cuc_cmd = cuc_start; + + /* wait for completion */ + e100_write_flush(nic); + udelay(10); + + /* wait for possibly (ouch) 500ms */ + while (!(cb->status & cpu_to_le16(cb_complete))) { + msleep(10); + if (!--counter) break; + } + + /* ack any interrupts, something could have been set */ + iowrite8(~0, &nic->csr->scb.stat_ack); + + /* if the command failed, or is not OK, notify and return */ + if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { + netif_err(nic, probe, nic->netdev, "ucode load failed\n"); + err = -EPERM; + } + + return err; +} + +static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_iaaddr); + memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); + return 0; +} + +static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_dump); + cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + + offsetof(struct mem, dump_buf)); + return 0; +} + +static int e100_phy_check_without_mii(struct nic *nic) +{ + u8 phy_type; + int without_mii; + + phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; + + switch (phy_type) { + case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ + case I82503: /* Non-MII PHY; UNTESTED! */ + case S80C24: /* Non-MII PHY; tested and working */ + /* paragraph from the FreeBSD driver, "FXP_PHY_80C24": + * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter + * doesn't have a programming interface of any sort. The + * media is sensed automatically based on how the link partner + * is configured. This is, in essence, manual configuration. + */ + netif_info(nic, probe, nic->netdev, + "found MII-less i82503 or 80c24 or other PHY\n"); + + nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; + nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ + + /* these might be needed for certain MII-less cards... 
+ * nic->flags |= ich; + * nic->flags |= ich_10h_workaround; */ + + without_mii = 1; + break; + default: + without_mii = 0; + break; + } + return without_mii; +} + +#define NCONFIG_AUTO_SWITCH 0x0080 +#define MII_NSC_CONG MII_RESV1 +#define NSC_CONG_ENABLE 0x0100 +#define NSC_CONG_TXREADY 0x0400 +#define ADVERTISE_FC_SUPPORTED 0x0400 +static int e100_phy_init(struct nic *nic) +{ + struct net_device *netdev = nic->netdev; + u32 addr; + u16 bmcr, stat, id_lo, id_hi, cong; + + /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ + for (addr = 0; addr < 32; addr++) { + nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) + break; + } + if (addr == 32) { + /* uhoh, no PHY detected: check whether we seem to be some + * weird, rare variant which is *known* to not have any MII. + * But do this AFTER MII checking only, since this does + * lookup of EEPROM values which may easily be unreliable. */ + if (e100_phy_check_without_mii(nic)) + return 0; /* simply return and hope for the best */ + else { + /* for unknown cases log a fatal error */ + netif_err(nic, hw, nic->netdev, + "Failed to locate any known PHY, aborting\n"); + return -EAGAIN; + } + } else + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy_addr = %d\n", nic->mii.phy_id); + + /* Get phy ID */ + id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); + id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); + nic->phy = (u32)id_hi << 16 | (u32)id_lo; + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy ID = 0x%08X\n", nic->phy); + + /* Select the phy and isolate the rest */ + for (addr = 0; addr < 32; addr++) { + if (addr != nic->mii.phy_id) { + mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); + } else if (nic->phy != phy_82552_v) { + bmcr = mdio_read(netdev, addr, MII_BMCR); + mdio_write(netdev, addr, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } + } + /* + * Workaround for 82552: + * Clear the ISOLATE bit on selected phy_id last (mirrored on all + * other phy_id's) using bmcr value from addr discovery loop above. + */ + if (nic->phy == phy_82552_v) + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* Handle National tx phys */ +#define NCS_PHY_MODEL_MASK 0xFFF0FFFF + if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { + /* Disable congestion control */ + cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); + cong |= NSC_CONG_TXREADY; + cong &= ~NSC_CONG_ENABLE; + mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); + } + + if (nic->phy == phy_82552_v) { + u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); + + /* assign special tweaked mdio_ctrl() function */ + nic->mdio_ctrl = mdio_ctrl_phy_82552_v; + + /* Workaround Si not advertising flow-control during autoneg */ + advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); + + /* Reset for the above changes to take effect */ + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + bmcr |= BMCR_RESET; + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); + } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && + (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && + (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { + /* enable/disable MDI/MDI-X auto-switching. 
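+ * NCONFIG_AUTO_SWITCH is written only while autonegotiation is in use,
+ * i.e. when force_media is clear; forcing media disables auto-switching.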
*/ + mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, + nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); + } + + return 0; +} + +static int e100_hw_init(struct nic *nic) +{ + int err = 0; + + e100_hw_reset(nic); + + netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); + if ((err = e100_self_test(nic))) + return err; + + if ((err = e100_phy_init(nic))) + return err; + if ((err = e100_exec_cmd(nic, cuc_load_base, 0))) + return err; + if ((err = e100_exec_cmd(nic, ruc_load_base, 0))) + return err; + if ((err = e100_load_ucode_wait(nic))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_configure))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_addr, + nic->dma_addr + offsetof(struct mem, stats)))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0))) + return err; + + e100_disable_irq(nic); + + return 0; +} + +static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct net_device *netdev = nic->netdev; + struct netdev_hw_addr *ha; + u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); + + cb->command = cpu_to_le16(cb_multi); + cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == count) + break; + memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, + ETH_ALEN); + } + return 0; +} + +static void e100_set_multicast_list(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "mc_count=%d, flags=0x%04X\n", + netdev_mc_count(netdev), netdev->flags); + + if (netdev->flags & IFF_PROMISC) + nic->flags |= promiscuous; + else + nic->flags &= ~promiscuous; + + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS) + nic->flags |= multicast_all; + else + nic->flags &= ~multicast_all; + + e100_exec_cb(nic, NULL, e100_configure); + e100_exec_cb(nic, NULL, e100_multi); +} + +static void e100_update_stats(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct net_device_stats *ns = &dev->stats; + struct stats *s = &nic->mem->stats; + __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : + (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : + &s->complete; + + /* Device's stats reporting may take several microseconds to + * complete, so we're always waiting for results of the + * previous command. 
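+ * The completion dword tested below lies at the end of the area the
+ * controller actually dumps, which is shorter on older MAC revisions;
+ * hence the generation-dependent choice of *complete above.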
*/ + + if (*complete == cpu_to_le32(cuc_dump_reset_complete)) { + *complete = 0; + nic->tx_frames = le32_to_cpu(s->tx_good_frames); + nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); + ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); + ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); + ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); + ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); + ns->collisions += nic->tx_collisions; + ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + + le32_to_cpu(s->tx_lost_crs); + nic->rx_short_frame_errors += + le32_to_cpu(s->rx_short_frame_errors); + ns->rx_length_errors = nic->rx_short_frame_errors + + nic->rx_over_length_errors; + ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); + ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); + ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); + ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + + le32_to_cpu(s->rx_alignment_errors) + + le32_to_cpu(s->rx_short_frame_errors) + + le32_to_cpu(s->rx_cdt_errors); + nic->tx_deferred += le32_to_cpu(s->tx_deferred); + nic->tx_single_collisions += + le32_to_cpu(s->tx_single_collisions); + nic->tx_multiple_collisions += + le32_to_cpu(s->tx_multiple_collisions); + if (nic->mac >= mac_82558_D101_A4) { + nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); + nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); + nic->rx_fc_unsupported += + le32_to_cpu(s->fc_rcv_unsupported); + if (nic->mac >= mac_82559_D101M) { + nic->tx_tco_frames += + le16_to_cpu(s->xmt_tco_frames); + nic->rx_tco_frames += + le16_to_cpu(s->rcv_tco_frames); + } + } + } + + + if (e100_exec_cmd(nic, cuc_dump_reset, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_dump_reset failed\n"); +} + +static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) +{ + /* Adjust inter-frame-spacing (IFS) between two transmits if + * we're getting collisions on a half-duplex connection. */ + + if (duplex == DUPLEX_HALF) { + u32 prev = nic->adaptive_ifs; + u32 min_frames = (speed == SPEED_100) ? 1000 : 100; + + if ((nic->tx_frames / 32 < nic->tx_collisions) && + (nic->tx_frames > min_frames)) { + if (nic->adaptive_ifs < 60) + nic->adaptive_ifs += 5; + } else if (nic->tx_frames < min_frames) { + if (nic->adaptive_ifs >= 5) + nic->adaptive_ifs -= 5; + } + if (nic->adaptive_ifs != prev) + e100_exec_cb(nic, NULL, e100_configure); + } +} + +static void e100_watchdog(struct timer_list *t) +{ + struct nic *nic = from_timer(nic, t, watchdog); + struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; + u32 speed; + + netif_printk(nic, timer, KERN_DEBUG, nic->netdev, + "right now = %ld\n", jiffies); + + /* mii library handles link maintenance tasks */ + + mii_ethtool_gset(&nic->mii, &cmd); + speed = ethtool_cmd_speed(&cmd); + + if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", + speed == SPEED_100 ? 100 : 10, + cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); + } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Down\n"); + } + + mii_check_link(&nic->mii); + + /* Software generated interrupt to recover from (rare) Rx + * allocation failure. 
+ * Unfortunately have to use a spinlock to not re-enable interrupts + * accidentally, due to hardware that shares a register between the + * interrupt mask bit and the SW Interrupt generation bit */ + spin_lock_irq(&nic->cmd_lock); + iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irq(&nic->cmd_lock); + + e100_update_stats(nic); + e100_adjust_adaptive_ifs(nic, speed, cmd.duplex); + + if (nic->mac <= mac_82557_D100_C) + /* Issue a multicast command to workaround a 557 lock up */ + e100_set_multicast_list(nic->netdev); + + if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) + /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ + nic->flags |= ich_10h_workaround; + else + nic->flags &= ~ich_10h_workaround; + + mod_timer(&nic->watchdog, + round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); +} + +static int e100_xmit_prepare(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + dma_addr_t dma_addr; + cb->command = nic->tx_command; + + dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + /* If we can't map the skb, have the upper layer try later */ + if (dma_mapping_error(&nic->pdev->dev, dma_addr)) { + dev_kfree_skb_any(skb); + skb = NULL; + return -ENOMEM; + } + + /* + * Use the last 4 bytes of the SKB payload packet as the CRC, used for + * testing, ie sending frames with bad CRC. + */ + if (unlikely(skb->no_fcs)) + cb->command |= cpu_to_le16(cb_tx_nc); + else + cb->command &= ~cpu_to_le16(cb_tx_nc); + + /* interrupt every 16 packets regardless of delay */ + if ((nic->cbs_avail & ~15) == nic->cbs_avail) + cb->command |= cpu_to_le16(cb_i); + cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); + cb->u.tcb.tcb_byte_count = 0; + cb->u.tcb.threshold = nic->tx_threshold; + cb->u.tcb.tbd_count = 1; + cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); + cb->u.tcb.tbd.size = cpu_to_le16(skb->len); + skb_tx_timestamp(skb); + return 0; +} + +static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + if (nic->flags & ich_10h_workaround) { + /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. + Issue a NOP command followed by a 1us delay before + issuing the Tx command. */ + if (e100_exec_cmd(nic, cuc_nop, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_nop failed\n"); + udelay(1); + } + + err = e100_exec_cb(nic, skb, e100_xmit_prepare); + + switch (err) { + case -ENOSPC: + /* We queued the skb, but now we're out of space. */ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "No space for CB\n"); + netif_stop_queue(netdev); + break; + case -ENOMEM: + /* This is a hard error - log it. 
*/ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "Out of Tx resources, returning skb\n"); + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +static int e100_tx_clean(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct cb *cb; + int tx_cleaned = 0; + + spin_lock(&nic->cb_lock); + + /* Clean CBs marked complete */ + for (cb = nic->cb_to_clean; + cb->status & cpu_to_le16(cb_complete); + cb = nic->cb_to_clean = cb->next) { + dma_rmb(); /* read skb after status */ + netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, + "cb[%d]->status = 0x%04X\n", + (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), + cb->status); + + if (likely(cb->skb != NULL)) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += cb->skb->len; + + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + tx_cleaned = 1; + } + cb->status = 0; + nic->cbs_avail++; + } + + spin_unlock(&nic->cb_lock); + + /* Recover from running out of Tx resources in xmit_frame */ + if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) + netif_wake_queue(nic->netdev); + + return tx_cleaned; +} + +static void e100_clean_cbs(struct nic *nic) +{ + if (nic->cbs) { + while (nic->cbs_avail != nic->params.cbs.count) { + struct cb *cb = nic->cb_to_clean; + if (cb->skb) { + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); + dev_kfree_skb(cb->skb); + } + nic->cb_to_clean = nic->cb_to_clean->next; + nic->cbs_avail++; + } + dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); + nic->cbs = NULL; + nic->cbs_avail = 0; + } + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = + nic->cbs; +} + +static int e100_alloc_cbs(struct nic *nic) +{ + struct cb *cb; + unsigned int i, count = nic->params.cbs.count; + + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; + nic->cbs_avail = 0; + + nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL, + &nic->cbs_dma_addr); + if (!nic->cbs) + return -ENOMEM; + + for (cb = nic->cbs, i = 0; i < count; cb++, i++) { + cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; + cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; + + cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); + cb->link = cpu_to_le32(nic->cbs_dma_addr + + ((i+1) % count) * sizeof(struct cb)); + } + + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; + nic->cbs_avail = count; + + return 0; +} + +static inline void e100_start_receiver(struct nic *nic, struct rx *rx) +{ + if (!nic->rxs) return; + if (RU_SUSPENDED != nic->ru_running) return; + + /* handle init time starts */ + if (!rx) rx = nic->rxs; + + /* (Re)start RU if suspended or idle and RFA is non-NULL */ + if (rx->skb) { + e100_exec_cmd(nic, ruc_start, rx->dma_addr); + nic->ru_running = RU_RUNNING; + } +} + +#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) +{ + if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) + return -ENOMEM; + + /* Init, and map the RFD. 
*/ + skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { + dev_kfree_skb_any(rx->skb); + rx->skb = NULL; + rx->dma_addr = 0; + return -ENOMEM; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + dma_sync_single_for_device(&nic->pdev->dev, + rx->prev->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + } + + return 0; +} + +static int e100_rx_indicate(struct nic *nic, struct rx *rx, + unsigned int *work_done, unsigned int work_to_do) +{ + struct net_device *dev = nic->netdev; + struct sk_buff *skb = rx->skb; + struct rfd *rfd = (struct rfd *)skb->data; + u16 rfd_status, actual_size; + u16 fcs_pad = 0; + + if (unlikely(work_done && *work_done >= work_to_do)) + return -EAGAIN; + + /* Need to sync before taking a peek at cb_complete bit */ + dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); + rfd_status = le16_to_cpu(rfd->status); + + netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, + "status=0x%04X\n", rfd_status); + dma_rmb(); /* read size after status bit */ + + /* If data isn't ready, nothing to indicate */ + if (unlikely(!(rfd_status & cb_complete))) { + /* If the next buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling + * interrupts */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), + DMA_FROM_DEVICE); + return -ENODATA; + } + + /* Get actual data size */ + if (unlikely(dev->features & NETIF_F_RXFCS)) + fcs_pad = 4; + actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; + if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) + actual_size = RFD_BUF_LEN - sizeof(struct rfd); + + /* Get data */ + dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); + + /* If this buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling interrupts. + * This can happen when the RU sees the size change but also sees + * the el bit set. */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) { + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + } + + /* Pull off the RFD and put the actual data (minus eth hdr) */ + skb_reserve(skb, sizeof(struct rfd)); + skb_put(skb, actual_size); + skb->protocol = eth_type_trans(skb, nic->netdev); + + /* If we are receiving all frames, then don't bother + * checking for errors. + */ + if (unlikely(dev->features & NETIF_F_RXALL)) { + if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) + /* Received oversized frame, but keep it. 
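+ * With NETIF_F_RXALL set it is only counted as over-length here and
+ * still handed to the stack via process_skb.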
*/ + nic->rx_over_length_errors++; + goto process_skb; + } + + if (unlikely(!(rfd_status & cb_ok))) { + /* Don't indicate if hardware indicates errors */ + dev_kfree_skb_any(skb); + } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) { + /* Don't indicate oversized frames */ + nic->rx_over_length_errors++; + dev_kfree_skb_any(skb); + } else { +process_skb: + dev->stats.rx_packets++; + dev->stats.rx_bytes += (actual_size - fcs_pad); + netif_receive_skb(skb); + if (work_done) + (*work_done)++; + } + + rx->skb = NULL; + + return 0; +} + +static void e100_rx_clean(struct nic *nic, unsigned int *work_done, + unsigned int work_to_do) +{ + struct rx *rx; + int restart_required = 0, err = 0; + struct rx *old_before_last_rx, *new_before_last_rx; + struct rfd *old_before_last_rfd, *new_before_last_rfd; + + /* Indicate newly arrived packets */ + for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { + err = e100_rx_indicate(nic, rx, work_done, work_to_do); + /* Hit quota or no more to clean */ + if (-EAGAIN == err || -ENODATA == err) + break; + } + + + /* On EAGAIN, hit quota so have more work to do, restart once + * cleanup is complete. + * Else, are we already rnr? then pay attention!!! this ensures that + * the state machine progression never allows a start with a + * partially cleaned list, avoiding a race between hardware + * and rx_to_clean when in NAPI mode */ + if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) + restart_required = 1; + + old_before_last_rx = nic->rx_to_use->prev->prev; + old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; + + /* Alloc new skbs to refill list */ + for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { + if (unlikely(e100_rx_alloc_skb(nic, rx))) + break; /* Better luck next time (see watchdog) */ + } + + new_before_last_rx = nic->rx_to_use->prev->prev; + if (new_before_last_rx != old_before_last_rx) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer + * without worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this + * buffer. + * When the hardware hits the before last buffer with el-bit + * and size of 0, it will RNR interrupt, the RUS will go into + * the No Resources state. It will not complete nor write to + * this buffer. */ + new_before_last_rfd = + (struct rfd *)new_before_last_rx->skb->data; + new_before_last_rfd->size = 0; + new_before_last_rfd->command |= cpu_to_le16(cb_el); + dma_sync_single_for_device(&nic->pdev->dev, + new_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + + /* Now that we have a new stopping point, we can clear the old + * stopping point. We must sync twice to get the proper + * ordering on the hardware side of things. */ + old_before_last_rfd->command &= ~cpu_to_le16(cb_el); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + + ETH_FCS_LEN); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + } + + if (restart_required) { + // ack the rnr? 
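+ /* Writing stat_ack_rnr acknowledges the pending RNR indication before
+  * the receive unit is restarted at rx_to_clean below. */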
+ iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); + e100_start_receiver(nic, nic->rx_to_clean); + if (work_done) + (*work_done)++; + } +} + +static void e100_rx_clean_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + + nic->ru_running = RU_UNINITIALIZED; + + if (nic->rxs) { + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + if (rx->skb) { + dma_unmap_single(&nic->pdev->dev, + rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); + dev_kfree_skb(rx->skb); + } + } + kfree(nic->rxs); + nic->rxs = NULL; + } + + nic->rx_to_use = nic->rx_to_clean = NULL; +} + +static int e100_rx_alloc_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + struct rfd *before_last; + + nic->rx_to_use = nic->rx_to_clean = NULL; + nic->ru_running = RU_UNINITIALIZED; + + if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL))) + return -ENOMEM; + + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; + rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; + if (e100_rx_alloc_skb(nic, rx)) { + e100_rx_clean_list(nic); + return -ENOMEM; + } + } + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer without + * worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this buffer. + * When the hardware hits the before last buffer with el-bit and size + * of 0, it will RNR interrupt, the RU will go into the No Resources + * state. It will not complete nor write to this buffer. */ + rx = nic->rxs->prev->prev; + before_last = (struct rfd *)rx->skb->data; + before_last->command |= cpu_to_le16(cb_el); + before_last->size = 0; + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); + + nic->rx_to_use = nic->rx_to_clean = nic->rxs; + nic->ru_running = RU_SUSPENDED; + + return 0; +} + +static irqreturn_t e100_intr(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct nic *nic = netdev_priv(netdev); + u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); + + netif_printk(nic, intr, KERN_DEBUG, nic->netdev, + "stat_ack = 0x%02X\n", stat_ack); + + if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ + stat_ack == stat_ack_not_present) /* Hardware is ejected */ + return IRQ_NONE; + + /* Ack interrupt(s) */ + iowrite8(stat_ack, &nic->csr->scb.stat_ack); + + /* We hit Receive No Resource (RNR); restart RU after cleaning */ + if (stat_ack & stat_ack_rnr) + nic->ru_running = RU_SUSPENDED; + + if (likely(napi_schedule_prep(&nic->napi))) { + e100_disable_irq(nic); + __napi_schedule(&nic->napi); + } + + return IRQ_HANDLED; +} + +static int e100_poll(struct napi_struct *napi, int budget) +{ + struct nic *nic = container_of(napi, struct nic, napi); + unsigned int work_done = 0; + + e100_rx_clean(nic, &work_done, budget); + e100_tx_clean(nic); + + /* If budget fully consumed, continue polling */ + if (work_done == budget) + return budget; + + /* only re-enable interrupt if stack agrees polling is really done */ + if (likely(napi_complete_done(napi, work_done))) + e100_enable_irq(nic); + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void e100_netpoll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_disable_irq(nic); + e100_intr(nic->pdev->irq, netdev); + e100_tx_clean(nic); + e100_enable_irq(nic); +} +#endif + +static int e100_set_mac_address(struct net_device *netdev, void *p) +{ + 
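+ /* Validate the new address, copy it into the net_device and issue an
+  * IA-setup command block so the controller filters on the new MAC. */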
struct nic *nic = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + e100_exec_cb(nic, NULL, e100_setup_iaaddr); + + return 0; +} + +static int e100_asf(struct nic *nic) +{ + /* ASF can be enabled from eeprom */ + return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && + (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && + !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && + ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); +} + +static int e100_up(struct nic *nic) +{ + int err; + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_rx_clean_list; + if ((err = e100_hw_init(nic))) + goto err_clean_cbs; + e100_set_multicast_list(nic->netdev); + e100_start_receiver(nic, NULL); + mod_timer(&nic->watchdog, jiffies); + if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, + nic->netdev->name, nic->netdev))) + goto err_no_irq; + netif_wake_queue(nic->netdev); + napi_enable(&nic->napi); + /* enable ints _after_ enabling poll, preventing a race between + * disable ints+schedule */ + e100_enable_irq(nic); + return 0; + +err_no_irq: + del_timer_sync(&nic->watchdog); +err_clean_cbs: + e100_clean_cbs(nic); +err_rx_clean_list: + e100_rx_clean_list(nic); + return err; +} + +static void e100_down(struct nic *nic) +{ + /* wait here for poll to complete */ + napi_disable(&nic->napi); + netif_stop_queue(nic->netdev); + e100_hw_reset(nic); + free_irq(nic->pdev->irq, nic->netdev); + del_timer_sync(&nic->watchdog); + netif_carrier_off(nic->netdev); + e100_clean_cbs(nic); + e100_rx_clean_list(nic); +} + +static void e100_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct nic *nic = netdev_priv(netdev); + + /* Reset outside of interrupt context, to avoid request_irq + * in interrupt context */ + schedule_work(&nic->tx_timeout_task); +} + +static void e100_tx_timeout_task(struct work_struct *work) +{ + struct nic *nic = container_of(work, struct nic, tx_timeout_task); + struct net_device *netdev = nic->netdev; + + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); + + rtnl_lock(); + if (netif_running(netdev)) { + e100_down(netdev_priv(netdev)); + e100_up(netdev_priv(netdev)); + } + rtnl_unlock(); +} + +static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) +{ + int err; + struct sk_buff *skb; + + /* Use driver resources to perform internal MAC or PHY + * loopback test. A single packet is prepared and transmitted + * in loopback mode, and the test passes if the received + * packet compares byte-for-byte to the transmitted packet. 
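+ * The sequence below allocates the Rx and CB rings, re-initialises the
+ * hardware with nic->loopback set, transmits one 0xFF-filled frame of
+ * ETH_DATA_LEN bytes and compares the first received buffer against it.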
*/ + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_clean_rx; + + /* ICH PHY loopback is broken so do MAC loopback instead */ + if (nic->flags & ich && loopback_mode == lb_phy) + loopback_mode = lb_mac; + + nic->loopback = loopback_mode; + if ((err = e100_hw_init(nic))) + goto err_loopback_none; + + if (loopback_mode == lb_phy) + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, + BMCR_LOOPBACK); + + e100_start_receiver(nic, NULL); + + if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { + err = -ENOMEM; + goto err_loopback_none; + } + skb_put(skb, ETH_DATA_LEN); + memset(skb->data, 0xFF, ETH_DATA_LEN); + e100_xmit_frame(skb, nic->netdev); + + msleep(10); + + dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + + if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), + skb->data, ETH_DATA_LEN)) + err = -EAGAIN; + +err_loopback_none: + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); + nic->loopback = lb_none; + e100_clean_cbs(nic); + e100_hw_reset(nic); +err_clean_rx: + e100_rx_clean_list(nic); + return err; +} + +#define MII_LED_CONTROL 0x1B +#define E100_82552_LED_OVERRIDE 0x19 +#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ +#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ + +static int e100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + + mii_ethtool_get_link_ksettings(&nic->mii, cmd); + + return 0; +} + +static int e100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); + err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); + e100_exec_cb(nic, NULL, e100_configure); + + return err; +} + +static void e100_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct nic *nic = netdev_priv(netdev); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(nic->pdev), + sizeof(info->bus_info)); +} + +#define E100_PHY_REGS 0x1D +static int e100_get_regs_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + /* We know the number of registers, and the size of the dump buffer. + * Calculate the total size in bytes. + */ + return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); +} + +static void e100_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nic *nic = netdev_priv(netdev); + u32 *buff = p; + int i; + + regs->version = (1 << 24) | nic->pdev->revision; + buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | + ioread8(&nic->csr->scb.cmd_lo) << 16 | + ioread16(&nic->csr->scb.status); + for (i = 0; i < E100_PHY_REGS; i++) + /* Note that we read the registers in reverse order. This + * ordering is the ABI apparently used by ethtool and other + * applications. + */ + buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, + E100_PHY_REGS - 1 - i); + memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); + e100_exec_cb(nic, NULL, e100_dump); + msleep(10); + memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, + sizeof(nic->mem->dump_buf)); +} + +static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; + wol->wolopts = (nic->flags & wol_magic) ? 
WAKE_MAGIC : 0; +} + +static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + + if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || + !device_can_wakeup(&nic->pdev->dev)) + return -EOPNOTSUPP; + + if (wol->wolopts) + nic->flags |= wol_magic; + else + nic->flags &= ~wol_magic; + + device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); + + e100_exec_cb(nic, NULL, e100_configure); + + return 0; +} + +static u32 e100_get_msglevel(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->msg_enable; +} + +static void e100_set_msglevel(struct net_device *netdev, u32 value) +{ + struct nic *nic = netdev_priv(netdev); + nic->msg_enable = value; +} + +static int e100_nway_reset(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_nway_restart(&nic->mii); +} + +static u32 e100_get_link(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_link_ok(&nic->mii); +} + +static int e100_get_eeprom_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->eeprom_wc << 1; +} + +#define E100_EEPROM_MAGIC 0x1234 +static int e100_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + eeprom->magic = E100_EEPROM_MAGIC; + memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); + + return 0; +} + +static int e100_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + if (eeprom->magic != E100_EEPROM_MAGIC) + return -EINVAL; + + memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); + + return e100_eeprom_save(nic, eeprom->offset >> 1, + (eeprom->len >> 1) + 1); +} + +static void e100_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + ring->rx_max_pending = rfds->max; + ring->tx_max_pending = cbs->max; + ring->rx_pending = rfds->count; + ring->tx_pending = cbs->count; +} + +static int e100_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(netdev)) + e100_down(nic); + rfds->count = max(ring->rx_pending, rfds->min); + rfds->count = min(rfds->count, rfds->max); + cbs->count = max(ring->tx_pending, cbs->min); + cbs->count = min(cbs->count, cbs->max); + netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", + rfds->count, cbs->count); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} + +static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)", + "Eeprom test (on/offline)", + "Self test (offline)", + "Mac loopback (offline)", + "Phy loopback (offline)", +}; +#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test) + +static void e100_diag_test(struct net_device *netdev, + struct ethtool_test *test, u64 *data) +{ + struct ethtool_cmd cmd; + struct nic *nic = netdev_priv(netdev); + int i; + + memset(data, 0, E100_TEST_LEN * sizeof(u64)); + data[0] = !mii_link_ok(&nic->mii); + data[1] = e100_eeprom_load(nic); + if (test->flags & ETH_TEST_FL_OFFLINE) { + + /* save speed, duplex & autoneg 
settings */ + mii_ethtool_gset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_down(nic); + data[2] = e100_self_test(nic); + data[3] = e100_loopback_test(nic, lb_mac); + data[4] = e100_loopback_test(nic, lb_phy); + + /* restore speed, duplex & autoneg settings */ + mii_ethtool_sset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_up(nic); + } + for (i = 0; i < E100_TEST_LEN; i++) + test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; + + msleep_interruptible(4 * 1000); +} + +static int e100_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct nic *nic = netdev_priv(netdev); + enum led_state { + led_on = 0x01, + led_off = 0x04, + led_on_559 = 0x05, + led_on_557 = 0x07, + }; + u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : + MII_LED_CONTROL; + u16 leds = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; + + case ETHTOOL_ID_ON: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : + (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; + break; + + case ETHTOOL_ID_OFF: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off; + break; + + case ETHTOOL_ID_INACTIVE: + break; + } + + mdio_write(netdev, nic->mii.phy_id, led_reg, leds); + return 0; +} + +static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + /* device-specific stats */ + "tx_deferred", "tx_single_collisions", "tx_multi_collisions", + "tx_flow_control_pause", "rx_flow_control_pause", + "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", + "rx_short_frame_errors", "rx_over_length_errors", +}; +#define E100_NET_STATS_LEN 21 +#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) + +static int e100_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E100_TEST_LEN; + case ETH_SS_STATS: + return E100_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e100_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nic *nic = netdev_priv(netdev); + int i; + + for (i = 0; i < E100_NET_STATS_LEN; i++) + data[i] = ((unsigned long *)&netdev->stats)[i]; + + data[i++] = nic->tx_deferred; + data[i++] = nic->tx_single_collisions; + data[i++] = nic->tx_multiple_collisions; + data[i++] = nic->tx_fc_pause; + data[i++] = nic->rx_fc_pause; + data[i++] = nic->rx_fc_unsupported; + data[i++] = nic->tx_tco_frames; + data[i++] = nic->rx_tco_frames; + data[i++] = nic->rx_short_frame_errors; + data[i++] = nic->rx_over_length_errors; +} + +static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test)); + break; + case ETH_SS_STATS: + memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats)); + break; + } +} + +static const struct ethtool_ops e100_ethtool_ops = { + .get_drvinfo = e100_get_drvinfo, + .get_regs_len = e100_get_regs_len, + .get_regs = e100_get_regs, + .get_wol = e100_get_wol, + .set_wol = e100_set_wol, + .get_msglevel = e100_get_msglevel, + .set_msglevel = e100_set_msglevel, + .nway_reset = e100_nway_reset, + .get_link = e100_get_link, + 
.get_eeprom_len = e100_get_eeprom_len, + .get_eeprom = e100_get_eeprom, + .set_eeprom = e100_set_eeprom, + .get_ringparam = e100_get_ringparam, + .set_ringparam = e100_set_ringparam, + .self_test = e100_diag_test, + .get_strings = e100_get_strings, + .set_phys_id = e100_set_phys_id, + .get_ethtool_stats = e100_get_ethtool_stats, + .get_sset_count = e100_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e100_get_link_ksettings, + .set_link_ksettings = e100_set_link_ksettings, +}; + +static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct nic *nic = netdev_priv(netdev); + + return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); +} + +static int e100_alloc(struct nic *nic) +{ + nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem), + &nic->dma_addr, GFP_KERNEL); + return nic->mem ? 0 : -ENOMEM; +} + +static void e100_free(struct nic *nic) +{ + if (nic->mem) { + dma_free_coherent(&nic->pdev->dev, sizeof(struct mem), + nic->mem, nic->dma_addr); + nic->mem = NULL; + } +} + +static int e100_open(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err = 0; + + netif_carrier_off(netdev); + if ((err = e100_up(nic))) + netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); + return err; +} + +static int e100_close(struct net_device *netdev) +{ + e100_down(netdev_priv(netdev)); + return 0; +} + +static int e100_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct nic *nic = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + e100_exec_cb(nic, NULL, e100_configure); + return 1; +} + +static const struct net_device_ops e100_netdev_ops = { + .ndo_open = e100_open, + .ndo_stop = e100_close, + .ndo_start_xmit = e100_xmit_frame, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = e100_set_multicast_list, + .ndo_set_mac_address = e100_set_mac_address, + .ndo_do_ioctl = e100_do_ioctl, + .ndo_tx_timeout = e100_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e100_netpoll, +#endif + .ndo_set_features = e100_set_features, +}; + +static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct nic *nic; + int err; + + if (!(netdev = alloc_etherdev(sizeof(struct nic)))) + return -ENOMEM; + + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; + + netdev->netdev_ops = &e100_netdev_ops; + netdev->ethtool_ops = &e100_ethtool_ops; + netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + nic = netdev_priv(netdev); + netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); + nic->netdev = netdev; + nic->pdev = pdev; + nic->msg_enable = (1 << debug) - 1; + nic->mdio_ctrl = mdio_ctrl_hw; + pci_set_drvdata(pdev, netdev); + + if ((err = pci_enable_device(pdev))) { + netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); + goto err_out_free_dev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + if ((err = pci_request_regions(pdev, DRV_NAME))) { + netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); + goto 
err_out_disable_pdev; + } + + if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { + netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + if (use_io) + netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); + + nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr)); + if (!nic->csr) { + netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + if (ent->driver_data) + nic->flags |= ich; + else + nic->flags &= ~ich; + + e100_get_defaults(nic); + + /* D100 MAC doesn't allow rx of vlan packets with normal MTU */ + if (nic->mac < mac_82558_D101_A4) + netdev->features |= NETIF_F_VLAN_CHALLENGED; + + /* locks must be initialized before calling hw_reset */ + spin_lock_init(&nic->cb_lock); + spin_lock_init(&nic->cmd_lock); + spin_lock_init(&nic->mdio_lock); + + /* Reset the device before pci_set_master() in case device is in some + * funky state and has an interrupt pending - hint: we don't have the + * interrupt handler registered yet. */ + e100_hw_reset(nic); + + pci_set_master(pdev); + + timer_setup(&nic->watchdog, e100_watchdog, 0); + + INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); + + if ((err = e100_alloc(nic))) { + netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); + goto err_out_iounmap; + } + + if ((err = e100_eeprom_load(nic))) + goto err_out_free; + + e100_phy_init(nic); + + memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!eeprom_bad_csum_allow) { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); + err = -EAGAIN; + goto err_out_free; + } else { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); + } + } + + /* Wol magic packet can be enabled from eeprom */ + if ((nic->mac >= mac_82558_D101_A4) && + (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { + nic->flags |= wol_magic; + device_set_wakeup_enable(&pdev->dev, true); + } + + /* ack any pending wake events, disable PME */ + pci_pme_active(pdev, false); + + strcpy(netdev->name, "eth%d"); + if ((err = register_netdev(netdev))) { + netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n"); + goto err_out_free; + } + nic->cbs_pool = dma_pool_create(netdev->name, + &nic->pdev->dev, + nic->params.cbs.max * sizeof(struct cb), + sizeof(u32), + 0); + if (!nic->cbs_pool) { + netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); + err = -ENOMEM; + goto err_out_pool; + } + netif_info(nic, probe, nic->netdev, + "addr 0x%llx, irq %d, MAC addr %pM\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), + pdev->irq, netdev->dev_addr); + + return 0; + +err_out_pool: + unregister_netdev(netdev); +err_out_free: + e100_free(nic); +err_out_iounmap: + pci_iounmap(pdev, nic->csr); +err_out_free_res: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out_free_dev: + free_netdev(netdev); + return err; +} + +static void e100_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netdev) { + struct nic *nic = netdev_priv(netdev); + unregister_netdev(netdev); + e100_free(nic); + pci_iounmap(pdev, nic->csr); + dma_pool_destroy(nic->cbs_pool); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ +#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ +#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ +static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (netif_running(netdev)) + e100_down(nic); + netif_device_detach(netdev); + + if ((nic->flags & wol_magic) | e100_asf(nic)) { + /* enable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, smartspeed | + E100_82552_REV_ANEG | E100_82552_ANEG_NOW); + } + *enable_wake = true; + } else { + *enable_wake = false; + } + + pci_clear_master(pdev); +} + +static int __e100_power_off(struct pci_dev *pdev, bool wake) +{ + if (wake) + return pci_prepare_to_sleep(pdev); + + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int __maybe_unused e100_suspend(struct device *dev_d) +{ + bool wake; + + __e100_shutdown(to_pci_dev(dev_d), &wake); + + device_wakeup_disable(dev_d); + + return 0; +} + +static int __maybe_unused e100_resume(struct device *dev_d) +{ + struct net_device *netdev = dev_get_drvdata(dev_d); + struct nic *nic = netdev_priv(netdev); + + /* disable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, + smartspeed & ~(E100_82552_REV_ANEG)); + } + + netif_device_attach(netdev); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} + +static void e100_shutdown(struct pci_dev *pdev) +{ + bool wake; + __e100_shutdown(pdev, &wake); + if (system_state == SYSTEM_POWER_OFF) + __e100_power_off(pdev, wake); +} + +/* ------------------ PCI Error Recovery infrastructure -------------- */ +/** + * e100_io_error_detected - called when PCI error is detected. + * @pdev: Pointer to PCI device + * @state: The current pci connection state + */ +static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e100_down(nic); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e100_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch. 
+ */ +static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + pr_err("Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + /* Only one device per card can do a reset */ + if (0 != PCI_FUNC(pdev->devfn)) + return PCI_ERS_RESULT_RECOVERED; + e100_hw_reset(nic); + e100_phy_init(nic); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e100_io_resume - resume normal operations + * @pdev: Pointer to PCI device + * + * Resume normal operations after an error recovery + * sequence has been completed. + */ +static void e100_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, PCI_D0, 0); + + netif_device_attach(netdev); + if (netif_running(netdev)) { + e100_open(netdev); + mod_timer(&nic->watchdog, jiffies); + } +} + +static const struct pci_error_handlers e100_err_handler = { + .error_detected = e100_io_error_detected, + .slot_reset = e100_io_slot_reset, + .resume = e100_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume); + +static struct pci_driver e100_driver = { + .name = DRV_NAME, + .id_table = e100_id_table, + .probe = e100_probe, + .remove = e100_remove, + + /* Power Management hooks */ + .driver.pm = &e100_pm_ops, + + .shutdown = e100_shutdown, + .err_handler = &e100_err_handler, +}; + +static int __init e100_init_module(void) +{ + if (((1 << debug) - 1) & NETIF_MSG_DRV) { + pr_info("%s\n", DRV_DESCRIPTION); + pr_info("%s\n", DRV_COPYRIGHT); + } + return pci_register_driver(&e100_driver); +} + +static void __exit e100_cleanup_module(void) +{ + pci_unregister_driver(&e100_driver); +} + +module_init(e100_init_module); +module_exit(e100_cleanup_module); diff --git a/devices/e100-5.15-ethercat.c b/devices/e100-5.15-ethercat.c new file mode 100644 index 00000000..968b17e8 --- /dev/null +++ b/devices/e100-5.15-ethercat.c @@ -0,0 +1,3381 @@ +/****************************************************************************** + * + * $Id$ + * + * Copyright (C) 2007-2012 Florian Pose, Ingenieurgemeinschaft IgH + * + * This file is part of the IgH EtherCAT Master. + * + * The IgH EtherCAT Master is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + * The IgH EtherCAT Master is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with the IgH EtherCAT Master; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * --- + * + * The license mentioned above concerns the source code only. Using the + * EtherCAT technology and brand is only permitted in compliance with the + * industrial property and similar rights of Beckhoff Automation GmbH. + * + * --- + * + * vim: noexpandtab + * + *****************************************************************************/ + +/** + \file + EtherCAT driver for e100-compatible NICs. 
+*/ + +/* Former documentation: */ + +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* + * e100.c: Intel(R) PRO/100 ethernet driver + * + * (Re)written 2003 by scott.feldman@intel.com. Based loosely on + * original e100 driver, but better described as a munging of + * e100, e1000, eepro100, tg3, 8139cp, and other drivers. + * + * References: + * Intel 8255x 10/100 Mbps Ethernet Controller Family, + * Open Source Software Developers Manual, + * http://sourceforge.net/projects/e1000 + * + * + * Theory of Operation + * + * I. General + * + * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet + * controller family, which includes the 82557, 82558, 82559, 82550, + * 82551, and 82562 devices. 82558 and greater controllers + * integrate the Intel 82555 PHY. The controllers are used in + * server and client network interface cards, as well as in + * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx + * configurations. 8255x supports a 32-bit linear addressing + * mode and operates at 33Mhz PCI clock rate. + * + * II. Driver Operation + * + * Memory-mapped mode is used exclusively to access the device's + * shared-memory structure, the Control/Status Registers (CSR). All + * setup, configuration, and control of the device, including queuing + * of Tx, Rx, and configuration commands is through the CSR. + * cmd_lock serializes accesses to the CSR command register. cb_lock + * protects the shared Command Block List (CBL). + * + * 8255x is highly MII-compliant and all access to the PHY go + * through the Management Data Interface (MDI). Consequently, the + * driver leverages the mii.c library shared with other MII-compliant + * devices. + * + * Big- and Little-Endian byte order as well as 32- and 64-bit + * archs are supported. Weak-ordered memory and non-cache-coherent + * archs are supported. + * + * III. Transmit + * + * A Tx skb is mapped and hangs off of a TCB. TCBs are linked + * together in a fixed-size ring (CBL) thus forming the flexible mode + * memory structure. A TCB marked with the suspend-bit indicates + * the end of the ring. The last TCB processed suspends the + * controller, and the controller can be restarted by issue a CU + * resume command to continue from the suspend point, or a CU start + * command to start at a given position in the ring. + * + * Non-Tx commands (config, multicast setup, etc) are linked + * into the CBL ring along with Tx commands. The common structure + * used for both Tx and non-Tx commands is the Command Block (CB). + * + * cb_to_use is the next CB to use for queuing a command; cb_to_clean + * is the next CB to check for completion; cb_to_send is the first + * CB to start on in case of a previous failure to resume. CB clean + * up happens in interrupt context in response to a CU interrupt. + * cbs_avail keeps track of number of free CB resources available. + * + * Hardware padding of short packets to minimum packet size is + * enabled. 82557 pads with 7Eh, while the later controllers pad + * with 00h. + * + * IV. Receive + * + * The Receive Frame Area (RFA) comprises a ring of Receive Frame + * Descriptors (RFD) + data buffer, thus forming the simplified mode + * memory structure. Rx skbs are allocated to contain both the RFD + * and the data buffer, but the RFD is pulled off before the skb is + * indicated. The data buffer is aligned such that encapsulated + * protocol headers are u32-aligned. 
Since the RFD is part of the
+ * mapped shared memory, and completion status is contained within
+ * the RFD, the RFD must be dma_sync'ed to maintain a consistent
+ * view from software and hardware.
+ *
+ * In order to keep updates to the RFD link field from colliding with
+ * hardware writes to mark packets complete, we use the feature that
+ * hardware will not write to a size 0 descriptor and mark the previous
+ * packet as end-of-list (EL). After updating the link, we remove EL
+ * and only then restore the size such that hardware may use the
+ * previous-to-end RFD.
+ *
+ * Under typical operation, the receive unit (RU) is start once,
+ * and the controller happily fills RFDs as frames arrive. If
+ * replacement RFDs cannot be allocated, or the RU goes non-active,
+ * the RU must be restarted. Frame arrival generates an interrupt,
+ * and Rx indication and re-allocation happen in the same context,
+ * therefore no locking is required. A software-generated interrupt
+ * is generated from the watchdog to recover from a failed allocation
+ * scenario where all Rx resources have been indicated and none re-
+ * placed.
+ *
+ * V. Miscellaneous
+ *
+ * VLAN offloading of tagging, stripping and filtering is not
+ * supported, but driver will accommodate the extra 4-byte VLAN tag
+ * for processing by upper layers. Tx/Rx Checksum offloading is not
+ * supported. Tx Scatter/Gather is not supported. Jumbo Frames is
+ * not supported (hardware limitation).
+ *
+ * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
+ *
+ * Thanks to JC (jchapman@katalix.com) for helping with
+ * testing/troubleshooting the development driver.
+ *
+ * TODO:
+ * o several entry points race with dev->close
+ * o check for tx-no-resources/stop Q races with tx clean/wake Q
+ *
+ * FIXES:
+ * 2005/12/02 - Michael O'Donnell
+ * - Stratus87247: protect MDI control register manipulations
+ * 2009/06/01 - Andreas Mohr
+ * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/string.h>
+#include <linux/firmware.h>
+#include <linux/rtnetlink.h>
+#include <net/ip6_checksum.h>
+
+// EtherCAT includes
+#include "../globals.h"
+#include "ecdev.h"
+
+#define DRV_NAME "ec_e100"
+#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver, EtherCAT enabled"
+#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
+
+#define E100_WATCHDOG_PERIOD (2 * HZ)
+#define E100_NAPI_WEIGHT 16
+
+#define FIRMWARE_D101M "e100/d101m_ucode.bin"
+#define FIRMWARE_D101S "e100/d101s_ucode.bin"
+#define FIRMWARE_D102E "e100/d102e_ucode.bin"
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR(DRV_COPYRIGHT);
+MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(FIRMWARE_D101M);
+MODULE_FIRMWARE(FIRMWARE_D101S);
+MODULE_FIRMWARE(FIRMWARE_D102E);
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Florian Pose <fp@igh-essen.com>");
+MODULE_LICENSE("GPL");
+
+void e100_ec_poll(struct net_device *);
+
+static int debug = 3;
+static int eeprom_bad_csum_allow = 0;
+static int use_io = 0;
+module_param(debug, int, 0);
+module_param(eeprom_bad_csum_allow, int, 0);
+module_param(use_io, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
+MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
+
+#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
+ PCI_VENDOR_ID_INTEL,
device_id, PCI_ANY_ID, PCI_ANY_ID, \ + PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } +static const struct pci_device_id e100_id_table[] = { + INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1032, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1033, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1034, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1038, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1039, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103A, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103B, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103C, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103D, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103E, 4), + INTEL_8255X_ETHERNET_DEVICE(0x1050, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1051, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1052, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1053, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1054, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1055, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1056, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1057, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1059, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1064, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1065, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1066, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1067, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1068, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), + INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), + INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), + INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), + INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), + INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), + { 0, } +}; + +// prevent from being loaded automatically +//MODULE_DEVICE_TABLE(pci, e100_id_table); + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 0xFF, +}; + +enum phy { + phy_100a = 0x000003E0, + phy_100c = 0x035002A8, + phy_82555_tx = 0x015002A8, + phy_nsc_tx = 0x5C002000, + phy_82562_et = 0x033002A8, + phy_82562_em = 0x032002A8, + phy_82562_ek = 0x031002A8, + phy_82562_eh = 0x017002A8, + phy_82552_v = 0xd061004d, + phy_unknown = 0xFFFFFFFF, +}; + +/* CSR (Control/Status Registers) */ +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 0x08, + rus_ready = 0x10, + rus_mask = 0x3C, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0x00, + stat_ack_sw_gen = 0x04, + stat_ack_rnr = 0x10, + stat_ack_cu_idle = 0x20, + stat_ack_frame_rx = 0x40, + stat_ack_cu_cmd_done = 0x80, + stat_ack_not_present = 0xFF, + stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), + stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), +}; + +enum scb_cmd_hi { + irq_mask_none = 0x00, + irq_mask_all = 0x01, + irq_sw_gen = 0x02, +}; + +enum scb_cmd_lo { + cuc_nop = 0x00, + ruc_start = 0x01, + 
ruc_load_base = 0x06, + cuc_start = 0x10, + cuc_resume = 0x20, + cuc_dump_addr = 0x40, + cuc_dump_stats = 0x50, + cuc_load_base = 0x60, + cuc_dump_reset = 0x70, +}; + +enum cuc_dump { + cuc_dump_complete = 0x0000A005, + cuc_dump_reset_complete = 0x0000A007, +}; + +enum port { + software_reset = 0x0000, + selftest = 0x0001, + selective_reset = 0x0002, +}; + +enum eeprom_ctrl_lo { + eesk = 0x01, + eecs = 0x02, + eedi = 0x04, + eedo = 0x08, +}; + +enum mdi_ctrl { + mdi_write = 0x04000000, + mdi_read = 0x08000000, + mdi_ready = 0x10000000, +}; + +enum eeprom_op { + op_write = 0x05, + op_read = 0x06, + op_ewds = 0x10, + op_ewen = 0x13, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 0x03, + eeprom_phy_iface = 0x06, + eeprom_id = 0x0A, + eeprom_config_asf = 0x0D, + eeprom_smbus_addr = 0x90, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 0x0080, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB, + I82553C, + I82503, + DP83840, + S80C240, + S80C24, + I82555, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 0x0020, +}; + +enum eeprom_config_asf { + eeprom_asf = 0x8000, + eeprom_gcl = 0x4000, +}; + +enum cb_status { + cb_complete = 0x8000, + cb_ok = 0x2000, +}; + +/* + * cb_command - Command Block flags + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory + */ +enum cb_command { + cb_nop = 0x0000, + cb_iaaddr = 0x0001, + cb_config = 0x0002, + cb_multi = 0x0003, + cb_tx = 0x0004, + cb_ucode = 0x0005, + cb_dump = 0x0006, + cb_tx_sf = 0x0008, + cb_tx_nc = 0x0010, + cb_cid = 0x1f00, + cb_i = 0x2000, + cb_s = 0x4000, + cb_el = 0x8000, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next, *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +#if defined(__BIG_ENDIAN_BITFIELD) +#define X(a,b) b,a +#else +#define X(a,b) a,b +#endif +struct config { +/*0*/ u8 X(byte_count:6, pad0:2); +/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); +/*2*/ u8 adaptive_ifs; +/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), + term_write_cache_line:1), pad3:4); +/*4*/ u8 X(rx_dma_max_count:7, pad4:1); +/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); +/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), + tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), + rx_save_overruns : 1), rx_save_bad_frames : 1); +/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), + pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), + tx_dynamic_tbd:1); +/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); +/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), + link_status_wake:1), arp_wake:1), mcmatch_wake:1); +/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), + loopback:2); +/*11*/ u8 X(linear_priority:3, pad11:5); +/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); +/*13*/ u8 ip_addr_lo; +/*14*/ u8 ip_addr_hi; +/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), + wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), + pad15_2:1), crs_or_cdt:1); +/*16*/ u8 fc_delay_lo; +/*17*/ u8 fc_delay_hi; +/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), + rx_long_ok:1), fc_priority_threshold:3), pad18:1); +/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), + fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), + full_duplex_force:1), full_duplex_pin:1); +/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), 
pad20_2:1); +/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); +/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); + u8 pad_d102[9]; +}; + +#define E100_MAX_MULTICAST_ADDRS 64 +struct multi { + __le16 count; + u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; +}; + +/* Important: keep total struct u32-aligned */ +#define UCODE_SIZE 134 +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[ETH_ALEN]; + __le32 ucode[UCODE_SIZE]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next, *prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, lb_mac = 1, lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames, tx_max_collisions, tx_late_collisions, + tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, + tx_multiple_collisions, tx_total_collisions; + __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors, + rx_resource_errors, rx_overrun_errors, rx_cdt_errors, + rx_short_frame_errors; + __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; + __le16 xmt_tco_frames, rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + /* Begin: frequently used values: keep adjacent for cache effect */ + u32 msg_enable ____cacheline_aligned; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data); + + struct rx *rxs ____cacheline_aligned; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + + spinlock_t cb_lock ____cacheline_aligned; + spinlock_t cmd_lock; + struct csr __iomem *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + /* End: frequently used values: keep adjacent for cache effect */ + + enum { + ich = (1 << 0), + promiscuous = (1 << 1), + multicast_all = (1 << 2), + wol_magic = (1 << 3), + ich_10h_workaround = (1 << 4), + } flags ____cacheline_aligned; + + enum mac mac; + enum phy phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + + struct mem *mem; + dma_addr_t dma_addr; + + struct dma_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + + u16 eeprom_wc; + + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +static inline void e100_write_flush(struct nic *nic) +{ + /* Flush previous PCI writes through intermediate bridges + * by doing a benign read */ + (void)ioread8(&nic->csr->scb.status); +} + +static void e100_enable_irq(struct nic *nic) +{ + unsigned long flags; + + if (nic->ecdev) + return; + + 
spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_disable_irq(struct nic *nic) +{ + unsigned long flags = 0; + + if (!nic->ecdev) + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_hw_reset(struct nic *nic) +{ + /* Put CU and RU into idle with a selective reset to get + * device off of PCI bus */ + iowrite32(selective_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Now fully reset device */ + iowrite32(software_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Mask off our interrupt line - it's unmasked after reset */ + e100_disable_irq(nic); +} + +static int e100_self_test(struct nic *nic) +{ + u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); + + /* Passing the self-test is a pretty good indication + * that the device can DMA to/from host memory */ + + nic->mem->selftest.signature = 0; + nic->mem->selftest.result = 0xFFFFFFFF; + + iowrite32(selftest | dma_addr, &nic->csr->port); + e100_write_flush(nic); + /* Wait 10 msec for self-test to complete */ + msleep(10); + + /* Interrupts are enabled after self-test */ + e100_disable_irq(nic); + + /* Check results of self-test */ + if (nic->mem->selftest.result != 0) { + netif_err(nic, hw, nic->netdev, + "Self-test failed: result=0x%08X\n", + nic->mem->selftest.result); + return -ETIMEDOUT; + } + if (nic->mem->selftest.signature == 0) { + netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data) +{ + u32 cmd_addr_data[3]; + u8 ctrl; + int i, j; + + /* Three cmds: write/erase enable, write data, write/erase disable */ + cmd_addr_data[0] = op_ewen << (addr_len - 2); + cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | + le16_to_cpu(data); + cmd_addr_data[2] = op_ewds << (addr_len - 2); + + /* Bit-bang cmds to write word to eeprom */ + for (j = 0; j < 3; j++) { + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data[j] & (1 << i)) ? + eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } + /* Wait 10 msec for cmd to complete */ + msleep(10); + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } +}; + +/* General technique stolen from the eepro100 driver - very clever */ +static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) +{ + u32 cmd_addr_data; + u16 data = 0; + u8 ctrl; + int i; + + cmd_addr_data = ((op_read << *addr_len) | addr) << 16; + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Bit-bang to read word from eeprom */ + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Eeprom drives a dummy zero to EEDO after receiving + * complete address. 
Use this to adjust addr_len. */ + ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); + if (!(ctrl & eedo) && i > 16) { + *addr_len -= (i - 16); + i = 17; + } + + data = (data << 1) | (ctrl & eedo ? 1 : 0); + } + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + return cpu_to_le16(data); +}; + +/* Load entire EEPROM image into driver cache and validate checksum */ +static int e100_eeprom_load(struct nic *nic) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + for (addr = 0; addr < nic->eeprom_wc; addr++) { + nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); + if (addr < nic->eeprom_wc - 1) + checksum += le16_to_cpu(nic->eeprom[addr]); + } + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { + netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); + if (!eeprom_bad_csum_allow) + return -EAGAIN; + } + + return 0; +} + +/* Save (portion of) driver EEPROM cache to device and update checksum */ +static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + if (start + count >= nic->eeprom_wc) + return -EINVAL; + + for (addr = start; addr < start + count; addr++) + e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + for (addr = 0; addr < nic->eeprom_wc - 1; addr++) + checksum += le16_to_cpu(nic->eeprom[addr]); + nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); + e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, + nic->eeprom[nic->eeprom_wc - 1]); + + return 0; +} + +#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! 
*/ +#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ +static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) +{ + unsigned long flags = 0; + unsigned int i; + int err = 0; + + if (!nic->ecdev) + spin_lock_irqsave(&nic->cmd_lock, flags); + + /* Previous command is accepted when SCB clears */ + for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { + if (likely(!ioread8(&nic->csr->scb.cmd_lo))) + break; + cpu_relax(); + if (unlikely(i > E100_WAIT_SCB_FAST)) + udelay(5); + } + if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) { + err = -EAGAIN; + goto err_unlock; + } + + if (unlikely(cmd != cuc_resume)) + iowrite32(dma_addr, &nic->csr->scb.gen_ptr); + iowrite8(cmd, &nic->csr->scb.cmd_lo); + +err_unlock: + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->cmd_lock, flags); + + return err; +} + +static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, + int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) +{ + struct cb *cb; + unsigned long flags; + int err; + + if (!nic->ecdev) { + spin_lock_irqsave(&nic->cb_lock, flags); + } + + if (unlikely(!nic->cbs_avail)) { + err = -ENOMEM; + goto err_unlock; + } + + cb = nic->cb_to_use; + nic->cb_to_use = cb->next; + nic->cbs_avail--; + cb->skb = skb; + + err = cb_prepare(nic, cb, skb); + if (err) + goto err_unlock; + + if (unlikely(!nic->cbs_avail)) + err = -ENOSPC; + + + /* Order is important otherwise we'll be in a race with h/w: + * set S-bit in current first, then clear S-bit in previous. */ + cb->command |= cpu_to_le16(cb_s); + dma_wmb(); + cb->prev->command &= cpu_to_le16(~cb_s); + + while (nic->cb_to_send != nic->cb_to_use) { + if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, + nic->cb_to_send->dma_addr))) { + /* Ok, here's where things get sticky. It's + * possible that we can't schedule the command + * because the controller is too busy, so + * let's just queue the command and try again + * when another command is scheduled. */ + if (err == -ENOSPC) { + //request a reset + schedule_work(&nic->tx_timeout_task); + } + break; + } else { + nic->cuc_cmd = cuc_resume; + nic->cb_to_send = nic->cb_to_send->next; + } + } + +err_unlock: + if (!nic->ecdev) { + spin_unlock_irqrestore(&nic->cb_lock, flags); + } + + return err; +} + +static int mdio_read(struct net_device *netdev, int addr, int reg) +{ + struct nic *nic = netdev_priv(netdev); + return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); +} + +static void mdio_write(struct net_device *netdev, int addr, int reg, int data) +{ + struct nic *nic = netdev_priv(netdev); + + nic->mdio_ctrl(nic, addr, mdi_write, reg, data); +} + +/* the standard mdio_ctrl() function for usual MII-compliant hardware */ +static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) +{ + u32 data_out = 0; + unsigned int i; + unsigned long flags = 0; + + + /* + * Stratus87247: we shouldn't be writing the MDI control + * register until the Ready bit shows True. Also, since + * manipulation of the MDI control registers is a multi-step + * procedure it should be done under lock. 
+ */ + if (!nic->ecdev) + spin_lock_irqsave(&nic->mdio_lock, flags); + for (i = 100; i; --i) { + if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) + break; + udelay(20); + } + if (unlikely(!i)) { + netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->mdio_lock, flags); + return 0; /* No way to indicate timeout error */ + } + iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); + + for (i = 0; i < 100; i++) { + udelay(20); + if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) + break; + } + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->mdio_lock, flags); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data, data_out); + return (u16)data_out; +} + +/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */ +static u16 mdio_ctrl_phy_82552_v(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + if ((reg == MII_BMCR) && (dir == mdi_write)) { + if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) { + u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, + MII_ADVERTISE); + + /* + * Workaround Si issue where sometimes the part will not + * autoneg to 100Mbps even when advertised. + */ + if (advert & ADVERTISE_100FULL) + data |= BMCR_SPEED100 | BMCR_FULLDPLX; + else if (advert & ADVERTISE_100HALF) + data |= BMCR_SPEED100; + } + } + return mdio_ctrl_hw(nic, addr, dir, reg, data); +} + +/* Fully software-emulated mdio_ctrl() function for cards without + * MII-compliant PHYs. + * For now, this is mainly geared towards 80c24 support; in case of further + * requirements for other types (i82503, ...?) either extend this mechanism + * or split it, whichever is cleaner. + */ +static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + /* might need to allocate a netdev_priv'ed register array eventually + * to be able to record state changes, but for now + * some fully hardcoded register handling ought to be ok I guess. */ + + if (dir == mdi_read) { + switch (reg) { + case MII_BMCR: + /* Auto-negotiation, right? */ + return BMCR_ANENABLE | + BMCR_FULLDPLX; + case MII_BMSR: + return BMSR_LSTATUS /* for mii_link_ok() */ | + BMSR_ANEGCAPABLE | + BMSR_10FULL; + case MII_ADVERTISE: + /* 80c24 is a "combo card" PHY, right? */ + return ADVERTISE_10HALF | + ADVERTISE_10FULL; + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } else { + switch (reg) { + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } +} +static inline int e100_phy_supports_mii(struct nic *nic) +{ + /* for now, just check it by comparing whether we + are using MII software emulation. + */ + return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); +} + +static void e100_get_defaults(struct nic *nic) +{ + struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; + struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; + + /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ + nic->mac = (nic->flags & ich) ? 
mac_82559_D101M : nic->pdev->revision; + if (nic->mac == mac_unknown) + nic->mac = mac_82557_D100_A; + + nic->params.rfds = rfds; + nic->params.cbs = cbs; + + /* Quadwords to DMA into FIFO before starting frame transmit */ + nic->tx_threshold = 0xE0; + + /* no interrupt for every tx completion, delay = 256us if not 557 */ + nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | + ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); + + /* Template for a freshly allocated RFD */ + nic->blank_rfd.command = 0; + nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); + nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); + + /* MII setup */ + nic->mii.phy_id_mask = 0x1F; + nic->mii.reg_num_mask = 0x1F; + nic->mii.dev = nic->netdev; + nic->mii.mdio_read = mdio_read; + nic->mii.mdio_write = mdio_write; +} + +static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct config *config = &cb->u.config; + u8 *c = (u8 *)config; + struct net_device *netdev = nic->netdev; + + cb->command = cpu_to_le16(cb_config); + + memset(config, 0, sizeof(struct config)); + + config->byte_count = 0x16; /* bytes in this struct */ + config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ + config->direct_rx_dma = 0x1; /* reserved */ + config->standard_tcb = 0x1; /* 1=standard, 0=extended */ + config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ + config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ + config->tx_underrun_retry = 0x3; /* # of underrun retries */ + if (e100_phy_supports_mii(nic)) + config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ + config->pad10 = 0x6; + config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ + config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ + config->ifs = 0x6; /* x16 = inter frame spacing */ + config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ + config->pad15_1 = 0x1; + config->pad15_2 = 0x1; + config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ + config->fc_delay_hi = 0x40; /* time delay for fc frame */ + config->tx_padding = 0x1; /* 1=pad short frames */ + config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ + config->pad18 = 0x1; + config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ + config->pad20_1 = 0x1F; + config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ + config->pad21_1 = 0x5; + + config->adaptive_ifs = nic->adaptive_ifs; + config->loopback = nic->loopback; + + if (nic->mii.force_media && nic->mii.full_duplex) + config->full_duplex_force = 0x1; /* 1=force, 0=auto */ + + if (nic->flags & promiscuous || nic->loopback) { + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + config->promiscuous_mode = 0x1; /* 1=on, 0=off */ + } + + if (unlikely(netdev->features & NETIF_F_RXFCS)) + config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ + + if (nic->flags & multicast_all) + config->multicast_all = 0x1; /* 1=accept, 0=no */ + + /* disable WoL when up */ + if (nic->ecdev || + (netif_running(nic->netdev) || !(nic->flags & wol_magic))) + config->magic_packet_disable = 0x1; /* 1=off, 0=on */ + + if (nic->mac >= mac_82558_D101_A4) { + config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ + config->mwi_enable = 0x1; /* 1=enable, 0=disable */ + config->standard_tcb = 0x0; /* 1=standard, 0=extended */ + config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ + if (nic->mac >= mac_82559_D101M) { + config->tno_intr = 0x1; /* TCO stats enable */ + /* Enable TCO in extended config */ + if (nic->mac >= 
mac_82551_10) { + config->byte_count = 0x20; /* extended bytes */ + config->rx_d102_mode = 0x1; /* GMRC for TCO */ + } + } else { + config->standard_stat_counter = 0x0; + } + } + + if (netdev->features & NETIF_F_RXALL) { + config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + } + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", + c + 0); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", + c + 8); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", + c + 16); + return 0; +} + +/************************************************************************* +* CPUSaver parameters +* +* All CPUSaver parameters are 16-bit literals that are part of a +* "move immediate value" instruction. By changing the value of +* the literal in the instruction before the code is loaded, the +* driver can change the algorithm. +* +* INTDELAY - This loads the dead-man timer with its initial value. +* When this timer expires the interrupt is asserted, and the +* timer is reset each time a new packet is received. (see +* BUNDLEMAX below to set the limit on number of chained packets) +* The current default is 0x600 or 1536. Experiments show that +* the value should probably stay within the 0x200 - 0x1000. +* +* BUNDLEMAX - +* This sets the maximum number of frames that will be bundled. In +* some situations, such as the TCP windowing algorithm, it may be +* better to limit the growth of the bundle size than let it go as +* high as it can, because that could cause too much added latency. +* The default is six, because this is the number of packets in the +* default TCP window size. A value of 1 would make CPUSaver indicate +* an interrupt for every frame received. If you do not want to put +* a limit on the bundle size, set this value to xFFFF. +* +* BUNDLESMALL - +* This contains a bit-mask describing the minimum size frame that +* will be bundled. The default masks the lower 7 bits, which means +* that any frame less than 128 bytes in length will not be bundled, +* but will instead immediately generate an interrupt. This does +* not affect the current bundle in any way. Any frame that is 128 +* bytes or large will be bundled normally. This feature is meant +* to provide immediate indication of ACK frames in a TCP environment. +* Customers were seeing poor performance when a machine with CPUSaver +* enabled was sending but not receiving. The delay introduced when +* the ACKs were received was enough to reduce total throughput, because +* the sender would sit idle until the ACK was finally seen. +* +* The current default is 0xFF80, which masks out the lower 7 bits. +* This means that any frame which is x7F (127) bytes or smaller +* will cause an immediate interrupt. Because this value must be a +* bit mask, there are only a few valid values that can be used. To +* turn this feature off, the driver can write the value xFFFF to the +* lower word of this instruction (in the same way that the other +* parameters are used). Likewise, a value of 0xF800 (2047) would +* cause an interrupt to be generated for every frame, because all +* standard Ethernet frames are <= 2047 bytes in length. 
+*************************************************************************/ + +/* if you wish to disable the ucode functionality, while maintaining the + * workarounds it provides, set the following defines to: + * BUNDLESMALL 0 + * BUNDLEMAX 1 + * INTDELAY 1 + */ +#define BUNDLESMALL 1 +#define BUNDLEMAX (u16)6 +#define INTDELAY (u16)1536 /* 0x600 */ + +/* Initialize firmware */ +static const struct firmware *e100_request_firmware(struct nic *nic) +{ + const char *fw_name; + const struct firmware *fw = nic->fw; + u8 timer, bundle, min_size; + int err = 0; + bool required = false; + + /* do not load u-code for ICH devices */ + if (nic->flags & ich) + return NULL; + + /* Search for ucode match against h/w revision + * + * Based on comments in the source code for the FreeBSD fxp + * driver, the FIRMWARE_D102E ucode includes both CPUSaver and + * + * "fixes for bugs in the B-step hardware (specifically, bugs + * with Inline Receive)." + * + * So we must fail if it cannot be loaded. + * + * The other microcode files are only required for the optional + * CPUSaver feature. Nice to have, but no reason to fail. + */ + if (nic->mac == mac_82559_D101M) { + fw_name = FIRMWARE_D101M; + } else if (nic->mac == mac_82559_D101S) { + fw_name = FIRMWARE_D101S; + } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { + fw_name = FIRMWARE_D102E; + required = true; + } else { /* No ucode on other devices */ + return NULL; + } + + /* If the firmware has not previously been loaded, request a pointer + * to it. If it was previously loaded, we are reinitializing the + * adapter, possibly in a resume from hibernate, in which case + * request_firmware() cannot be used. + */ + if (!fw) + err = request_firmware(&fw, fw_name, &nic->pdev->dev); + + if (err) { + if (required) { + netif_err(nic, probe, nic->netdev, + "Failed to load firmware \"%s\": %d\n", + fw_name, err); + return ERR_PTR(err); + } else { + netif_info(nic, probe, nic->netdev, + "CPUSaver disabled. Needs \"%s\": %d\n", + fw_name, err); + return NULL; + } + } + + /* Firmware should be precisely UCODE_SIZE (words) plus three bytes + indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ + if (fw->size != UCODE_SIZE * 4 + 3) { + netif_err(nic, probe, nic->netdev, + "Firmware \"%s\" has wrong size %zu\n", + fw_name, fw->size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || + min_size >= UCODE_SIZE) { + netif_err(nic, probe, nic->netdev, + "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", + fw_name, timer, bundle, min_size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* OK, firmware is validated and ready to use. Save a pointer + * to it in the nic */ + nic->fw = fw; + return fw; +} + +static int e100_setup_ucode(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + const struct firmware *fw = (void *)skb; + u8 timer, bundle, min_size; + + /* It's not a real skb; we just abused the fact that e100_exec_cb + will pass it through to here... 
*/ + cb->skb = NULL; + + /* firmware is stored as little endian already */ + memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + /* Insert user-tunable settings in cb->u.ucode */ + cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); + cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); + cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); + + cb->command = cpu_to_le16(cb_ucode | cb_el); + return 0; +} + +static inline int e100_load_ucode_wait(struct nic *nic) +{ + const struct firmware *fw; + int err = 0, counter = 50; + struct cb *cb = nic->cb_to_clean; + + fw = e100_request_firmware(nic); + /* If it's NULL, then no ucode is required */ + if (IS_ERR_OR_NULL(fw)) + return PTR_ERR_OR_ZERO(fw); + + if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) + netif_err(nic, probe, nic->netdev, + "ucode cmd failed with error %d\n", err); + + /* must restart cuc */ + nic->cuc_cmd = cuc_start; + + /* wait for completion */ + e100_write_flush(nic); + udelay(10); + + /* wait for possibly (ouch) 500ms */ + while (!(cb->status & cpu_to_le16(cb_complete))) { + msleep(10); + if (!--counter) break; + } + + /* ack any interrupts, something could have been set */ + iowrite8(~0, &nic->csr->scb.stat_ack); + + /* if the command failed, or is not OK, notify and return */ + if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { + netif_err(nic, probe, nic->netdev, "ucode load failed\n"); + err = -EPERM; + } + + return err; +} + +static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_iaaddr); + memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); + return 0; +} + +static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_dump); + cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + + offsetof(struct mem, dump_buf)); + return 0; +} + +static int e100_phy_check_without_mii(struct nic *nic) +{ + u8 phy_type; + int without_mii; + + phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; + + switch (phy_type) { + case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ + case I82503: /* Non-MII PHY; UNTESTED! */ + case S80C24: /* Non-MII PHY; tested and working */ + /* paragraph from the FreeBSD driver, "FXP_PHY_80C24": + * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter + * doesn't have a programming interface of any sort. The + * media is sensed automatically based on how the link partner + * is configured. This is, in essence, manual configuration. + */ + netif_info(nic, probe, nic->netdev, + "found MII-less i82503 or 80c24 or other PHY\n"); + + nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; + nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ + + /* these might be needed for certain MII-less cards... 
+ * nic->flags |= ich; + * nic->flags |= ich_10h_workaround; */ + + without_mii = 1; + break; + default: + without_mii = 0; + break; + } + return without_mii; +} + +#define NCONFIG_AUTO_SWITCH 0x0080 +#define MII_NSC_CONG MII_RESV1 +#define NSC_CONG_ENABLE 0x0100 +#define NSC_CONG_TXREADY 0x0400 +#define ADVERTISE_FC_SUPPORTED 0x0400 +static int e100_phy_init(struct nic *nic) +{ + struct net_device *netdev = nic->netdev; + u32 addr; + u16 bmcr, stat, id_lo, id_hi, cong; + + /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ + for (addr = 0; addr < 32; addr++) { + nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) + break; + } + if (addr == 32) { + /* uhoh, no PHY detected: check whether we seem to be some + * weird, rare variant which is *known* to not have any MII. + * But do this AFTER MII checking only, since this does + * lookup of EEPROM values which may easily be unreliable. */ + if (e100_phy_check_without_mii(nic)) + return 0; /* simply return and hope for the best */ + else { + /* for unknown cases log a fatal error */ + netif_err(nic, hw, nic->netdev, + "Failed to locate any known PHY, aborting\n"); + return -EAGAIN; + } + } else + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy_addr = %d\n", nic->mii.phy_id); + + /* Get phy ID */ + id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); + id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); + nic->phy = (u32)id_hi << 16 | (u32)id_lo; + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy ID = 0x%08X\n", nic->phy); + + /* Select the phy and isolate the rest */ + for (addr = 0; addr < 32; addr++) { + if (addr != nic->mii.phy_id) { + mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); + } else if (nic->phy != phy_82552_v) { + bmcr = mdio_read(netdev, addr, MII_BMCR); + mdio_write(netdev, addr, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } + } + /* + * Workaround for 82552: + * Clear the ISOLATE bit on selected phy_id last (mirrored on all + * other phy_id's) using bmcr value from addr discovery loop above. + */ + if (nic->phy == phy_82552_v) + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* Handle National tx phys */ +#define NCS_PHY_MODEL_MASK 0xFFF0FFFF + if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { + /* Disable congestion control */ + cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); + cong |= NSC_CONG_TXREADY; + cong &= ~NSC_CONG_ENABLE; + mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); + } + + if (nic->phy == phy_82552_v) { + u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); + + /* assign special tweaked mdio_ctrl() function */ + nic->mdio_ctrl = mdio_ctrl_phy_82552_v; + + /* Workaround Si not advertising flow-control during autoneg */ + advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); + + /* Reset for the above changes to take effect */ + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + bmcr |= BMCR_RESET; + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); + } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && + (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && + (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { + /* enable/disable MDI/MDI-X auto-switching. 
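+	   Auto-switching is only enabled when autonegotiation is in use; with
+	   forced media (mii.force_media set) MII_NCONFIG is written with 0.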
*/ + mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, + nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); + } + + return 0; +} + +static int e100_hw_init(struct nic *nic) +{ + int err = 0; + + e100_hw_reset(nic); + + netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); + if ((err = e100_self_test(nic))) + return err; + + if ((err = e100_phy_init(nic))) + return err; + if ((err = e100_exec_cmd(nic, cuc_load_base, 0))) + return err; + if ((err = e100_exec_cmd(nic, ruc_load_base, 0))) + return err; + if ((err = e100_load_ucode_wait(nic))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_configure))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_addr, + nic->dma_addr + offsetof(struct mem, stats)))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0))) + return err; + + e100_disable_irq(nic); + + return 0; +} + +static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct net_device *netdev = nic->netdev; + struct netdev_hw_addr *ha; + u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); + + cb->command = cpu_to_le16(cb_multi); + cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == count) + break; + memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, + ETH_ALEN); + } + return 0; +} + +static void e100_set_multicast_list(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "mc_count=%d, flags=0x%04X\n", + netdev_mc_count(netdev), netdev->flags); + + if (netdev->flags & IFF_PROMISC) + nic->flags |= promiscuous; + else + nic->flags &= ~promiscuous; + + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS) + nic->flags |= multicast_all; + else + nic->flags &= ~multicast_all; + + e100_exec_cb(nic, NULL, e100_configure); + e100_exec_cb(nic, NULL, e100_multi); +} + +static void e100_update_stats(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct net_device_stats *ns = &dev->stats; + struct stats *s = &nic->mem->stats; + __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : + (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : + &s->complete; + + /* Device's stats reporting may take several microseconds to + * complete, so we're always waiting for results of the + * previous command. 
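+	 * Which word marks completion depends on the MAC revision (see the
+	 * selection of 'complete' above): fc_xmt_pause on pre-82558 parts,
+	 * xmt_tco_frames on pre-82559 parts, and the dedicated 'complete'
+	 * field on 82559 and later.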
*/ + + if (*complete == cpu_to_le32(cuc_dump_reset_complete)) { + *complete = 0; + nic->tx_frames = le32_to_cpu(s->tx_good_frames); + nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); + ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); + ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); + ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); + ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); + ns->collisions += nic->tx_collisions; + ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + + le32_to_cpu(s->tx_lost_crs); + nic->rx_short_frame_errors += + le32_to_cpu(s->rx_short_frame_errors); + ns->rx_length_errors = nic->rx_short_frame_errors + + nic->rx_over_length_errors; + ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); + ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); + ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); + ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + + le32_to_cpu(s->rx_alignment_errors) + + le32_to_cpu(s->rx_short_frame_errors) + + le32_to_cpu(s->rx_cdt_errors); + nic->tx_deferred += le32_to_cpu(s->tx_deferred); + nic->tx_single_collisions += + le32_to_cpu(s->tx_single_collisions); + nic->tx_multiple_collisions += + le32_to_cpu(s->tx_multiple_collisions); + if (nic->mac >= mac_82558_D101_A4) { + nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); + nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); + nic->rx_fc_unsupported += + le32_to_cpu(s->fc_rcv_unsupported); + if (nic->mac >= mac_82559_D101M) { + nic->tx_tco_frames += + le16_to_cpu(s->xmt_tco_frames); + nic->rx_tco_frames += + le16_to_cpu(s->rcv_tco_frames); + } + } + } + + + if (e100_exec_cmd(nic, cuc_dump_reset, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_dump_reset failed\n"); +} + +static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) +{ + /* Adjust inter-frame-spacing (IFS) between two transmits if + * we're getting collisions on a half-duplex connection. */ + + if (duplex == DUPLEX_HALF) { + u32 prev = nic->adaptive_ifs; + u32 min_frames = (speed == SPEED_100) ? 1000 : 100; + + if ((nic->tx_frames / 32 < nic->tx_collisions) && + (nic->tx_frames > min_frames)) { + if (nic->adaptive_ifs < 60) + nic->adaptive_ifs += 5; + } else if (nic->tx_frames < min_frames) { + if (nic->adaptive_ifs >= 5) + nic->adaptive_ifs -= 5; + } + if (nic->adaptive_ifs != prev) + e100_exec_cb(nic, NULL, e100_configure); + } +} + +static void e100_watchdog_impl(struct nic *nic) +{ + struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; + u32 speed; + + if (nic->ecdev) { + ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0); + return; + } + + netif_printk(nic, timer, KERN_DEBUG, nic->netdev, + "right now = %ld\n", jiffies); + + /* mii library handles link maintenance tasks */ + + mii_ethtool_gset(&nic->mii, &cmd); + speed = ethtool_cmd_speed(&cmd); + + if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", + speed == SPEED_100 ? 100 : 10, + cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); + } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Down\n"); + } + + mii_check_link(&nic->mii); + + /* Software generated interrupt to recover from (rare) Rx + * allocation failure. 
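+	 * (Not reached in EtherCAT mode: the nic->ecdev branch at the top of
+	 * this function only updates the link state and returns early.)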
+ * Unfortunately have to use a spinlock to not re-enable interrupts + * accidentally, due to hardware that shares a register between the + * interrupt mask bit and the SW Interrupt generation bit */ + spin_lock_irq(&nic->cmd_lock); + iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irq(&nic->cmd_lock); + + e100_update_stats(nic); + e100_adjust_adaptive_ifs(nic, speed, cmd.duplex); + + if (nic->mac <= mac_82557_D100_C) + /* Issue a multicast command to workaround a 557 lock up */ + e100_set_multicast_list(nic->netdev); + + if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) + /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ + nic->flags |= ich_10h_workaround; + else + nic->flags &= ~ich_10h_workaround; +} + +static void e100_watchdog(struct timer_list *t) +{ + struct nic *nic = from_timer(nic, t, watchdog); + + e100_watchdog_impl(nic); + + mod_timer(&nic->watchdog, + round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); +} + +static int e100_xmit_prepare(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + dma_addr_t dma_addr; + cb->command = nic->tx_command; + + dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + /* If we can't map the skb, have the upper layer try later */ + if (dma_mapping_error(&nic->pdev->dev, dma_addr)) { + dev_kfree_skb_any(skb); + skb = NULL; + return -ENOMEM; + } + + /* + * Use the last 4 bytes of the SKB payload packet as the CRC, used for + * testing, ie sending frames with bad CRC. + */ + if (unlikely(skb->no_fcs)) + cb->command |= cpu_to_le16(cb_tx_nc); + else + cb->command &= ~cpu_to_le16(cb_tx_nc); + + /* interrupt every 16 packets regardless of delay */ + if ((nic->cbs_avail & ~15) == nic->cbs_avail) + cb->command |= cpu_to_le16(cb_i); + cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); + cb->u.tcb.tcb_byte_count = 0; + cb->u.tcb.threshold = nic->tx_threshold; + cb->u.tcb.tbd_count = 1; + cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); + cb->u.tcb.tbd.size = cpu_to_le16(skb->len); + skb_tx_timestamp(skb); + return 0; +} + +static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + if (nic->flags & ich_10h_workaround) { + /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. + Issue a NOP command followed by a 1us delay before + issuing the Tx command. */ + if (e100_exec_cmd(nic, cuc_nop, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_nop failed\n"); + udelay(1); + } + + err = e100_exec_cb(nic, skb, e100_xmit_prepare); + + switch (err) { + case -ENOSPC: + /* We queued the skb, but now we're out of space. */ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "No space for CB\n"); + if (!nic->ecdev) + netif_stop_queue(netdev); + break; + case -ENOMEM: + /* This is a hard error - log it. 
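+		   Returning NETDEV_TX_BUSY below tells the stack the frame could
+		   not be queued and should be retried later.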
*/ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "Out of Tx resources, returning skb\n"); + if (!nic->ecdev) + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +static int e100_tx_clean(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct cb *cb; + int tx_cleaned = 0; + + if (!nic->ecdev) + spin_lock(&nic->cb_lock); + + /* Clean CBs marked complete */ + for (cb = nic->cb_to_clean; + cb->status & cpu_to_le16(cb_complete); + cb = nic->cb_to_clean = cb->next) { + dma_rmb(); /* read skb after status */ + netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, + "cb[%d]->status = 0x%04X\n", + (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), + cb->status); + + if (likely(cb->skb != NULL)) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += cb->skb->len; + + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); + if (!nic->ecdev) + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + tx_cleaned = 1; + } + cb->status = 0; + nic->cbs_avail++; + } + + if (!nic->ecdev) { + spin_unlock(&nic->cb_lock); + + /* Recover from running out of Tx resources in xmit_frame */ + if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) + netif_wake_queue(nic->netdev); + } + + return tx_cleaned; +} + +static void e100_clean_cbs(struct nic *nic) +{ + if (nic->cbs) { + while (nic->cbs_avail != nic->params.cbs.count) { + struct cb *cb = nic->cb_to_clean; + if (cb->skb) { + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); + if (!nic->ecdev) + dev_kfree_skb(cb->skb); + } + nic->cb_to_clean = nic->cb_to_clean->next; + nic->cbs_avail++; + } + dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); + nic->cbs = NULL; + nic->cbs_avail = 0; + } + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = + nic->cbs; +} + +static int e100_alloc_cbs(struct nic *nic) +{ + struct cb *cb; + unsigned int i, count = nic->params.cbs.count; + + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; + nic->cbs_avail = 0; + + nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL, + &nic->cbs_dma_addr); + if (!nic->cbs) + return -ENOMEM; + + for (cb = nic->cbs, i = 0; i < count; cb++, i++) { + cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; + cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; + + cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); + cb->link = cpu_to_le32(nic->cbs_dma_addr + + ((i+1) % count) * sizeof(struct cb)); + } + + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; + nic->cbs_avail = count; + + return 0; +} + +static inline void e100_start_receiver(struct nic *nic, struct rx *rx) +{ + if (!nic->rxs) return; + if (RU_SUSPENDED != nic->ru_running) return; + + /* handle init time starts */ + if (!rx) rx = nic->rxs; + + /* (Re)start RU if suspended or idle and RFA is non-NULL */ + if (rx->skb) { + e100_exec_cmd(nic, ruc_start, rx->dma_addr); + nic->ru_running = RU_RUNNING; + } +} + +#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) +{ + if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) + return -ENOMEM; + + /* Init, and map the RFD. 
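+	   A blank RFD template is copied into the skb head and the buffer is
+	   mapped DMA_BIDIRECTIONAL, because the controller writes completion
+	   status back into the RFD.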
*/ + skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { + dev_kfree_skb_any(rx->skb); + rx->skb = NULL; + rx->dma_addr = 0; + return -ENOMEM; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + dma_sync_single_for_device(&nic->pdev->dev, + rx->prev->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + } + + return 0; +} + +static int e100_rx_indicate(struct nic *nic, struct rx *rx, + unsigned int *work_done, unsigned int work_to_do) +{ + struct net_device *dev = nic->netdev; + struct sk_buff *skb = rx->skb; + struct rfd *rfd = (struct rfd *)skb->data; + u16 rfd_status, actual_size; + u16 fcs_pad = 0; + + if (unlikely(work_done && *work_done >= work_to_do)) + return -EAGAIN; + + /* Need to sync before taking a peek at cb_complete bit */ + dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); + rfd_status = le16_to_cpu(rfd->status); + + netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, + "status=0x%04X\n", rfd_status); + dma_rmb(); /* read size after status bit */ + + /* If data isn't ready, nothing to indicate */ + if (unlikely(!(rfd_status & cb_complete))) { + /* If the next buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling + * interrupts */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), + DMA_FROM_DEVICE); + return -ENODATA; + } + + /* Get actual data size */ + if (unlikely(dev->features & NETIF_F_RXFCS)) + fcs_pad = 4; + actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; + if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) + actual_size = RFD_BUF_LEN - sizeof(struct rfd); + + /* Get data */ + dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); + + /* If this buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling interrupts. + * This can happen when the RU sees the size change but also sees + * the el bit set. */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) { + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + } + + if (!nic->ecdev) { + /* Pull off the RFD and put the actual data (minus eth hdr) */ + skb_reserve(skb, sizeof(struct rfd)); + skb_put(skb, actual_size); + skb->protocol = eth_type_trans(skb, nic->netdev); + } + + /* If we are receiving all frames, then don't bother + * checking for errors. + */ + if (unlikely(dev->features & NETIF_F_RXALL)) { + if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) + /* Received oversized frame, but keep it. 
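+			   (the over-length counter is still bumped, but the frame is
+			   passed up because NETIF_F_RXALL is set)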
*/ + nic->rx_over_length_errors++; + goto process_skb; + } + + if (unlikely(!(rfd_status & cb_ok))) { + if (!nic->ecdev) { + /* Don't indicate if hardware indicates errors */ + dev_kfree_skb_any(skb); + } + } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) { + /* Don't indicate oversized frames */ + nic->rx_over_length_errors++; + if (!nic->ecdev) { + dev_kfree_skb_any(skb); + } + } else { +process_skb: + dev->stats.rx_packets++; + dev->stats.rx_bytes += (actual_size - fcs_pad); + if (nic->ecdev) { + ecdev_receive(nic->ecdev, + skb->data + sizeof(struct rfd), actual_size - fcs_pad); + + // No need to detect link status as + // long as frames are received: Reset watchdog. + if (ecdev_get_link(nic->ecdev)) { + nic->ec_watchdog_jiffies = jiffies; + } + } else { + netif_receive_skb(skb); + } + if (work_done) + (*work_done)++; + } + + if (nic->ecdev) { + // make receive frame descriptior usable again + memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = dma_map_single(&nic->pdev->dev, skb->data, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { + rx->dma_addr = 0; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *) rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + dma_sync_single_for_device(&nic->pdev->dev, rx->prev->dma_addr, + sizeof(struct rfd), DMA_TO_DEVICE); + } + } else { + rx->skb = NULL; + } + + return 0; +} + +static void e100_rx_clean(struct nic *nic, unsigned int *work_done, + unsigned int work_to_do) +{ + struct rx *rx; + int restart_required = 0, err = 0; + struct rx *old_before_last_rx, *new_before_last_rx; + struct rfd *old_before_last_rfd, *new_before_last_rfd; + + /* Indicate newly arrived packets */ + for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { + err = e100_rx_indicate(nic, rx, work_done, work_to_do); + /* Hit quota or no more to clean */ + if (-EAGAIN == err || -ENODATA == err) + break; + } + + + /* On EAGAIN, hit quota so have more work to do, restart once + * cleanup is complete. + * Else, are we already rnr? then pay attention!!! this ensures that + * the state machine progression never allows a start with a + * partially cleaned list, avoiding a race between hardware + * and rx_to_clean when in NAPI mode */ + if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) + restart_required = 1; + + old_before_last_rx = nic->rx_to_use->prev->prev; + old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; + + if (!nic->ecdev) { + /* Alloc new skbs to refill list */ + for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { + if(unlikely(e100_rx_alloc_skb(nic, rx))) + break; /* Better luck next time (see watchdog) */ + } + } + + new_before_last_rx = nic->rx_to_use->prev->prev; + if (new_before_last_rx != old_before_last_rx) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer + * without worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this + * buffer. + * When the hardware hits the before last buffer with el-bit + * and size of 0, it will RNR interrupt, the RUS will go into + * the No Resources state. It will not complete nor write to + * this buffer. 
*/ + new_before_last_rfd = + (struct rfd *)new_before_last_rx->skb->data; + new_before_last_rfd->size = 0; + new_before_last_rfd->command |= cpu_to_le16(cb_el); + dma_sync_single_for_device(&nic->pdev->dev, + new_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + + /* Now that we have a new stopping point, we can clear the old + * stopping point. We must sync twice to get the proper + * ordering on the hardware side of things. */ + old_before_last_rfd->command &= ~cpu_to_le16(cb_el); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + + ETH_FCS_LEN); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + } + + if (restart_required) { + // ack the rnr? + iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); + e100_start_receiver(nic, nic->rx_to_clean); + if (work_done) + (*work_done)++; + } +} + +static void e100_rx_clean_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + + nic->ru_running = RU_UNINITIALIZED; + + if (nic->rxs) { + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + if (rx->skb) { + dma_unmap_single(&nic->pdev->dev, + rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); + dev_kfree_skb(rx->skb); + } + } + kfree(nic->rxs); + nic->rxs = NULL; + } + + nic->rx_to_use = nic->rx_to_clean = NULL; +} + +static int e100_rx_alloc_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + struct rfd *before_last; + + nic->rx_to_use = nic->rx_to_clean = NULL; + nic->ru_running = RU_UNINITIALIZED; + + if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL))) + return -ENOMEM; + + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; + rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; + if (e100_rx_alloc_skb(nic, rx)) { + e100_rx_clean_list(nic); + return -ENOMEM; + } + } + + if (!nic->ecdev) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer without + * worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this buffer. + * When the hardware hits the before last buffer with el-bit and size + * of 0, it will RNR interrupt, the RU will go into the No Resources + * state. It will not complete nor write to this buffer. 
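+		 * The EtherCAT path (nic->ecdev set) skips this stop marker and
+		 * instead reuses each RFD in place after indication, see
+		 * e100_rx_indicate().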
*/ + rx = nic->rxs->prev->prev; + before_last = (struct rfd *)rx->skb->data; + before_last->command |= cpu_to_le16(cb_el); + before_last->size = 0; + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); + } + + nic->rx_to_use = nic->rx_to_clean = nic->rxs; + nic->ru_running = RU_SUSPENDED; + + return 0; +} + +static irqreturn_t e100_intr(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct nic *nic = netdev_priv(netdev); + u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); + + netif_printk(nic, intr, KERN_DEBUG, nic->netdev, + "stat_ack = 0x%02X\n", stat_ack); + + if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ + stat_ack == stat_ack_not_present) /* Hardware is ejected */ + return IRQ_NONE; + + /* Ack interrupt(s) */ + iowrite8(stat_ack, &nic->csr->scb.stat_ack); + + /* We hit Receive No Resource (RNR); restart RU after cleaning */ + if (stat_ack & stat_ack_rnr) + nic->ru_running = RU_SUSPENDED; + + if (!nic->ecdev && likely(napi_schedule_prep(&nic->napi))) { + e100_disable_irq(nic); + __napi_schedule(&nic->napi); + } + + return IRQ_HANDLED; +} + +void e100_ec_poll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_rx_clean(nic, NULL, 100); + e100_tx_clean(nic); + + if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) { + e100_watchdog_impl(nic); + nic->ec_watchdog_jiffies = jiffies; + } +} + + +static int e100_poll(struct napi_struct *napi, int budget) +{ + struct nic *nic = container_of(napi, struct nic, napi); + unsigned int work_done = 0; + + e100_rx_clean(nic, &work_done, budget); + e100_tx_clean(nic); + + /* If budget fully consumed, continue polling */ + if (work_done == budget) + return budget; + + /* only re-enable interrupt if stack agrees polling is really done */ + if (likely(napi_complete_done(napi, work_done))) + e100_enable_irq(nic); + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void e100_netpoll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_disable_irq(nic); + e100_intr(nic->pdev->irq, netdev); + e100_tx_clean(nic); + e100_enable_irq(nic); +} +#endif + +static int e100_set_mac_address(struct net_device *netdev, void *p) +{ + struct nic *nic = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + e100_exec_cb(nic, NULL, e100_setup_iaaddr); + + return 0; +} + +static int e100_asf(struct nic *nic) +{ + /* ASF can be enabled from eeprom */ + return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && + (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && + !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && + ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); +} + +static int e100_up(struct nic *nic) +{ + int err; + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_rx_clean_list; + if ((err = e100_hw_init(nic))) + goto err_clean_cbs; + e100_set_multicast_list(nic->netdev); + e100_start_receiver(nic, NULL); + if (!nic->ecdev) { + mod_timer(&nic->watchdog, jiffies); + } + if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, + nic->netdev->name, nic->netdev))) + goto err_no_irq; + if (!nic->ecdev) { + netif_wake_queue(nic->netdev); + napi_enable(&nic->napi); + /* enable ints _after_ enabling poll, preventing a race between + * disable ints+schedule */ + e100_enable_irq(nic); + } + return 0; 
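+	/* Error unwinding: undo the successful setup steps above in reverse
+	 * order. */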
+ +err_no_irq: + if (!nic->ecdev) + del_timer_sync(&nic->watchdog); +err_clean_cbs: + e100_clean_cbs(nic); +err_rx_clean_list: + e100_rx_clean_list(nic); + return err; +} + +static void e100_down(struct nic *nic) +{ + if (!nic->ecdev) { + /* wait here for poll to complete */ + napi_disable(&nic->napi); + netif_stop_queue(nic->netdev); + } + e100_hw_reset(nic); + free_irq(nic->pdev->irq, nic->netdev); + if (!nic->ecdev) { + del_timer_sync(&nic->watchdog); + netif_carrier_off(nic->netdev); + } + e100_clean_cbs(nic); + e100_rx_clean_list(nic); +} + +static void e100_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct nic *nic = netdev_priv(netdev); + + /* Reset outside of interrupt context, to avoid request_irq + * in interrupt context */ + schedule_work(&nic->tx_timeout_task); +} + +static void e100_tx_timeout_task(struct work_struct *work) +{ + struct nic *nic = container_of(work, struct nic, tx_timeout_task); + struct net_device *netdev = nic->netdev; + + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); + + rtnl_lock(); + if (netif_running(netdev)) { + e100_down(netdev_priv(netdev)); + e100_up(netdev_priv(netdev)); + } + rtnl_unlock(); +} + +static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) +{ + int err; + struct sk_buff *skb; + + /* Use driver resources to perform internal MAC or PHY + * loopback test. A single packet is prepared and transmitted + * in loopback mode, and the test passes if the received + * packet compares byte-for-byte to the transmitted packet. */ + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_clean_rx; + + /* ICH PHY loopback is broken so do MAC loopback instead */ + if (nic->flags & ich && loopback_mode == lb_phy) + loopback_mode = lb_mac; + + nic->loopback = loopback_mode; + if ((err = e100_hw_init(nic))) + goto err_loopback_none; + + if (loopback_mode == lb_phy) + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, + BMCR_LOOPBACK); + + e100_start_receiver(nic, NULL); + + if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { + err = -ENOMEM; + goto err_loopback_none; + } + skb_put(skb, ETH_DATA_LEN); + memset(skb->data, 0xFF, ETH_DATA_LEN); + e100_xmit_frame(skb, nic->netdev); + + msleep(10); + + dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + + if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), + skb->data, ETH_DATA_LEN)) + err = -EAGAIN; + +err_loopback_none: + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); + nic->loopback = lb_none; + e100_clean_cbs(nic); + e100_hw_reset(nic); +err_clean_rx: + e100_rx_clean_list(nic); + return err; +} + +#define MII_LED_CONTROL 0x1B +#define E100_82552_LED_OVERRIDE 0x19 +#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ +#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ + +static int e100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + + mii_ethtool_get_link_ksettings(&nic->mii, cmd); + + return 0; +} + +static int e100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); + err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); + e100_exec_cb(nic, NULL, e100_configure); + + return err; +} + +static void e100_get_drvinfo(struct 
net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct nic *nic = netdev_priv(netdev); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(nic->pdev), + sizeof(info->bus_info)); +} + +#define E100_PHY_REGS 0x1D +static int e100_get_regs_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + /* We know the number of registers, and the size of the dump buffer. + * Calculate the total size in bytes. + */ + return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); +} + +static void e100_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nic *nic = netdev_priv(netdev); + u32 *buff = p; + int i; + + regs->version = (1 << 24) | nic->pdev->revision; + buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | + ioread8(&nic->csr->scb.cmd_lo) << 16 | + ioread16(&nic->csr->scb.status); + for (i = 0; i < E100_PHY_REGS; i++) + /* Note that we read the registers in reverse order. This + * ordering is the ABI apparently used by ethtool and other + * applications. + */ + buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, + E100_PHY_REGS - 1 - i); + memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); + e100_exec_cb(nic, NULL, e100_dump); + msleep(10); + memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, + sizeof(nic->mem->dump_buf)); +} + +static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; + wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0; +} + +static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + + if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || + !device_can_wakeup(&nic->pdev->dev)) + return -EOPNOTSUPP; + + if (wol->wolopts) + nic->flags |= wol_magic; + else + nic->flags &= ~wol_magic; + + device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); + + e100_exec_cb(nic, NULL, e100_configure); + + return 0; +} + +static u32 e100_get_msglevel(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->msg_enable; +} + +static void e100_set_msglevel(struct net_device *netdev, u32 value) +{ + struct nic *nic = netdev_priv(netdev); + nic->msg_enable = value; +} + +static int e100_nway_reset(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_nway_restart(&nic->mii); +} + +static u32 e100_get_link(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_link_ok(&nic->mii); +} + +static int e100_get_eeprom_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->eeprom_wc << 1; +} + +#define E100_EEPROM_MAGIC 0x1234 +static int e100_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + eeprom->magic = E100_EEPROM_MAGIC; + memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); + + return 0; +} + +static int e100_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + if (eeprom->magic != E100_EEPROM_MAGIC) + return -EINVAL; + + memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); + + return e100_eeprom_save(nic, eeprom->offset >> 1, + (eeprom->len >> 1) + 1); +} + +static void e100_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = 
netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + ring->rx_max_pending = rfds->max; + ring->tx_max_pending = cbs->max; + ring->rx_pending = rfds->count; + ring->tx_pending = cbs->count; +} + +static int e100_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(netdev)) + e100_down(nic); + rfds->count = max(ring->rx_pending, rfds->min); + rfds->count = min(rfds->count, rfds->max); + cbs->count = max(ring->tx_pending, cbs->min); + cbs->count = min(cbs->count, cbs->max); + netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", + rfds->count, cbs->count); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} + +static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)", + "Eeprom test (on/offline)", + "Self test (offline)", + "Mac loopback (offline)", + "Phy loopback (offline)", +}; +#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test) + +static void e100_diag_test(struct net_device *netdev, + struct ethtool_test *test, u64 *data) +{ + struct ethtool_cmd cmd; + struct nic *nic = netdev_priv(netdev); + int i; + + memset(data, 0, E100_TEST_LEN * sizeof(u64)); + data[0] = !mii_link_ok(&nic->mii); + data[1] = e100_eeprom_load(nic); + if (test->flags & ETH_TEST_FL_OFFLINE) { + + /* save speed, duplex & autoneg settings */ + mii_ethtool_gset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_down(nic); + data[2] = e100_self_test(nic); + data[3] = e100_loopback_test(nic, lb_mac); + data[4] = e100_loopback_test(nic, lb_phy); + + /* restore speed, duplex & autoneg settings */ + mii_ethtool_sset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_up(nic); + } + for (i = 0; i < E100_TEST_LEN; i++) + test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; + + msleep_interruptible(4 * 1000); +} + +static int e100_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct nic *nic = netdev_priv(netdev); + enum led_state { + led_on = 0x01, + led_off = 0x04, + led_on_559 = 0x05, + led_on_557 = 0x07, + }; + u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : + MII_LED_CONTROL; + u16 leds = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; + + case ETHTOOL_ID_ON: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : + (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; + break; + + case ETHTOOL_ID_OFF: + leds = (nic->phy == phy_82552_v) ? 
E100_82552_LED_OFF : led_off; + break; + + case ETHTOOL_ID_INACTIVE: + break; + } + + mdio_write(netdev, nic->mii.phy_id, led_reg, leds); + return 0; +} + +static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + /* device-specific stats */ + "tx_deferred", "tx_single_collisions", "tx_multi_collisions", + "tx_flow_control_pause", "rx_flow_control_pause", + "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", + "rx_short_frame_errors", "rx_over_length_errors", +}; +#define E100_NET_STATS_LEN 21 +#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) + +static int e100_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E100_TEST_LEN; + case ETH_SS_STATS: + return E100_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e100_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nic *nic = netdev_priv(netdev); + int i; + + for (i = 0; i < E100_NET_STATS_LEN; i++) + data[i] = ((unsigned long *)&netdev->stats)[i]; + + data[i++] = nic->tx_deferred; + data[i++] = nic->tx_single_collisions; + data[i++] = nic->tx_multiple_collisions; + data[i++] = nic->tx_fc_pause; + data[i++] = nic->rx_fc_pause; + data[i++] = nic->rx_fc_unsupported; + data[i++] = nic->tx_tco_frames; + data[i++] = nic->rx_tco_frames; + data[i++] = nic->rx_short_frame_errors; + data[i++] = nic->rx_over_length_errors; +} + +static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e100_gstrings_test, sizeof(e100_gstrings_test)); + break; + case ETH_SS_STATS: + memcpy(data, e100_gstrings_stats, sizeof(e100_gstrings_stats)); + break; + } +} + +static const struct ethtool_ops e100_ethtool_ops = { + .get_drvinfo = e100_get_drvinfo, + .get_regs_len = e100_get_regs_len, + .get_regs = e100_get_regs, + .get_wol = e100_get_wol, + .set_wol = e100_set_wol, + .get_msglevel = e100_get_msglevel, + .set_msglevel = e100_set_msglevel, + .nway_reset = e100_nway_reset, + .get_link = e100_get_link, + .get_eeprom_len = e100_get_eeprom_len, + .get_eeprom = e100_get_eeprom, + .set_eeprom = e100_set_eeprom, + .get_ringparam = e100_get_ringparam, + .set_ringparam = e100_set_ringparam, + .self_test = e100_diag_test, + .get_strings = e100_get_strings, + .set_phys_id = e100_set_phys_id, + .get_ethtool_stats = e100_get_ethtool_stats, + .get_sset_count = e100_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e100_get_link_ksettings, + .set_link_ksettings = e100_set_link_ksettings, +}; + +static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct nic *nic = netdev_priv(netdev); + + return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); +} + +static int e100_alloc(struct nic *nic) +{ + nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem), + &nic->dma_addr, GFP_KERNEL); + return nic->mem ? 
0 : -ENOMEM; +} + +static void e100_free(struct nic *nic) +{ + if (nic->mem) { + dma_free_coherent(&nic->pdev->dev, sizeof(struct mem), + nic->mem, nic->dma_addr); + nic->mem = NULL; + } +} + +static int e100_open(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err = 0; + + if (!nic->ecdev) + netif_carrier_off(netdev); + if ((err = e100_up(nic))) + netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); + return err; +} + +static int e100_close(struct net_device *netdev) +{ + e100_down(netdev_priv(netdev)); + return 0; +} + +static int e100_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct nic *nic = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + e100_exec_cb(nic, NULL, e100_configure); + return 1; +} + +static const struct net_device_ops e100_netdev_ops = { + .ndo_open = e100_open, + .ndo_stop = e100_close, + .ndo_start_xmit = e100_xmit_frame, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = e100_set_multicast_list, + .ndo_set_mac_address = e100_set_mac_address, + .ndo_eth_ioctl = e100_do_ioctl, + .ndo_tx_timeout = e100_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e100_netpoll, +#endif + .ndo_set_features = e100_set_features, +}; + +static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct nic *nic; + int err; + + if (!(netdev = alloc_etherdev(sizeof(struct nic)))) + return -ENOMEM; + + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; + + netdev->netdev_ops = &e100_netdev_ops; + netdev->ethtool_ops = &e100_ethtool_ops; + netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + nic = netdev_priv(netdev); + netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); + nic->netdev = netdev; + nic->pdev = pdev; + nic->msg_enable = (1 << debug) - 1; + nic->mdio_ctrl = mdio_ctrl_hw; + pci_set_drvdata(pdev, netdev); + + if ((err = pci_enable_device(pdev))) { + netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); + goto err_out_free_dev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + if ((err = pci_request_regions(pdev, DRV_NAME))) { + netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable_pdev; + } + + if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { + netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + if (use_io) + netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); + + nic->csr = pci_iomap(pdev, (use_io ? 
1 : 0), sizeof(struct csr)); + if (!nic->csr) { + netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + if (ent->driver_data) + nic->flags |= ich; + else + nic->flags &= ~ich; + + e100_get_defaults(nic); + + /* D100 MAC doesn't allow rx of vlan packets with normal MTU */ + if (nic->mac < mac_82558_D101_A4) + netdev->features |= NETIF_F_VLAN_CHALLENGED; + + /* locks must be initialized before calling hw_reset */ + spin_lock_init(&nic->cb_lock); + spin_lock_init(&nic->cmd_lock); + spin_lock_init(&nic->mdio_lock); + + /* Reset the device before pci_set_master() in case device is in some + * funky state and has an interrupt pending - hint: we don't have the + * interrupt handler registered yet. */ + e100_hw_reset(nic); + + pci_set_master(pdev); + + timer_setup(&nic->watchdog, e100_watchdog, 0); + + INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); + + if ((err = e100_alloc(nic))) { + netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); + goto err_out_iounmap; + } + + if ((err = e100_eeprom_load(nic))) + goto err_out_free; + + e100_phy_init(nic); + + memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!eeprom_bad_csum_allow) { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); + err = -EAGAIN; + goto err_out_free; + } else { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); + } + } + + /* Wol magic packet can be enabled from eeprom */ + if ((nic->mac >= mac_82558_D101_A4) && + (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { + nic->flags |= wol_magic; + device_set_wakeup_enable(&pdev->dev, true); + } + + /* ack any pending wake events, disable PME */ + pci_pme_active(pdev, false); + + // offer device to EtherCAT master module + nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE); + + if (!nic->ecdev) { + strcpy(netdev->name, "eth%d"); + if ((err = register_netdev(netdev))) { + netif_err(nic, probe, nic->netdev, + "Cannot register net device, aborting\n"); + goto err_out_free; + } + } + + nic->cbs_pool = dma_pool_create(netdev->name, + &nic->pdev->dev, + nic->params.cbs.max * sizeof(struct cb), + sizeof(u32), + 0); + if (!nic->cbs_pool) { + netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); + err = -ENOMEM; + goto err_out_pool; + } + netif_info(nic, probe, nic->netdev, + "addr 0x%llx, irq %d, MAC addr %pM\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), + pdev->irq, netdev->dev_addr); + + if (nic->ecdev) { + err = ecdev_open(nic->ecdev); + if (err) { + ecdev_withdraw(nic->ecdev); + goto err_out_free; + } + } + + return 0; + +err_out_pool: + unregister_netdev(netdev); +err_out_free: + e100_free(nic); +err_out_iounmap: + pci_iounmap(pdev, nic->csr); +err_out_free_res: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out_free_dev: + free_netdev(netdev); + return err; +} + +static void e100_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netdev) { + struct nic *nic = netdev_priv(netdev); + if (nic->ecdev) { + ecdev_close(nic->ecdev); + ecdev_withdraw(nic->ecdev); + } else { + unregister_netdev(netdev); + } + + e100_free(nic); + pci_iounmap(pdev, nic->csr); + dma_pool_destroy(nic->cbs_pool); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ +#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ +#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ +static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + e100_down(nic); + + if ((nic->flags & wol_magic) | e100_asf(nic)) { + /* enable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, smartspeed | + E100_82552_REV_ANEG | E100_82552_ANEG_NOW); + } + *enable_wake = true; + } else { + *enable_wake = false; + } + + pci_disable_device(pdev); +} + +static int __e100_power_off(struct pci_dev *pdev, bool wake) +{ + if (wake) + return pci_prepare_to_sleep(pdev); + + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int __maybe_unused e100_suspend(struct device *dev_d) +{ + bool wake; + + __e100_shutdown(to_pci_dev(dev_d), &wake); + + return 0; +} + +static int __maybe_unused e100_resume(struct device *dev_d) +{ + struct net_device *netdev = dev_get_drvdata(dev_d); + struct nic *nic = netdev_priv(netdev); + int err; + + err = pci_enable_device(to_pci_dev(dev_d)); + if (err) { + netdev_err(netdev, "Resume cannot enable PCI device, aborting\n"); + return err; + } + pci_set_master(to_pci_dev(dev_d)); + + /* disable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, + smartspeed & ~(E100_82552_REV_ANEG)); + } + + if (netif_running(netdev)) + e100_up(nic); + + netif_device_attach(netdev); + + return 0; +} + +static void e100_shutdown(struct pci_dev *pdev) +{ + bool wake; + __e100_shutdown(pdev, &wake); + if (system_state == SYSTEM_POWER_OFF) + __e100_power_off(pdev, wake); +} + +/* ------------------ PCI Error Recovery infrastructure -------------- */ +/** + * e100_io_error_detected - called when PCI error is detected. 
+ * @pdev: Pointer to PCI device + * @state: The current pci connection state + */ +static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (nic->ecdev) + return -EBUSY; + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e100_down(nic); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e100_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch. + */ +static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (nic->ecdev) + return -EBUSY; + + if (pci_enable_device(pdev)) { + pr_err("Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + /* Only one device per card can do a reset */ + if (0 != PCI_FUNC(pdev->devfn)) + return PCI_ERS_RESULT_RECOVERED; + e100_hw_reset(nic); + e100_phy_init(nic); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e100_io_resume - resume normal operations + * @pdev: Pointer to PCI device + * + * Resume normal operations after an error recovery + * sequence has been completed. + */ +static void e100_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, PCI_D0, 0); + + if (!nic->ecdev) + netif_device_attach(netdev); + if (nic->ecdev || netif_running(netdev)) { + e100_open(netdev); + if (!nic->ecdev) + mod_timer(&nic->watchdog, jiffies); + } +} + +static const struct pci_error_handlers e100_err_handler = { + .error_detected = e100_io_error_detected, + .slot_reset = e100_io_slot_reset, + .resume = e100_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume); + +static struct pci_driver e100_driver = { + .name = DRV_NAME, + .id_table = e100_id_table, + .probe = e100_probe, + .remove = e100_remove, + + /* Power Management hooks */ + .driver.pm = &e100_pm_ops, + + .shutdown = e100_shutdown, + .err_handler = &e100_err_handler, +}; + +static int __init e100_init_module(void) +{ + if (((1 << debug) - 1) & NETIF_MSG_DRV) { + pr_info("%s\n", DRV_DESCRIPTION); + pr_info("%s\n", DRV_COPYRIGHT); + } + return pci_register_driver(&e100_driver); +} + +static void __exit e100_cleanup_module(void) +{ + printk(KERN_INFO DRV_NAME " cleaning up module...\n"); + pci_unregister_driver(&e100_driver); + printk(KERN_INFO DRV_NAME " module cleaned up.\n"); +} + +module_init(e100_init_module); +module_exit(e100_cleanup_module); diff --git a/devices/e100-5.15-orig.c b/devices/e100-5.15-orig.c new file mode 100644 index 00000000..36d52246 --- /dev/null +++ b/devices/e100-5.15-orig.c @@ -0,0 +1,3196 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* + * e100.c: Intel(R) PRO/100 ethernet driver + * + * (Re)written 2003 by scott.feldman@intel.com. Based loosely on + * original e100 driver, but better described as a munging of + * e100, e1000, eepro100, tg3, 8139cp, and other drivers. 
+ * + * References: + * Intel 8255x 10/100 Mbps Ethernet Controller Family, + * Open Source Software Developers Manual, + * http://sourceforge.net/projects/e1000 + * + * + * Theory of Operation + * + * I. General + * + * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet + * controller family, which includes the 82557, 82558, 82559, 82550, + * 82551, and 82562 devices. 82558 and greater controllers + * integrate the Intel 82555 PHY. The controllers are used in + * server and client network interface cards, as well as in + * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx + * configurations. 8255x supports a 32-bit linear addressing + * mode and operates at 33Mhz PCI clock rate. + * + * II. Driver Operation + * + * Memory-mapped mode is used exclusively to access the device's + * shared-memory structure, the Control/Status Registers (CSR). All + * setup, configuration, and control of the device, including queuing + * of Tx, Rx, and configuration commands is through the CSR. + * cmd_lock serializes accesses to the CSR command register. cb_lock + * protects the shared Command Block List (CBL). + * + * 8255x is highly MII-compliant and all access to the PHY go + * through the Management Data Interface (MDI). Consequently, the + * driver leverages the mii.c library shared with other MII-compliant + * devices. + * + * Big- and Little-Endian byte order as well as 32- and 64-bit + * archs are supported. Weak-ordered memory and non-cache-coherent + * archs are supported. + * + * III. Transmit + * + * A Tx skb is mapped and hangs off of a TCB. TCBs are linked + * together in a fixed-size ring (CBL) thus forming the flexible mode + * memory structure. A TCB marked with the suspend-bit indicates + * the end of the ring. The last TCB processed suspends the + * controller, and the controller can be restarted by issue a CU + * resume command to continue from the suspend point, or a CU start + * command to start at a given position in the ring. + * + * Non-Tx commands (config, multicast setup, etc) are linked + * into the CBL ring along with Tx commands. The common structure + * used for both Tx and non-Tx commands is the Command Block (CB). + * + * cb_to_use is the next CB to use for queuing a command; cb_to_clean + * is the next CB to check for completion; cb_to_send is the first + * CB to start on in case of a previous failure to resume. CB clean + * up happens in interrupt context in response to a CU interrupt. + * cbs_avail keeps track of number of free CB resources available. + * + * Hardware padding of short packets to minimum packet size is + * enabled. 82557 pads with 7Eh, while the later controllers pad + * with 00h. + * + * IV. Receive + * + * The Receive Frame Area (RFA) comprises a ring of Receive Frame + * Descriptors (RFD) + data buffer, thus forming the simplified mode + * memory structure. Rx skbs are allocated to contain both the RFD + * and the data buffer, but the RFD is pulled off before the skb is + * indicated. The data buffer is aligned such that encapsulated + * protocol headers are u32-aligned. Since the RFD is part of the + * mapped shared memory, and completion status is contained within + * the RFD, the RFD must be dma_sync'ed to maintain a consistent + * view from software and hardware. + * + * In order to keep updates to the RFD link field from colliding with + * hardware writes to mark packets complete, we use the feature that + * hardware will not write to a size 0 descriptor and mark the previous + * packet as end-of-list (EL). 
After updating the link, we remove EL + * and only then restore the size such that hardware may use the + * previous-to-end RFD. + * + * Under typical operation, the receive unit (RU) is start once, + * and the controller happily fills RFDs as frames arrive. If + * replacement RFDs cannot be allocated, or the RU goes non-active, + * the RU must be restarted. Frame arrival generates an interrupt, + * and Rx indication and re-allocation happen in the same context, + * therefore no locking is required. A software-generated interrupt + * is generated from the watchdog to recover from a failed allocation + * scenario where all Rx resources have been indicated and none re- + * placed. + * + * V. Miscellaneous + * + * VLAN offloading of tagging, stripping and filtering is not + * supported, but driver will accommodate the extra 4-byte VLAN tag + * for processing by upper layers. Tx/Rx Checksum offloading is not + * supported. Tx Scatter/Gather is not supported. Jumbo Frames is + * not supported (hardware limitation). + * + * MagicPacket(tm) WoL support is enabled/disabled via ethtool. + * + * Thanks to JC (jchapman@katalix.com) for helping with + * testing/troubleshooting the development driver. + * + * TODO: + * o several entry points race with dev->close + * o check for tx-no-resources/stop Q races with tx clean/wake Q + * + * FIXES: + * 2005/12/02 - Michael O'Donnell + * - Stratus87247: protect MDI control register manipulations + * 2009/06/01 - Andreas Mohr + * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define DRV_NAME "e100" +#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" +#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" + +#define E100_WATCHDOG_PERIOD (2 * HZ) +#define E100_NAPI_WEIGHT 16 + +#define FIRMWARE_D101M "e100/d101m_ucode.bin" +#define FIRMWARE_D101S "e100/d101s_ucode.bin" +#define FIRMWARE_D102E "e100/d102e_ucode.bin" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE(FIRMWARE_D101M); +MODULE_FIRMWARE(FIRMWARE_D101S); +MODULE_FIRMWARE(FIRMWARE_D102E); + +static int debug = 3; +static int eeprom_bad_csum_allow = 0; +static int use_io = 0; +module_param(debug, int, 0); +module_param(eeprom_bad_csum_allow, int, 0); +module_param(use_io, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); +MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); + +#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ + PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ + PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } +static const struct pci_device_id e100_id_table[] = { + INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1032, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1033, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1034, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1038, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1039, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103A, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103B, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103C, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103D, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103E, 
4), + INTEL_8255X_ETHERNET_DEVICE(0x1050, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1051, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1052, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1053, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1054, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1055, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1056, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1057, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1059, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1064, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1065, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1066, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1067, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1068, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), + INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), + INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), + INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), + INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), + INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), + { 0, } +}; +MODULE_DEVICE_TABLE(pci, e100_id_table); + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 0xFF, +}; + +enum phy { + phy_100a = 0x000003E0, + phy_100c = 0x035002A8, + phy_82555_tx = 0x015002A8, + phy_nsc_tx = 0x5C002000, + phy_82562_et = 0x033002A8, + phy_82562_em = 0x032002A8, + phy_82562_ek = 0x031002A8, + phy_82562_eh = 0x017002A8, + phy_82552_v = 0xd061004d, + phy_unknown = 0xFFFFFFFF, +}; + +/* CSR (Control/Status Registers) */ +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 0x08, + rus_ready = 0x10, + rus_mask = 0x3C, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0x00, + stat_ack_sw_gen = 0x04, + stat_ack_rnr = 0x10, + stat_ack_cu_idle = 0x20, + stat_ack_frame_rx = 0x40, + stat_ack_cu_cmd_done = 0x80, + stat_ack_not_present = 0xFF, + stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), + stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), +}; + +enum scb_cmd_hi { + irq_mask_none = 0x00, + irq_mask_all = 0x01, + irq_sw_gen = 0x02, +}; + +enum scb_cmd_lo { + cuc_nop = 0x00, + ruc_start = 0x01, + ruc_load_base = 0x06, + cuc_start = 0x10, + cuc_resume = 0x20, + cuc_dump_addr = 0x40, + cuc_dump_stats = 0x50, + cuc_load_base = 0x60, + cuc_dump_reset = 0x70, +}; + +enum cuc_dump { + cuc_dump_complete = 0x0000A005, + cuc_dump_reset_complete = 0x0000A007, +}; + +enum port { + software_reset = 0x0000, + selftest = 0x0001, + selective_reset = 0x0002, +}; + +enum eeprom_ctrl_lo { + eesk = 0x01, + eecs = 0x02, + eedi = 0x04, + eedo = 0x08, +}; + +enum mdi_ctrl { + mdi_write = 0x04000000, + mdi_read = 0x08000000, + mdi_ready = 0x10000000, +}; + +enum eeprom_op { + op_write = 0x05, + op_read = 0x06, + op_ewds = 0x10, + op_ewen = 0x13, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 0x03, + eeprom_phy_iface = 0x06, + eeprom_id 
= 0x0A, + eeprom_config_asf = 0x0D, + eeprom_smbus_addr = 0x90, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 0x0080, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB, + I82553C, + I82503, + DP83840, + S80C240, + S80C24, + I82555, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 0x0020, +}; + +enum eeprom_config_asf { + eeprom_asf = 0x8000, + eeprom_gcl = 0x4000, +}; + +enum cb_status { + cb_complete = 0x8000, + cb_ok = 0x2000, +}; + +/* + * cb_command - Command Block flags + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory + */ +enum cb_command { + cb_nop = 0x0000, + cb_iaaddr = 0x0001, + cb_config = 0x0002, + cb_multi = 0x0003, + cb_tx = 0x0004, + cb_ucode = 0x0005, + cb_dump = 0x0006, + cb_tx_sf = 0x0008, + cb_tx_nc = 0x0010, + cb_cid = 0x1f00, + cb_i = 0x2000, + cb_s = 0x4000, + cb_el = 0x8000, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next, *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +#if defined(__BIG_ENDIAN_BITFIELD) +#define X(a,b) b,a +#else +#define X(a,b) a,b +#endif +struct config { +/*0*/ u8 X(byte_count:6, pad0:2); +/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); +/*2*/ u8 adaptive_ifs; +/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), + term_write_cache_line:1), pad3:4); +/*4*/ u8 X(rx_dma_max_count:7, pad4:1); +/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); +/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), + tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), + rx_save_overruns : 1), rx_save_bad_frames : 1); +/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), + pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), + tx_dynamic_tbd:1); +/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); +/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), + link_status_wake:1), arp_wake:1), mcmatch_wake:1); +/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), + loopback:2); +/*11*/ u8 X(linear_priority:3, pad11:5); +/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); +/*13*/ u8 ip_addr_lo; +/*14*/ u8 ip_addr_hi; +/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), + wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), + pad15_2:1), crs_or_cdt:1); +/*16*/ u8 fc_delay_lo; +/*17*/ u8 fc_delay_hi; +/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), + rx_long_ok:1), fc_priority_threshold:3), pad18:1); +/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), + fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), + full_duplex_force:1), full_duplex_pin:1); +/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1); +/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); +/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); + u8 pad_d102[9]; +}; + +#define E100_MAX_MULTICAST_ADDRS 64 +struct multi { + __le16 count; + u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; +}; + +/* Important: keep total struct u32-aligned */ +#define UCODE_SIZE 134 +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[ETH_ALEN]; + __le32 ucode[UCODE_SIZE]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next, 
*prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, lb_mac = 1, lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames, tx_max_collisions, tx_late_collisions, + tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, + tx_multiple_collisions, tx_total_collisions; + __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors, + rx_resource_errors, rx_overrun_errors, rx_cdt_errors, + rx_short_frame_errors; + __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; + __le16 xmt_tco_frames, rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + /* Begin: frequently used values: keep adjacent for cache effect */ + u32 msg_enable ____cacheline_aligned; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data); + + struct rx *rxs ____cacheline_aligned; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + + spinlock_t cb_lock ____cacheline_aligned; + spinlock_t cmd_lock; + struct csr __iomem *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + /* End: frequently used values: keep adjacent for cache effect */ + + enum { + ich = (1 << 0), + promiscuous = (1 << 1), + multicast_all = (1 << 2), + wol_magic = (1 << 3), + ich_10h_workaround = (1 << 4), + } flags ____cacheline_aligned; + + enum mac mac; + enum phy phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + + struct mem *mem; + dma_addr_t dma_addr; + + struct dma_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + + u16 eeprom_wc; + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; +}; + +static inline void e100_write_flush(struct nic *nic) +{ + /* Flush previous PCI writes through intermediate bridges + * by doing a benign read */ + (void)ioread8(&nic->csr->scb.status); +} + +static void e100_enable_irq(struct nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_disable_irq(struct nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_hw_reset(struct nic *nic) +{ + /* Put CU and RU into idle with a selective reset to get + * device off of PCI bus */ + iowrite32(selective_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Now fully reset device */ + iowrite32(software_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Mask off our interrupt line - it's unmasked after reset */ + 
e100_disable_irq(nic); +} + +static int e100_self_test(struct nic *nic) +{ + u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); + + /* Passing the self-test is a pretty good indication + * that the device can DMA to/from host memory */ + + nic->mem->selftest.signature = 0; + nic->mem->selftest.result = 0xFFFFFFFF; + + iowrite32(selftest | dma_addr, &nic->csr->port); + e100_write_flush(nic); + /* Wait 10 msec for self-test to complete */ + msleep(10); + + /* Interrupts are enabled after self-test */ + e100_disable_irq(nic); + + /* Check results of self-test */ + if (nic->mem->selftest.result != 0) { + netif_err(nic, hw, nic->netdev, + "Self-test failed: result=0x%08X\n", + nic->mem->selftest.result); + return -ETIMEDOUT; + } + if (nic->mem->selftest.signature == 0) { + netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data) +{ + u32 cmd_addr_data[3]; + u8 ctrl; + int i, j; + + /* Three cmds: write/erase enable, write data, write/erase disable */ + cmd_addr_data[0] = op_ewen << (addr_len - 2); + cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | + le16_to_cpu(data); + cmd_addr_data[2] = op_ewds << (addr_len - 2); + + /* Bit-bang cmds to write word to eeprom */ + for (j = 0; j < 3; j++) { + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data[j] & (1 << i)) ? + eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } + /* Wait 10 msec for cmd to complete */ + msleep(10); + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } +}; + +/* General technique stolen from the eepro100 driver - very clever */ +static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) +{ + u32 cmd_addr_data; + u16 data = 0; + u8 ctrl; + int i; + + cmd_addr_data = ((op_read << *addr_len) | addr) << 16; + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Bit-bang to read word from eeprom */ + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Eeprom drives a dummy zero to EEDO after receiving + * complete address. Use this to adjust addr_len. */ + ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); + if (!(ctrl & eedo) && i > 16) { + *addr_len -= (i - 16); + i = 17; + } + + data = (data << 1) | (ctrl & eedo ? 
1 : 0); + } + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + return cpu_to_le16(data); +}; + +/* Load entire EEPROM image into driver cache and validate checksum */ +static int e100_eeprom_load(struct nic *nic) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + for (addr = 0; addr < nic->eeprom_wc; addr++) { + nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); + if (addr < nic->eeprom_wc - 1) + checksum += le16_to_cpu(nic->eeprom[addr]); + } + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { + netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); + if (!eeprom_bad_csum_allow) + return -EAGAIN; + } + + return 0; +} + +/* Save (portion of) driver EEPROM cache to device and update checksum */ +static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + if (start + count >= nic->eeprom_wc) + return -EINVAL; + + for (addr = start; addr < start + count; addr++) + e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + for (addr = 0; addr < nic->eeprom_wc - 1; addr++) + checksum += le16_to_cpu(nic->eeprom[addr]); + nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); + e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, + nic->eeprom[nic->eeprom_wc - 1]); + + return 0; +} + +#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ +#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ +static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) +{ + unsigned long flags; + unsigned int i; + int err = 0; + + spin_lock_irqsave(&nic->cmd_lock, flags); + + /* Previous command is accepted when SCB clears */ + for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { + if (likely(!ioread8(&nic->csr->scb.cmd_lo))) + break; + cpu_relax(); + if (unlikely(i > E100_WAIT_SCB_FAST)) + udelay(5); + } + if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) { + err = -EAGAIN; + goto err_unlock; + } + + if (unlikely(cmd != cuc_resume)) + iowrite32(dma_addr, &nic->csr->scb.gen_ptr); + iowrite8(cmd, &nic->csr->scb.cmd_lo); + +err_unlock: + spin_unlock_irqrestore(&nic->cmd_lock, flags); + + return err; +} + +static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, + int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) +{ + struct cb *cb; + unsigned long flags; + int err; + + spin_lock_irqsave(&nic->cb_lock, flags); + + if (unlikely(!nic->cbs_avail)) { + err = -ENOMEM; + goto err_unlock; + } + + cb = nic->cb_to_use; + nic->cb_to_use = cb->next; + nic->cbs_avail--; + cb->skb = skb; + + err = cb_prepare(nic, cb, skb); + if (err) + goto err_unlock; + + if (unlikely(!nic->cbs_avail)) + err = -ENOSPC; + + + /* Order is important otherwise we'll be in a race with h/w: + * set S-bit in current first, then clear S-bit in previous. 
*/ + cb->command |= cpu_to_le16(cb_s); + dma_wmb(); + cb->prev->command &= cpu_to_le16(~cb_s); + + while (nic->cb_to_send != nic->cb_to_use) { + if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, + nic->cb_to_send->dma_addr))) { + /* Ok, here's where things get sticky. It's + * possible that we can't schedule the command + * because the controller is too busy, so + * let's just queue the command and try again + * when another command is scheduled. */ + if (err == -ENOSPC) { + //request a reset + schedule_work(&nic->tx_timeout_task); + } + break; + } else { + nic->cuc_cmd = cuc_resume; + nic->cb_to_send = nic->cb_to_send->next; + } + } + +err_unlock: + spin_unlock_irqrestore(&nic->cb_lock, flags); + + return err; +} + +static int mdio_read(struct net_device *netdev, int addr, int reg) +{ + struct nic *nic = netdev_priv(netdev); + return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); +} + +static void mdio_write(struct net_device *netdev, int addr, int reg, int data) +{ + struct nic *nic = netdev_priv(netdev); + + nic->mdio_ctrl(nic, addr, mdi_write, reg, data); +} + +/* the standard mdio_ctrl() function for usual MII-compliant hardware */ +static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) +{ + u32 data_out = 0; + unsigned int i; + unsigned long flags; + + + /* + * Stratus87247: we shouldn't be writing the MDI control + * register until the Ready bit shows True. Also, since + * manipulation of the MDI control registers is a multi-step + * procedure it should be done under lock. + */ + spin_lock_irqsave(&nic->mdio_lock, flags); + for (i = 100; i; --i) { + if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) + break; + udelay(20); + } + if (unlikely(!i)) { + netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); + spin_unlock_irqrestore(&nic->mdio_lock, flags); + return 0; /* No way to indicate timeout error */ + } + iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); + + for (i = 0; i < 100; i++) { + udelay(20); + if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) + break; + } + spin_unlock_irqrestore(&nic->mdio_lock, flags); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data, data_out); + return (u16)data_out; +} + +/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */ +static u16 mdio_ctrl_phy_82552_v(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + if ((reg == MII_BMCR) && (dir == mdi_write)) { + if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) { + u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, + MII_ADVERTISE); + + /* + * Workaround Si issue where sometimes the part will not + * autoneg to 100Mbps even when advertised. + */ + if (advert & ADVERTISE_100FULL) + data |= BMCR_SPEED100 | BMCR_FULLDPLX; + else if (advert & ADVERTISE_100HALF) + data |= BMCR_SPEED100; + } + } + return mdio_ctrl_hw(nic, addr, dir, reg, data); +} + +/* Fully software-emulated mdio_ctrl() function for cards without + * MII-compliant PHYs. + * For now, this is mainly geared towards 80c24 support; in case of further + * requirements for other types (i82503, ...?) either extend this mechanism + * or split it, whichever is cleaner. 
+ */ +static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + /* might need to allocate a netdev_priv'ed register array eventually + * to be able to record state changes, but for now + * some fully hardcoded register handling ought to be ok I guess. */ + + if (dir == mdi_read) { + switch (reg) { + case MII_BMCR: + /* Auto-negotiation, right? */ + return BMCR_ANENABLE | + BMCR_FULLDPLX; + case MII_BMSR: + return BMSR_LSTATUS /* for mii_link_ok() */ | + BMSR_ANEGCAPABLE | + BMSR_10FULL; + case MII_ADVERTISE: + /* 80c24 is a "combo card" PHY, right? */ + return ADVERTISE_10HALF | + ADVERTISE_10FULL; + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } else { + switch (reg) { + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } +} +static inline int e100_phy_supports_mii(struct nic *nic) +{ + /* for now, just check it by comparing whether we + are using MII software emulation. + */ + return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); +} + +static void e100_get_defaults(struct nic *nic) +{ + struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; + struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; + + /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ + nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision; + if (nic->mac == mac_unknown) + nic->mac = mac_82557_D100_A; + + nic->params.rfds = rfds; + nic->params.cbs = cbs; + + /* Quadwords to DMA into FIFO before starting frame transmit */ + nic->tx_threshold = 0xE0; + + /* no interrupt for every tx completion, delay = 256us if not 557 */ + nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | + ((nic->mac >= mac_82558_D101_A4) ? 
cb_cid : cb_i)); + + /* Template for a freshly allocated RFD */ + nic->blank_rfd.command = 0; + nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); + nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); + + /* MII setup */ + nic->mii.phy_id_mask = 0x1F; + nic->mii.reg_num_mask = 0x1F; + nic->mii.dev = nic->netdev; + nic->mii.mdio_read = mdio_read; + nic->mii.mdio_write = mdio_write; +} + +static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct config *config = &cb->u.config; + u8 *c = (u8 *)config; + struct net_device *netdev = nic->netdev; + + cb->command = cpu_to_le16(cb_config); + + memset(config, 0, sizeof(struct config)); + + config->byte_count = 0x16; /* bytes in this struct */ + config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ + config->direct_rx_dma = 0x1; /* reserved */ + config->standard_tcb = 0x1; /* 1=standard, 0=extended */ + config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ + config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ + config->tx_underrun_retry = 0x3; /* # of underrun retries */ + if (e100_phy_supports_mii(nic)) + config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ + config->pad10 = 0x6; + config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ + config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ + config->ifs = 0x6; /* x16 = inter frame spacing */ + config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ + config->pad15_1 = 0x1; + config->pad15_2 = 0x1; + config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ + config->fc_delay_hi = 0x40; /* time delay for fc frame */ + config->tx_padding = 0x1; /* 1=pad short frames */ + config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ + config->pad18 = 0x1; + config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ + config->pad20_1 = 0x1F; + config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ + config->pad21_1 = 0x5; + + config->adaptive_ifs = nic->adaptive_ifs; + config->loopback = nic->loopback; + + if (nic->mii.force_media && nic->mii.full_duplex) + config->full_duplex_force = 0x1; /* 1=force, 0=auto */ + + if (nic->flags & promiscuous || nic->loopback) { + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + config->promiscuous_mode = 0x1; /* 1=on, 0=off */ + } + + if (unlikely(netdev->features & NETIF_F_RXFCS)) + config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ + + if (nic->flags & multicast_all) + config->multicast_all = 0x1; /* 1=accept, 0=no */ + + /* disable WoL when up */ + if (netif_running(nic->netdev) || !(nic->flags & wol_magic)) + config->magic_packet_disable = 0x1; /* 1=off, 0=on */ + + if (nic->mac >= mac_82558_D101_A4) { + config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ + config->mwi_enable = 0x1; /* 1=enable, 0=disable */ + config->standard_tcb = 0x0; /* 1=standard, 0=extended */ + config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ + if (nic->mac >= mac_82559_D101M) { + config->tno_intr = 0x1; /* TCO stats enable */ + /* Enable TCO in extended config */ + if (nic->mac >= mac_82551_10) { + config->byte_count = 0x20; /* extended bytes */ + config->rx_d102_mode = 0x1; /* GMRC for TCO */ + } + } else { + config->standard_stat_counter = 0x0; + } + } + + if (netdev->features & NETIF_F_RXALL) { + config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + } + + netif_printk(nic, 
hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", + c + 0); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", + c + 8); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", + c + 16); + return 0; +} + +/************************************************************************* +* CPUSaver parameters +* +* All CPUSaver parameters are 16-bit literals that are part of a +* "move immediate value" instruction. By changing the value of +* the literal in the instruction before the code is loaded, the +* driver can change the algorithm. +* +* INTDELAY - This loads the dead-man timer with its initial value. +* When this timer expires the interrupt is asserted, and the +* timer is reset each time a new packet is received. (see +* BUNDLEMAX below to set the limit on number of chained packets) +* The current default is 0x600 or 1536. Experiments show that +* the value should probably stay within the 0x200 - 0x1000. +* +* BUNDLEMAX - +* This sets the maximum number of frames that will be bundled. In +* some situations, such as the TCP windowing algorithm, it may be +* better to limit the growth of the bundle size than let it go as +* high as it can, because that could cause too much added latency. +* The default is six, because this is the number of packets in the +* default TCP window size. A value of 1 would make CPUSaver indicate +* an interrupt for every frame received. If you do not want to put +* a limit on the bundle size, set this value to xFFFF. +* +* BUNDLESMALL - +* This contains a bit-mask describing the minimum size frame that +* will be bundled. The default masks the lower 7 bits, which means +* that any frame less than 128 bytes in length will not be bundled, +* but will instead immediately generate an interrupt. This does +* not affect the current bundle in any way. Any frame that is 128 +* bytes or large will be bundled normally. This feature is meant +* to provide immediate indication of ACK frames in a TCP environment. +* Customers were seeing poor performance when a machine with CPUSaver +* enabled was sending but not receiving. The delay introduced when +* the ACKs were received was enough to reduce total throughput, because +* the sender would sit idle until the ACK was finally seen. +* +* The current default is 0xFF80, which masks out the lower 7 bits. +* This means that any frame which is x7F (127) bytes or smaller +* will cause an immediate interrupt. Because this value must be a +* bit mask, there are only a few valid values that can be used. To +* turn this feature off, the driver can write the value xFFFF to the +* lower word of this instruction (in the same way that the other +* parameters are used). Likewise, a value of 0xF800 (2047) would +* cause an interrupt to be generated for every frame, because all +* standard Ethernet frames are <= 2047 bytes in length. 
+*************************************************************************/ + +/* if you wish to disable the ucode functionality, while maintaining the + * workarounds it provides, set the following defines to: + * BUNDLESMALL 0 + * BUNDLEMAX 1 + * INTDELAY 1 + */ +#define BUNDLESMALL 1 +#define BUNDLEMAX (u16)6 +#define INTDELAY (u16)1536 /* 0x600 */ + +/* Initialize firmware */ +static const struct firmware *e100_request_firmware(struct nic *nic) +{ + const char *fw_name; + const struct firmware *fw = nic->fw; + u8 timer, bundle, min_size; + int err = 0; + bool required = false; + + /* do not load u-code for ICH devices */ + if (nic->flags & ich) + return NULL; + + /* Search for ucode match against h/w revision + * + * Based on comments in the source code for the FreeBSD fxp + * driver, the FIRMWARE_D102E ucode includes both CPUSaver and + * + * "fixes for bugs in the B-step hardware (specifically, bugs + * with Inline Receive)." + * + * So we must fail if it cannot be loaded. + * + * The other microcode files are only required for the optional + * CPUSaver feature. Nice to have, but no reason to fail. + */ + if (nic->mac == mac_82559_D101M) { + fw_name = FIRMWARE_D101M; + } else if (nic->mac == mac_82559_D101S) { + fw_name = FIRMWARE_D101S; + } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { + fw_name = FIRMWARE_D102E; + required = true; + } else { /* No ucode on other devices */ + return NULL; + } + + /* If the firmware has not previously been loaded, request a pointer + * to it. If it was previously loaded, we are reinitializing the + * adapter, possibly in a resume from hibernate, in which case + * request_firmware() cannot be used. + */ + if (!fw) + err = request_firmware(&fw, fw_name, &nic->pdev->dev); + + if (err) { + if (required) { + netif_err(nic, probe, nic->netdev, + "Failed to load firmware \"%s\": %d\n", + fw_name, err); + return ERR_PTR(err); + } else { + netif_info(nic, probe, nic->netdev, + "CPUSaver disabled. Needs \"%s\": %d\n", + fw_name, err); + return NULL; + } + } + + /* Firmware should be precisely UCODE_SIZE (words) plus three bytes + indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ + if (fw->size != UCODE_SIZE * 4 + 3) { + netif_err(nic, probe, nic->netdev, + "Firmware \"%s\" has wrong size %zu\n", + fw_name, fw->size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || + min_size >= UCODE_SIZE) { + netif_err(nic, probe, nic->netdev, + "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", + fw_name, timer, bundle, min_size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* OK, firmware is validated and ready to use. Save a pointer + * to it in the nic */ + nic->fw = fw; + return fw; +} + +static int e100_setup_ucode(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + const struct firmware *fw = (void *)skb; + u8 timer, bundle, min_size; + + /* It's not a real skb; we just abused the fact that e100_exec_cb + will pass it through to here... 
*/ + cb->skb = NULL; + + /* firmware is stored as little endian already */ + memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + /* Insert user-tunable settings in cb->u.ucode */ + cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); + cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); + cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); + + cb->command = cpu_to_le16(cb_ucode | cb_el); + return 0; +} + +static inline int e100_load_ucode_wait(struct nic *nic) +{ + const struct firmware *fw; + int err = 0, counter = 50; + struct cb *cb = nic->cb_to_clean; + + fw = e100_request_firmware(nic); + /* If it's NULL, then no ucode is required */ + if (IS_ERR_OR_NULL(fw)) + return PTR_ERR_OR_ZERO(fw); + + if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) + netif_err(nic, probe, nic->netdev, + "ucode cmd failed with error %d\n", err); + + /* must restart cuc */ + nic->cuc_cmd = cuc_start; + + /* wait for completion */ + e100_write_flush(nic); + udelay(10); + + /* wait for possibly (ouch) 500ms */ + while (!(cb->status & cpu_to_le16(cb_complete))) { + msleep(10); + if (!--counter) break; + } + + /* ack any interrupts, something could have been set */ + iowrite8(~0, &nic->csr->scb.stat_ack); + + /* if the command failed, or is not OK, notify and return */ + if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { + netif_err(nic, probe, nic->netdev, "ucode load failed\n"); + err = -EPERM; + } + + return err; +} + +static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_iaaddr); + memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); + return 0; +} + +static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_dump); + cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + + offsetof(struct mem, dump_buf)); + return 0; +} + +static int e100_phy_check_without_mii(struct nic *nic) +{ + u8 phy_type; + int without_mii; + + phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; + + switch (phy_type) { + case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ + case I82503: /* Non-MII PHY; UNTESTED! */ + case S80C24: /* Non-MII PHY; tested and working */ + /* paragraph from the FreeBSD driver, "FXP_PHY_80C24": + * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter + * doesn't have a programming interface of any sort. The + * media is sensed automatically based on how the link partner + * is configured. This is, in essence, manual configuration. + */ + netif_info(nic, probe, nic->netdev, + "found MII-less i82503 or 80c24 or other PHY\n"); + + nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; + nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ + + /* these might be needed for certain MII-less cards... 
+ * nic->flags |= ich; + * nic->flags |= ich_10h_workaround; */ + + without_mii = 1; + break; + default: + without_mii = 0; + break; + } + return without_mii; +} + +#define NCONFIG_AUTO_SWITCH 0x0080 +#define MII_NSC_CONG MII_RESV1 +#define NSC_CONG_ENABLE 0x0100 +#define NSC_CONG_TXREADY 0x0400 +#define ADVERTISE_FC_SUPPORTED 0x0400 +static int e100_phy_init(struct nic *nic) +{ + struct net_device *netdev = nic->netdev; + u32 addr; + u16 bmcr, stat, id_lo, id_hi, cong; + + /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ + for (addr = 0; addr < 32; addr++) { + nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) + break; + } + if (addr == 32) { + /* uhoh, no PHY detected: check whether we seem to be some + * weird, rare variant which is *known* to not have any MII. + * But do this AFTER MII checking only, since this does + * lookup of EEPROM values which may easily be unreliable. */ + if (e100_phy_check_without_mii(nic)) + return 0; /* simply return and hope for the best */ + else { + /* for unknown cases log a fatal error */ + netif_err(nic, hw, nic->netdev, + "Failed to locate any known PHY, aborting\n"); + return -EAGAIN; + } + } else + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy_addr = %d\n", nic->mii.phy_id); + + /* Get phy ID */ + id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); + id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); + nic->phy = (u32)id_hi << 16 | (u32)id_lo; + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy ID = 0x%08X\n", nic->phy); + + /* Select the phy and isolate the rest */ + for (addr = 0; addr < 32; addr++) { + if (addr != nic->mii.phy_id) { + mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); + } else if (nic->phy != phy_82552_v) { + bmcr = mdio_read(netdev, addr, MII_BMCR); + mdio_write(netdev, addr, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } + } + /* + * Workaround for 82552: + * Clear the ISOLATE bit on selected phy_id last (mirrored on all + * other phy_id's) using bmcr value from addr discovery loop above. + */ + if (nic->phy == phy_82552_v) + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* Handle National tx phys */ +#define NCS_PHY_MODEL_MASK 0xFFF0FFFF + if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { + /* Disable congestion control */ + cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); + cong |= NSC_CONG_TXREADY; + cong &= ~NSC_CONG_ENABLE; + mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); + } + + if (nic->phy == phy_82552_v) { + u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); + + /* assign special tweaked mdio_ctrl() function */ + nic->mdio_ctrl = mdio_ctrl_phy_82552_v; + + /* Workaround Si not advertising flow-control during autoneg */ + advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); + + /* Reset for the above changes to take effect */ + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + bmcr |= BMCR_RESET; + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); + } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && + (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && + (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { + /* enable/disable MDI/MDI-X auto-switching. 
*/ + mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, + nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); + } + + return 0; +} + +static int e100_hw_init(struct nic *nic) +{ + int err = 0; + + e100_hw_reset(nic); + + netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); + if ((err = e100_self_test(nic))) + return err; + + if ((err = e100_phy_init(nic))) + return err; + if ((err = e100_exec_cmd(nic, cuc_load_base, 0))) + return err; + if ((err = e100_exec_cmd(nic, ruc_load_base, 0))) + return err; + if ((err = e100_load_ucode_wait(nic))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_configure))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_addr, + nic->dma_addr + offsetof(struct mem, stats)))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0))) + return err; + + e100_disable_irq(nic); + + return 0; +} + +static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct net_device *netdev = nic->netdev; + struct netdev_hw_addr *ha; + u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); + + cb->command = cpu_to_le16(cb_multi); + cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == count) + break; + memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, + ETH_ALEN); + } + return 0; +} + +static void e100_set_multicast_list(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "mc_count=%d, flags=0x%04X\n", + netdev_mc_count(netdev), netdev->flags); + + if (netdev->flags & IFF_PROMISC) + nic->flags |= promiscuous; + else + nic->flags &= ~promiscuous; + + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS) + nic->flags |= multicast_all; + else + nic->flags &= ~multicast_all; + + e100_exec_cb(nic, NULL, e100_configure); + e100_exec_cb(nic, NULL, e100_multi); +} + +static void e100_update_stats(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct net_device_stats *ns = &dev->stats; + struct stats *s = &nic->mem->stats; + __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : + (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : + &s->complete; + + /* Device's stats reporting may take several microseconds to + * complete, so we're always waiting for results of the + * previous command. 
*/ + + if (*complete == cpu_to_le32(cuc_dump_reset_complete)) { + *complete = 0; + nic->tx_frames = le32_to_cpu(s->tx_good_frames); + nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); + ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); + ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); + ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); + ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); + ns->collisions += nic->tx_collisions; + ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + + le32_to_cpu(s->tx_lost_crs); + nic->rx_short_frame_errors += + le32_to_cpu(s->rx_short_frame_errors); + ns->rx_length_errors = nic->rx_short_frame_errors + + nic->rx_over_length_errors; + ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); + ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); + ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); + ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + + le32_to_cpu(s->rx_alignment_errors) + + le32_to_cpu(s->rx_short_frame_errors) + + le32_to_cpu(s->rx_cdt_errors); + nic->tx_deferred += le32_to_cpu(s->tx_deferred); + nic->tx_single_collisions += + le32_to_cpu(s->tx_single_collisions); + nic->tx_multiple_collisions += + le32_to_cpu(s->tx_multiple_collisions); + if (nic->mac >= mac_82558_D101_A4) { + nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); + nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); + nic->rx_fc_unsupported += + le32_to_cpu(s->fc_rcv_unsupported); + if (nic->mac >= mac_82559_D101M) { + nic->tx_tco_frames += + le16_to_cpu(s->xmt_tco_frames); + nic->rx_tco_frames += + le16_to_cpu(s->rcv_tco_frames); + } + } + } + + + if (e100_exec_cmd(nic, cuc_dump_reset, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_dump_reset failed\n"); +} + +static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) +{ + /* Adjust inter-frame-spacing (IFS) between two transmits if + * we're getting collisions on a half-duplex connection. */ + + if (duplex == DUPLEX_HALF) { + u32 prev = nic->adaptive_ifs; + u32 min_frames = (speed == SPEED_100) ? 1000 : 100; + + if ((nic->tx_frames / 32 < nic->tx_collisions) && + (nic->tx_frames > min_frames)) { + if (nic->adaptive_ifs < 60) + nic->adaptive_ifs += 5; + } else if (nic->tx_frames < min_frames) { + if (nic->adaptive_ifs >= 5) + nic->adaptive_ifs -= 5; + } + if (nic->adaptive_ifs != prev) + e100_exec_cb(nic, NULL, e100_configure); + } +} + +static void e100_watchdog(struct timer_list *t) +{ + struct nic *nic = from_timer(nic, t, watchdog); + struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; + u32 speed; + + netif_printk(nic, timer, KERN_DEBUG, nic->netdev, + "right now = %ld\n", jiffies); + + /* mii library handles link maintenance tasks */ + + mii_ethtool_gset(&nic->mii, &cmd); + speed = ethtool_cmd_speed(&cmd); + + if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", + speed == SPEED_100 ? 100 : 10, + cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); + } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Down\n"); + } + + mii_check_link(&nic->mii); + + /* Software generated interrupt to recover from (rare) Rx + * allocation failure. 
+ * Unfortunately have to use a spinlock to not re-enable interrupts + * accidentally, due to hardware that shares a register between the + * interrupt mask bit and the SW Interrupt generation bit */ + spin_lock_irq(&nic->cmd_lock); + iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irq(&nic->cmd_lock); + + e100_update_stats(nic); + e100_adjust_adaptive_ifs(nic, speed, cmd.duplex); + + if (nic->mac <= mac_82557_D100_C) + /* Issue a multicast command to workaround a 557 lock up */ + e100_set_multicast_list(nic->netdev); + + if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) + /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ + nic->flags |= ich_10h_workaround; + else + nic->flags &= ~ich_10h_workaround; + + mod_timer(&nic->watchdog, + round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); +} + +static int e100_xmit_prepare(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + dma_addr_t dma_addr; + cb->command = nic->tx_command; + + dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + /* If we can't map the skb, have the upper layer try later */ + if (dma_mapping_error(&nic->pdev->dev, dma_addr)) { + dev_kfree_skb_any(skb); + skb = NULL; + return -ENOMEM; + } + + /* + * Use the last 4 bytes of the SKB payload packet as the CRC, used for + * testing, ie sending frames with bad CRC. + */ + if (unlikely(skb->no_fcs)) + cb->command |= cpu_to_le16(cb_tx_nc); + else + cb->command &= ~cpu_to_le16(cb_tx_nc); + + /* interrupt every 16 packets regardless of delay */ + if ((nic->cbs_avail & ~15) == nic->cbs_avail) + cb->command |= cpu_to_le16(cb_i); + cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); + cb->u.tcb.tcb_byte_count = 0; + cb->u.tcb.threshold = nic->tx_threshold; + cb->u.tcb.tbd_count = 1; + cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); + cb->u.tcb.tbd.size = cpu_to_le16(skb->len); + skb_tx_timestamp(skb); + return 0; +} + +static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + if (nic->flags & ich_10h_workaround) { + /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. + Issue a NOP command followed by a 1us delay before + issuing the Tx command. */ + if (e100_exec_cmd(nic, cuc_nop, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_nop failed\n"); + udelay(1); + } + + err = e100_exec_cb(nic, skb, e100_xmit_prepare); + + switch (err) { + case -ENOSPC: + /* We queued the skb, but now we're out of space. */ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "No space for CB\n"); + netif_stop_queue(netdev); + break; + case -ENOMEM: + /* This is a hard error - log it. 
*/ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "Out of Tx resources, returning skb\n"); + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +static int e100_tx_clean(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct cb *cb; + int tx_cleaned = 0; + + spin_lock(&nic->cb_lock); + + /* Clean CBs marked complete */ + for (cb = nic->cb_to_clean; + cb->status & cpu_to_le16(cb_complete); + cb = nic->cb_to_clean = cb->next) { + dma_rmb(); /* read skb after status */ + netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, + "cb[%d]->status = 0x%04X\n", + (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), + cb->status); + + if (likely(cb->skb != NULL)) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += cb->skb->len; + + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + tx_cleaned = 1; + } + cb->status = 0; + nic->cbs_avail++; + } + + spin_unlock(&nic->cb_lock); + + /* Recover from running out of Tx resources in xmit_frame */ + if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) + netif_wake_queue(nic->netdev); + + return tx_cleaned; +} + +static void e100_clean_cbs(struct nic *nic) +{ + if (nic->cbs) { + while (nic->cbs_avail != nic->params.cbs.count) { + struct cb *cb = nic->cb_to_clean; + if (cb->skb) { + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); + dev_kfree_skb(cb->skb); + } + nic->cb_to_clean = nic->cb_to_clean->next; + nic->cbs_avail++; + } + dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); + nic->cbs = NULL; + nic->cbs_avail = 0; + } + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = + nic->cbs; +} + +static int e100_alloc_cbs(struct nic *nic) +{ + struct cb *cb; + unsigned int i, count = nic->params.cbs.count; + + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; + nic->cbs_avail = 0; + + nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL, + &nic->cbs_dma_addr); + if (!nic->cbs) + return -ENOMEM; + + for (cb = nic->cbs, i = 0; i < count; cb++, i++) { + cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; + cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; + + cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); + cb->link = cpu_to_le32(nic->cbs_dma_addr + + ((i+1) % count) * sizeof(struct cb)); + } + + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; + nic->cbs_avail = count; + + return 0; +} + +static inline void e100_start_receiver(struct nic *nic, struct rx *rx) +{ + if (!nic->rxs) return; + if (RU_SUSPENDED != nic->ru_running) return; + + /* handle init time starts */ + if (!rx) rx = nic->rxs; + + /* (Re)start RU if suspended or idle and RFA is non-NULL */ + if (rx->skb) { + e100_exec_cmd(nic, ruc_start, rx->dma_addr); + nic->ru_running = RU_RUNNING; + } +} + +#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) +{ + if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) + return -ENOMEM; + + /* Init, and map the RFD. 
*/ + skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { + dev_kfree_skb_any(rx->skb); + rx->skb = NULL; + rx->dma_addr = 0; + return -ENOMEM; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + dma_sync_single_for_device(&nic->pdev->dev, + rx->prev->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + } + + return 0; +} + +static int e100_rx_indicate(struct nic *nic, struct rx *rx, + unsigned int *work_done, unsigned int work_to_do) +{ + struct net_device *dev = nic->netdev; + struct sk_buff *skb = rx->skb; + struct rfd *rfd = (struct rfd *)skb->data; + u16 rfd_status, actual_size; + u16 fcs_pad = 0; + + if (unlikely(work_done && *work_done >= work_to_do)) + return -EAGAIN; + + /* Need to sync before taking a peek at cb_complete bit */ + dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); + rfd_status = le16_to_cpu(rfd->status); + + netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, + "status=0x%04X\n", rfd_status); + dma_rmb(); /* read size after status bit */ + + /* If data isn't ready, nothing to indicate */ + if (unlikely(!(rfd_status & cb_complete))) { + /* If the next buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling + * interrupts */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), + DMA_FROM_DEVICE); + return -ENODATA; + } + + /* Get actual data size */ + if (unlikely(dev->features & NETIF_F_RXFCS)) + fcs_pad = 4; + actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; + if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) + actual_size = RFD_BUF_LEN - sizeof(struct rfd); + + /* Get data */ + dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); + + /* If this buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling interrupts. + * This can happen when the RU sees the size change but also sees + * the el bit set. */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) { + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + } + + /* Pull off the RFD and put the actual data (minus eth hdr) */ + skb_reserve(skb, sizeof(struct rfd)); + skb_put(skb, actual_size); + skb->protocol = eth_type_trans(skb, nic->netdev); + + /* If we are receiving all frames, then don't bother + * checking for errors. + */ + if (unlikely(dev->features & NETIF_F_RXALL)) { + if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) + /* Received oversized frame, but keep it. 
*/ + nic->rx_over_length_errors++; + goto process_skb; + } + + if (unlikely(!(rfd_status & cb_ok))) { + /* Don't indicate if hardware indicates errors */ + dev_kfree_skb_any(skb); + } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) { + /* Don't indicate oversized frames */ + nic->rx_over_length_errors++; + dev_kfree_skb_any(skb); + } else { +process_skb: + dev->stats.rx_packets++; + dev->stats.rx_bytes += (actual_size - fcs_pad); + netif_receive_skb(skb); + if (work_done) + (*work_done)++; + } + + rx->skb = NULL; + + return 0; +} + +static void e100_rx_clean(struct nic *nic, unsigned int *work_done, + unsigned int work_to_do) +{ + struct rx *rx; + int restart_required = 0, err = 0; + struct rx *old_before_last_rx, *new_before_last_rx; + struct rfd *old_before_last_rfd, *new_before_last_rfd; + + /* Indicate newly arrived packets */ + for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { + err = e100_rx_indicate(nic, rx, work_done, work_to_do); + /* Hit quota or no more to clean */ + if (-EAGAIN == err || -ENODATA == err) + break; + } + + + /* On EAGAIN, hit quota so have more work to do, restart once + * cleanup is complete. + * Else, are we already rnr? then pay attention!!! this ensures that + * the state machine progression never allows a start with a + * partially cleaned list, avoiding a race between hardware + * and rx_to_clean when in NAPI mode */ + if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) + restart_required = 1; + + old_before_last_rx = nic->rx_to_use->prev->prev; + old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; + + /* Alloc new skbs to refill list */ + for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { + if (unlikely(e100_rx_alloc_skb(nic, rx))) + break; /* Better luck next time (see watchdog) */ + } + + new_before_last_rx = nic->rx_to_use->prev->prev; + if (new_before_last_rx != old_before_last_rx) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer + * without worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this + * buffer. + * When the hardware hits the before last buffer with el-bit + * and size of 0, it will RNR interrupt, the RUS will go into + * the No Resources state. It will not complete nor write to + * this buffer. */ + new_before_last_rfd = + (struct rfd *)new_before_last_rx->skb->data; + new_before_last_rfd->size = 0; + new_before_last_rfd->command |= cpu_to_le16(cb_el); + dma_sync_single_for_device(&nic->pdev->dev, + new_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + + /* Now that we have a new stopping point, we can clear the old + * stopping point. We must sync twice to get the proper + * ordering on the hardware side of things. */ + old_before_last_rfd->command &= ~cpu_to_le16(cb_el); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + + ETH_FCS_LEN); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + } + + if (restart_required) { + // ack the rnr? 
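+ /* Acknowledge the latched RNR condition before restarting the
+ * receive unit at the next RFD to clean; e100_start_receiver()
+ * only acts while ru_running is RU_SUSPENDED. */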
+ iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); + e100_start_receiver(nic, nic->rx_to_clean); + if (work_done) + (*work_done)++; + } +} + +static void e100_rx_clean_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + + nic->ru_running = RU_UNINITIALIZED; + + if (nic->rxs) { + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + if (rx->skb) { + dma_unmap_single(&nic->pdev->dev, + rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); + dev_kfree_skb(rx->skb); + } + } + kfree(nic->rxs); + nic->rxs = NULL; + } + + nic->rx_to_use = nic->rx_to_clean = NULL; +} + +static int e100_rx_alloc_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + struct rfd *before_last; + + nic->rx_to_use = nic->rx_to_clean = NULL; + nic->ru_running = RU_UNINITIALIZED; + + if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL))) + return -ENOMEM; + + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; + rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; + if (e100_rx_alloc_skb(nic, rx)) { + e100_rx_clean_list(nic); + return -ENOMEM; + } + } + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer without + * worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this buffer. + * When the hardware hits the before last buffer with el-bit and size + * of 0, it will RNR interrupt, the RU will go into the No Resources + * state. It will not complete nor write to this buffer. */ + rx = nic->rxs->prev->prev; + before_last = (struct rfd *)rx->skb->data; + before_last->command |= cpu_to_le16(cb_el); + before_last->size = 0; + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); + + nic->rx_to_use = nic->rx_to_clean = nic->rxs; + nic->ru_running = RU_SUSPENDED; + + return 0; +} + +static irqreturn_t e100_intr(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct nic *nic = netdev_priv(netdev); + u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); + + netif_printk(nic, intr, KERN_DEBUG, nic->netdev, + "stat_ack = 0x%02X\n", stat_ack); + + if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ + stat_ack == stat_ack_not_present) /* Hardware is ejected */ + return IRQ_NONE; + + /* Ack interrupt(s) */ + iowrite8(stat_ack, &nic->csr->scb.stat_ack); + + /* We hit Receive No Resource (RNR); restart RU after cleaning */ + if (stat_ack & stat_ack_rnr) + nic->ru_running = RU_SUSPENDED; + + if (likely(napi_schedule_prep(&nic->napi))) { + e100_disable_irq(nic); + __napi_schedule(&nic->napi); + } + + return IRQ_HANDLED; +} + +static int e100_poll(struct napi_struct *napi, int budget) +{ + struct nic *nic = container_of(napi, struct nic, napi); + unsigned int work_done = 0; + + e100_rx_clean(nic, &work_done, budget); + e100_tx_clean(nic); + + /* If budget fully consumed, continue polling */ + if (work_done == budget) + return budget; + + /* only re-enable interrupt if stack agrees polling is really done */ + if (likely(napi_complete_done(napi, work_done))) + e100_enable_irq(nic); + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void e100_netpoll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_disable_irq(nic); + e100_intr(nic->pdev->irq, netdev); + e100_tx_clean(nic); + e100_enable_irq(nic); +} +#endif + +static int e100_set_mac_address(struct net_device *netdev, void *p) +{ + 
struct nic *nic = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + e100_exec_cb(nic, NULL, e100_setup_iaaddr); + + return 0; +} + +static int e100_asf(struct nic *nic) +{ + /* ASF can be enabled from eeprom */ + return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && + (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && + !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && + ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); +} + +static int e100_up(struct nic *nic) +{ + int err; + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_rx_clean_list; + if ((err = e100_hw_init(nic))) + goto err_clean_cbs; + e100_set_multicast_list(nic->netdev); + e100_start_receiver(nic, NULL); + mod_timer(&nic->watchdog, jiffies); + if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, + nic->netdev->name, nic->netdev))) + goto err_no_irq; + netif_wake_queue(nic->netdev); + napi_enable(&nic->napi); + /* enable ints _after_ enabling poll, preventing a race between + * disable ints+schedule */ + e100_enable_irq(nic); + return 0; + +err_no_irq: + del_timer_sync(&nic->watchdog); +err_clean_cbs: + e100_clean_cbs(nic); +err_rx_clean_list: + e100_rx_clean_list(nic); + return err; +} + +static void e100_down(struct nic *nic) +{ + /* wait here for poll to complete */ + napi_disable(&nic->napi); + netif_stop_queue(nic->netdev); + e100_hw_reset(nic); + free_irq(nic->pdev->irq, nic->netdev); + del_timer_sync(&nic->watchdog); + netif_carrier_off(nic->netdev); + e100_clean_cbs(nic); + e100_rx_clean_list(nic); +} + +static void e100_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct nic *nic = netdev_priv(netdev); + + /* Reset outside of interrupt context, to avoid request_irq + * in interrupt context */ + schedule_work(&nic->tx_timeout_task); +} + +static void e100_tx_timeout_task(struct work_struct *work) +{ + struct nic *nic = container_of(work, struct nic, tx_timeout_task); + struct net_device *netdev = nic->netdev; + + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); + + rtnl_lock(); + if (netif_running(netdev)) { + e100_down(netdev_priv(netdev)); + e100_up(netdev_priv(netdev)); + } + rtnl_unlock(); +} + +static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) +{ + int err; + struct sk_buff *skb; + + /* Use driver resources to perform internal MAC or PHY + * loopback test. A single packet is prepared and transmitted + * in loopback mode, and the test passes if the received + * packet compares byte-for-byte to the transmitted packet. 
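+ * The test frame is an ETH_DATA_LEN buffer filled with 0xFF; after a
+ * short delay the first RFD is synced back to the CPU and its payload
+ * is compared against the transmitted data.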
*/ + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_clean_rx; + + /* ICH PHY loopback is broken so do MAC loopback instead */ + if (nic->flags & ich && loopback_mode == lb_phy) + loopback_mode = lb_mac; + + nic->loopback = loopback_mode; + if ((err = e100_hw_init(nic))) + goto err_loopback_none; + + if (loopback_mode == lb_phy) + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, + BMCR_LOOPBACK); + + e100_start_receiver(nic, NULL); + + if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { + err = -ENOMEM; + goto err_loopback_none; + } + skb_put(skb, ETH_DATA_LEN); + memset(skb->data, 0xFF, ETH_DATA_LEN); + e100_xmit_frame(skb, nic->netdev); + + msleep(10); + + dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + + if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), + skb->data, ETH_DATA_LEN)) + err = -EAGAIN; + +err_loopback_none: + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); + nic->loopback = lb_none; + e100_clean_cbs(nic); + e100_hw_reset(nic); +err_clean_rx: + e100_rx_clean_list(nic); + return err; +} + +#define MII_LED_CONTROL 0x1B +#define E100_82552_LED_OVERRIDE 0x19 +#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ +#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ + +static int e100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + + mii_ethtool_get_link_ksettings(&nic->mii, cmd); + + return 0; +} + +static int e100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); + err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); + e100_exec_cb(nic, NULL, e100_configure); + + return err; +} + +static void e100_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct nic *nic = netdev_priv(netdev); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(nic->pdev), + sizeof(info->bus_info)); +} + +#define E100_PHY_REGS 0x1D +static int e100_get_regs_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + /* We know the number of registers, and the size of the dump buffer. + * Calculate the total size in bytes. + */ + return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); +} + +static void e100_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nic *nic = netdev_priv(netdev); + u32 *buff = p; + int i; + + regs->version = (1 << 24) | nic->pdev->revision; + buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | + ioread8(&nic->csr->scb.cmd_lo) << 16 | + ioread16(&nic->csr->scb.status); + for (i = 0; i < E100_PHY_REGS; i++) + /* Note that we read the registers in reverse order. This + * ordering is the ABI apparently used by ethtool and other + * applications. + */ + buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, + E100_PHY_REGS - 1 - i); + memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); + e100_exec_cb(nic, NULL, e100_dump); + msleep(10); + memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, + sizeof(nic->mem->dump_buf)); +} + +static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; + wol->wolopts = (nic->flags & wol_magic) ? 
WAKE_MAGIC : 0; +} + +static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + + if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || + !device_can_wakeup(&nic->pdev->dev)) + return -EOPNOTSUPP; + + if (wol->wolopts) + nic->flags |= wol_magic; + else + nic->flags &= ~wol_magic; + + device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); + + e100_exec_cb(nic, NULL, e100_configure); + + return 0; +} + +static u32 e100_get_msglevel(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->msg_enable; +} + +static void e100_set_msglevel(struct net_device *netdev, u32 value) +{ + struct nic *nic = netdev_priv(netdev); + nic->msg_enable = value; +} + +static int e100_nway_reset(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_nway_restart(&nic->mii); +} + +static u32 e100_get_link(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_link_ok(&nic->mii); +} + +static int e100_get_eeprom_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->eeprom_wc << 1; +} + +#define E100_EEPROM_MAGIC 0x1234 +static int e100_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + eeprom->magic = E100_EEPROM_MAGIC; + memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); + + return 0; +} + +static int e100_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + if (eeprom->magic != E100_EEPROM_MAGIC) + return -EINVAL; + + memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); + + return e100_eeprom_save(nic, eeprom->offset >> 1, + (eeprom->len >> 1) + 1); +} + +static void e100_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + ring->rx_max_pending = rfds->max; + ring->tx_max_pending = cbs->max; + ring->rx_pending = rfds->count; + ring->tx_pending = cbs->count; +} + +static int e100_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(netdev)) + e100_down(nic); + rfds->count = max(ring->rx_pending, rfds->min); + rfds->count = min(rfds->count, rfds->max); + cbs->count = max(ring->tx_pending, cbs->min); + cbs->count = min(cbs->count, cbs->max); + netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", + rfds->count, cbs->count); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} + +static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)", + "Eeprom test (on/offline)", + "Self test (offline)", + "Mac loopback (offline)", + "Phy loopback (offline)", +}; +#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test) + +static void e100_diag_test(struct net_device *netdev, + struct ethtool_test *test, u64 *data) +{ + struct ethtool_cmd cmd; + struct nic *nic = netdev_priv(netdev); + int i; + + memset(data, 0, E100_TEST_LEN * sizeof(u64)); + data[0] = !mii_link_ok(&nic->mii); + data[1] = e100_eeprom_load(nic); + if (test->flags & ETH_TEST_FL_OFFLINE) { + + /* save speed, duplex & autoneg 
settings */ + mii_ethtool_gset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_down(nic); + data[2] = e100_self_test(nic); + data[3] = e100_loopback_test(nic, lb_mac); + data[4] = e100_loopback_test(nic, lb_phy); + + /* restore speed, duplex & autoneg settings */ + mii_ethtool_sset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_up(nic); + } + for (i = 0; i < E100_TEST_LEN; i++) + test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; + + msleep_interruptible(4 * 1000); +} + +static int e100_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct nic *nic = netdev_priv(netdev); + enum led_state { + led_on = 0x01, + led_off = 0x04, + led_on_559 = 0x05, + led_on_557 = 0x07, + }; + u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : + MII_LED_CONTROL; + u16 leds = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; + + case ETHTOOL_ID_ON: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : + (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; + break; + + case ETHTOOL_ID_OFF: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off; + break; + + case ETHTOOL_ID_INACTIVE: + break; + } + + mdio_write(netdev, nic->mii.phy_id, led_reg, leds); + return 0; +} + +static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + /* device-specific stats */ + "tx_deferred", "tx_single_collisions", "tx_multi_collisions", + "tx_flow_control_pause", "rx_flow_control_pause", + "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", + "rx_short_frame_errors", "rx_over_length_errors", +}; +#define E100_NET_STATS_LEN 21 +#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) + +static int e100_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E100_TEST_LEN; + case ETH_SS_STATS: + return E100_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e100_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nic *nic = netdev_priv(netdev); + int i; + + for (i = 0; i < E100_NET_STATS_LEN; i++) + data[i] = ((unsigned long *)&netdev->stats)[i]; + + data[i++] = nic->tx_deferred; + data[i++] = nic->tx_single_collisions; + data[i++] = nic->tx_multiple_collisions; + data[i++] = nic->tx_fc_pause; + data[i++] = nic->rx_fc_pause; + data[i++] = nic->rx_fc_unsupported; + data[i++] = nic->tx_tco_frames; + data[i++] = nic->rx_tco_frames; + data[i++] = nic->rx_short_frame_errors; + data[i++] = nic->rx_over_length_errors; +} + +static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e100_gstrings_test, sizeof(e100_gstrings_test)); + break; + case ETH_SS_STATS: + memcpy(data, e100_gstrings_stats, sizeof(e100_gstrings_stats)); + break; + } +} + +static const struct ethtool_ops e100_ethtool_ops = { + .get_drvinfo = e100_get_drvinfo, + .get_regs_len = e100_get_regs_len, + .get_regs = e100_get_regs, + .get_wol = e100_get_wol, + .set_wol = e100_set_wol, + .get_msglevel = e100_get_msglevel, + .set_msglevel = e100_set_msglevel, + .nway_reset = e100_nway_reset, + .get_link = e100_get_link, + 
.get_eeprom_len = e100_get_eeprom_len, + .get_eeprom = e100_get_eeprom, + .set_eeprom = e100_set_eeprom, + .get_ringparam = e100_get_ringparam, + .set_ringparam = e100_set_ringparam, + .self_test = e100_diag_test, + .get_strings = e100_get_strings, + .set_phys_id = e100_set_phys_id, + .get_ethtool_stats = e100_get_ethtool_stats, + .get_sset_count = e100_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e100_get_link_ksettings, + .set_link_ksettings = e100_set_link_ksettings, +}; + +static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct nic *nic = netdev_priv(netdev); + + return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); +} + +static int e100_alloc(struct nic *nic) +{ + nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem), + &nic->dma_addr, GFP_KERNEL); + return nic->mem ? 0 : -ENOMEM; +} + +static void e100_free(struct nic *nic) +{ + if (nic->mem) { + dma_free_coherent(&nic->pdev->dev, sizeof(struct mem), + nic->mem, nic->dma_addr); + nic->mem = NULL; + } +} + +static int e100_open(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err = 0; + + netif_carrier_off(netdev); + if ((err = e100_up(nic))) + netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); + return err; +} + +static int e100_close(struct net_device *netdev) +{ + e100_down(netdev_priv(netdev)); + return 0; +} + +static int e100_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct nic *nic = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + e100_exec_cb(nic, NULL, e100_configure); + return 1; +} + +static const struct net_device_ops e100_netdev_ops = { + .ndo_open = e100_open, + .ndo_stop = e100_close, + .ndo_start_xmit = e100_xmit_frame, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = e100_set_multicast_list, + .ndo_set_mac_address = e100_set_mac_address, + .ndo_eth_ioctl = e100_do_ioctl, + .ndo_tx_timeout = e100_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e100_netpoll, +#endif + .ndo_set_features = e100_set_features, +}; + +static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct nic *nic; + int err; + + if (!(netdev = alloc_etherdev(sizeof(struct nic)))) + return -ENOMEM; + + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; + + netdev->netdev_ops = &e100_netdev_ops; + netdev->ethtool_ops = &e100_ethtool_ops; + netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + nic = netdev_priv(netdev); + netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); + nic->netdev = netdev; + nic->pdev = pdev; + nic->msg_enable = (1 << debug) - 1; + nic->mdio_ctrl = mdio_ctrl_hw; + pci_set_drvdata(pdev, netdev); + + if ((err = pci_enable_device(pdev))) { + netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); + goto err_out_free_dev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + if ((err = pci_request_regions(pdev, DRV_NAME))) { + netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); + goto 
err_out_disable_pdev; + } + + if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { + netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + if (use_io) + netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); + + nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr)); + if (!nic->csr) { + netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + if (ent->driver_data) + nic->flags |= ich; + else + nic->flags &= ~ich; + + e100_get_defaults(nic); + + /* D100 MAC doesn't allow rx of vlan packets with normal MTU */ + if (nic->mac < mac_82558_D101_A4) + netdev->features |= NETIF_F_VLAN_CHALLENGED; + + /* locks must be initialized before calling hw_reset */ + spin_lock_init(&nic->cb_lock); + spin_lock_init(&nic->cmd_lock); + spin_lock_init(&nic->mdio_lock); + + /* Reset the device before pci_set_master() in case device is in some + * funky state and has an interrupt pending - hint: we don't have the + * interrupt handler registered yet. */ + e100_hw_reset(nic); + + pci_set_master(pdev); + + timer_setup(&nic->watchdog, e100_watchdog, 0); + + INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); + + if ((err = e100_alloc(nic))) { + netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); + goto err_out_iounmap; + } + + if ((err = e100_eeprom_load(nic))) + goto err_out_free; + + e100_phy_init(nic); + + memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!eeprom_bad_csum_allow) { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); + err = -EAGAIN; + goto err_out_free; + } else { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); + } + } + + /* Wol magic packet can be enabled from eeprom */ + if ((nic->mac >= mac_82558_D101_A4) && + (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { + nic->flags |= wol_magic; + device_set_wakeup_enable(&pdev->dev, true); + } + + /* ack any pending wake events, disable PME */ + pci_pme_active(pdev, false); + + strcpy(netdev->name, "eth%d"); + if ((err = register_netdev(netdev))) { + netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n"); + goto err_out_free; + } + nic->cbs_pool = dma_pool_create(netdev->name, + &nic->pdev->dev, + nic->params.cbs.max * sizeof(struct cb), + sizeof(u32), + 0); + if (!nic->cbs_pool) { + netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); + err = -ENOMEM; + goto err_out_pool; + } + netif_info(nic, probe, nic->netdev, + "addr 0x%llx, irq %d, MAC addr %pM\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), + pdev->irq, netdev->dev_addr); + + return 0; + +err_out_pool: + unregister_netdev(netdev); +err_out_free: + e100_free(nic); +err_out_iounmap: + pci_iounmap(pdev, nic->csr); +err_out_free_res: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out_free_dev: + free_netdev(netdev); + return err; +} + +static void e100_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netdev) { + struct nic *nic = netdev_priv(netdev); + unregister_netdev(netdev); + e100_free(nic); + pci_iounmap(pdev, nic->csr); + dma_pool_destroy(nic->cbs_pool); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ +#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ +#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ +static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + e100_down(nic); + + if ((nic->flags & wol_magic) | e100_asf(nic)) { + /* enable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, smartspeed | + E100_82552_REV_ANEG | E100_82552_ANEG_NOW); + } + *enable_wake = true; + } else { + *enable_wake = false; + } + + pci_disable_device(pdev); +} + +static int __e100_power_off(struct pci_dev *pdev, bool wake) +{ + if (wake) + return pci_prepare_to_sleep(pdev); + + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int __maybe_unused e100_suspend(struct device *dev_d) +{ + bool wake; + + __e100_shutdown(to_pci_dev(dev_d), &wake); + + return 0; +} + +static int __maybe_unused e100_resume(struct device *dev_d) +{ + struct net_device *netdev = dev_get_drvdata(dev_d); + struct nic *nic = netdev_priv(netdev); + int err; + + err = pci_enable_device(to_pci_dev(dev_d)); + if (err) { + netdev_err(netdev, "Resume cannot enable PCI device, aborting\n"); + return err; + } + pci_set_master(to_pci_dev(dev_d)); + + /* disable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, + smartspeed & ~(E100_82552_REV_ANEG)); + } + + if (netif_running(netdev)) + e100_up(nic); + + netif_device_attach(netdev); + + return 0; +} + +static void e100_shutdown(struct pci_dev *pdev) +{ + bool wake; + __e100_shutdown(pdev, &wake); + if (system_state == SYSTEM_POWER_OFF) + __e100_power_off(pdev, wake); +} + +/* ------------------ PCI Error Recovery infrastructure -------------- */ +/** + * e100_io_error_detected - called when PCI error is detected. + * @pdev: Pointer to PCI device + * @state: The current pci connection state + */ +static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e100_down(nic); + pci_disable_device(pdev); + + /* Request a slot reset. 
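+ * Returning PCI_ERS_RESULT_NEED_RESET asks the PCI error recovery
+ * core to reset the slot and then call e100_io_slot_reset().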
*/ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e100_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch. + */ +static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + pr_err("Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + /* Only one device per card can do a reset */ + if (0 != PCI_FUNC(pdev->devfn)) + return PCI_ERS_RESULT_RECOVERED; + e100_hw_reset(nic); + e100_phy_init(nic); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e100_io_resume - resume normal operations + * @pdev: Pointer to PCI device + * + * Resume normal operations after an error recovery + * sequence has been completed. + */ +static void e100_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, PCI_D0, 0); + + netif_device_attach(netdev); + if (netif_running(netdev)) { + e100_open(netdev); + mod_timer(&nic->watchdog, jiffies); + } +} + +static const struct pci_error_handlers e100_err_handler = { + .error_detected = e100_io_error_detected, + .slot_reset = e100_io_slot_reset, + .resume = e100_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume); + +static struct pci_driver e100_driver = { + .name = DRV_NAME, + .id_table = e100_id_table, + .probe = e100_probe, + .remove = e100_remove, + + /* Power Management hooks */ + .driver.pm = &e100_pm_ops, + + .shutdown = e100_shutdown, + .err_handler = &e100_err_handler, +}; + +static int __init e100_init_module(void) +{ + if (((1 << debug) - 1) & NETIF_MSG_DRV) { + pr_info("%s\n", DRV_DESCRIPTION); + pr_info("%s\n", DRV_COPYRIGHT); + } + return pci_register_driver(&e100_driver); +} + +static void __exit e100_cleanup_module(void) +{ + pci_unregister_driver(&e100_driver); +} + +module_init(e100_init_module); +module_exit(e100_cleanup_module); diff --git a/devices/e100-5.4-ethercat.c b/devices/e100-5.4-ethercat.c new file mode 100644 index 00000000..e611c2f0 --- /dev/null +++ b/devices/e100-5.4-ethercat.c @@ -0,0 +1,3376 @@ +/****************************************************************************** + * + * $Id$ + * + * Copyright (C) 2007-2012 Florian Pose, Ingenieurgemeinschaft IgH + * + * This file is part of the IgH EtherCAT Master. + * + * The IgH EtherCAT Master is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + * The IgH EtherCAT Master is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with the IgH EtherCAT Master; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * --- + * + * The license mentioned above concerns the source code only. Using the + * EtherCAT technology and brand is only permitted in compliance with the + * industrial property and similar rights of Beckhoff Automation GmbH. 
+ * + * --- + * + * vim: noexpandtab + * + *****************************************************************************/ + +/** + \file + EtherCAT driver for e100-compatible NICs. +*/ + +/* Former documentation: */ + +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* + * e100.c: Intel(R) PRO/100 ethernet driver + * + * (Re)written 2003 by scott.feldman@intel.com. Based loosely on + * original e100 driver, but better described as a munging of + * e100, e1000, eepro100, tg3, 8139cp, and other drivers. + * + * References: + * Intel 8255x 10/100 Mbps Ethernet Controller Family, + * Open Source Software Developers Manual, + * http://sourceforge.net/projects/e1000 + * + * + * Theory of Operation + * + * I. General + * + * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet + * controller family, which includes the 82557, 82558, 82559, 82550, + * 82551, and 82562 devices. 82558 and greater controllers + * integrate the Intel 82555 PHY. The controllers are used in + * server and client network interface cards, as well as in + * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx + * configurations. 8255x supports a 32-bit linear addressing + * mode and operates at 33Mhz PCI clock rate. + * + * II. Driver Operation + * + * Memory-mapped mode is used exclusively to access the device's + * shared-memory structure, the Control/Status Registers (CSR). All + * setup, configuration, and control of the device, including queuing + * of Tx, Rx, and configuration commands is through the CSR. + * cmd_lock serializes accesses to the CSR command register. cb_lock + * protects the shared Command Block List (CBL). + * + * 8255x is highly MII-compliant and all access to the PHY go + * through the Management Data Interface (MDI). Consequently, the + * driver leverages the mii.c library shared with other MII-compliant + * devices. + * + * Big- and Little-Endian byte order as well as 32- and 64-bit + * archs are supported. Weak-ordered memory and non-cache-coherent + * archs are supported. + * + * III. Transmit + * + * A Tx skb is mapped and hangs off of a TCB. TCBs are linked + * together in a fixed-size ring (CBL) thus forming the flexible mode + * memory structure. A TCB marked with the suspend-bit indicates + * the end of the ring. The last TCB processed suspends the + * controller, and the controller can be restarted by issue a CU + * resume command to continue from the suspend point, or a CU start + * command to start at a given position in the ring. + * + * Non-Tx commands (config, multicast setup, etc) are linked + * into the CBL ring along with Tx commands. The common structure + * used for both Tx and non-Tx commands is the Command Block (CB). + * + * cb_to_use is the next CB to use for queuing a command; cb_to_clean + * is the next CB to check for completion; cb_to_send is the first + * CB to start on in case of a previous failure to resume. CB clean + * up happens in interrupt context in response to a CU interrupt. + * cbs_avail keeps track of number of free CB resources available. + * + * Hardware padding of short packets to minimum packet size is + * enabled. 82557 pads with 7Eh, while the later controllers pad + * with 00h. + * + * IV. Receive + * + * The Receive Frame Area (RFA) comprises a ring of Receive Frame + * Descriptors (RFD) + data buffer, thus forming the simplified mode + * memory structure. Rx skbs are allocated to contain both the RFD + * and the data buffer, but the RFD is pulled off before the skb is + * indicated. 
The data buffer is aligned such that encapsulated + * protocol headers are u32-aligned. Since the RFD is part of the + * mapped shared memory, and completion status is contained within + * the RFD, the RFD must be dma_sync'ed to maintain a consistent + * view from software and hardware. + * + * In order to keep updates to the RFD link field from colliding with + * hardware writes to mark packets complete, we use the feature that + * hardware will not write to a size 0 descriptor and mark the previous + * packet as end-of-list (EL). After updating the link, we remove EL + * and only then restore the size such that hardware may use the + * previous-to-end RFD. + * + * Under typical operation, the receive unit (RU) is start once, + * and the controller happily fills RFDs as frames arrive. If + * replacement RFDs cannot be allocated, or the RU goes non-active, + * the RU must be restarted. Frame arrival generates an interrupt, + * and Rx indication and re-allocation happen in the same context, + * therefore no locking is required. A software-generated interrupt + * is generated from the watchdog to recover from a failed allocation + * scenario where all Rx resources have been indicated and none re- + * placed. + * + * V. Miscellaneous + * + * VLAN offloading of tagging, stripping and filtering is not + * supported, but driver will accommodate the extra 4-byte VLAN tag + * for processing by upper layers. Tx/Rx Checksum offloading is not + * supported. Tx Scatter/Gather is not supported. Jumbo Frames is + * not supported (hardware limitation). + * + * MagicPacket(tm) WoL support is enabled/disabled via ethtool. + * + * Thanks to JC (jchapman@katalix.com) for helping with + * testing/troubleshooting the development driver. + * + * TODO: + * o several entry points race with dev->close + * o check for tx-no-resources/stop Q races with tx clean/wake Q + * + * FIXES: + * 2005/12/02 - Michael O'Donnell + * - Stratus87247: protect MDI control register manipulations + * 2009/06/01 - Andreas Mohr + * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// EtherCAT includes +#include "../globals.h" +#include "ecdev.h" + +#define DRV_NAME "ec_e100" +#define DRV_EXT "-NAPI" +#define DRV_VERSION "3.5.24-k2"DRV_EXT +#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" +#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" + +#define E100_WATCHDOG_PERIOD (2 * HZ) +#define E100_NAPI_WEIGHT 16 + +#define FIRMWARE_D101M "e100/d101m_ucode.bin" +#define FIRMWARE_D101S "e100/d101s_ucode.bin" +#define FIRMWARE_D102E "e100/d102e_ucode.bin" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +MODULE_FIRMWARE(FIRMWARE_D101M); +MODULE_FIRMWARE(FIRMWARE_D101S); +MODULE_FIRMWARE(FIRMWARE_D102E); + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR("Florian Pose "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION ", master " EC_MASTER_VERSION); + +void e100_ec_poll(struct net_device *); + +static int debug = 3; +static int eeprom_bad_csum_allow = 0; +static int use_io = 0; +module_param(debug, int, 0); +module_param(eeprom_bad_csum_allow, int, 0); +module_param(use_io, int, 0); +MODULE_PARM_DESC(debug, "Debug level 
(0=none,...,16=all)"); +MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); +MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); + +#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ + PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ + PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } +static const struct pci_device_id e100_id_table[] = { + INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1032, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1033, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1034, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1038, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1039, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103A, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103B, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103C, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103D, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103E, 4), + INTEL_8255X_ETHERNET_DEVICE(0x1050, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1051, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1052, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1053, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1054, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1055, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1056, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1057, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1059, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1064, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1065, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1066, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1067, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1068, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), + INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), + INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), + INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), + INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), + INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), + { 0, } +}; + +// prevent from being loaded automatically +//MODULE_DEVICE_TABLE(pci, e100_id_table); + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 0xFF, +}; + +enum phy { + phy_100a = 0x000003E0, + phy_100c = 0x035002A8, + phy_82555_tx = 0x015002A8, + phy_nsc_tx = 0x5C002000, + phy_82562_et = 0x033002A8, + phy_82562_em = 0x032002A8, + phy_82562_ek = 0x031002A8, + phy_82562_eh = 0x017002A8, + phy_82552_v = 0xd061004d, + phy_unknown = 0xFFFFFFFF, +}; + +/* CSR (Control/Status Registers) */ +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 0x08, + rus_ready = 0x10, + rus_mask = 0x3C, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0x00, + stat_ack_sw_gen = 0x04, + stat_ack_rnr = 0x10, + stat_ack_cu_idle = 0x20, + stat_ack_frame_rx = 0x40, + stat_ack_cu_cmd_done = 0x80, + stat_ack_not_present = 0xFF, + stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | 
stat_ack_frame_rx), + stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), +}; + +enum scb_cmd_hi { + irq_mask_none = 0x00, + irq_mask_all = 0x01, + irq_sw_gen = 0x02, +}; + +enum scb_cmd_lo { + cuc_nop = 0x00, + ruc_start = 0x01, + ruc_load_base = 0x06, + cuc_start = 0x10, + cuc_resume = 0x20, + cuc_dump_addr = 0x40, + cuc_dump_stats = 0x50, + cuc_load_base = 0x60, + cuc_dump_reset = 0x70, +}; + +enum cuc_dump { + cuc_dump_complete = 0x0000A005, + cuc_dump_reset_complete = 0x0000A007, +}; + +enum port { + software_reset = 0x0000, + selftest = 0x0001, + selective_reset = 0x0002, +}; + +enum eeprom_ctrl_lo { + eesk = 0x01, + eecs = 0x02, + eedi = 0x04, + eedo = 0x08, +}; + +enum mdi_ctrl { + mdi_write = 0x04000000, + mdi_read = 0x08000000, + mdi_ready = 0x10000000, +}; + +enum eeprom_op { + op_write = 0x05, + op_read = 0x06, + op_ewds = 0x10, + op_ewen = 0x13, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 0x03, + eeprom_phy_iface = 0x06, + eeprom_id = 0x0A, + eeprom_config_asf = 0x0D, + eeprom_smbus_addr = 0x90, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 0x0080, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB, + I82553C, + I82503, + DP83840, + S80C240, + S80C24, + I82555, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 0x0020, +}; + +enum eeprom_config_asf { + eeprom_asf = 0x8000, + eeprom_gcl = 0x4000, +}; + +enum cb_status { + cb_complete = 0x8000, + cb_ok = 0x2000, +}; + +/** + * cb_command - Command Block flags + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory + */ +enum cb_command { + cb_nop = 0x0000, + cb_iaaddr = 0x0001, + cb_config = 0x0002, + cb_multi = 0x0003, + cb_tx = 0x0004, + cb_ucode = 0x0005, + cb_dump = 0x0006, + cb_tx_sf = 0x0008, + cb_tx_nc = 0x0010, + cb_cid = 0x1f00, + cb_i = 0x2000, + cb_s = 0x4000, + cb_el = 0x8000, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next, *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +#if defined(__BIG_ENDIAN_BITFIELD) +#define X(a,b) b,a +#else +#define X(a,b) a,b +#endif +struct config { +/*0*/ u8 X(byte_count:6, pad0:2); +/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); +/*2*/ u8 adaptive_ifs; +/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), + term_write_cache_line:1), pad3:4); +/*4*/ u8 X(rx_dma_max_count:7, pad4:1); +/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); +/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), + tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), + rx_save_overruns : 1), rx_save_bad_frames : 1); +/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), + pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), + tx_dynamic_tbd:1); +/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); +/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), + link_status_wake:1), arp_wake:1), mcmatch_wake:1); +/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), + loopback:2); +/*11*/ u8 X(linear_priority:3, pad11:5); +/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); +/*13*/ u8 ip_addr_lo; +/*14*/ u8 ip_addr_hi; +/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), + wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), + pad15_2:1), crs_or_cdt:1); +/*16*/ u8 fc_delay_lo; +/*17*/ u8 fc_delay_hi; +/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), + rx_long_ok:1), fc_priority_threshold:3), pad18:1); 
+/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), + fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), + full_duplex_force:1), full_duplex_pin:1); +/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1); +/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); +/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); + u8 pad_d102[9]; +}; + +#define E100_MAX_MULTICAST_ADDRS 64 +struct multi { + __le16 count; + u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; +}; + +/* Important: keep total struct u32-aligned */ +#define UCODE_SIZE 134 +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[ETH_ALEN]; + __le32 ucode[UCODE_SIZE]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next, *prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, lb_mac = 1, lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames, tx_max_collisions, tx_late_collisions, + tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, + tx_multiple_collisions, tx_total_collisions; + __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors, + rx_resource_errors, rx_overrun_errors, rx_cdt_errors, + rx_short_frame_errors; + __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; + __le16 xmt_tco_frames, rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + /* Begin: frequently used values: keep adjacent for cache effect */ + u32 msg_enable ____cacheline_aligned; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data); + + struct rx *rxs ____cacheline_aligned; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + + spinlock_t cb_lock ____cacheline_aligned; + spinlock_t cmd_lock; + struct csr __iomem *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + /* End: frequently used values: keep adjacent for cache effect */ + + enum { + ich = (1 << 0), + promiscuous = (1 << 1), + multicast_all = (1 << 2), + wol_magic = (1 << 3), + ich_10h_workaround = (1 << 4), + } flags ____cacheline_aligned; + + enum mac mac; + enum phy phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + + struct mem *mem; + dma_addr_t dma_addr; + + struct dma_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + + u16 eeprom_wc; + + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +static inline void e100_write_flush(struct nic *nic) +{ + /* 
Flush previous PCI writes through intermediate bridges + * by doing a benign read */ + (void)ioread8(&nic->csr->scb.status); +} + +static void e100_enable_irq(struct nic *nic) +{ + unsigned long flags; + + if (nic->ecdev) + return; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_disable_irq(struct nic *nic) +{ + unsigned long flags = 0; + + if (!nic->ecdev) + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_hw_reset(struct nic *nic) +{ + /* Put CU and RU into idle with a selective reset to get + * device off of PCI bus */ + iowrite32(selective_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Now fully reset device */ + iowrite32(software_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Mask off our interrupt line - it's unmasked after reset */ + e100_disable_irq(nic); +} + +static int e100_self_test(struct nic *nic) +{ + u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); + + /* Passing the self-test is a pretty good indication + * that the device can DMA to/from host memory */ + + nic->mem->selftest.signature = 0; + nic->mem->selftest.result = 0xFFFFFFFF; + + iowrite32(selftest | dma_addr, &nic->csr->port); + e100_write_flush(nic); + /* Wait 10 msec for self-test to complete */ + msleep(10); + + /* Interrupts are enabled after self-test */ + e100_disable_irq(nic); + + /* Check results of self-test */ + if (nic->mem->selftest.result != 0) { + netif_err(nic, hw, nic->netdev, + "Self-test failed: result=0x%08X\n", + nic->mem->selftest.result); + return -ETIMEDOUT; + } + if (nic->mem->selftest.signature == 0) { + netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data) +{ + u32 cmd_addr_data[3]; + u8 ctrl; + int i, j; + + /* Three cmds: write/erase enable, write data, write/erase disable */ + cmd_addr_data[0] = op_ewen << (addr_len - 2); + cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | + le16_to_cpu(data); + cmd_addr_data[2] = op_ewds << (addr_len - 2); + + /* Bit-bang cmds to write word to eeprom */ + for (j = 0; j < 3; j++) { + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data[j] & (1 << i)) ? + eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } + /* Wait 10 msec for cmd to complete */ + msleep(10); + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } +}; + +/* General technique stolen from the eepro100 driver - very clever */ +static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) +{ + u32 cmd_addr_data; + u16 data = 0; + u8 ctrl; + int i; + + cmd_addr_data = ((op_read << *addr_len) | addr) << 16; + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Bit-bang to read word from eeprom */ + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data & (1 << i)) ? 
eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Eeprom drives a dummy zero to EEDO after receiving + * complete address. Use this to adjust addr_len. */ + ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); + if (!(ctrl & eedo) && i > 16) { + *addr_len -= (i - 16); + i = 17; + } + + data = (data << 1) | (ctrl & eedo ? 1 : 0); + } + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + return cpu_to_le16(data); +}; + +/* Load entire EEPROM image into driver cache and validate checksum */ +static int e100_eeprom_load(struct nic *nic) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + for (addr = 0; addr < nic->eeprom_wc; addr++) { + nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); + if (addr < nic->eeprom_wc - 1) + checksum += le16_to_cpu(nic->eeprom[addr]); + } + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { + netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); + if (!eeprom_bad_csum_allow) + return -EAGAIN; + } + + return 0; +} + +/* Save (portion of) driver EEPROM cache to device and update checksum */ +static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + if (start + count >= nic->eeprom_wc) + return -EINVAL; + + for (addr = start; addr < start + count; addr++) + e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + for (addr = 0; addr < nic->eeprom_wc - 1; addr++) + checksum += le16_to_cpu(nic->eeprom[addr]); + nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); + e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, + nic->eeprom[nic->eeprom_wc - 1]); + + return 0; +} + +#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! 
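+ * (20000 polls, each followed by a 5 us delay once the first
+ * E100_WAIT_SCB_FAST iterations have passed; see e100_exec_cmd())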
*/ +#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ +static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) +{ + unsigned long flags = 0; + unsigned int i; + int err = 0; + + if (!nic->ecdev) + spin_lock_irqsave(&nic->cmd_lock, flags); + + /* Previous command is accepted when SCB clears */ + for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { + if (likely(!ioread8(&nic->csr->scb.cmd_lo))) + break; + cpu_relax(); + if (unlikely(i > E100_WAIT_SCB_FAST)) + udelay(5); + } + if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) { + err = -EAGAIN; + goto err_unlock; + } + + if (unlikely(cmd != cuc_resume)) + iowrite32(dma_addr, &nic->csr->scb.gen_ptr); + iowrite8(cmd, &nic->csr->scb.cmd_lo); + +err_unlock: + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->cmd_lock, flags); + + return err; +} + +static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, + int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) +{ + struct cb *cb; + unsigned long flags; + int err; + + if (!nic->ecdev) { + spin_lock_irqsave(&nic->cb_lock, flags); + } + + if (unlikely(!nic->cbs_avail)) { + err = -ENOMEM; + goto err_unlock; + } + + cb = nic->cb_to_use; + nic->cb_to_use = cb->next; + nic->cbs_avail--; + cb->skb = skb; + + err = cb_prepare(nic, cb, skb); + if (err) + goto err_unlock; + + if (unlikely(!nic->cbs_avail)) + err = -ENOSPC; + + + /* Order is important otherwise we'll be in a race with h/w: + * set S-bit in current first, then clear S-bit in previous. */ + cb->command |= cpu_to_le16(cb_s); + dma_wmb(); + cb->prev->command &= cpu_to_le16(~cb_s); + + while (nic->cb_to_send != nic->cb_to_use) { + if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, + nic->cb_to_send->dma_addr))) { + /* Ok, here's where things get sticky. It's + * possible that we can't schedule the command + * because the controller is too busy, so + * let's just queue the command and try again + * when another command is scheduled. */ + if (err == -ENOSPC) { + //request a reset + schedule_work(&nic->tx_timeout_task); + } + break; + } else { + nic->cuc_cmd = cuc_resume; + nic->cb_to_send = nic->cb_to_send->next; + } + } + +err_unlock: + if (!nic->ecdev) { + spin_unlock_irqrestore(&nic->cb_lock, flags); + } + + return err; +} + +static int mdio_read(struct net_device *netdev, int addr, int reg) +{ + struct nic *nic = netdev_priv(netdev); + return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); +} + +static void mdio_write(struct net_device *netdev, int addr, int reg, int data) +{ + struct nic *nic = netdev_priv(netdev); + + nic->mdio_ctrl(nic, addr, mdi_write, reg, data); +} + +/* the standard mdio_ctrl() function for usual MII-compliant hardware */ +static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) +{ + u32 data_out = 0; + unsigned int i; + unsigned long flags = 0; + + + /* + * Stratus87247: we shouldn't be writing the MDI control + * register until the Ready bit shows True. Also, since + * manipulation of the MDI control registers is a multi-step + * procedure it should be done under lock. 
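+	 * The polling loop below gives the Ready bit up to 100 tries with a
+	 * 20 usec delay each, i.e. roughly 2 ms, before giving up.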
+ */ + if (!nic->ecdev) + spin_lock_irqsave(&nic->mdio_lock, flags); + for (i = 100; i; --i) { + if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) + break; + udelay(20); + } + if (unlikely(!i)) { + netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->mdio_lock, flags); + return 0; /* No way to indicate timeout error */ + } + iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); + + for (i = 0; i < 100; i++) { + udelay(20); + if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) + break; + } + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->mdio_lock, flags); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data, data_out); + return (u16)data_out; +} + +/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */ +static u16 mdio_ctrl_phy_82552_v(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + if ((reg == MII_BMCR) && (dir == mdi_write)) { + if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) { + u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, + MII_ADVERTISE); + + /* + * Workaround Si issue where sometimes the part will not + * autoneg to 100Mbps even when advertised. + */ + if (advert & ADVERTISE_100FULL) + data |= BMCR_SPEED100 | BMCR_FULLDPLX; + else if (advert & ADVERTISE_100HALF) + data |= BMCR_SPEED100; + } + } + return mdio_ctrl_hw(nic, addr, dir, reg, data); +} + +/* Fully software-emulated mdio_ctrl() function for cards without + * MII-compliant PHYs. + * For now, this is mainly geared towards 80c24 support; in case of further + * requirements for other types (i82503, ...?) either extend this mechanism + * or split it, whichever is cleaner. + */ +static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + /* might need to allocate a netdev_priv'ed register array eventually + * to be able to record state changes, but for now + * some fully hardcoded register handling ought to be ok I guess. */ + + if (dir == mdi_read) { + switch (reg) { + case MII_BMCR: + /* Auto-negotiation, right? */ + return BMCR_ANENABLE | + BMCR_FULLDPLX; + case MII_BMSR: + return BMSR_LSTATUS /* for mii_link_ok() */ | + BMSR_ANEGCAPABLE | + BMSR_10FULL; + case MII_ADVERTISE: + /* 80c24 is a "combo card" PHY, right? */ + return ADVERTISE_10HALF | + ADVERTISE_10FULL; + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } else { + switch (reg) { + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } +} +static inline int e100_phy_supports_mii(struct nic *nic) +{ + /* for now, just check it by comparing whether we + are using MII software emulation. + */ + return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); +} + +static void e100_get_defaults(struct nic *nic) +{ + struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; + struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; + + /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ + nic->mac = (nic->flags & ich) ? 
mac_82559_D101M : nic->pdev->revision; + if (nic->mac == mac_unknown) + nic->mac = mac_82557_D100_A; + + nic->params.rfds = rfds; + nic->params.cbs = cbs; + + /* Quadwords to DMA into FIFO before starting frame transmit */ + nic->tx_threshold = 0xE0; + + /* no interrupt for every tx completion, delay = 256us if not 557 */ + nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | + ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); + + /* Template for a freshly allocated RFD */ + nic->blank_rfd.command = 0; + nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); + nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); + + /* MII setup */ + nic->mii.phy_id_mask = 0x1F; + nic->mii.reg_num_mask = 0x1F; + nic->mii.dev = nic->netdev; + nic->mii.mdio_read = mdio_read; + nic->mii.mdio_write = mdio_write; +} + +static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct config *config = &cb->u.config; + u8 *c = (u8 *)config; + struct net_device *netdev = nic->netdev; + + cb->command = cpu_to_le16(cb_config); + + memset(config, 0, sizeof(struct config)); + + config->byte_count = 0x16; /* bytes in this struct */ + config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ + config->direct_rx_dma = 0x1; /* reserved */ + config->standard_tcb = 0x1; /* 1=standard, 0=extended */ + config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ + config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ + config->tx_underrun_retry = 0x3; /* # of underrun retries */ + if (e100_phy_supports_mii(nic)) + config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ + config->pad10 = 0x6; + config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ + config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ + config->ifs = 0x6; /* x16 = inter frame spacing */ + config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ + config->pad15_1 = 0x1; + config->pad15_2 = 0x1; + config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ + config->fc_delay_hi = 0x40; /* time delay for fc frame */ + config->tx_padding = 0x1; /* 1=pad short frames */ + config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ + config->pad18 = 0x1; + config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ + config->pad20_1 = 0x1F; + config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ + config->pad21_1 = 0x5; + + config->adaptive_ifs = nic->adaptive_ifs; + config->loopback = nic->loopback; + + if (nic->mii.force_media && nic->mii.full_duplex) + config->full_duplex_force = 0x1; /* 1=force, 0=auto */ + + if (nic->flags & promiscuous || nic->loopback) { + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + config->promiscuous_mode = 0x1; /* 1=on, 0=off */ + } + + if (unlikely(netdev->features & NETIF_F_RXFCS)) + config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ + + if (nic->flags & multicast_all) + config->multicast_all = 0x1; /* 1=accept, 0=no */ + + /* disable WoL when up */ + if (nic->ecdev || + (netif_running(nic->netdev) || !(nic->flags & wol_magic))) + config->magic_packet_disable = 0x1; /* 1=off, 0=on */ + + if (nic->mac >= mac_82558_D101_A4) { + config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ + config->mwi_enable = 0x1; /* 1=enable, 0=disable */ + config->standard_tcb = 0x0; /* 1=standard, 0=extended */ + config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ + if (nic->mac >= mac_82559_D101M) { + config->tno_intr = 0x1; /* TCO stats enable */ + /* Enable TCO in extended config */ + if (nic->mac >= 
mac_82551_10) { + config->byte_count = 0x20; /* extended bytes */ + config->rx_d102_mode = 0x1; /* GMRC for TCO */ + } + } else { + config->standard_stat_counter = 0x0; + } + } + + if (netdev->features & NETIF_F_RXALL) { + config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + } + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", + c + 0); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", + c + 8); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", + c + 16); + return 0; +} + +/************************************************************************* +* CPUSaver parameters +* +* All CPUSaver parameters are 16-bit literals that are part of a +* "move immediate value" instruction. By changing the value of +* the literal in the instruction before the code is loaded, the +* driver can change the algorithm. +* +* INTDELAY - This loads the dead-man timer with its initial value. +* When this timer expires the interrupt is asserted, and the +* timer is reset each time a new packet is received. (see +* BUNDLEMAX below to set the limit on number of chained packets) +* The current default is 0x600 or 1536. Experiments show that +* the value should probably stay within the 0x200 - 0x1000. +* +* BUNDLEMAX - +* This sets the maximum number of frames that will be bundled. In +* some situations, such as the TCP windowing algorithm, it may be +* better to limit the growth of the bundle size than let it go as +* high as it can, because that could cause too much added latency. +* The default is six, because this is the number of packets in the +* default TCP window size. A value of 1 would make CPUSaver indicate +* an interrupt for every frame received. If you do not want to put +* a limit on the bundle size, set this value to xFFFF. +* +* BUNDLESMALL - +* This contains a bit-mask describing the minimum size frame that +* will be bundled. The default masks the lower 7 bits, which means +* that any frame less than 128 bytes in length will not be bundled, +* but will instead immediately generate an interrupt. This does +* not affect the current bundle in any way. Any frame that is 128 +* bytes or large will be bundled normally. This feature is meant +* to provide immediate indication of ACK frames in a TCP environment. +* Customers were seeing poor performance when a machine with CPUSaver +* enabled was sending but not receiving. The delay introduced when +* the ACKs were received was enough to reduce total throughput, because +* the sender would sit idle until the ACK was finally seen. +* +* The current default is 0xFF80, which masks out the lower 7 bits. +* This means that any frame which is x7F (127) bytes or smaller +* will cause an immediate interrupt. Because this value must be a +* bit mask, there are only a few valid values that can be used. To +* turn this feature off, the driver can write the value xFFFF to the +* lower word of this instruction (in the same way that the other +* parameters are used). Likewise, a value of 0xF800 (2047) would +* cause an interrupt to be generated for every frame, because all +* standard Ethernet frames are <= 2047 bytes in length. 
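+*
+* With the defaults defined below (INTDELAY 0x600, BUNDLEMAX 6, and
+* BUNDLESMALL set so that the mask is written as 0xFFFF), an interrupt
+* is raised once six frames have been bundled, or when the dead-man
+* timer (reset by every newly received frame) expires; small frames
+* are bundled like any others instead of interrupting immediately.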
+*************************************************************************/ + +/* if you wish to disable the ucode functionality, while maintaining the + * workarounds it provides, set the following defines to: + * BUNDLESMALL 0 + * BUNDLEMAX 1 + * INTDELAY 1 + */ +#define BUNDLESMALL 1 +#define BUNDLEMAX (u16)6 +#define INTDELAY (u16)1536 /* 0x600 */ + +/* Initialize firmware */ +static const struct firmware *e100_request_firmware(struct nic *nic) +{ + const char *fw_name; + const struct firmware *fw = nic->fw; + u8 timer, bundle, min_size; + int err = 0; + bool required = false; + + /* do not load u-code for ICH devices */ + if (nic->flags & ich) + return NULL; + + /* Search for ucode match against h/w revision + * + * Based on comments in the source code for the FreeBSD fxp + * driver, the FIRMWARE_D102E ucode includes both CPUSaver and + * + * "fixes for bugs in the B-step hardware (specifically, bugs + * with Inline Receive)." + * + * So we must fail if it cannot be loaded. + * + * The other microcode files are only required for the optional + * CPUSaver feature. Nice to have, but no reason to fail. + */ + if (nic->mac == mac_82559_D101M) { + fw_name = FIRMWARE_D101M; + } else if (nic->mac == mac_82559_D101S) { + fw_name = FIRMWARE_D101S; + } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { + fw_name = FIRMWARE_D102E; + required = true; + } else { /* No ucode on other devices */ + return NULL; + } + + /* If the firmware has not previously been loaded, request a pointer + * to it. If it was previously loaded, we are reinitializing the + * adapter, possibly in a resume from hibernate, in which case + * request_firmware() cannot be used. + */ + if (!fw) + err = request_firmware(&fw, fw_name, &nic->pdev->dev); + + if (err) { + if (required) { + netif_err(nic, probe, nic->netdev, + "Failed to load firmware \"%s\": %d\n", + fw_name, err); + return ERR_PTR(err); + } else { + netif_info(nic, probe, nic->netdev, + "CPUSaver disabled. Needs \"%s\": %d\n", + fw_name, err); + return NULL; + } + } + + /* Firmware should be precisely UCODE_SIZE (words) plus three bytes + indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ + if (fw->size != UCODE_SIZE * 4 + 3) { + netif_err(nic, probe, nic->netdev, + "Firmware \"%s\" has wrong size %zu\n", + fw_name, fw->size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || + min_size >= UCODE_SIZE) { + netif_err(nic, probe, nic->netdev, + "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", + fw_name, timer, bundle, min_size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* OK, firmware is validated and ready to use. Save a pointer + * to it in the nic */ + nic->fw = fw; + return fw; +} + +static int e100_setup_ucode(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + const struct firmware *fw = (void *)skb; + u8 timer, bundle, min_size; + + /* It's not a real skb; we just abused the fact that e100_exec_cb + will pass it through to here... 
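+	   (cb->skb is cleared right below, so the Tx clean path never tries
+	   to free the firmware blob as if it were a real skb.)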
*/ + cb->skb = NULL; + + /* firmware is stored as little endian already */ + memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + /* Insert user-tunable settings in cb->u.ucode */ + cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); + cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); + cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); + + cb->command = cpu_to_le16(cb_ucode | cb_el); + return 0; +} + +static inline int e100_load_ucode_wait(struct nic *nic) +{ + const struct firmware *fw; + int err = 0, counter = 50; + struct cb *cb = nic->cb_to_clean; + + fw = e100_request_firmware(nic); + /* If it's NULL, then no ucode is required */ + if (IS_ERR_OR_NULL(fw)) + return PTR_ERR_OR_ZERO(fw); + + if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) + netif_err(nic, probe, nic->netdev, + "ucode cmd failed with error %d\n", err); + + /* must restart cuc */ + nic->cuc_cmd = cuc_start; + + /* wait for completion */ + e100_write_flush(nic); + udelay(10); + + /* wait for possibly (ouch) 500ms */ + while (!(cb->status & cpu_to_le16(cb_complete))) { + msleep(10); + if (!--counter) break; + } + + /* ack any interrupts, something could have been set */ + iowrite8(~0, &nic->csr->scb.stat_ack); + + /* if the command failed, or is not OK, notify and return */ + if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { + netif_err(nic, probe, nic->netdev, "ucode load failed\n"); + err = -EPERM; + } + + return err; +} + +static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_iaaddr); + memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); + return 0; +} + +static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_dump); + cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + + offsetof(struct mem, dump_buf)); + return 0; +} + +static int e100_phy_check_without_mii(struct nic *nic) +{ + u8 phy_type; + int without_mii; + + phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; + + switch (phy_type) { + case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ + case I82503: /* Non-MII PHY; UNTESTED! */ + case S80C24: /* Non-MII PHY; tested and working */ + /* paragraph from the FreeBSD driver, "FXP_PHY_80C24": + * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter + * doesn't have a programming interface of any sort. The + * media is sensed automatically based on how the link partner + * is configured. This is, in essence, manual configuration. + */ + netif_info(nic, probe, nic->netdev, + "found MII-less i82503 or 80c24 or other PHY\n"); + + nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; + nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ + + /* these might be needed for certain MII-less cards... 
+ * nic->flags |= ich; + * nic->flags |= ich_10h_workaround; */ + + without_mii = 1; + break; + default: + without_mii = 0; + break; + } + return without_mii; +} + +#define NCONFIG_AUTO_SWITCH 0x0080 +#define MII_NSC_CONG MII_RESV1 +#define NSC_CONG_ENABLE 0x0100 +#define NSC_CONG_TXREADY 0x0400 +#define ADVERTISE_FC_SUPPORTED 0x0400 +static int e100_phy_init(struct nic *nic) +{ + struct net_device *netdev = nic->netdev; + u32 addr; + u16 bmcr, stat, id_lo, id_hi, cong; + + /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ + for (addr = 0; addr < 32; addr++) { + nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) + break; + } + if (addr == 32) { + /* uhoh, no PHY detected: check whether we seem to be some + * weird, rare variant which is *known* to not have any MII. + * But do this AFTER MII checking only, since this does + * lookup of EEPROM values which may easily be unreliable. */ + if (e100_phy_check_without_mii(nic)) + return 0; /* simply return and hope for the best */ + else { + /* for unknown cases log a fatal error */ + netif_err(nic, hw, nic->netdev, + "Failed to locate any known PHY, aborting\n"); + return -EAGAIN; + } + } else + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy_addr = %d\n", nic->mii.phy_id); + + /* Get phy ID */ + id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); + id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); + nic->phy = (u32)id_hi << 16 | (u32)id_lo; + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy ID = 0x%08X\n", nic->phy); + + /* Select the phy and isolate the rest */ + for (addr = 0; addr < 32; addr++) { + if (addr != nic->mii.phy_id) { + mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); + } else if (nic->phy != phy_82552_v) { + bmcr = mdio_read(netdev, addr, MII_BMCR); + mdio_write(netdev, addr, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } + } + /* + * Workaround for 82552: + * Clear the ISOLATE bit on selected phy_id last (mirrored on all + * other phy_id's) using bmcr value from addr discovery loop above. + */ + if (nic->phy == phy_82552_v) + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* Handle National tx phys */ +#define NCS_PHY_MODEL_MASK 0xFFF0FFFF + if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { + /* Disable congestion control */ + cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); + cong |= NSC_CONG_TXREADY; + cong &= ~NSC_CONG_ENABLE; + mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); + } + + if (nic->phy == phy_82552_v) { + u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); + + /* assign special tweaked mdio_ctrl() function */ + nic->mdio_ctrl = mdio_ctrl_phy_82552_v; + + /* Workaround Si not advertising flow-control during autoneg */ + advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); + + /* Reset for the above changes to take effect */ + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + bmcr |= BMCR_RESET; + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); + } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && + (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && + (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { + /* enable/disable MDI/MDI-X auto-switching. 
*/ + mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, + nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); + } + + return 0; +} + +static int e100_hw_init(struct nic *nic) +{ + int err = 0; + + e100_hw_reset(nic); + + netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); + if (!in_interrupt() && (err = e100_self_test(nic))) + return err; + + if ((err = e100_phy_init(nic))) + return err; + if ((err = e100_exec_cmd(nic, cuc_load_base, 0))) + return err; + if ((err = e100_exec_cmd(nic, ruc_load_base, 0))) + return err; + if ((err = e100_load_ucode_wait(nic))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_configure))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_addr, + nic->dma_addr + offsetof(struct mem, stats)))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0))) + return err; + + e100_disable_irq(nic); + + return 0; +} + +static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct net_device *netdev = nic->netdev; + struct netdev_hw_addr *ha; + u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); + + cb->command = cpu_to_le16(cb_multi); + cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == count) + break; + memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, + ETH_ALEN); + } + return 0; +} + +static void e100_set_multicast_list(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "mc_count=%d, flags=0x%04X\n", + netdev_mc_count(netdev), netdev->flags); + + if (netdev->flags & IFF_PROMISC) + nic->flags |= promiscuous; + else + nic->flags &= ~promiscuous; + + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS) + nic->flags |= multicast_all; + else + nic->flags &= ~multicast_all; + + e100_exec_cb(nic, NULL, e100_configure); + e100_exec_cb(nic, NULL, e100_multi); +} + +static void e100_update_stats(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct net_device_stats *ns = &dev->stats; + struct stats *s = &nic->mem->stats; + __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : + (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : + &s->complete; + + /* Device's stats reporting may take several microseconds to + * complete, so we're always waiting for results of the + * previous command. 
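+	 * The cuc_dump_reset command issued at the end of this function is
+	 * what produces the results picked up by the next call.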
*/ + + if (*complete == cpu_to_le32(cuc_dump_reset_complete)) { + *complete = 0; + nic->tx_frames = le32_to_cpu(s->tx_good_frames); + nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); + ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); + ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); + ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); + ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); + ns->collisions += nic->tx_collisions; + ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + + le32_to_cpu(s->tx_lost_crs); + nic->rx_short_frame_errors += + le32_to_cpu(s->rx_short_frame_errors); + ns->rx_length_errors = nic->rx_short_frame_errors + + nic->rx_over_length_errors; + ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); + ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); + ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); + ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + + le32_to_cpu(s->rx_alignment_errors) + + le32_to_cpu(s->rx_short_frame_errors) + + le32_to_cpu(s->rx_cdt_errors); + nic->tx_deferred += le32_to_cpu(s->tx_deferred); + nic->tx_single_collisions += + le32_to_cpu(s->tx_single_collisions); + nic->tx_multiple_collisions += + le32_to_cpu(s->tx_multiple_collisions); + if (nic->mac >= mac_82558_D101_A4) { + nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); + nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); + nic->rx_fc_unsupported += + le32_to_cpu(s->fc_rcv_unsupported); + if (nic->mac >= mac_82559_D101M) { + nic->tx_tco_frames += + le16_to_cpu(s->xmt_tco_frames); + nic->rx_tco_frames += + le16_to_cpu(s->rcv_tco_frames); + } + } + } + + + if (e100_exec_cmd(nic, cuc_dump_reset, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_dump_reset failed\n"); +} + +static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) +{ + /* Adjust inter-frame-spacing (IFS) between two transmits if + * we're getting collisions on a half-duplex connection. */ + + if (duplex == DUPLEX_HALF) { + u32 prev = nic->adaptive_ifs; + u32 min_frames = (speed == SPEED_100) ? 1000 : 100; + + if ((nic->tx_frames / 32 < nic->tx_collisions) && + (nic->tx_frames > min_frames)) { + if (nic->adaptive_ifs < 60) + nic->adaptive_ifs += 5; + } else if (nic->tx_frames < min_frames) { + if (nic->adaptive_ifs >= 5) + nic->adaptive_ifs -= 5; + } + if (nic->adaptive_ifs != prev) + e100_exec_cb(nic, NULL, e100_configure); + } +} + +static void e100_watchdog_impl(struct nic *nic) +{ + struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; + u32 speed; + + if (nic->ecdev) { + ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0); + return; + } + + netif_printk(nic, timer, KERN_DEBUG, nic->netdev, + "right now = %ld\n", jiffies); + + /* mii library handles link maintenance tasks */ + + mii_ethtool_gset(&nic->mii, &cmd); + speed = ethtool_cmd_speed(&cmd); + + if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", + speed == SPEED_100 ? 100 : 10, + cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); + } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Down\n"); + } + + mii_check_link(&nic->mii); + + /* Software generated interrupt to recover from (rare) Rx + * allocation failure. 
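+	 * Raising the SW interrupt makes the ISR schedule NAPI, whose Rx
+	 * clean path retries the skb refill that failed earlier.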
+ * Unfortunately have to use a spinlock to not re-enable interrupts + * accidentally, due to hardware that shares a register between the + * interrupt mask bit and the SW Interrupt generation bit */ + spin_lock_irq(&nic->cmd_lock); + iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irq(&nic->cmd_lock); + + e100_update_stats(nic); + e100_adjust_adaptive_ifs(nic, speed, cmd.duplex); + + if (nic->mac <= mac_82557_D100_C) + /* Issue a multicast command to workaround a 557 lock up */ + e100_set_multicast_list(nic->netdev); + + if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) + /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ + nic->flags |= ich_10h_workaround; + else + nic->flags &= ~ich_10h_workaround; +} + +static void e100_watchdog(struct timer_list *t) +{ + struct nic *nic = from_timer(nic, t, watchdog); + + e100_watchdog_impl(nic); + + mod_timer(&nic->watchdog, + round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); +} + +static int e100_xmit_prepare(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + dma_addr_t dma_addr; + cb->command = nic->tx_command; + + dma_addr = pci_map_single(nic->pdev, + skb->data, skb->len, PCI_DMA_TODEVICE); + /* If we can't map the skb, have the upper layer try later */ + if (pci_dma_mapping_error(nic->pdev, dma_addr)) { + dev_kfree_skb_any(skb); + skb = NULL; + return -ENOMEM; + } + + /* + * Use the last 4 bytes of the SKB payload packet as the CRC, used for + * testing, ie sending frames with bad CRC. + */ + if (unlikely(skb->no_fcs)) + cb->command |= cpu_to_le16(cb_tx_nc); + else + cb->command &= ~cpu_to_le16(cb_tx_nc); + + /* interrupt every 16 packets regardless of delay */ + if ((nic->cbs_avail & ~15) == nic->cbs_avail) + cb->command |= cpu_to_le16(cb_i); + cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); + cb->u.tcb.tcb_byte_count = 0; + cb->u.tcb.threshold = nic->tx_threshold; + cb->u.tcb.tbd_count = 1; + cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); + cb->u.tcb.tbd.size = cpu_to_le16(skb->len); + skb_tx_timestamp(skb); + return 0; +} + +static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + if (nic->flags & ich_10h_workaround) { + /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. + Issue a NOP command followed by a 1us delay before + issuing the Tx command. */ + if (e100_exec_cmd(nic, cuc_nop, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_nop failed\n"); + udelay(1); + } + + err = e100_exec_cb(nic, skb, e100_xmit_prepare); + + switch (err) { + case -ENOSPC: + /* We queued the skb, but now we're out of space. */ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "No space for CB\n"); + if (!nic->ecdev) + netif_stop_queue(netdev); + break; + case -ENOMEM: + /* This is a hard error - log it. 
*/ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "Out of Tx resources, returning skb\n"); + if (!nic->ecdev) + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +static int e100_tx_clean(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct cb *cb; + int tx_cleaned = 0; + + if (!nic->ecdev) + spin_lock(&nic->cb_lock); + + /* Clean CBs marked complete */ + for (cb = nic->cb_to_clean; + cb->status & cpu_to_le16(cb_complete); + cb = nic->cb_to_clean = cb->next) { + dma_rmb(); /* read skb after status */ + netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, + "cb[%d]->status = 0x%04X\n", + (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), + cb->status); + + if (likely(cb->skb != NULL)) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += cb->skb->len; + + pci_unmap_single(nic->pdev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + PCI_DMA_TODEVICE); + if (!nic->ecdev) + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + tx_cleaned = 1; + } + cb->status = 0; + nic->cbs_avail++; + } + + if (!nic->ecdev) { + spin_unlock(&nic->cb_lock); + + /* Recover from running out of Tx resources in xmit_frame */ + if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) + netif_wake_queue(nic->netdev); + } + + return tx_cleaned; +} + +static void e100_clean_cbs(struct nic *nic) +{ + if (nic->cbs) { + while (nic->cbs_avail != nic->params.cbs.count) { + struct cb *cb = nic->cb_to_clean; + if (cb->skb) { + pci_unmap_single(nic->pdev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + PCI_DMA_TODEVICE); + if (!nic->ecdev) + dev_kfree_skb(cb->skb); + } + nic->cb_to_clean = nic->cb_to_clean->next; + nic->cbs_avail++; + } + dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); + nic->cbs = NULL; + nic->cbs_avail = 0; + } + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = + nic->cbs; +} + +static int e100_alloc_cbs(struct nic *nic) +{ + struct cb *cb; + unsigned int i, count = nic->params.cbs.count; + + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; + nic->cbs_avail = 0; + + nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL, + &nic->cbs_dma_addr); + if (!nic->cbs) + return -ENOMEM; + + for (cb = nic->cbs, i = 0; i < count; cb++, i++) { + cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; + cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; + + cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); + cb->link = cpu_to_le32(nic->cbs_dma_addr + + ((i+1) % count) * sizeof(struct cb)); + } + + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; + nic->cbs_avail = count; + + return 0; +} + +static inline void e100_start_receiver(struct nic *nic, struct rx *rx) +{ + if (!nic->rxs) return; + if (RU_SUSPENDED != nic->ru_running) return; + + /* handle init time starts */ + if (!rx) rx = nic->rxs; + + /* (Re)start RU if suspended or idle and RFA is non-NULL */ + if (rx->skb) { + e100_exec_cmd(nic, ruc_start, rx->dma_addr); + nic->ru_running = RU_RUNNING; + } +} + +#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) +{ + if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) + return -ENOMEM; + + /* Init, and map the RFD. 
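+	 * The blank_rfd template set up in e100_get_defaults() provides a
+	 * cleared command word, an unused RBD pointer and the full buffer
+	 * size.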
*/ + skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) { + dev_kfree_skb_any(rx->skb); + rx->skb = NULL; + rx->dma_addr = 0; + return -ENOMEM; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + } + + return 0; +} + +static int e100_rx_indicate(struct nic *nic, struct rx *rx, + unsigned int *work_done, unsigned int work_to_do) +{ + struct net_device *dev = nic->netdev; + struct sk_buff *skb = rx->skb; + struct rfd *rfd = (struct rfd *)skb->data; + u16 rfd_status, actual_size; + u16 fcs_pad = 0; + + if (unlikely(work_done && *work_done >= work_to_do)) + return -EAGAIN; + + /* Need to sync before taking a peek at cb_complete bit */ + pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + rfd_status = le16_to_cpu(rfd->status); + + netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, + "status=0x%04X\n", rfd_status); + dma_rmb(); /* read size after status bit */ + + /* If data isn't ready, nothing to indicate */ + if (unlikely(!(rfd_status & cb_complete))) { + /* If the next buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling + * interrupts */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, + sizeof(struct rfd), + PCI_DMA_FROMDEVICE); + return -ENODATA; + } + + /* Get actual data size */ + if (unlikely(dev->features & NETIF_F_RXFCS)) + fcs_pad = 4; + actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; + if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) + actual_size = RFD_BUF_LEN - sizeof(struct rfd); + + /* Get data */ + pci_unmap_single(nic->pdev, rx->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + /* If this buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling interrupts. + * This can happen when the RU sees the size change but also sees + * the el bit set. */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) { + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + } + + if (!nic->ecdev) { + /* Pull off the RFD and put the actual data (minus eth hdr) */ + skb_reserve(skb, sizeof(struct rfd)); + skb_put(skb, actual_size); + skb->protocol = eth_type_trans(skb, nic->netdev); + } + + /* If we are receiving all frames, then don't bother + * checking for errors. + */ + if (unlikely(dev->features & NETIF_F_RXALL)) { + if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) + /* Received oversized frame, but keep it. 
*/ + nic->rx_over_length_errors++; + goto process_skb; + } + + if (unlikely(!(rfd_status & cb_ok))) { + if (!nic->ecdev) { + /* Don't indicate if hardware indicates errors */ + dev_kfree_skb_any(skb); + } + } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) { + /* Don't indicate oversized frames */ + nic->rx_over_length_errors++; + if (!nic->ecdev) { + dev_kfree_skb_any(skb); + } + } else { +process_skb: + dev->stats.rx_packets++; + dev->stats.rx_bytes += (actual_size - fcs_pad); + if (nic->ecdev) { + ecdev_receive(nic->ecdev, + skb->data + sizeof(struct rfd), actual_size - fcs_pad); + + // No need to detect link status as + // long as frames are received: Reset watchdog. + if (ecdev_get_link(nic->ecdev)) { + nic->ec_watchdog_jiffies = jiffies; + } + } else { + netif_receive_skb(skb); + } + if (work_done) + (*work_done)++; + } + + if (nic->ecdev) { + // make receive frame descriptior usable again + memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = pci_map_single(nic->pdev, skb->data, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) { + rx->dma_addr = 0; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *) rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, + sizeof(struct rfd), PCI_DMA_TODEVICE); + } + } else { + rx->skb = NULL; + } + + return 0; +} + +static void e100_rx_clean(struct nic *nic, unsigned int *work_done, + unsigned int work_to_do) +{ + struct rx *rx; + int restart_required = 0, err = 0; + struct rx *old_before_last_rx, *new_before_last_rx; + struct rfd *old_before_last_rfd, *new_before_last_rfd; + + /* Indicate newly arrived packets */ + for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { + err = e100_rx_indicate(nic, rx, work_done, work_to_do); + /* Hit quota or no more to clean */ + if (-EAGAIN == err || -ENODATA == err) + break; + } + + + /* On EAGAIN, hit quota so have more work to do, restart once + * cleanup is complete. + * Else, are we already rnr? then pay attention!!! this ensures that + * the state machine progression never allows a start with a + * partially cleaned list, avoiding a race between hardware + * and rx_to_clean when in NAPI mode */ + if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) + restart_required = 1; + + old_before_last_rx = nic->rx_to_use->prev->prev; + old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; + + if (!nic->ecdev) { + /* Alloc new skbs to refill list */ + for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { + if(unlikely(e100_rx_alloc_skb(nic, rx))) + break; /* Better luck next time (see watchdog) */ + } + } + + new_before_last_rx = nic->rx_to_use->prev->prev; + if (new_before_last_rx != old_before_last_rx) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer + * without worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this + * buffer. + * When the hardware hits the before last buffer with el-bit + * and size of 0, it will RNR interrupt, the RUS will go into + * the No Resources state. It will not complete nor write to + * this buffer. 
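+	 * The same stopping point is established at ring-allocation time in
+	 * e100_rx_alloc_list().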
*/ + new_before_last_rfd = + (struct rfd *)new_before_last_rx->skb->data; + new_before_last_rfd->size = 0; + new_before_last_rfd->command |= cpu_to_le16(cb_el); + pci_dma_sync_single_for_device(nic->pdev, + new_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + + /* Now that we have a new stopping point, we can clear the old + * stopping point. We must sync twice to get the proper + * ordering on the hardware side of things. */ + old_before_last_rfd->command &= ~cpu_to_le16(cb_el); + pci_dma_sync_single_for_device(nic->pdev, + old_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + + ETH_FCS_LEN); + pci_dma_sync_single_for_device(nic->pdev, + old_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + } + + if (restart_required) { + // ack the rnr? + iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); + e100_start_receiver(nic, nic->rx_to_clean); + if (work_done) + (*work_done)++; + } +} + +static void e100_rx_clean_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + + nic->ru_running = RU_UNINITIALIZED; + + if (nic->rxs) { + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + if (rx->skb) { + pci_unmap_single(nic->pdev, rx->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + dev_kfree_skb(rx->skb); + } + } + kfree(nic->rxs); + nic->rxs = NULL; + } + + nic->rx_to_use = nic->rx_to_clean = NULL; +} + +static int e100_rx_alloc_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + struct rfd *before_last; + + nic->rx_to_use = nic->rx_to_clean = NULL; + nic->ru_running = RU_UNINITIALIZED; + + if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC))) + return -ENOMEM; + + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; + rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; + if (e100_rx_alloc_skb(nic, rx)) { + e100_rx_clean_list(nic); + return -ENOMEM; + } + } + + if (!nic->ecdev) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer without + * worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this buffer. + * When the hardware hits the before last buffer with el-bit and size + * of 0, it will RNR interrupt, the RU will go into the No Resources + * state. It will not complete nor write to this buffer. 
*/ + rx = nic->rxs->prev->prev; + before_last = (struct rfd *)rx->skb->data; + before_last->command |= cpu_to_le16(cb_el); + before_last->size = 0; + pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + } + + nic->rx_to_use = nic->rx_to_clean = nic->rxs; + nic->ru_running = RU_SUSPENDED; + + return 0; +} + +static irqreturn_t e100_intr(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct nic *nic = netdev_priv(netdev); + u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); + + netif_printk(nic, intr, KERN_DEBUG, nic->netdev, + "stat_ack = 0x%02X\n", stat_ack); + + if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ + stat_ack == stat_ack_not_present) /* Hardware is ejected */ + return IRQ_NONE; + + /* Ack interrupt(s) */ + iowrite8(stat_ack, &nic->csr->scb.stat_ack); + + /* We hit Receive No Resource (RNR); restart RU after cleaning */ + if (stat_ack & stat_ack_rnr) + nic->ru_running = RU_SUSPENDED; + + if (!nic->ecdev && likely(napi_schedule_prep(&nic->napi))) { + e100_disable_irq(nic); + __napi_schedule(&nic->napi); + } + + return IRQ_HANDLED; +} + +void e100_ec_poll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_rx_clean(nic, NULL, 100); + e100_tx_clean(nic); + + if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) { + e100_watchdog_impl(nic); + nic->ec_watchdog_jiffies = jiffies; + } +} + + +static int e100_poll(struct napi_struct *napi, int budget) +{ + struct nic *nic = container_of(napi, struct nic, napi); + unsigned int work_done = 0; + + e100_rx_clean(nic, &work_done, budget); + e100_tx_clean(nic); + + /* If budget fully consumed, continue polling */ + if (work_done == budget) + return budget; + + /* only re-enable interrupt if stack agrees polling is really done */ + if (likely(napi_complete_done(napi, work_done))) + e100_enable_irq(nic); + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void e100_netpoll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_disable_irq(nic); + e100_intr(nic->pdev->irq, netdev); + e100_tx_clean(nic); + e100_enable_irq(nic); +} +#endif + +static int e100_set_mac_address(struct net_device *netdev, void *p) +{ + struct nic *nic = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + e100_exec_cb(nic, NULL, e100_setup_iaaddr); + + return 0; +} + +static int e100_asf(struct nic *nic) +{ + /* ASF can be enabled from eeprom */ + return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && + (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && + !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && + ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); +} + +static int e100_up(struct nic *nic) +{ + int err; + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_rx_clean_list; + if ((err = e100_hw_init(nic))) + goto err_clean_cbs; + e100_set_multicast_list(nic->netdev); + e100_start_receiver(nic, NULL); + if (!nic->ecdev) { + mod_timer(&nic->watchdog, jiffies); + } + if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, + nic->netdev->name, nic->netdev))) + goto err_no_irq; + if (!nic->ecdev) { + netif_wake_queue(nic->netdev); + napi_enable(&nic->napi); + /* enable ints _after_ enabling poll, preventing a race between + * disable ints+schedule */ + e100_enable_irq(nic); + } + return 
0; + +err_no_irq: + if (!nic->ecdev) + del_timer_sync(&nic->watchdog); +err_clean_cbs: + e100_clean_cbs(nic); +err_rx_clean_list: + e100_rx_clean_list(nic); + return err; +} + +static void e100_down(struct nic *nic) +{ + if (!nic->ecdev) { + /* wait here for poll to complete */ + napi_disable(&nic->napi); + netif_stop_queue(nic->netdev); + } + e100_hw_reset(nic); + free_irq(nic->pdev->irq, nic->netdev); + if (!nic->ecdev) { + del_timer_sync(&nic->watchdog); + netif_carrier_off(nic->netdev); + } + e100_clean_cbs(nic); + e100_rx_clean_list(nic); +} + +static void e100_tx_timeout(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + /* Reset outside of interrupt context, to avoid request_irq + * in interrupt context */ + schedule_work(&nic->tx_timeout_task); +} + +static void e100_tx_timeout_task(struct work_struct *work) +{ + struct nic *nic = container_of(work, struct nic, tx_timeout_task); + struct net_device *netdev = nic->netdev; + + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); + + rtnl_lock(); + if (netif_running(netdev)) { + e100_down(netdev_priv(netdev)); + e100_up(netdev_priv(netdev)); + } + rtnl_unlock(); +} + +static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) +{ + int err; + struct sk_buff *skb; + + /* Use driver resources to perform internal MAC or PHY + * loopback test. A single packet is prepared and transmitted + * in loopback mode, and the test passes if the received + * packet compares byte-for-byte to the transmitted packet. */ + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_clean_rx; + + /* ICH PHY loopback is broken so do MAC loopback instead */ + if (nic->flags & ich && loopback_mode == lb_phy) + loopback_mode = lb_mac; + + nic->loopback = loopback_mode; + if ((err = e100_hw_init(nic))) + goto err_loopback_none; + + if (loopback_mode == lb_phy) + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, + BMCR_LOOPBACK); + + e100_start_receiver(nic, NULL); + + if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { + err = -ENOMEM; + goto err_loopback_none; + } + skb_put(skb, ETH_DATA_LEN); + memset(skb->data, 0xFF, ETH_DATA_LEN); + e100_xmit_frame(skb, nic->netdev); + + msleep(10); + + pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), + skb->data, ETH_DATA_LEN)) + err = -EAGAIN; + +err_loopback_none: + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); + nic->loopback = lb_none; + e100_clean_cbs(nic); + e100_hw_reset(nic); +err_clean_rx: + e100_rx_clean_list(nic); + return err; +} + +#define MII_LED_CONTROL 0x1B +#define E100_82552_LED_OVERRIDE 0x19 +#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ +#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ + +static int e100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + + mii_ethtool_get_link_ksettings(&nic->mii, cmd); + + return 0; +} + +static int e100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); + err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); + e100_exec_cb(nic, NULL, e100_configure); + + return err; +} + +static void e100_get_drvinfo(struct net_device *netdev, + 
struct ethtool_drvinfo *info) +{ + struct nic *nic = netdev_priv(netdev); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(nic->pdev), + sizeof(info->bus_info)); +} + +#define E100_PHY_REGS 0x1D +static int e100_get_regs_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + /* We know the number of registers, and the size of the dump buffer. + * Calculate the total size in bytes. + */ + return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); +} + +static void e100_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nic *nic = netdev_priv(netdev); + u32 *buff = p; + int i; + + regs->version = (1 << 24) | nic->pdev->revision; + buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | + ioread8(&nic->csr->scb.cmd_lo) << 16 | + ioread16(&nic->csr->scb.status); + for (i = 0; i < E100_PHY_REGS; i++) + /* Note that we read the registers in reverse order. This + * ordering is the ABI apparently used by ethtool and other + * applications. + */ + buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, + E100_PHY_REGS - 1 - i); + memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); + e100_exec_cb(nic, NULL, e100_dump); + msleep(10); + memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, + sizeof(nic->mem->dump_buf)); +} + +static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; + wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0; +} + +static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + + if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || + !device_can_wakeup(&nic->pdev->dev)) + return -EOPNOTSUPP; + + if (wol->wolopts) + nic->flags |= wol_magic; + else + nic->flags &= ~wol_magic; + + device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); + + e100_exec_cb(nic, NULL, e100_configure); + + return 0; +} + +static u32 e100_get_msglevel(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->msg_enable; +} + +static void e100_set_msglevel(struct net_device *netdev, u32 value) +{ + struct nic *nic = netdev_priv(netdev); + nic->msg_enable = value; +} + +static int e100_nway_reset(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_nway_restart(&nic->mii); +} + +static u32 e100_get_link(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_link_ok(&nic->mii); +} + +static int e100_get_eeprom_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->eeprom_wc << 1; +} + +#define E100_EEPROM_MAGIC 0x1234 +static int e100_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + eeprom->magic = E100_EEPROM_MAGIC; + memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); + + return 0; +} + +static int e100_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + if (eeprom->magic != E100_EEPROM_MAGIC) + return -EINVAL; + + memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); + + return e100_eeprom_save(nic, eeprom->offset >> 1, + (eeprom->len >> 1) + 1); +} + +static void e100_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam 
*ring) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + ring->rx_max_pending = rfds->max; + ring->tx_max_pending = cbs->max; + ring->rx_pending = rfds->count; + ring->tx_pending = cbs->count; +} + +static int e100_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(netdev)) + e100_down(nic); + rfds->count = max(ring->rx_pending, rfds->min); + rfds->count = min(rfds->count, rfds->max); + cbs->count = max(ring->tx_pending, cbs->min); + cbs->count = min(cbs->count, cbs->max); + netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", + rfds->count, cbs->count); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} + +static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)", + "Eeprom test (on/offline)", + "Self test (offline)", + "Mac loopback (offline)", + "Phy loopback (offline)", +}; +#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test) + +static void e100_diag_test(struct net_device *netdev, + struct ethtool_test *test, u64 *data) +{ + struct ethtool_cmd cmd; + struct nic *nic = netdev_priv(netdev); + int i, err; + + memset(data, 0, E100_TEST_LEN * sizeof(u64)); + data[0] = !mii_link_ok(&nic->mii); + data[1] = e100_eeprom_load(nic); + if (test->flags & ETH_TEST_FL_OFFLINE) { + + /* save speed, duplex & autoneg settings */ + err = mii_ethtool_gset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_down(nic); + data[2] = e100_self_test(nic); + data[3] = e100_loopback_test(nic, lb_mac); + data[4] = e100_loopback_test(nic, lb_phy); + + /* restore speed, duplex & autoneg settings */ + err = mii_ethtool_sset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_up(nic); + } + for (i = 0; i < E100_TEST_LEN; i++) + test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; + + msleep_interruptible(4 * 1000); +} + +static int e100_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct nic *nic = netdev_priv(netdev); + enum led_state { + led_on = 0x01, + led_off = 0x04, + led_on_559 = 0x05, + led_on_557 = 0x07, + }; + u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : + MII_LED_CONTROL; + u16 leds = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; + + case ETHTOOL_ID_ON: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : + (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; + break; + + case ETHTOOL_ID_OFF: + leds = (nic->phy == phy_82552_v) ? 
E100_82552_LED_OFF : led_off; + break; + + case ETHTOOL_ID_INACTIVE: + break; + } + + mdio_write(netdev, nic->mii.phy_id, led_reg, leds); + return 0; +} + +static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + /* device-specific stats */ + "tx_deferred", "tx_single_collisions", "tx_multi_collisions", + "tx_flow_control_pause", "rx_flow_control_pause", + "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", + "rx_short_frame_errors", "rx_over_length_errors", +}; +#define E100_NET_STATS_LEN 21 +#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) + +static int e100_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E100_TEST_LEN; + case ETH_SS_STATS: + return E100_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e100_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nic *nic = netdev_priv(netdev); + int i; + + for (i = 0; i < E100_NET_STATS_LEN; i++) + data[i] = ((unsigned long *)&netdev->stats)[i]; + + data[i++] = nic->tx_deferred; + data[i++] = nic->tx_single_collisions; + data[i++] = nic->tx_multiple_collisions; + data[i++] = nic->tx_fc_pause; + data[i++] = nic->rx_fc_pause; + data[i++] = nic->rx_fc_unsupported; + data[i++] = nic->tx_tco_frames; + data[i++] = nic->rx_tco_frames; + data[i++] = nic->rx_short_frame_errors; + data[i++] = nic->rx_over_length_errors; +} + +static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test)); + break; + case ETH_SS_STATS: + memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats)); + break; + } +} + +static const struct ethtool_ops e100_ethtool_ops = { + .get_drvinfo = e100_get_drvinfo, + .get_regs_len = e100_get_regs_len, + .get_regs = e100_get_regs, + .get_wol = e100_get_wol, + .set_wol = e100_set_wol, + .get_msglevel = e100_get_msglevel, + .set_msglevel = e100_set_msglevel, + .nway_reset = e100_nway_reset, + .get_link = e100_get_link, + .get_eeprom_len = e100_get_eeprom_len, + .get_eeprom = e100_get_eeprom, + .set_eeprom = e100_set_eeprom, + .get_ringparam = e100_get_ringparam, + .set_ringparam = e100_set_ringparam, + .self_test = e100_diag_test, + .get_strings = e100_get_strings, + .set_phys_id = e100_set_phys_id, + .get_ethtool_stats = e100_get_ethtool_stats, + .get_sset_count = e100_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e100_get_link_ksettings, + .set_link_ksettings = e100_set_link_ksettings, +}; + +static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct nic *nic = netdev_priv(netdev); + + return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); +} + +static int e100_alloc(struct nic *nic) +{ + nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem), + &nic->dma_addr); + return nic->mem ? 
0 : -ENOMEM; +} + +static void e100_free(struct nic *nic) +{ + if (nic->mem) { + pci_free_consistent(nic->pdev, sizeof(struct mem), + nic->mem, nic->dma_addr); + nic->mem = NULL; + } +} + +static int e100_open(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err = 0; + + if (!nic->ecdev) + netif_carrier_off(netdev); + if ((err = e100_up(nic))) + netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); + return err; +} + +static int e100_close(struct net_device *netdev) +{ + e100_down(netdev_priv(netdev)); + return 0; +} + +static int e100_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct nic *nic = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + e100_exec_cb(nic, NULL, e100_configure); + return 1; +} + +static const struct net_device_ops e100_netdev_ops = { + .ndo_open = e100_open, + .ndo_stop = e100_close, + .ndo_start_xmit = e100_xmit_frame, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = e100_set_multicast_list, + .ndo_set_mac_address = e100_set_mac_address, + .ndo_do_ioctl = e100_do_ioctl, + .ndo_tx_timeout = e100_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e100_netpoll, +#endif + .ndo_set_features = e100_set_features, +}; + +static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct nic *nic; + int err; + + if (!(netdev = alloc_etherdev(sizeof(struct nic)))) + return -ENOMEM; + + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; + + netdev->netdev_ops = &e100_netdev_ops; + netdev->ethtool_ops = &e100_ethtool_ops; + netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + nic = netdev_priv(netdev); + netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); + nic->netdev = netdev; + nic->pdev = pdev; + nic->msg_enable = (1 << debug) - 1; + nic->mdio_ctrl = mdio_ctrl_hw; + pci_set_drvdata(pdev, netdev); + + if ((err = pci_enable_device(pdev))) { + netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); + goto err_out_free_dev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + if ((err = pci_request_regions(pdev, DRV_NAME))) { + netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable_pdev; + } + + if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + if (use_io) + netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); + + nic->csr = pci_iomap(pdev, (use_io ? 
1 : 0), sizeof(struct csr)); + if (!nic->csr) { + netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + if (ent->driver_data) + nic->flags |= ich; + else + nic->flags &= ~ich; + + e100_get_defaults(nic); + + /* D100 MAC doesn't allow rx of vlan packets with normal MTU */ + if (nic->mac < mac_82558_D101_A4) + netdev->features |= NETIF_F_VLAN_CHALLENGED; + + /* locks must be initialized before calling hw_reset */ + spin_lock_init(&nic->cb_lock); + spin_lock_init(&nic->cmd_lock); + spin_lock_init(&nic->mdio_lock); + + /* Reset the device before pci_set_master() in case device is in some + * funky state and has an interrupt pending - hint: we don't have the + * interrupt handler registered yet. */ + e100_hw_reset(nic); + + pci_set_master(pdev); + + timer_setup(&nic->watchdog, e100_watchdog, 0); + + INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); + + if ((err = e100_alloc(nic))) { + netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); + goto err_out_iounmap; + } + + if ((err = e100_eeprom_load(nic))) + goto err_out_free; + + e100_phy_init(nic); + + memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!eeprom_bad_csum_allow) { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); + err = -EAGAIN; + goto err_out_free; + } else { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); + } + } + + /* Wol magic packet can be enabled from eeprom */ + if ((nic->mac >= mac_82558_D101_A4) && + (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { + nic->flags |= wol_magic; + device_set_wakeup_enable(&pdev->dev, true); + } + + /* ack any pending wake events, disable PME */ + pci_pme_active(pdev, false); + + // offer device to EtherCAT master module + nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE); + + if (!nic->ecdev) { + strcpy(netdev->name, "eth%d"); + if ((err = register_netdev(netdev))) { + netif_err(nic, probe, nic->netdev, + "Cannot register net device, aborting\n"); + goto err_out_free; + } + } + + nic->cbs_pool = dma_pool_create(netdev->name, + &nic->pdev->dev, + nic->params.cbs.max * sizeof(struct cb), + sizeof(u32), + 0); + if (!nic->cbs_pool) { + netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); + err = -ENOMEM; + goto err_out_pool; + } + netif_info(nic, probe, nic->netdev, + "addr 0x%llx, irq %d, MAC addr %pM\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), + pdev->irq, netdev->dev_addr); + + if (nic->ecdev) { + err = ecdev_open(nic->ecdev); + if (err) { + ecdev_withdraw(nic->ecdev); + goto err_out_free; + } + } + + return 0; + +err_out_pool: + unregister_netdev(netdev); +err_out_free: + e100_free(nic); +err_out_iounmap: + pci_iounmap(pdev, nic->csr); +err_out_free_res: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out_free_dev: + free_netdev(netdev); + return err; +} + +static void e100_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netdev) { + struct nic *nic = netdev_priv(netdev); + if (nic->ecdev) { + ecdev_close(nic->ecdev); + ecdev_withdraw(nic->ecdev); + } else { + unregister_netdev(netdev); + } + + e100_free(nic); + pci_iounmap(pdev, nic->csr); + dma_pool_destroy(nic->cbs_pool); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ +#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ +#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ +static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (netif_running(netdev)) + e100_down(nic); + netif_device_detach(netdev); + + pci_save_state(pdev); + + if ((nic->flags & wol_magic) | e100_asf(nic)) { + /* enable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, smartspeed | + E100_82552_REV_ANEG | E100_82552_ANEG_NOW); + } + *enable_wake = true; + } else { + *enable_wake = false; + } + + pci_clear_master(pdev); +} + +static int __e100_power_off(struct pci_dev *pdev, bool wake) +{ + if (wake) + return pci_prepare_to_sleep(pdev); + + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +#ifdef CONFIG_PM +static int e100_suspend(struct pci_dev *pdev, pm_message_t state) +{ + bool wake; + __e100_shutdown(pdev, &wake); + return __e100_power_off(pdev, wake); +} + +static int e100_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, PCI_D0, 0); + + /* disable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, + smartspeed & ~(E100_82552_REV_ANEG)); + } + + netif_device_attach(netdev); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} +#endif /* CONFIG_PM */ + +static void e100_shutdown(struct pci_dev *pdev) +{ + bool wake; + __e100_shutdown(pdev, &wake); + if (system_state == SYSTEM_POWER_OFF) + __e100_power_off(pdev, wake); +} + +/* ------------------ PCI Error Recovery infrastructure -------------- */ +/** + * e100_io_error_detected - called when PCI error is detected. 
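+ * (Editor's note, added for clarity: when the port has already been handed
+ * over to the EtherCAT master (nic->ecdev is set), the handler below returns
+ * straight away and performs none of the detach/reset steps, since the
+ * netdev stack no longer drives this device.)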
+ * @pdev: Pointer to PCI device + * @state: The current pci connection state + */ +static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (nic->ecdev) + return -EBUSY; + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e100_down(nic); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e100_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch. + */ +static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (nic->ecdev) + return -EBUSY; + + if (pci_enable_device(pdev)) { + pr_err("Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + /* Only one device per card can do a reset */ + if (0 != PCI_FUNC(pdev->devfn)) + return PCI_ERS_RESULT_RECOVERED; + e100_hw_reset(nic); + e100_phy_init(nic); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e100_io_resume - resume normal operations + * @pdev: Pointer to PCI device + * + * Resume normal operations after an error recovery + * sequence has been completed. + */ +static void e100_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, PCI_D0, 0); + + if (!nic->ecdev) + netif_device_attach(netdev); + if (nic->ecdev || netif_running(netdev)) { + e100_open(netdev); + if (!nic->ecdev) + mod_timer(&nic->watchdog, jiffies); + } +} + +static const struct pci_error_handlers e100_err_handler = { + .error_detected = e100_io_error_detected, + .slot_reset = e100_io_slot_reset, + .resume = e100_io_resume, +}; + +static struct pci_driver e100_driver = { + .name = DRV_NAME, + .id_table = e100_id_table, + .probe = e100_probe, + .remove = e100_remove, +#ifdef CONFIG_PM + /* Power Management hooks */ + .suspend = e100_suspend, + .resume = e100_resume, +#endif + .shutdown = e100_shutdown, + .err_handler = &e100_err_handler, +}; + +static int __init e100_init_module(void) +{ + if (((1 << debug) - 1) & NETIF_MSG_DRV) { + pr_info("%s %s, %s\n", DRV_NAME, DRV_DESCRIPTION, DRV_VERSION); + pr_info("%s\n", DRV_COPYRIGHT); + } + return pci_register_driver(&e100_driver); +} + +static void __exit e100_cleanup_module(void) +{ + printk(KERN_INFO DRV_NAME " cleaning up module...\n"); + pci_unregister_driver(&e100_driver); + printk(KERN_INFO DRV_NAME " module cleaned up.\n"); +} + +module_init(e100_init_module); +module_exit(e100_cleanup_module); diff --git a/devices/e100-5.4-orig.c b/devices/e100-5.4-orig.c new file mode 100644 index 00000000..70962967 --- /dev/null +++ b/devices/e100-5.4-orig.c @@ -0,0 +1,3190 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* + * e100.c: Intel(R) PRO/100 ethernet driver + * + * (Re)written 2003 by scott.feldman@intel.com. Based loosely on + * original e100 driver, but better described as a munging of + * e100, e1000, eepro100, tg3, 8139cp, and other drivers. 
+ * + * References: + * Intel 8255x 10/100 Mbps Ethernet Controller Family, + * Open Source Software Developers Manual, + * http://sourceforge.net/projects/e1000 + * + * + * Theory of Operation + * + * I. General + * + * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet + * controller family, which includes the 82557, 82558, 82559, 82550, + * 82551, and 82562 devices. 82558 and greater controllers + * integrate the Intel 82555 PHY. The controllers are used in + * server and client network interface cards, as well as in + * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx + * configurations. 8255x supports a 32-bit linear addressing + * mode and operates at 33Mhz PCI clock rate. + * + * II. Driver Operation + * + * Memory-mapped mode is used exclusively to access the device's + * shared-memory structure, the Control/Status Registers (CSR). All + * setup, configuration, and control of the device, including queuing + * of Tx, Rx, and configuration commands is through the CSR. + * cmd_lock serializes accesses to the CSR command register. cb_lock + * protects the shared Command Block List (CBL). + * + * 8255x is highly MII-compliant and all access to the PHY go + * through the Management Data Interface (MDI). Consequently, the + * driver leverages the mii.c library shared with other MII-compliant + * devices. + * + * Big- and Little-Endian byte order as well as 32- and 64-bit + * archs are supported. Weak-ordered memory and non-cache-coherent + * archs are supported. + * + * III. Transmit + * + * A Tx skb is mapped and hangs off of a TCB. TCBs are linked + * together in a fixed-size ring (CBL) thus forming the flexible mode + * memory structure. A TCB marked with the suspend-bit indicates + * the end of the ring. The last TCB processed suspends the + * controller, and the controller can be restarted by issue a CU + * resume command to continue from the suspend point, or a CU start + * command to start at a given position in the ring. + * + * Non-Tx commands (config, multicast setup, etc) are linked + * into the CBL ring along with Tx commands. The common structure + * used for both Tx and non-Tx commands is the Command Block (CB). + * + * cb_to_use is the next CB to use for queuing a command; cb_to_clean + * is the next CB to check for completion; cb_to_send is the first + * CB to start on in case of a previous failure to resume. CB clean + * up happens in interrupt context in response to a CU interrupt. + * cbs_avail keeps track of number of free CB resources available. + * + * Hardware padding of short packets to minimum packet size is + * enabled. 82557 pads with 7Eh, while the later controllers pad + * with 00h. + * + * IV. Receive + * + * The Receive Frame Area (RFA) comprises a ring of Receive Frame + * Descriptors (RFD) + data buffer, thus forming the simplified mode + * memory structure. Rx skbs are allocated to contain both the RFD + * and the data buffer, but the RFD is pulled off before the skb is + * indicated. The data buffer is aligned such that encapsulated + * protocol headers are u32-aligned. Since the RFD is part of the + * mapped shared memory, and completion status is contained within + * the RFD, the RFD must be dma_sync'ed to maintain a consistent + * view from software and hardware. + * + * In order to keep updates to the RFD link field from colliding with + * hardware writes to mark packets complete, we use the feature that + * hardware will not write to a size 0 descriptor and mark the previous + * packet as end-of-list (EL). 
After updating the link, we remove EL + * and only then restore the size such that hardware may use the + * previous-to-end RFD. + * + * Under typical operation, the receive unit (RU) is start once, + * and the controller happily fills RFDs as frames arrive. If + * replacement RFDs cannot be allocated, or the RU goes non-active, + * the RU must be restarted. Frame arrival generates an interrupt, + * and Rx indication and re-allocation happen in the same context, + * therefore no locking is required. A software-generated interrupt + * is generated from the watchdog to recover from a failed allocation + * scenario where all Rx resources have been indicated and none re- + * placed. + * + * V. Miscellaneous + * + * VLAN offloading of tagging, stripping and filtering is not + * supported, but driver will accommodate the extra 4-byte VLAN tag + * for processing by upper layers. Tx/Rx Checksum offloading is not + * supported. Tx Scatter/Gather is not supported. Jumbo Frames is + * not supported (hardware limitation). + * + * MagicPacket(tm) WoL support is enabled/disabled via ethtool. + * + * Thanks to JC (jchapman@katalix.com) for helping with + * testing/troubleshooting the development driver. + * + * TODO: + * o several entry points race with dev->close + * o check for tx-no-resources/stop Q races with tx clean/wake Q + * + * FIXES: + * 2005/12/02 - Michael O'Donnell + * - Stratus87247: protect MDI control register manipulations + * 2009/06/01 - Andreas Mohr + * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define DRV_NAME "e100" +#define DRV_EXT "-NAPI" +#define DRV_VERSION "3.5.24-k2"DRV_EXT +#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" +#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" + +#define E100_WATCHDOG_PERIOD (2 * HZ) +#define E100_NAPI_WEIGHT 16 + +#define FIRMWARE_D101M "e100/d101m_ucode.bin" +#define FIRMWARE_D101S "e100/d101s_ucode.bin" +#define FIRMWARE_D102E "e100/d102e_ucode.bin" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +MODULE_FIRMWARE(FIRMWARE_D101M); +MODULE_FIRMWARE(FIRMWARE_D101S); +MODULE_FIRMWARE(FIRMWARE_D102E); + +static int debug = 3; +static int eeprom_bad_csum_allow = 0; +static int use_io = 0; +module_param(debug, int, 0); +module_param(eeprom_bad_csum_allow, int, 0); +module_param(use_io, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); +MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); + +#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ + PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ + PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } +static const struct pci_device_id e100_id_table[] = { + INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1032, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1033, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1034, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1038, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1039, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103A, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103B, 4), + 
INTEL_8255X_ETHERNET_DEVICE(0x103C, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103D, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103E, 4), + INTEL_8255X_ETHERNET_DEVICE(0x1050, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1051, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1052, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1053, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1054, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1055, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1056, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1057, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1059, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1064, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1065, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1066, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1067, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1068, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), + INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), + INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), + INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), + INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), + INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), + { 0, } +}; +MODULE_DEVICE_TABLE(pci, e100_id_table); + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 0xFF, +}; + +enum phy { + phy_100a = 0x000003E0, + phy_100c = 0x035002A8, + phy_82555_tx = 0x015002A8, + phy_nsc_tx = 0x5C002000, + phy_82562_et = 0x033002A8, + phy_82562_em = 0x032002A8, + phy_82562_ek = 0x031002A8, + phy_82562_eh = 0x017002A8, + phy_82552_v = 0xd061004d, + phy_unknown = 0xFFFFFFFF, +}; + +/* CSR (Control/Status Registers) */ +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 0x08, + rus_ready = 0x10, + rus_mask = 0x3C, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0x00, + stat_ack_sw_gen = 0x04, + stat_ack_rnr = 0x10, + stat_ack_cu_idle = 0x20, + stat_ack_frame_rx = 0x40, + stat_ack_cu_cmd_done = 0x80, + stat_ack_not_present = 0xFF, + stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), + stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), +}; + +enum scb_cmd_hi { + irq_mask_none = 0x00, + irq_mask_all = 0x01, + irq_sw_gen = 0x02, +}; + +enum scb_cmd_lo { + cuc_nop = 0x00, + ruc_start = 0x01, + ruc_load_base = 0x06, + cuc_start = 0x10, + cuc_resume = 0x20, + cuc_dump_addr = 0x40, + cuc_dump_stats = 0x50, + cuc_load_base = 0x60, + cuc_dump_reset = 0x70, +}; + +enum cuc_dump { + cuc_dump_complete = 0x0000A005, + cuc_dump_reset_complete = 0x0000A007, +}; + +enum port { + software_reset = 0x0000, + selftest = 0x0001, + selective_reset = 0x0002, +}; + +enum eeprom_ctrl_lo { + eesk = 0x01, + eecs = 0x02, + eedi = 0x04, + eedo = 0x08, +}; + +enum mdi_ctrl { + mdi_write = 0x04000000, + mdi_read = 0x08000000, + mdi_ready = 0x10000000, +}; + +enum eeprom_op { + op_write = 0x05, + op_read = 0x06, + op_ewds = 
0x10, + op_ewen = 0x13, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 0x03, + eeprom_phy_iface = 0x06, + eeprom_id = 0x0A, + eeprom_config_asf = 0x0D, + eeprom_smbus_addr = 0x90, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 0x0080, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB, + I82553C, + I82503, + DP83840, + S80C240, + S80C24, + I82555, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 0x0020, +}; + +enum eeprom_config_asf { + eeprom_asf = 0x8000, + eeprom_gcl = 0x4000, +}; + +enum cb_status { + cb_complete = 0x8000, + cb_ok = 0x2000, +}; + +/** + * cb_command - Command Block flags + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory + */ +enum cb_command { + cb_nop = 0x0000, + cb_iaaddr = 0x0001, + cb_config = 0x0002, + cb_multi = 0x0003, + cb_tx = 0x0004, + cb_ucode = 0x0005, + cb_dump = 0x0006, + cb_tx_sf = 0x0008, + cb_tx_nc = 0x0010, + cb_cid = 0x1f00, + cb_i = 0x2000, + cb_s = 0x4000, + cb_el = 0x8000, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next, *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +#if defined(__BIG_ENDIAN_BITFIELD) +#define X(a,b) b,a +#else +#define X(a,b) a,b +#endif +struct config { +/*0*/ u8 X(byte_count:6, pad0:2); +/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); +/*2*/ u8 adaptive_ifs; +/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), + term_write_cache_line:1), pad3:4); +/*4*/ u8 X(rx_dma_max_count:7, pad4:1); +/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); +/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), + tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), + rx_save_overruns : 1), rx_save_bad_frames : 1); +/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), + pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), + tx_dynamic_tbd:1); +/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); +/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), + link_status_wake:1), arp_wake:1), mcmatch_wake:1); +/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), + loopback:2); +/*11*/ u8 X(linear_priority:3, pad11:5); +/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); +/*13*/ u8 ip_addr_lo; +/*14*/ u8 ip_addr_hi; +/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), + wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), + pad15_2:1), crs_or_cdt:1); +/*16*/ u8 fc_delay_lo; +/*17*/ u8 fc_delay_hi; +/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), + rx_long_ok:1), fc_priority_threshold:3), pad18:1); +/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), + fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), + full_duplex_force:1), full_duplex_pin:1); +/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1); +/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); +/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); + u8 pad_d102[9]; +}; + +#define E100_MAX_MULTICAST_ADDRS 64 +struct multi { + __le16 count; + u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; +}; + +/* Important: keep total struct u32-aligned */ +#define UCODE_SIZE 134 +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[ETH_ALEN]; + __le32 ucode[UCODE_SIZE]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + 
__le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next, *prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, lb_mac = 1, lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames, tx_max_collisions, tx_late_collisions, + tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, + tx_multiple_collisions, tx_total_collisions; + __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors, + rx_resource_errors, rx_overrun_errors, rx_cdt_errors, + rx_short_frame_errors; + __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; + __le16 xmt_tco_frames, rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + /* Begin: frequently used values: keep adjacent for cache effect */ + u32 msg_enable ____cacheline_aligned; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data); + + struct rx *rxs ____cacheline_aligned; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + + spinlock_t cb_lock ____cacheline_aligned; + spinlock_t cmd_lock; + struct csr __iomem *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + /* End: frequently used values: keep adjacent for cache effect */ + + enum { + ich = (1 << 0), + promiscuous = (1 << 1), + multicast_all = (1 << 2), + wol_magic = (1 << 3), + ich_10h_workaround = (1 << 4), + } flags ____cacheline_aligned; + + enum mac mac; + enum phy phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + + struct mem *mem; + dma_addr_t dma_addr; + + struct dma_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + + u16 eeprom_wc; + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; +}; + +static inline void e100_write_flush(struct nic *nic) +{ + /* Flush previous PCI writes through intermediate bridges + * by doing a benign read */ + (void)ioread8(&nic->csr->scb.status); +} + +static void e100_enable_irq(struct nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_disable_irq(struct nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_hw_reset(struct nic *nic) +{ + /* Put CU and RU into idle with a selective reset to get + * device off of PCI bus */ + iowrite32(selective_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Now fully reset device */ + iowrite32(software_reset, 
&nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Mask off our interrupt line - it's unmasked after reset */ + e100_disable_irq(nic); +} + +static int e100_self_test(struct nic *nic) +{ + u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); + + /* Passing the self-test is a pretty good indication + * that the device can DMA to/from host memory */ + + nic->mem->selftest.signature = 0; + nic->mem->selftest.result = 0xFFFFFFFF; + + iowrite32(selftest | dma_addr, &nic->csr->port); + e100_write_flush(nic); + /* Wait 10 msec for self-test to complete */ + msleep(10); + + /* Interrupts are enabled after self-test */ + e100_disable_irq(nic); + + /* Check results of self-test */ + if (nic->mem->selftest.result != 0) { + netif_err(nic, hw, nic->netdev, + "Self-test failed: result=0x%08X\n", + nic->mem->selftest.result); + return -ETIMEDOUT; + } + if (nic->mem->selftest.signature == 0) { + netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data) +{ + u32 cmd_addr_data[3]; + u8 ctrl; + int i, j; + + /* Three cmds: write/erase enable, write data, write/erase disable */ + cmd_addr_data[0] = op_ewen << (addr_len - 2); + cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | + le16_to_cpu(data); + cmd_addr_data[2] = op_ewds << (addr_len - 2); + + /* Bit-bang cmds to write word to eeprom */ + for (j = 0; j < 3; j++) { + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data[j] & (1 << i)) ? + eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } + /* Wait 10 msec for cmd to complete */ + msleep(10); + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } +}; + +/* General technique stolen from the eepro100 driver - very clever */ +static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) +{ + u32 cmd_addr_data; + u16 data = 0; + u8 ctrl; + int i; + + cmd_addr_data = ((op_read << *addr_len) | addr) << 16; + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Bit-bang to read word from eeprom */ + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Eeprom drives a dummy zero to EEDO after receiving + * complete address. Use this to adjust addr_len. */ + ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); + if (!(ctrl & eedo) && i > 16) { + *addr_len -= (i - 16); + i = 17; + } + + data = (data << 1) | (ctrl & eedo ? 
1 : 0); + } + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + return cpu_to_le16(data); +}; + +/* Load entire EEPROM image into driver cache and validate checksum */ +static int e100_eeprom_load(struct nic *nic) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + for (addr = 0; addr < nic->eeprom_wc; addr++) { + nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); + if (addr < nic->eeprom_wc - 1) + checksum += le16_to_cpu(nic->eeprom[addr]); + } + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { + netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); + if (!eeprom_bad_csum_allow) + return -EAGAIN; + } + + return 0; +} + +/* Save (portion of) driver EEPROM cache to device and update checksum */ +static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + if (start + count >= nic->eeprom_wc) + return -EINVAL; + + for (addr = start; addr < start + count; addr++) + e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + for (addr = 0; addr < nic->eeprom_wc - 1; addr++) + checksum += le16_to_cpu(nic->eeprom[addr]); + nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); + e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, + nic->eeprom[nic->eeprom_wc - 1]); + + return 0; +} + +#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ +#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ +static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) +{ + unsigned long flags; + unsigned int i; + int err = 0; + + spin_lock_irqsave(&nic->cmd_lock, flags); + + /* Previous command is accepted when SCB clears */ + for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { + if (likely(!ioread8(&nic->csr->scb.cmd_lo))) + break; + cpu_relax(); + if (unlikely(i > E100_WAIT_SCB_FAST)) + udelay(5); + } + if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) { + err = -EAGAIN; + goto err_unlock; + } + + if (unlikely(cmd != cuc_resume)) + iowrite32(dma_addr, &nic->csr->scb.gen_ptr); + iowrite8(cmd, &nic->csr->scb.cmd_lo); + +err_unlock: + spin_unlock_irqrestore(&nic->cmd_lock, flags); + + return err; +} + +static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, + int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) +{ + struct cb *cb; + unsigned long flags; + int err; + + spin_lock_irqsave(&nic->cb_lock, flags); + + if (unlikely(!nic->cbs_avail)) { + err = -ENOMEM; + goto err_unlock; + } + + cb = nic->cb_to_use; + nic->cb_to_use = cb->next; + nic->cbs_avail--; + cb->skb = skb; + + err = cb_prepare(nic, cb, skb); + if (err) + goto err_unlock; + + if (unlikely(!nic->cbs_avail)) + err = -ENOSPC; + + + /* Order is important otherwise we'll be in a race with h/w: + * set S-bit in current first, then clear S-bit in previous. 
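+	 * (Editor's note, added for clarity: the device only resumes once the
+	 * suspend bit of the previous CB is cleared, and by then the freshly
+	 * queued CB already carries its own suspend bit, so the controller
+	 * stops right after it and cannot wander into command blocks that have
+	 * not been prepared yet; the dma_wmb() below enforces that ordering
+	 * towards the hardware.)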
*/ + cb->command |= cpu_to_le16(cb_s); + dma_wmb(); + cb->prev->command &= cpu_to_le16(~cb_s); + + while (nic->cb_to_send != nic->cb_to_use) { + if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, + nic->cb_to_send->dma_addr))) { + /* Ok, here's where things get sticky. It's + * possible that we can't schedule the command + * because the controller is too busy, so + * let's just queue the command and try again + * when another command is scheduled. */ + if (err == -ENOSPC) { + //request a reset + schedule_work(&nic->tx_timeout_task); + } + break; + } else { + nic->cuc_cmd = cuc_resume; + nic->cb_to_send = nic->cb_to_send->next; + } + } + +err_unlock: + spin_unlock_irqrestore(&nic->cb_lock, flags); + + return err; +} + +static int mdio_read(struct net_device *netdev, int addr, int reg) +{ + struct nic *nic = netdev_priv(netdev); + return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); +} + +static void mdio_write(struct net_device *netdev, int addr, int reg, int data) +{ + struct nic *nic = netdev_priv(netdev); + + nic->mdio_ctrl(nic, addr, mdi_write, reg, data); +} + +/* the standard mdio_ctrl() function for usual MII-compliant hardware */ +static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) +{ + u32 data_out = 0; + unsigned int i; + unsigned long flags; + + + /* + * Stratus87247: we shouldn't be writing the MDI control + * register until the Ready bit shows True. Also, since + * manipulation of the MDI control registers is a multi-step + * procedure it should be done under lock. + */ + spin_lock_irqsave(&nic->mdio_lock, flags); + for (i = 100; i; --i) { + if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) + break; + udelay(20); + } + if (unlikely(!i)) { + netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); + spin_unlock_irqrestore(&nic->mdio_lock, flags); + return 0; /* No way to indicate timeout error */ + } + iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); + + for (i = 0; i < 100; i++) { + udelay(20); + if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) + break; + } + spin_unlock_irqrestore(&nic->mdio_lock, flags); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data, data_out); + return (u16)data_out; +} + +/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */ +static u16 mdio_ctrl_phy_82552_v(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + if ((reg == MII_BMCR) && (dir == mdi_write)) { + if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) { + u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, + MII_ADVERTISE); + + /* + * Workaround Si issue where sometimes the part will not + * autoneg to 100Mbps even when advertised. + */ + if (advert & ADVERTISE_100FULL) + data |= BMCR_SPEED100 | BMCR_FULLDPLX; + else if (advert & ADVERTISE_100HALF) + data |= BMCR_SPEED100; + } + } + return mdio_ctrl_hw(nic, addr, dir, reg, data); +} + +/* Fully software-emulated mdio_ctrl() function for cards without + * MII-compliant PHYs. + * For now, this is mainly geared towards 80c24 support; in case of further + * requirements for other types (i82503, ...?) either extend this mechanism + * or split it, whichever is cleaner. 
+ */ +static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + /* might need to allocate a netdev_priv'ed register array eventually + * to be able to record state changes, but for now + * some fully hardcoded register handling ought to be ok I guess. */ + + if (dir == mdi_read) { + switch (reg) { + case MII_BMCR: + /* Auto-negotiation, right? */ + return BMCR_ANENABLE | + BMCR_FULLDPLX; + case MII_BMSR: + return BMSR_LSTATUS /* for mii_link_ok() */ | + BMSR_ANEGCAPABLE | + BMSR_10FULL; + case MII_ADVERTISE: + /* 80c24 is a "combo card" PHY, right? */ + return ADVERTISE_10HALF | + ADVERTISE_10FULL; + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } else { + switch (reg) { + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } +} +static inline int e100_phy_supports_mii(struct nic *nic) +{ + /* for now, just check it by comparing whether we + are using MII software emulation. + */ + return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); +} + +static void e100_get_defaults(struct nic *nic) +{ + struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; + struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; + + /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ + nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision; + if (nic->mac == mac_unknown) + nic->mac = mac_82557_D100_A; + + nic->params.rfds = rfds; + nic->params.cbs = cbs; + + /* Quadwords to DMA into FIFO before starting frame transmit */ + nic->tx_threshold = 0xE0; + + /* no interrupt for every tx completion, delay = 256us if not 557 */ + nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | + ((nic->mac >= mac_82558_D101_A4) ? 
cb_cid : cb_i)); + + /* Template for a freshly allocated RFD */ + nic->blank_rfd.command = 0; + nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); + nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); + + /* MII setup */ + nic->mii.phy_id_mask = 0x1F; + nic->mii.reg_num_mask = 0x1F; + nic->mii.dev = nic->netdev; + nic->mii.mdio_read = mdio_read; + nic->mii.mdio_write = mdio_write; +} + +static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct config *config = &cb->u.config; + u8 *c = (u8 *)config; + struct net_device *netdev = nic->netdev; + + cb->command = cpu_to_le16(cb_config); + + memset(config, 0, sizeof(struct config)); + + config->byte_count = 0x16; /* bytes in this struct */ + config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ + config->direct_rx_dma = 0x1; /* reserved */ + config->standard_tcb = 0x1; /* 1=standard, 0=extended */ + config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ + config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ + config->tx_underrun_retry = 0x3; /* # of underrun retries */ + if (e100_phy_supports_mii(nic)) + config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ + config->pad10 = 0x6; + config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ + config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ + config->ifs = 0x6; /* x16 = inter frame spacing */ + config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ + config->pad15_1 = 0x1; + config->pad15_2 = 0x1; + config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ + config->fc_delay_hi = 0x40; /* time delay for fc frame */ + config->tx_padding = 0x1; /* 1=pad short frames */ + config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ + config->pad18 = 0x1; + config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ + config->pad20_1 = 0x1F; + config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ + config->pad21_1 = 0x5; + + config->adaptive_ifs = nic->adaptive_ifs; + config->loopback = nic->loopback; + + if (nic->mii.force_media && nic->mii.full_duplex) + config->full_duplex_force = 0x1; /* 1=force, 0=auto */ + + if (nic->flags & promiscuous || nic->loopback) { + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + config->promiscuous_mode = 0x1; /* 1=on, 0=off */ + } + + if (unlikely(netdev->features & NETIF_F_RXFCS)) + config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ + + if (nic->flags & multicast_all) + config->multicast_all = 0x1; /* 1=accept, 0=no */ + + /* disable WoL when up */ + if (netif_running(nic->netdev) || !(nic->flags & wol_magic)) + config->magic_packet_disable = 0x1; /* 1=off, 0=on */ + + if (nic->mac >= mac_82558_D101_A4) { + config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ + config->mwi_enable = 0x1; /* 1=enable, 0=disable */ + config->standard_tcb = 0x0; /* 1=standard, 0=extended */ + config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ + if (nic->mac >= mac_82559_D101M) { + config->tno_intr = 0x1; /* TCO stats enable */ + /* Enable TCO in extended config */ + if (nic->mac >= mac_82551_10) { + config->byte_count = 0x20; /* extended bytes */ + config->rx_d102_mode = 0x1; /* GMRC for TCO */ + } + } else { + config->standard_stat_counter = 0x0; + } + } + + if (netdev->features & NETIF_F_RXALL) { + config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + } + + netif_printk(nic, 
hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", + c + 0); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", + c + 8); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", + c + 16); + return 0; +} + +/************************************************************************* +* CPUSaver parameters +* +* All CPUSaver parameters are 16-bit literals that are part of a +* "move immediate value" instruction. By changing the value of +* the literal in the instruction before the code is loaded, the +* driver can change the algorithm. +* +* INTDELAY - This loads the dead-man timer with its initial value. +* When this timer expires the interrupt is asserted, and the +* timer is reset each time a new packet is received. (see +* BUNDLEMAX below to set the limit on number of chained packets) +* The current default is 0x600 or 1536. Experiments show that +* the value should probably stay within the 0x200 - 0x1000. +* +* BUNDLEMAX - +* This sets the maximum number of frames that will be bundled. In +* some situations, such as the TCP windowing algorithm, it may be +* better to limit the growth of the bundle size than let it go as +* high as it can, because that could cause too much added latency. +* The default is six, because this is the number of packets in the +* default TCP window size. A value of 1 would make CPUSaver indicate +* an interrupt for every frame received. If you do not want to put +* a limit on the bundle size, set this value to xFFFF. +* +* BUNDLESMALL - +* This contains a bit-mask describing the minimum size frame that +* will be bundled. The default masks the lower 7 bits, which means +* that any frame less than 128 bytes in length will not be bundled, +* but will instead immediately generate an interrupt. This does +* not affect the current bundle in any way. Any frame that is 128 +* bytes or large will be bundled normally. This feature is meant +* to provide immediate indication of ACK frames in a TCP environment. +* Customers were seeing poor performance when a machine with CPUSaver +* enabled was sending but not receiving. The delay introduced when +* the ACKs were received was enough to reduce total throughput, because +* the sender would sit idle until the ACK was finally seen. +* +* The current default is 0xFF80, which masks out the lower 7 bits. +* This means that any frame which is x7F (127) bytes or smaller +* will cause an immediate interrupt. Because this value must be a +* bit mask, there are only a few valid values that can be used. To +* turn this feature off, the driver can write the value xFFFF to the +* lower word of this instruction (in the same way that the other +* parameters are used). Likewise, a value of 0xF800 (2047) would +* cause an interrupt to be generated for every frame, because all +* standard Ethernet frames are <= 2047 bytes in length. 
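+*
+* (Editor's note, worked example for the default BUNDLESMALL mask 0xFF80:
+* a 60-byte ACK gives 0x003C & 0xFF80 == 0, so it raises an immediate
+* interrupt, while a 1500-byte frame gives 0x05DC & 0xFF80 == 0x0580 != 0,
+* so it is bundled as described above.)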
+*************************************************************************/ + +/* if you wish to disable the ucode functionality, while maintaining the + * workarounds it provides, set the following defines to: + * BUNDLESMALL 0 + * BUNDLEMAX 1 + * INTDELAY 1 + */ +#define BUNDLESMALL 1 +#define BUNDLEMAX (u16)6 +#define INTDELAY (u16)1536 /* 0x600 */ + +/* Initialize firmware */ +static const struct firmware *e100_request_firmware(struct nic *nic) +{ + const char *fw_name; + const struct firmware *fw = nic->fw; + u8 timer, bundle, min_size; + int err = 0; + bool required = false; + + /* do not load u-code for ICH devices */ + if (nic->flags & ich) + return NULL; + + /* Search for ucode match against h/w revision + * + * Based on comments in the source code for the FreeBSD fxp + * driver, the FIRMWARE_D102E ucode includes both CPUSaver and + * + * "fixes for bugs in the B-step hardware (specifically, bugs + * with Inline Receive)." + * + * So we must fail if it cannot be loaded. + * + * The other microcode files are only required for the optional + * CPUSaver feature. Nice to have, but no reason to fail. + */ + if (nic->mac == mac_82559_D101M) { + fw_name = FIRMWARE_D101M; + } else if (nic->mac == mac_82559_D101S) { + fw_name = FIRMWARE_D101S; + } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { + fw_name = FIRMWARE_D102E; + required = true; + } else { /* No ucode on other devices */ + return NULL; + } + + /* If the firmware has not previously been loaded, request a pointer + * to it. If it was previously loaded, we are reinitializing the + * adapter, possibly in a resume from hibernate, in which case + * request_firmware() cannot be used. + */ + if (!fw) + err = request_firmware(&fw, fw_name, &nic->pdev->dev); + + if (err) { + if (required) { + netif_err(nic, probe, nic->netdev, + "Failed to load firmware \"%s\": %d\n", + fw_name, err); + return ERR_PTR(err); + } else { + netif_info(nic, probe, nic->netdev, + "CPUSaver disabled. Needs \"%s\": %d\n", + fw_name, err); + return NULL; + } + } + + /* Firmware should be precisely UCODE_SIZE (words) plus three bytes + indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ + if (fw->size != UCODE_SIZE * 4 + 3) { + netif_err(nic, probe, nic->netdev, + "Firmware \"%s\" has wrong size %zu\n", + fw_name, fw->size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || + min_size >= UCODE_SIZE) { + netif_err(nic, probe, nic->netdev, + "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", + fw_name, timer, bundle, min_size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* OK, firmware is validated and ready to use. Save a pointer + * to it in the nic */ + nic->fw = fw; + return fw; +} + +static int e100_setup_ucode(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + const struct firmware *fw = (void *)skb; + u8 timer, bundle, min_size; + + /* It's not a real skb; we just abused the fact that e100_exec_cb + will pass it through to here... 
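+	   (Editor's note, added for clarity: clearing cb->skb immediately below
+	   keeps later command-block clean-up from mistaking the firmware
+	   pointer for a real socket buffer.)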
*/ + cb->skb = NULL; + + /* firmware is stored as little endian already */ + memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + /* Insert user-tunable settings in cb->u.ucode */ + cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); + cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); + cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); + + cb->command = cpu_to_le16(cb_ucode | cb_el); + return 0; +} + +static inline int e100_load_ucode_wait(struct nic *nic) +{ + const struct firmware *fw; + int err = 0, counter = 50; + struct cb *cb = nic->cb_to_clean; + + fw = e100_request_firmware(nic); + /* If it's NULL, then no ucode is required */ + if (IS_ERR_OR_NULL(fw)) + return PTR_ERR_OR_ZERO(fw); + + if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) + netif_err(nic, probe, nic->netdev, + "ucode cmd failed with error %d\n", err); + + /* must restart cuc */ + nic->cuc_cmd = cuc_start; + + /* wait for completion */ + e100_write_flush(nic); + udelay(10); + + /* wait for possibly (ouch) 500ms */ + while (!(cb->status & cpu_to_le16(cb_complete))) { + msleep(10); + if (!--counter) break; + } + + /* ack any interrupts, something could have been set */ + iowrite8(~0, &nic->csr->scb.stat_ack); + + /* if the command failed, or is not OK, notify and return */ + if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { + netif_err(nic, probe, nic->netdev, "ucode load failed\n"); + err = -EPERM; + } + + return err; +} + +static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_iaaddr); + memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); + return 0; +} + +static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_dump); + cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + + offsetof(struct mem, dump_buf)); + return 0; +} + +static int e100_phy_check_without_mii(struct nic *nic) +{ + u8 phy_type; + int without_mii; + + phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; + + switch (phy_type) { + case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ + case I82503: /* Non-MII PHY; UNTESTED! */ + case S80C24: /* Non-MII PHY; tested and working */ + /* paragraph from the FreeBSD driver, "FXP_PHY_80C24": + * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter + * doesn't have a programming interface of any sort. The + * media is sensed automatically based on how the link partner + * is configured. This is, in essence, manual configuration. + */ + netif_info(nic, probe, nic->netdev, + "found MII-less i82503 or 80c24 or other PHY\n"); + + nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; + nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ + + /* these might be needed for certain MII-less cards... 
+ * nic->flags |= ich; + * nic->flags |= ich_10h_workaround; */ + + without_mii = 1; + break; + default: + without_mii = 0; + break; + } + return without_mii; +} + +#define NCONFIG_AUTO_SWITCH 0x0080 +#define MII_NSC_CONG MII_RESV1 +#define NSC_CONG_ENABLE 0x0100 +#define NSC_CONG_TXREADY 0x0400 +#define ADVERTISE_FC_SUPPORTED 0x0400 +static int e100_phy_init(struct nic *nic) +{ + struct net_device *netdev = nic->netdev; + u32 addr; + u16 bmcr, stat, id_lo, id_hi, cong; + + /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ + for (addr = 0; addr < 32; addr++) { + nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) + break; + } + if (addr == 32) { + /* uhoh, no PHY detected: check whether we seem to be some + * weird, rare variant which is *known* to not have any MII. + * But do this AFTER MII checking only, since this does + * lookup of EEPROM values which may easily be unreliable. */ + if (e100_phy_check_without_mii(nic)) + return 0; /* simply return and hope for the best */ + else { + /* for unknown cases log a fatal error */ + netif_err(nic, hw, nic->netdev, + "Failed to locate any known PHY, aborting\n"); + return -EAGAIN; + } + } else + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy_addr = %d\n", nic->mii.phy_id); + + /* Get phy ID */ + id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); + id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); + nic->phy = (u32)id_hi << 16 | (u32)id_lo; + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy ID = 0x%08X\n", nic->phy); + + /* Select the phy and isolate the rest */ + for (addr = 0; addr < 32; addr++) { + if (addr != nic->mii.phy_id) { + mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); + } else if (nic->phy != phy_82552_v) { + bmcr = mdio_read(netdev, addr, MII_BMCR); + mdio_write(netdev, addr, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } + } + /* + * Workaround for 82552: + * Clear the ISOLATE bit on selected phy_id last (mirrored on all + * other phy_id's) using bmcr value from addr discovery loop above. + */ + if (nic->phy == phy_82552_v) + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* Handle National tx phys */ +#define NCS_PHY_MODEL_MASK 0xFFF0FFFF + if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { + /* Disable congestion control */ + cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); + cong |= NSC_CONG_TXREADY; + cong &= ~NSC_CONG_ENABLE; + mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); + } + + if (nic->phy == phy_82552_v) { + u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); + + /* assign special tweaked mdio_ctrl() function */ + nic->mdio_ctrl = mdio_ctrl_phy_82552_v; + + /* Workaround Si not advertising flow-control during autoneg */ + advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); + + /* Reset for the above changes to take effect */ + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + bmcr |= BMCR_RESET; + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); + } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && + (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && + (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { + /* enable/disable MDI/MDI-X auto-switching. 
*/ + mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, + nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); + } + + return 0; +} + +static int e100_hw_init(struct nic *nic) +{ + int err = 0; + + e100_hw_reset(nic); + + netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); + if (!in_interrupt() && (err = e100_self_test(nic))) + return err; + + if ((err = e100_phy_init(nic))) + return err; + if ((err = e100_exec_cmd(nic, cuc_load_base, 0))) + return err; + if ((err = e100_exec_cmd(nic, ruc_load_base, 0))) + return err; + if ((err = e100_load_ucode_wait(nic))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_configure))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_addr, + nic->dma_addr + offsetof(struct mem, stats)))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0))) + return err; + + e100_disable_irq(nic); + + return 0; +} + +static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct net_device *netdev = nic->netdev; + struct netdev_hw_addr *ha; + u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); + + cb->command = cpu_to_le16(cb_multi); + cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == count) + break; + memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, + ETH_ALEN); + } + return 0; +} + +static void e100_set_multicast_list(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "mc_count=%d, flags=0x%04X\n", + netdev_mc_count(netdev), netdev->flags); + + if (netdev->flags & IFF_PROMISC) + nic->flags |= promiscuous; + else + nic->flags &= ~promiscuous; + + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS) + nic->flags |= multicast_all; + else + nic->flags &= ~multicast_all; + + e100_exec_cb(nic, NULL, e100_configure); + e100_exec_cb(nic, NULL, e100_multi); +} + +static void e100_update_stats(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct net_device_stats *ns = &dev->stats; + struct stats *s = &nic->mem->stats; + __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : + (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : + &s->complete; + + /* Device's stats reporting may take several microseconds to + * complete, so we're always waiting for results of the + * previous command. 
*/ + + if (*complete == cpu_to_le32(cuc_dump_reset_complete)) { + *complete = 0; + nic->tx_frames = le32_to_cpu(s->tx_good_frames); + nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); + ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); + ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); + ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); + ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); + ns->collisions += nic->tx_collisions; + ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + + le32_to_cpu(s->tx_lost_crs); + nic->rx_short_frame_errors += + le32_to_cpu(s->rx_short_frame_errors); + ns->rx_length_errors = nic->rx_short_frame_errors + + nic->rx_over_length_errors; + ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); + ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); + ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); + ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + + le32_to_cpu(s->rx_alignment_errors) + + le32_to_cpu(s->rx_short_frame_errors) + + le32_to_cpu(s->rx_cdt_errors); + nic->tx_deferred += le32_to_cpu(s->tx_deferred); + nic->tx_single_collisions += + le32_to_cpu(s->tx_single_collisions); + nic->tx_multiple_collisions += + le32_to_cpu(s->tx_multiple_collisions); + if (nic->mac >= mac_82558_D101_A4) { + nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); + nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); + nic->rx_fc_unsupported += + le32_to_cpu(s->fc_rcv_unsupported); + if (nic->mac >= mac_82559_D101M) { + nic->tx_tco_frames += + le16_to_cpu(s->xmt_tco_frames); + nic->rx_tco_frames += + le16_to_cpu(s->rcv_tco_frames); + } + } + } + + + if (e100_exec_cmd(nic, cuc_dump_reset, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_dump_reset failed\n"); +} + +static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) +{ + /* Adjust inter-frame-spacing (IFS) between two transmits if + * we're getting collisions on a half-duplex connection. */ + + if (duplex == DUPLEX_HALF) { + u32 prev = nic->adaptive_ifs; + u32 min_frames = (speed == SPEED_100) ? 1000 : 100; + + if ((nic->tx_frames / 32 < nic->tx_collisions) && + (nic->tx_frames > min_frames)) { + if (nic->adaptive_ifs < 60) + nic->adaptive_ifs += 5; + } else if (nic->tx_frames < min_frames) { + if (nic->adaptive_ifs >= 5) + nic->adaptive_ifs -= 5; + } + if (nic->adaptive_ifs != prev) + e100_exec_cb(nic, NULL, e100_configure); + } +} + +static void e100_watchdog(struct timer_list *t) +{ + struct nic *nic = from_timer(nic, t, watchdog); + struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; + u32 speed; + + netif_printk(nic, timer, KERN_DEBUG, nic->netdev, + "right now = %ld\n", jiffies); + + /* mii library handles link maintenance tasks */ + + mii_ethtool_gset(&nic->mii, &cmd); + speed = ethtool_cmd_speed(&cmd); + + if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", + speed == SPEED_100 ? 100 : 10, + cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); + } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Down\n"); + } + + mii_check_link(&nic->mii); + + /* Software generated interrupt to recover from (rare) Rx + * allocation failure. 
+ * Unfortunately have to use a spinlock to not re-enable interrupts + * accidentally, due to hardware that shares a register between the + * interrupt mask bit and the SW Interrupt generation bit */ + spin_lock_irq(&nic->cmd_lock); + iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irq(&nic->cmd_lock); + + e100_update_stats(nic); + e100_adjust_adaptive_ifs(nic, speed, cmd.duplex); + + if (nic->mac <= mac_82557_D100_C) + /* Issue a multicast command to workaround a 557 lock up */ + e100_set_multicast_list(nic->netdev); + + if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) + /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ + nic->flags |= ich_10h_workaround; + else + nic->flags &= ~ich_10h_workaround; + + mod_timer(&nic->watchdog, + round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); +} + +static int e100_xmit_prepare(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + dma_addr_t dma_addr; + cb->command = nic->tx_command; + + dma_addr = pci_map_single(nic->pdev, + skb->data, skb->len, PCI_DMA_TODEVICE); + /* If we can't map the skb, have the upper layer try later */ + if (pci_dma_mapping_error(nic->pdev, dma_addr)) { + dev_kfree_skb_any(skb); + skb = NULL; + return -ENOMEM; + } + + /* + * Use the last 4 bytes of the SKB payload packet as the CRC, used for + * testing, ie sending frames with bad CRC. + */ + if (unlikely(skb->no_fcs)) + cb->command |= cpu_to_le16(cb_tx_nc); + else + cb->command &= ~cpu_to_le16(cb_tx_nc); + + /* interrupt every 16 packets regardless of delay */ + if ((nic->cbs_avail & ~15) == nic->cbs_avail) + cb->command |= cpu_to_le16(cb_i); + cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); + cb->u.tcb.tcb_byte_count = 0; + cb->u.tcb.threshold = nic->tx_threshold; + cb->u.tcb.tbd_count = 1; + cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); + cb->u.tcb.tbd.size = cpu_to_le16(skb->len); + skb_tx_timestamp(skb); + return 0; +} + +static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + if (nic->flags & ich_10h_workaround) { + /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. + Issue a NOP command followed by a 1us delay before + issuing the Tx command. */ + if (e100_exec_cmd(nic, cuc_nop, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_nop failed\n"); + udelay(1); + } + + err = e100_exec_cb(nic, skb, e100_xmit_prepare); + + switch (err) { + case -ENOSPC: + /* We queued the skb, but now we're out of space. */ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "No space for CB\n"); + netif_stop_queue(netdev); + break; + case -ENOMEM: + /* This is a hard error - log it. 
*/ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "Out of Tx resources, returning skb\n"); + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +static int e100_tx_clean(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct cb *cb; + int tx_cleaned = 0; + + spin_lock(&nic->cb_lock); + + /* Clean CBs marked complete */ + for (cb = nic->cb_to_clean; + cb->status & cpu_to_le16(cb_complete); + cb = nic->cb_to_clean = cb->next) { + dma_rmb(); /* read skb after status */ + netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, + "cb[%d]->status = 0x%04X\n", + (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), + cb->status); + + if (likely(cb->skb != NULL)) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += cb->skb->len; + + pci_unmap_single(nic->pdev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + PCI_DMA_TODEVICE); + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + tx_cleaned = 1; + } + cb->status = 0; + nic->cbs_avail++; + } + + spin_unlock(&nic->cb_lock); + + /* Recover from running out of Tx resources in xmit_frame */ + if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) + netif_wake_queue(nic->netdev); + + return tx_cleaned; +} + +static void e100_clean_cbs(struct nic *nic) +{ + if (nic->cbs) { + while (nic->cbs_avail != nic->params.cbs.count) { + struct cb *cb = nic->cb_to_clean; + if (cb->skb) { + pci_unmap_single(nic->pdev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + PCI_DMA_TODEVICE); + dev_kfree_skb(cb->skb); + } + nic->cb_to_clean = nic->cb_to_clean->next; + nic->cbs_avail++; + } + dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); + nic->cbs = NULL; + nic->cbs_avail = 0; + } + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = + nic->cbs; +} + +static int e100_alloc_cbs(struct nic *nic) +{ + struct cb *cb; + unsigned int i, count = nic->params.cbs.count; + + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; + nic->cbs_avail = 0; + + nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL, + &nic->cbs_dma_addr); + if (!nic->cbs) + return -ENOMEM; + + for (cb = nic->cbs, i = 0; i < count; cb++, i++) { + cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; + cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; + + cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); + cb->link = cpu_to_le32(nic->cbs_dma_addr + + ((i+1) % count) * sizeof(struct cb)); + } + + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; + nic->cbs_avail = count; + + return 0; +} + +static inline void e100_start_receiver(struct nic *nic, struct rx *rx) +{ + if (!nic->rxs) return; + if (RU_SUSPENDED != nic->ru_running) return; + + /* handle init time starts */ + if (!rx) rx = nic->rxs; + + /* (Re)start RU if suspended or idle and RFA is non-NULL */ + if (rx->skb) { + e100_exec_cmd(nic, ruc_start, rx->dma_addr); + nic->ru_running = RU_RUNNING; + } +} + +#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) +{ + if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) + return -ENOMEM; + + /* Init, and map the RFD. 
*/ + skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) { + dev_kfree_skb_any(rx->skb); + rx->skb = NULL; + rx->dma_addr = 0; + return -ENOMEM; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + } + + return 0; +} + +static int e100_rx_indicate(struct nic *nic, struct rx *rx, + unsigned int *work_done, unsigned int work_to_do) +{ + struct net_device *dev = nic->netdev; + struct sk_buff *skb = rx->skb; + struct rfd *rfd = (struct rfd *)skb->data; + u16 rfd_status, actual_size; + u16 fcs_pad = 0; + + if (unlikely(work_done && *work_done >= work_to_do)) + return -EAGAIN; + + /* Need to sync before taking a peek at cb_complete bit */ + pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + rfd_status = le16_to_cpu(rfd->status); + + netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, + "status=0x%04X\n", rfd_status); + dma_rmb(); /* read size after status bit */ + + /* If data isn't ready, nothing to indicate */ + if (unlikely(!(rfd_status & cb_complete))) { + /* If the next buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling + * interrupts */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, + sizeof(struct rfd), + PCI_DMA_FROMDEVICE); + return -ENODATA; + } + + /* Get actual data size */ + if (unlikely(dev->features & NETIF_F_RXFCS)) + fcs_pad = 4; + actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; + if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) + actual_size = RFD_BUF_LEN - sizeof(struct rfd); + + /* Get data */ + pci_unmap_single(nic->pdev, rx->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + /* If this buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling interrupts. + * This can happen when the RU sees the size change but also sees + * the el bit set. */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) { + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + } + + /* Pull off the RFD and put the actual data (minus eth hdr) */ + skb_reserve(skb, sizeof(struct rfd)); + skb_put(skb, actual_size); + skb->protocol = eth_type_trans(skb, nic->netdev); + + /* If we are receiving all frames, then don't bother + * checking for errors. + */ + if (unlikely(dev->features & NETIF_F_RXALL)) { + if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) + /* Received oversized frame, but keep it. 
*/ + nic->rx_over_length_errors++; + goto process_skb; + } + + if (unlikely(!(rfd_status & cb_ok))) { + /* Don't indicate if hardware indicates errors */ + dev_kfree_skb_any(skb); + } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) { + /* Don't indicate oversized frames */ + nic->rx_over_length_errors++; + dev_kfree_skb_any(skb); + } else { +process_skb: + dev->stats.rx_packets++; + dev->stats.rx_bytes += (actual_size - fcs_pad); + netif_receive_skb(skb); + if (work_done) + (*work_done)++; + } + + rx->skb = NULL; + + return 0; +} + +static void e100_rx_clean(struct nic *nic, unsigned int *work_done, + unsigned int work_to_do) +{ + struct rx *rx; + int restart_required = 0, err = 0; + struct rx *old_before_last_rx, *new_before_last_rx; + struct rfd *old_before_last_rfd, *new_before_last_rfd; + + /* Indicate newly arrived packets */ + for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { + err = e100_rx_indicate(nic, rx, work_done, work_to_do); + /* Hit quota or no more to clean */ + if (-EAGAIN == err || -ENODATA == err) + break; + } + + + /* On EAGAIN, hit quota so have more work to do, restart once + * cleanup is complete. + * Else, are we already rnr? then pay attention!!! this ensures that + * the state machine progression never allows a start with a + * partially cleaned list, avoiding a race between hardware + * and rx_to_clean when in NAPI mode */ + if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) + restart_required = 1; + + old_before_last_rx = nic->rx_to_use->prev->prev; + old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; + + /* Alloc new skbs to refill list */ + for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { + if (unlikely(e100_rx_alloc_skb(nic, rx))) + break; /* Better luck next time (see watchdog) */ + } + + new_before_last_rx = nic->rx_to_use->prev->prev; + if (new_before_last_rx != old_before_last_rx) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer + * without worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this + * buffer. + * When the hardware hits the before last buffer with el-bit + * and size of 0, it will RNR interrupt, the RUS will go into + * the No Resources state. It will not complete nor write to + * this buffer. */ + new_before_last_rfd = + (struct rfd *)new_before_last_rx->skb->data; + new_before_last_rfd->size = 0; + new_before_last_rfd->command |= cpu_to_le16(cb_el); + pci_dma_sync_single_for_device(nic->pdev, + new_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + + /* Now that we have a new stopping point, we can clear the old + * stopping point. We must sync twice to get the proper + * ordering on the hardware side of things. */ + old_before_last_rfd->command &= ~cpu_to_le16(cb_el); + pci_dma_sync_single_for_device(nic->pdev, + old_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + + ETH_FCS_LEN); + pci_dma_sync_single_for_device(nic->pdev, + old_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + } + + if (restart_required) { + // ack the rnr? 
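+		/* Descriptive note (added for clarity, not in the upstream
+		 * driver): acknowledge the pending RNR (receiver-not-ready)
+		 * indication in the SCB status/ack register, then restart the
+		 * receive unit from the next RFD still to be cleaned. */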
+ iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); + e100_start_receiver(nic, nic->rx_to_clean); + if (work_done) + (*work_done)++; + } +} + +static void e100_rx_clean_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + + nic->ru_running = RU_UNINITIALIZED; + + if (nic->rxs) { + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + if (rx->skb) { + pci_unmap_single(nic->pdev, rx->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + dev_kfree_skb(rx->skb); + } + } + kfree(nic->rxs); + nic->rxs = NULL; + } + + nic->rx_to_use = nic->rx_to_clean = NULL; +} + +static int e100_rx_alloc_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + struct rfd *before_last; + + nic->rx_to_use = nic->rx_to_clean = NULL; + nic->ru_running = RU_UNINITIALIZED; + + if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC))) + return -ENOMEM; + + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; + rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; + if (e100_rx_alloc_skb(nic, rx)) { + e100_rx_clean_list(nic); + return -ENOMEM; + } + } + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer without + * worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this buffer. + * When the hardware hits the before last buffer with el-bit and size + * of 0, it will RNR interrupt, the RU will go into the No Resources + * state. It will not complete nor write to this buffer. */ + rx = nic->rxs->prev->prev; + before_last = (struct rfd *)rx->skb->data; + before_last->command |= cpu_to_le16(cb_el); + before_last->size = 0; + pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + + nic->rx_to_use = nic->rx_to_clean = nic->rxs; + nic->ru_running = RU_SUSPENDED; + + return 0; +} + +static irqreturn_t e100_intr(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct nic *nic = netdev_priv(netdev); + u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); + + netif_printk(nic, intr, KERN_DEBUG, nic->netdev, + "stat_ack = 0x%02X\n", stat_ack); + + if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ + stat_ack == stat_ack_not_present) /* Hardware is ejected */ + return IRQ_NONE; + + /* Ack interrupt(s) */ + iowrite8(stat_ack, &nic->csr->scb.stat_ack); + + /* We hit Receive No Resource (RNR); restart RU after cleaning */ + if (stat_ack & stat_ack_rnr) + nic->ru_running = RU_SUSPENDED; + + if (likely(napi_schedule_prep(&nic->napi))) { + e100_disable_irq(nic); + __napi_schedule(&nic->napi); + } + + return IRQ_HANDLED; +} + +static int e100_poll(struct napi_struct *napi, int budget) +{ + struct nic *nic = container_of(napi, struct nic, napi); + unsigned int work_done = 0; + + e100_rx_clean(nic, &work_done, budget); + e100_tx_clean(nic); + + /* If budget fully consumed, continue polling */ + if (work_done == budget) + return budget; + + /* only re-enable interrupt if stack agrees polling is really done */ + if (likely(napi_complete_done(napi, work_done))) + e100_enable_irq(nic); + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void e100_netpoll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_disable_irq(nic); + e100_intr(nic->pdev->irq, netdev); + e100_tx_clean(nic); + e100_enable_irq(nic); +} +#endif + +static int e100_set_mac_address(struct net_device *netdev, void *p) +{ + 
struct nic *nic = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + e100_exec_cb(nic, NULL, e100_setup_iaaddr); + + return 0; +} + +static int e100_asf(struct nic *nic) +{ + /* ASF can be enabled from eeprom */ + return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && + (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && + !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && + ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); +} + +static int e100_up(struct nic *nic) +{ + int err; + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_rx_clean_list; + if ((err = e100_hw_init(nic))) + goto err_clean_cbs; + e100_set_multicast_list(nic->netdev); + e100_start_receiver(nic, NULL); + mod_timer(&nic->watchdog, jiffies); + if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, + nic->netdev->name, nic->netdev))) + goto err_no_irq; + netif_wake_queue(nic->netdev); + napi_enable(&nic->napi); + /* enable ints _after_ enabling poll, preventing a race between + * disable ints+schedule */ + e100_enable_irq(nic); + return 0; + +err_no_irq: + del_timer_sync(&nic->watchdog); +err_clean_cbs: + e100_clean_cbs(nic); +err_rx_clean_list: + e100_rx_clean_list(nic); + return err; +} + +static void e100_down(struct nic *nic) +{ + /* wait here for poll to complete */ + napi_disable(&nic->napi); + netif_stop_queue(nic->netdev); + e100_hw_reset(nic); + free_irq(nic->pdev->irq, nic->netdev); + del_timer_sync(&nic->watchdog); + netif_carrier_off(nic->netdev); + e100_clean_cbs(nic); + e100_rx_clean_list(nic); +} + +static void e100_tx_timeout(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + /* Reset outside of interrupt context, to avoid request_irq + * in interrupt context */ + schedule_work(&nic->tx_timeout_task); +} + +static void e100_tx_timeout_task(struct work_struct *work) +{ + struct nic *nic = container_of(work, struct nic, tx_timeout_task); + struct net_device *netdev = nic->netdev; + + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); + + rtnl_lock(); + if (netif_running(netdev)) { + e100_down(netdev_priv(netdev)); + e100_up(netdev_priv(netdev)); + } + rtnl_unlock(); +} + +static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) +{ + int err; + struct sk_buff *skb; + + /* Use driver resources to perform internal MAC or PHY + * loopback test. A single packet is prepared and transmitted + * in loopback mode, and the test passes if the received + * packet compares byte-for-byte to the transmitted packet. 
*/ + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_clean_rx; + + /* ICH PHY loopback is broken so do MAC loopback instead */ + if (nic->flags & ich && loopback_mode == lb_phy) + loopback_mode = lb_mac; + + nic->loopback = loopback_mode; + if ((err = e100_hw_init(nic))) + goto err_loopback_none; + + if (loopback_mode == lb_phy) + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, + BMCR_LOOPBACK); + + e100_start_receiver(nic, NULL); + + if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { + err = -ENOMEM; + goto err_loopback_none; + } + skb_put(skb, ETH_DATA_LEN); + memset(skb->data, 0xFF, ETH_DATA_LEN); + e100_xmit_frame(skb, nic->netdev); + + msleep(10); + + pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), + skb->data, ETH_DATA_LEN)) + err = -EAGAIN; + +err_loopback_none: + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); + nic->loopback = lb_none; + e100_clean_cbs(nic); + e100_hw_reset(nic); +err_clean_rx: + e100_rx_clean_list(nic); + return err; +} + +#define MII_LED_CONTROL 0x1B +#define E100_82552_LED_OVERRIDE 0x19 +#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ +#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ + +static int e100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + + mii_ethtool_get_link_ksettings(&nic->mii, cmd); + + return 0; +} + +static int e100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); + err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); + e100_exec_cb(nic, NULL, e100_configure); + + return err; +} + +static void e100_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct nic *nic = netdev_priv(netdev); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(nic->pdev), + sizeof(info->bus_info)); +} + +#define E100_PHY_REGS 0x1D +static int e100_get_regs_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + /* We know the number of registers, and the size of the dump buffer. + * Calculate the total size in bytes. + */ + return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); +} + +static void e100_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nic *nic = netdev_priv(netdev); + u32 *buff = p; + int i; + + regs->version = (1 << 24) | nic->pdev->revision; + buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | + ioread8(&nic->csr->scb.cmd_lo) << 16 | + ioread16(&nic->csr->scb.status); + for (i = 0; i < E100_PHY_REGS; i++) + /* Note that we read the registers in reverse order. This + * ordering is the ABI apparently used by ethtool and other + * applications. 
+ */ + buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, + E100_PHY_REGS - 1 - i); + memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); + e100_exec_cb(nic, NULL, e100_dump); + msleep(10); + memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, + sizeof(nic->mem->dump_buf)); +} + +static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; + wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0; +} + +static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + + if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || + !device_can_wakeup(&nic->pdev->dev)) + return -EOPNOTSUPP; + + if (wol->wolopts) + nic->flags |= wol_magic; + else + nic->flags &= ~wol_magic; + + device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); + + e100_exec_cb(nic, NULL, e100_configure); + + return 0; +} + +static u32 e100_get_msglevel(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->msg_enable; +} + +static void e100_set_msglevel(struct net_device *netdev, u32 value) +{ + struct nic *nic = netdev_priv(netdev); + nic->msg_enable = value; +} + +static int e100_nway_reset(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_nway_restart(&nic->mii); +} + +static u32 e100_get_link(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_link_ok(&nic->mii); +} + +static int e100_get_eeprom_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->eeprom_wc << 1; +} + +#define E100_EEPROM_MAGIC 0x1234 +static int e100_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + eeprom->magic = E100_EEPROM_MAGIC; + memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); + + return 0; +} + +static int e100_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + if (eeprom->magic != E100_EEPROM_MAGIC) + return -EINVAL; + + memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); + + return e100_eeprom_save(nic, eeprom->offset >> 1, + (eeprom->len >> 1) + 1); +} + +static void e100_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + ring->rx_max_pending = rfds->max; + ring->tx_max_pending = cbs->max; + ring->rx_pending = rfds->count; + ring->tx_pending = cbs->count; +} + +static int e100_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(netdev)) + e100_down(nic); + rfds->count = max(ring->rx_pending, rfds->min); + rfds->count = min(rfds->count, rfds->max); + cbs->count = max(ring->tx_pending, cbs->min); + cbs->count = min(cbs->count, cbs->max); + netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", + rfds->count, cbs->count); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} + +static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)", + "Eeprom test 
(on/offline)", + "Self test (offline)", + "Mac loopback (offline)", + "Phy loopback (offline)", +}; +#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test) + +static void e100_diag_test(struct net_device *netdev, + struct ethtool_test *test, u64 *data) +{ + struct ethtool_cmd cmd; + struct nic *nic = netdev_priv(netdev); + int i, err; + + memset(data, 0, E100_TEST_LEN * sizeof(u64)); + data[0] = !mii_link_ok(&nic->mii); + data[1] = e100_eeprom_load(nic); + if (test->flags & ETH_TEST_FL_OFFLINE) { + + /* save speed, duplex & autoneg settings */ + err = mii_ethtool_gset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_down(nic); + data[2] = e100_self_test(nic); + data[3] = e100_loopback_test(nic, lb_mac); + data[4] = e100_loopback_test(nic, lb_phy); + + /* restore speed, duplex & autoneg settings */ + err = mii_ethtool_sset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_up(nic); + } + for (i = 0; i < E100_TEST_LEN; i++) + test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; + + msleep_interruptible(4 * 1000); +} + +static int e100_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct nic *nic = netdev_priv(netdev); + enum led_state { + led_on = 0x01, + led_off = 0x04, + led_on_559 = 0x05, + led_on_557 = 0x07, + }; + u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : + MII_LED_CONTROL; + u16 leds = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; + + case ETHTOOL_ID_ON: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : + (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; + break; + + case ETHTOOL_ID_OFF: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off; + break; + + case ETHTOOL_ID_INACTIVE: + break; + } + + mdio_write(netdev, nic->mii.phy_id, led_reg, leds); + return 0; +} + +static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + /* device-specific stats */ + "tx_deferred", "tx_single_collisions", "tx_multi_collisions", + "tx_flow_control_pause", "rx_flow_control_pause", + "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", + "rx_short_frame_errors", "rx_over_length_errors", +}; +#define E100_NET_STATS_LEN 21 +#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) + +static int e100_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E100_TEST_LEN; + case ETH_SS_STATS: + return E100_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e100_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nic *nic = netdev_priv(netdev); + int i; + + for (i = 0; i < E100_NET_STATS_LEN; i++) + data[i] = ((unsigned long *)&netdev->stats)[i]; + + data[i++] = nic->tx_deferred; + data[i++] = nic->tx_single_collisions; + data[i++] = nic->tx_multiple_collisions; + data[i++] = nic->tx_fc_pause; + data[i++] = nic->rx_fc_pause; + data[i++] = nic->rx_fc_unsupported; + data[i++] = nic->tx_tco_frames; + data[i++] = nic->rx_tco_frames; + data[i++] = nic->rx_short_frame_errors; + data[i++] = nic->rx_over_length_errors; +} + +static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case 
ETH_SS_TEST: + memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test)); + break; + case ETH_SS_STATS: + memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats)); + break; + } +} + +static const struct ethtool_ops e100_ethtool_ops = { + .get_drvinfo = e100_get_drvinfo, + .get_regs_len = e100_get_regs_len, + .get_regs = e100_get_regs, + .get_wol = e100_get_wol, + .set_wol = e100_set_wol, + .get_msglevel = e100_get_msglevel, + .set_msglevel = e100_set_msglevel, + .nway_reset = e100_nway_reset, + .get_link = e100_get_link, + .get_eeprom_len = e100_get_eeprom_len, + .get_eeprom = e100_get_eeprom, + .set_eeprom = e100_set_eeprom, + .get_ringparam = e100_get_ringparam, + .set_ringparam = e100_set_ringparam, + .self_test = e100_diag_test, + .get_strings = e100_get_strings, + .set_phys_id = e100_set_phys_id, + .get_ethtool_stats = e100_get_ethtool_stats, + .get_sset_count = e100_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e100_get_link_ksettings, + .set_link_ksettings = e100_set_link_ksettings, +}; + +static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct nic *nic = netdev_priv(netdev); + + return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); +} + +static int e100_alloc(struct nic *nic) +{ + nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem), + &nic->dma_addr); + return nic->mem ? 0 : -ENOMEM; +} + +static void e100_free(struct nic *nic) +{ + if (nic->mem) { + pci_free_consistent(nic->pdev, sizeof(struct mem), + nic->mem, nic->dma_addr); + nic->mem = NULL; + } +} + +static int e100_open(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err = 0; + + netif_carrier_off(netdev); + if ((err = e100_up(nic))) + netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); + return err; +} + +static int e100_close(struct net_device *netdev) +{ + e100_down(netdev_priv(netdev)); + return 0; +} + +static int e100_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct nic *nic = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + e100_exec_cb(nic, NULL, e100_configure); + return 1; +} + +static const struct net_device_ops e100_netdev_ops = { + .ndo_open = e100_open, + .ndo_stop = e100_close, + .ndo_start_xmit = e100_xmit_frame, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = e100_set_multicast_list, + .ndo_set_mac_address = e100_set_mac_address, + .ndo_do_ioctl = e100_do_ioctl, + .ndo_tx_timeout = e100_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e100_netpoll, +#endif + .ndo_set_features = e100_set_features, +}; + +static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct nic *nic; + int err; + + if (!(netdev = alloc_etherdev(sizeof(struct nic)))) + return -ENOMEM; + + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; + + netdev->netdev_ops = &e100_netdev_ops; + netdev->ethtool_ops = &e100_ethtool_ops; + netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + nic = netdev_priv(netdev); + netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); + nic->netdev = netdev; + nic->pdev = pdev; + nic->msg_enable = (1 << debug) - 1; + nic->mdio_ctrl = mdio_ctrl_hw; + pci_set_drvdata(pdev, 
netdev); + + if ((err = pci_enable_device(pdev))) { + netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); + goto err_out_free_dev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + if ((err = pci_request_regions(pdev, DRV_NAME))) { + netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable_pdev; + } + + if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + if (use_io) + netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); + + nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr)); + if (!nic->csr) { + netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + if (ent->driver_data) + nic->flags |= ich; + else + nic->flags &= ~ich; + + e100_get_defaults(nic); + + /* D100 MAC doesn't allow rx of vlan packets with normal MTU */ + if (nic->mac < mac_82558_D101_A4) + netdev->features |= NETIF_F_VLAN_CHALLENGED; + + /* locks must be initialized before calling hw_reset */ + spin_lock_init(&nic->cb_lock); + spin_lock_init(&nic->cmd_lock); + spin_lock_init(&nic->mdio_lock); + + /* Reset the device before pci_set_master() in case device is in some + * funky state and has an interrupt pending - hint: we don't have the + * interrupt handler registered yet. */ + e100_hw_reset(nic); + + pci_set_master(pdev); + + timer_setup(&nic->watchdog, e100_watchdog, 0); + + INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); + + if ((err = e100_alloc(nic))) { + netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); + goto err_out_iounmap; + } + + if ((err = e100_eeprom_load(nic))) + goto err_out_free; + + e100_phy_init(nic); + + memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!eeprom_bad_csum_allow) { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); + err = -EAGAIN; + goto err_out_free; + } else { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); + } + } + + /* Wol magic packet can be enabled from eeprom */ + if ((nic->mac >= mac_82558_D101_A4) && + (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { + nic->flags |= wol_magic; + device_set_wakeup_enable(&pdev->dev, true); + } + + /* ack any pending wake events, disable PME */ + pci_pme_active(pdev, false); + + strcpy(netdev->name, "eth%d"); + if ((err = register_netdev(netdev))) { + netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n"); + goto err_out_free; + } + nic->cbs_pool = dma_pool_create(netdev->name, + &nic->pdev->dev, + nic->params.cbs.max * sizeof(struct cb), + sizeof(u32), + 0); + if (!nic->cbs_pool) { + netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); + err = -ENOMEM; + goto err_out_pool; + } + netif_info(nic, probe, nic->netdev, + "addr 0x%llx, irq %d, MAC addr %pM\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), + pdev->irq, netdev->dev_addr); + + return 0; + +err_out_pool: + unregister_netdev(netdev); +err_out_free: + e100_free(nic); +err_out_iounmap: + pci_iounmap(pdev, nic->csr); +err_out_free_res: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out_free_dev: + free_netdev(netdev); + return err; +} + +static void e100_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netdev) { + struct nic *nic = netdev_priv(netdev); + unregister_netdev(netdev); + e100_free(nic); + pci_iounmap(pdev, nic->csr); + dma_pool_destroy(nic->cbs_pool); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ +#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ +#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ +static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (netif_running(netdev)) + e100_down(nic); + netif_device_detach(netdev); + + pci_save_state(pdev); + + if ((nic->flags & wol_magic) | e100_asf(nic)) { + /* enable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, smartspeed | + E100_82552_REV_ANEG | E100_82552_ANEG_NOW); + } + *enable_wake = true; + } else { + *enable_wake = false; + } + + pci_clear_master(pdev); +} + +static int __e100_power_off(struct pci_dev *pdev, bool wake) +{ + if (wake) + return pci_prepare_to_sleep(pdev); + + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +#ifdef CONFIG_PM +static int e100_suspend(struct pci_dev *pdev, pm_message_t state) +{ + bool wake; + __e100_shutdown(pdev, &wake); + return __e100_power_off(pdev, wake); +} + +static int e100_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, PCI_D0, 0); + + /* disable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, + smartspeed & ~(E100_82552_REV_ANEG)); + } + + netif_device_attach(netdev); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} +#endif /* CONFIG_PM */ + +static void e100_shutdown(struct pci_dev *pdev) +{ + bool wake; + __e100_shutdown(pdev, &wake); + if (system_state == SYSTEM_POWER_OFF) + __e100_power_off(pdev, wake); +} + +/* ------------------ PCI Error Recovery infrastructure -------------- */ +/** + * e100_io_error_detected - called when PCI error is detected. + * @pdev: Pointer to PCI device + * @state: The current pci connection state + */ +static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e100_down(nic); + pci_disable_device(pdev); + + /* Request a slot reset. 
*/ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e100_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch. + */ +static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + pr_err("Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + /* Only one device per card can do a reset */ + if (0 != PCI_FUNC(pdev->devfn)) + return PCI_ERS_RESULT_RECOVERED; + e100_hw_reset(nic); + e100_phy_init(nic); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e100_io_resume - resume normal operations + * @pdev: Pointer to PCI device + * + * Resume normal operations after an error recovery + * sequence has been completed. + */ +static void e100_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, PCI_D0, 0); + + netif_device_attach(netdev); + if (netif_running(netdev)) { + e100_open(netdev); + mod_timer(&nic->watchdog, jiffies); + } +} + +static const struct pci_error_handlers e100_err_handler = { + .error_detected = e100_io_error_detected, + .slot_reset = e100_io_slot_reset, + .resume = e100_io_resume, +}; + +static struct pci_driver e100_driver = { + .name = DRV_NAME, + .id_table = e100_id_table, + .probe = e100_probe, + .remove = e100_remove, +#ifdef CONFIG_PM + /* Power Management hooks */ + .suspend = e100_suspend, + .resume = e100_resume, +#endif + .shutdown = e100_shutdown, + .err_handler = &e100_err_handler, +}; + +static int __init e100_init_module(void) +{ + if (((1 << debug) - 1) & NETIF_MSG_DRV) { + pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); + pr_info("%s\n", DRV_COPYRIGHT); + } + return pci_register_driver(&e100_driver); +} + +static void __exit e100_cleanup_module(void) +{ + pci_unregister_driver(&e100_driver); +} + +module_init(e100_init_module); +module_exit(e100_cleanup_module); diff --git a/devices/e100-6.1-ethercat.c b/devices/e100-6.1-ethercat.c new file mode 100644 index 00000000..3aaaabb2 --- /dev/null +++ b/devices/e100-6.1-ethercat.c @@ -0,0 +1,3381 @@ +/****************************************************************************** + * + * $Id$ + * + * Copyright (C) 2007-2012 Florian Pose, Ingenieurgemeinschaft IgH + * + * This file is part of the IgH EtherCAT Master. + * + * The IgH EtherCAT Master is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + * The IgH EtherCAT Master is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with the IgH EtherCAT Master; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * --- + * + * The license mentioned above concerns the source code only. Using the + * EtherCAT technology and brand is only permitted in compliance with the + * industrial property and similar rights of Beckhoff Automation GmbH. 
+ * + * --- + * + * vim: noexpandtab + * + *****************************************************************************/ + +/** + \file + EtherCAT driver for e100-compatible NICs. +*/ + +/* Former documentation: */ + +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* + * e100.c: Intel(R) PRO/100 ethernet driver + * + * (Re)written 2003 by scott.feldman@intel.com. Based loosely on + * original e100 driver, but better described as a munging of + * e100, e1000, eepro100, tg3, 8139cp, and other drivers. + * + * References: + * Intel 8255x 10/100 Mbps Ethernet Controller Family, + * Open Source Software Developers Manual, + * http://sourceforge.net/projects/e1000 + * + * + * Theory of Operation + * + * I. General + * + * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet + * controller family, which includes the 82557, 82558, 82559, 82550, + * 82551, and 82562 devices. 82558 and greater controllers + * integrate the Intel 82555 PHY. The controllers are used in + * server and client network interface cards, as well as in + * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx + * configurations. 8255x supports a 32-bit linear addressing + * mode and operates at 33Mhz PCI clock rate. + * + * II. Driver Operation + * + * Memory-mapped mode is used exclusively to access the device's + * shared-memory structure, the Control/Status Registers (CSR). All + * setup, configuration, and control of the device, including queuing + * of Tx, Rx, and configuration commands is through the CSR. + * cmd_lock serializes accesses to the CSR command register. cb_lock + * protects the shared Command Block List (CBL). + * + * 8255x is highly MII-compliant and all access to the PHY go + * through the Management Data Interface (MDI). Consequently, the + * driver leverages the mii.c library shared with other MII-compliant + * devices. + * + * Big- and Little-Endian byte order as well as 32- and 64-bit + * archs are supported. Weak-ordered memory and non-cache-coherent + * archs are supported. + * + * III. Transmit + * + * A Tx skb is mapped and hangs off of a TCB. TCBs are linked + * together in a fixed-size ring (CBL) thus forming the flexible mode + * memory structure. A TCB marked with the suspend-bit indicates + * the end of the ring. The last TCB processed suspends the + * controller, and the controller can be restarted by issue a CU + * resume command to continue from the suspend point, or a CU start + * command to start at a given position in the ring. + * + * Non-Tx commands (config, multicast setup, etc) are linked + * into the CBL ring along with Tx commands. The common structure + * used for both Tx and non-Tx commands is the Command Block (CB). + * + * cb_to_use is the next CB to use for queuing a command; cb_to_clean + * is the next CB to check for completion; cb_to_send is the first + * CB to start on in case of a previous failure to resume. CB clean + * up happens in interrupt context in response to a CU interrupt. + * cbs_avail keeps track of number of free CB resources available. + * + * Hardware padding of short packets to minimum packet size is + * enabled. 82557 pads with 7Eh, while the later controllers pad + * with 00h. + * + * IV. Receive + * + * The Receive Frame Area (RFA) comprises a ring of Receive Frame + * Descriptors (RFD) + data buffer, thus forming the simplified mode + * memory structure. Rx skbs are allocated to contain both the RFD + * and the data buffer, but the RFD is pulled off before the skb is + * indicated. 
The data buffer is aligned such that encapsulated + * protocol headers are u32-aligned. Since the RFD is part of the + * mapped shared memory, and completion status is contained within + * the RFD, the RFD must be dma_sync'ed to maintain a consistent + * view from software and hardware. + * + * In order to keep updates to the RFD link field from colliding with + * hardware writes to mark packets complete, we use the feature that + * hardware will not write to a size 0 descriptor and mark the previous + * packet as end-of-list (EL). After updating the link, we remove EL + * and only then restore the size such that hardware may use the + * previous-to-end RFD. + * + * Under typical operation, the receive unit (RU) is start once, + * and the controller happily fills RFDs as frames arrive. If + * replacement RFDs cannot be allocated, or the RU goes non-active, + * the RU must be restarted. Frame arrival generates an interrupt, + * and Rx indication and re-allocation happen in the same context, + * therefore no locking is required. A software-generated interrupt + * is generated from the watchdog to recover from a failed allocation + * scenario where all Rx resources have been indicated and none re- + * placed. + * + * V. Miscellaneous + * + * VLAN offloading of tagging, stripping and filtering is not + * supported, but driver will accommodate the extra 4-byte VLAN tag + * for processing by upper layers. Tx/Rx Checksum offloading is not + * supported. Tx Scatter/Gather is not supported. Jumbo Frames is + * not supported (hardware limitation). + * + * MagicPacket(tm) WoL support is enabled/disabled via ethtool. + * + * Thanks to JC (jchapman@katalix.com) for helping with + * testing/troubleshooting the development driver. + * + * TODO: + * o several entry points race with dev->close + * o check for tx-no-resources/stop Q races with tx clean/wake Q + * + * FIXES: + * 2005/12/02 - Michael O'Donnell + * - Stratus87247: protect MDI control register manipulations + * 2009/06/01 - Andreas Mohr + * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// EtherCAT includes +#include "../globals.h" +#include "ecdev.h" + +#define DRV_NAME "ec_e100" +#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver, EtherCAT enabled" +#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" + +#define E100_WATCHDOG_PERIOD (2 * HZ) +#define E100_NAPI_WEIGHT 16 + +#define FIRMWARE_D101M "e100/d101m_ucode.bin" +#define FIRMWARE_D101S "e100/d101s_ucode.bin" +#define FIRMWARE_D102E "e100/d102e_ucode.bin" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE(FIRMWARE_D101M); +MODULE_FIRMWARE(FIRMWARE_D101S); +MODULE_FIRMWARE(FIRMWARE_D102E); + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR("Florian Pose "); +MODULE_LICENSE("GPL"); + +void e100_ec_poll(struct net_device *); + +static int debug = 3; +static int eeprom_bad_csum_allow = 0; +static int use_io = 0; +module_param(debug, int, 0); +module_param(eeprom_bad_csum_allow, int, 0); +module_param(use_io, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); +MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); 
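+/* Usage sketch (illustrative only, not part of the original patch): the
+ * module parameters declared above can be set at load time. Assuming the
+ * module is installed under DRV_NAME ("ec_e100"), a raised message level
+ * and forced I/O access mode would look like:
+ *
+ *   modprobe ec_e100 debug=6 use_io=1
+ *
+ * eeprom_bad_csum_allow=1 may likewise be passed to tolerate an invalid
+ * EEPROM checksum during probe; adjust the module name if your build
+ * installs it differently.
+ */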
+ +#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ + PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ + PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } +static const struct pci_device_id e100_id_table[] = { + INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1032, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1033, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1034, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1038, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1039, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103A, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103B, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103C, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103D, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103E, 4), + INTEL_8255X_ETHERNET_DEVICE(0x1050, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1051, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1052, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1053, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1054, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1055, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1056, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1057, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1059, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1064, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1065, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1066, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1067, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1068, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), + INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), + INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), + INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), + INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), + INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), + { 0, } +}; + +// prevent from being loaded automatically +//MODULE_DEVICE_TABLE(pci, e100_id_table); + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 0xFF, +}; + +enum phy { + phy_100a = 0x000003E0, + phy_100c = 0x035002A8, + phy_82555_tx = 0x015002A8, + phy_nsc_tx = 0x5C002000, + phy_82562_et = 0x033002A8, + phy_82562_em = 0x032002A8, + phy_82562_ek = 0x031002A8, + phy_82562_eh = 0x017002A8, + phy_82552_v = 0xd061004d, + phy_unknown = 0xFFFFFFFF, +}; + +/* CSR (Control/Status Registers) */ +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 0x08, + rus_ready = 0x10, + rus_mask = 0x3C, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0x00, + stat_ack_sw_gen = 0x04, + stat_ack_rnr = 0x10, + stat_ack_cu_idle = 0x20, + stat_ack_frame_rx = 0x40, + stat_ack_cu_cmd_done = 0x80, + stat_ack_not_present = 0xFF, + stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), + stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), +}; + +enum scb_cmd_hi { + irq_mask_none = 0x00, + irq_mask_all = 0x01, + 
irq_sw_gen = 0x02, +}; + +enum scb_cmd_lo { + cuc_nop = 0x00, + ruc_start = 0x01, + ruc_load_base = 0x06, + cuc_start = 0x10, + cuc_resume = 0x20, + cuc_dump_addr = 0x40, + cuc_dump_stats = 0x50, + cuc_load_base = 0x60, + cuc_dump_reset = 0x70, +}; + +enum cuc_dump { + cuc_dump_complete = 0x0000A005, + cuc_dump_reset_complete = 0x0000A007, +}; + +enum port { + software_reset = 0x0000, + selftest = 0x0001, + selective_reset = 0x0002, +}; + +enum eeprom_ctrl_lo { + eesk = 0x01, + eecs = 0x02, + eedi = 0x04, + eedo = 0x08, +}; + +enum mdi_ctrl { + mdi_write = 0x04000000, + mdi_read = 0x08000000, + mdi_ready = 0x10000000, +}; + +enum eeprom_op { + op_write = 0x05, + op_read = 0x06, + op_ewds = 0x10, + op_ewen = 0x13, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 0x03, + eeprom_phy_iface = 0x06, + eeprom_id = 0x0A, + eeprom_config_asf = 0x0D, + eeprom_smbus_addr = 0x90, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 0x0080, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB, + I82553C, + I82503, + DP83840, + S80C240, + S80C24, + I82555, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 0x0020, +}; + +enum eeprom_config_asf { + eeprom_asf = 0x8000, + eeprom_gcl = 0x4000, +}; + +enum cb_status { + cb_complete = 0x8000, + cb_ok = 0x2000, +}; + +/* + * cb_command - Command Block flags + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory + */ +enum cb_command { + cb_nop = 0x0000, + cb_iaaddr = 0x0001, + cb_config = 0x0002, + cb_multi = 0x0003, + cb_tx = 0x0004, + cb_ucode = 0x0005, + cb_dump = 0x0006, + cb_tx_sf = 0x0008, + cb_tx_nc = 0x0010, + cb_cid = 0x1f00, + cb_i = 0x2000, + cb_s = 0x4000, + cb_el = 0x8000, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next, *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +#if defined(__BIG_ENDIAN_BITFIELD) +#define X(a,b) b,a +#else +#define X(a,b) a,b +#endif +struct config { +/*0*/ u8 X(byte_count:6, pad0:2); +/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); +/*2*/ u8 adaptive_ifs; +/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), + term_write_cache_line:1), pad3:4); +/*4*/ u8 X(rx_dma_max_count:7, pad4:1); +/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); +/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), + tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), + rx_save_overruns : 1), rx_save_bad_frames : 1); +/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), + pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), + tx_dynamic_tbd:1); +/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); +/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), + link_status_wake:1), arp_wake:1), mcmatch_wake:1); +/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), + loopback:2); +/*11*/ u8 X(linear_priority:3, pad11:5); +/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); +/*13*/ u8 ip_addr_lo; +/*14*/ u8 ip_addr_hi; +/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), + wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), + pad15_2:1), crs_or_cdt:1); +/*16*/ u8 fc_delay_lo; +/*17*/ u8 fc_delay_hi; +/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), + rx_long_ok:1), fc_priority_threshold:3), pad18:1); +/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), + fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), + full_duplex_force:1), 
full_duplex_pin:1); +/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1); +/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); +/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); + u8 pad_d102[9]; +}; + +#define E100_MAX_MULTICAST_ADDRS 64 +struct multi { + __le16 count; + u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; +}; + +/* Important: keep total struct u32-aligned */ +#define UCODE_SIZE 134 +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[ETH_ALEN]; + __le32 ucode[UCODE_SIZE]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next, *prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, lb_mac = 1, lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames, tx_max_collisions, tx_late_collisions, + tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, + tx_multiple_collisions, tx_total_collisions; + __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors, + rx_resource_errors, rx_overrun_errors, rx_cdt_errors, + rx_short_frame_errors; + __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; + __le16 xmt_tco_frames, rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + /* Begin: frequently used values: keep adjacent for cache effect */ + u32 msg_enable ____cacheline_aligned; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data); + + struct rx *rxs ____cacheline_aligned; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + + spinlock_t cb_lock ____cacheline_aligned; + spinlock_t cmd_lock; + struct csr __iomem *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + /* End: frequently used values: keep adjacent for cache effect */ + + enum { + ich = (1 << 0), + promiscuous = (1 << 1), + multicast_all = (1 << 2), + wol_magic = (1 << 3), + ich_10h_workaround = (1 << 4), + } flags ____cacheline_aligned; + + enum mac mac; + enum phy phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + + struct mem *mem; + dma_addr_t dma_addr; + + struct dma_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + + u16 eeprom_wc; + + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +static inline void e100_write_flush(struct nic *nic) +{ + /* Flush previous PCI writes through intermediate bridges + * by doing a benign read */ + (void)ioread8(&nic->csr->scb.status); +} + +static void 
e100_enable_irq(struct nic *nic) +{ + unsigned long flags; + + if (nic->ecdev) + return; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_disable_irq(struct nic *nic) +{ + unsigned long flags = 0; + + if (!nic->ecdev) + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_hw_reset(struct nic *nic) +{ + /* Put CU and RU into idle with a selective reset to get + * device off of PCI bus */ + iowrite32(selective_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Now fully reset device */ + iowrite32(software_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Mask off our interrupt line - it's unmasked after reset */ + e100_disable_irq(nic); +} + +static int e100_self_test(struct nic *nic) +{ + u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); + + /* Passing the self-test is a pretty good indication + * that the device can DMA to/from host memory */ + + nic->mem->selftest.signature = 0; + nic->mem->selftest.result = 0xFFFFFFFF; + + iowrite32(selftest | dma_addr, &nic->csr->port); + e100_write_flush(nic); + /* Wait 10 msec for self-test to complete */ + msleep(10); + + /* Interrupts are enabled after self-test */ + e100_disable_irq(nic); + + /* Check results of self-test */ + if (nic->mem->selftest.result != 0) { + netif_err(nic, hw, nic->netdev, + "Self-test failed: result=0x%08X\n", + nic->mem->selftest.result); + return -ETIMEDOUT; + } + if (nic->mem->selftest.signature == 0) { + netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data) +{ + u32 cmd_addr_data[3]; + u8 ctrl; + int i, j; + + /* Three cmds: write/erase enable, write data, write/erase disable */ + cmd_addr_data[0] = op_ewen << (addr_len - 2); + cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | + le16_to_cpu(data); + cmd_addr_data[2] = op_ewds << (addr_len - 2); + + /* Bit-bang cmds to write word to eeprom */ + for (j = 0; j < 3; j++) { + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data[j] & (1 << i)) ? + eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } + /* Wait 10 msec for cmd to complete */ + msleep(10); + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } +}; + +/* General technique stolen from the eepro100 driver - very clever */ +static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) +{ + u32 cmd_addr_data; + u16 data = 0; + u8 ctrl; + int i; + + cmd_addr_data = ((op_read << *addr_len) | addr) << 16; + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Bit-bang to read word from eeprom */ + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data & (1 << i)) ? 
eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Eeprom drives a dummy zero to EEDO after receiving + * complete address. Use this to adjust addr_len. */ + ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); + if (!(ctrl & eedo) && i > 16) { + *addr_len -= (i - 16); + i = 17; + } + + data = (data << 1) | (ctrl & eedo ? 1 : 0); + } + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + return cpu_to_le16(data); +}; + +/* Load entire EEPROM image into driver cache and validate checksum */ +static int e100_eeprom_load(struct nic *nic) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + for (addr = 0; addr < nic->eeprom_wc; addr++) { + nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); + if (addr < nic->eeprom_wc - 1) + checksum += le16_to_cpu(nic->eeprom[addr]); + } + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { + netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); + if (!eeprom_bad_csum_allow) + return -EAGAIN; + } + + return 0; +} + +/* Save (portion of) driver EEPROM cache to device and update checksum */ +static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + if (start + count >= nic->eeprom_wc) + return -EINVAL; + + for (addr = start; addr < start + count; addr++) + e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + for (addr = 0; addr < nic->eeprom_wc - 1; addr++) + checksum += le16_to_cpu(nic->eeprom[addr]); + nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); + e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, + nic->eeprom[nic->eeprom_wc - 1]); + + return 0; +} + +#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! 
*/ +#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ +static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) +{ + unsigned long flags = 0; + unsigned int i; + int err = 0; + + if (!nic->ecdev) + spin_lock_irqsave(&nic->cmd_lock, flags); + + /* Previous command is accepted when SCB clears */ + for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { + if (likely(!ioread8(&nic->csr->scb.cmd_lo))) + break; + cpu_relax(); + if (unlikely(i > E100_WAIT_SCB_FAST)) + udelay(5); + } + if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) { + err = -EAGAIN; + goto err_unlock; + } + + if (unlikely(cmd != cuc_resume)) + iowrite32(dma_addr, &nic->csr->scb.gen_ptr); + iowrite8(cmd, &nic->csr->scb.cmd_lo); + +err_unlock: + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->cmd_lock, flags); + + return err; +} + +static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, + int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) +{ + struct cb *cb; + unsigned long flags; + int err; + + if (!nic->ecdev) { + spin_lock_irqsave(&nic->cb_lock, flags); + } + + if (unlikely(!nic->cbs_avail)) { + err = -ENOMEM; + goto err_unlock; + } + + cb = nic->cb_to_use; + nic->cb_to_use = cb->next; + nic->cbs_avail--; + cb->skb = skb; + + err = cb_prepare(nic, cb, skb); + if (err) + goto err_unlock; + + if (unlikely(!nic->cbs_avail)) + err = -ENOSPC; + + + /* Order is important otherwise we'll be in a race with h/w: + * set S-bit in current first, then clear S-bit in previous. */ + cb->command |= cpu_to_le16(cb_s); + dma_wmb(); + cb->prev->command &= cpu_to_le16(~cb_s); + + while (nic->cb_to_send != nic->cb_to_use) { + if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, + nic->cb_to_send->dma_addr))) { + /* Ok, here's where things get sticky. It's + * possible that we can't schedule the command + * because the controller is too busy, so + * let's just queue the command and try again + * when another command is scheduled. */ + if (err == -ENOSPC) { + //request a reset + schedule_work(&nic->tx_timeout_task); + } + break; + } else { + nic->cuc_cmd = cuc_resume; + nic->cb_to_send = nic->cb_to_send->next; + } + } + +err_unlock: + if (!nic->ecdev) { + spin_unlock_irqrestore(&nic->cb_lock, flags); + } + + return err; +} + +static int mdio_read(struct net_device *netdev, int addr, int reg) +{ + struct nic *nic = netdev_priv(netdev); + return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); +} + +static void mdio_write(struct net_device *netdev, int addr, int reg, int data) +{ + struct nic *nic = netdev_priv(netdev); + + nic->mdio_ctrl(nic, addr, mdi_write, reg, data); +} + +/* the standard mdio_ctrl() function for usual MII-compliant hardware */ +static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) +{ + u32 data_out = 0; + unsigned int i; + unsigned long flags = 0; + + + /* + * Stratus87247: we shouldn't be writing the MDI control + * register until the Ready bit shows True. Also, since + * manipulation of the MDI control registers is a multi-step + * procedure it should be done under lock. 
+ */ + if (!nic->ecdev) + spin_lock_irqsave(&nic->mdio_lock, flags); + for (i = 100; i; --i) { + if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) + break; + udelay(20); + } + if (unlikely(!i)) { + netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->mdio_lock, flags); + return 0; /* No way to indicate timeout error */ + } + iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); + + for (i = 0; i < 100; i++) { + udelay(20); + if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) + break; + } + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->mdio_lock, flags); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data, data_out); + return (u16)data_out; +} + +/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */ +static u16 mdio_ctrl_phy_82552_v(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + if ((reg == MII_BMCR) && (dir == mdi_write)) { + if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) { + u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, + MII_ADVERTISE); + + /* + * Workaround Si issue where sometimes the part will not + * autoneg to 100Mbps even when advertised. + */ + if (advert & ADVERTISE_100FULL) + data |= BMCR_SPEED100 | BMCR_FULLDPLX; + else if (advert & ADVERTISE_100HALF) + data |= BMCR_SPEED100; + } + } + return mdio_ctrl_hw(nic, addr, dir, reg, data); +} + +/* Fully software-emulated mdio_ctrl() function for cards without + * MII-compliant PHYs. + * For now, this is mainly geared towards 80c24 support; in case of further + * requirements for other types (i82503, ...?) either extend this mechanism + * or split it, whichever is cleaner. + */ +static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + /* might need to allocate a netdev_priv'ed register array eventually + * to be able to record state changes, but for now + * some fully hardcoded register handling ought to be ok I guess. */ + + if (dir == mdi_read) { + switch (reg) { + case MII_BMCR: + /* Auto-negotiation, right? */ + return BMCR_ANENABLE | + BMCR_FULLDPLX; + case MII_BMSR: + return BMSR_LSTATUS /* for mii_link_ok() */ | + BMSR_ANEGCAPABLE | + BMSR_10FULL; + case MII_ADVERTISE: + /* 80c24 is a "combo card" PHY, right? */ + return ADVERTISE_10HALF | + ADVERTISE_10FULL; + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } else { + switch (reg) { + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } +} +static inline int e100_phy_supports_mii(struct nic *nic) +{ + /* for now, just check it by comparing whether we + are using MII software emulation. + */ + return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); +} + +static void e100_get_defaults(struct nic *nic) +{ + struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; + struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; + + /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ + nic->mac = (nic->flags & ich) ? 
mac_82559_D101M : nic->pdev->revision; + if (nic->mac == mac_unknown) + nic->mac = mac_82557_D100_A; + + nic->params.rfds = rfds; + nic->params.cbs = cbs; + + /* Quadwords to DMA into FIFO before starting frame transmit */ + nic->tx_threshold = 0xE0; + + /* no interrupt for every tx completion, delay = 256us if not 557 */ + nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | + ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); + + /* Template for a freshly allocated RFD */ + nic->blank_rfd.command = 0; + nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); + nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); + + /* MII setup */ + nic->mii.phy_id_mask = 0x1F; + nic->mii.reg_num_mask = 0x1F; + nic->mii.dev = nic->netdev; + nic->mii.mdio_read = mdio_read; + nic->mii.mdio_write = mdio_write; +} + +static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct config *config = &cb->u.config; + u8 *c = (u8 *)config; + struct net_device *netdev = nic->netdev; + + cb->command = cpu_to_le16(cb_config); + + memset(config, 0, sizeof(struct config)); + + config->byte_count = 0x16; /* bytes in this struct */ + config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ + config->direct_rx_dma = 0x1; /* reserved */ + config->standard_tcb = 0x1; /* 1=standard, 0=extended */ + config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ + config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ + config->tx_underrun_retry = 0x3; /* # of underrun retries */ + if (e100_phy_supports_mii(nic)) + config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ + config->pad10 = 0x6; + config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ + config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ + config->ifs = 0x6; /* x16 = inter frame spacing */ + config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ + config->pad15_1 = 0x1; + config->pad15_2 = 0x1; + config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ + config->fc_delay_hi = 0x40; /* time delay for fc frame */ + config->tx_padding = 0x1; /* 1=pad short frames */ + config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ + config->pad18 = 0x1; + config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ + config->pad20_1 = 0x1F; + config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ + config->pad21_1 = 0x5; + + config->adaptive_ifs = nic->adaptive_ifs; + config->loopback = nic->loopback; + + if (nic->mii.force_media && nic->mii.full_duplex) + config->full_duplex_force = 0x1; /* 1=force, 0=auto */ + + if (nic->flags & promiscuous || nic->loopback) { + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + config->promiscuous_mode = 0x1; /* 1=on, 0=off */ + } + + if (unlikely(netdev->features & NETIF_F_RXFCS)) + config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ + + if (nic->flags & multicast_all) + config->multicast_all = 0x1; /* 1=accept, 0=no */ + + /* disable WoL when up */ + if (nic->ecdev || + (netif_running(nic->netdev) || !(nic->flags & wol_magic))) + config->magic_packet_disable = 0x1; /* 1=off, 0=on */ + + if (nic->mac >= mac_82558_D101_A4) { + config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ + config->mwi_enable = 0x1; /* 1=enable, 0=disable */ + config->standard_tcb = 0x0; /* 1=standard, 0=extended */ + config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ + if (nic->mac >= mac_82559_D101M) { + config->tno_intr = 0x1; /* TCO stats enable */ + /* Enable TCO in extended config */ + if (nic->mac >= 
mac_82551_10) { + config->byte_count = 0x20; /* extended bytes */ + config->rx_d102_mode = 0x1; /* GMRC for TCO */ + } + } else { + config->standard_stat_counter = 0x0; + } + } + + if (netdev->features & NETIF_F_RXALL) { + config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + } + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", + c + 0); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", + c + 8); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", + c + 16); + return 0; +} + +/************************************************************************* +* CPUSaver parameters +* +* All CPUSaver parameters are 16-bit literals that are part of a +* "move immediate value" instruction. By changing the value of +* the literal in the instruction before the code is loaded, the +* driver can change the algorithm. +* +* INTDELAY - This loads the dead-man timer with its initial value. +* When this timer expires the interrupt is asserted, and the +* timer is reset each time a new packet is received. (see +* BUNDLEMAX below to set the limit on number of chained packets) +* The current default is 0x600 or 1536. Experiments show that +* the value should probably stay within the 0x200 - 0x1000. +* +* BUNDLEMAX - +* This sets the maximum number of frames that will be bundled. In +* some situations, such as the TCP windowing algorithm, it may be +* better to limit the growth of the bundle size than let it go as +* high as it can, because that could cause too much added latency. +* The default is six, because this is the number of packets in the +* default TCP window size. A value of 1 would make CPUSaver indicate +* an interrupt for every frame received. If you do not want to put +* a limit on the bundle size, set this value to xFFFF. +* +* BUNDLESMALL - +* This contains a bit-mask describing the minimum size frame that +* will be bundled. The default masks the lower 7 bits, which means +* that any frame less than 128 bytes in length will not be bundled, +* but will instead immediately generate an interrupt. This does +* not affect the current bundle in any way. Any frame that is 128 +* bytes or large will be bundled normally. This feature is meant +* to provide immediate indication of ACK frames in a TCP environment. +* Customers were seeing poor performance when a machine with CPUSaver +* enabled was sending but not receiving. The delay introduced when +* the ACKs were received was enough to reduce total throughput, because +* the sender would sit idle until the ACK was finally seen. +* +* The current default is 0xFF80, which masks out the lower 7 bits. +* This means that any frame which is x7F (127) bytes or smaller +* will cause an immediate interrupt. Because this value must be a +* bit mask, there are only a few valid values that can be used. To +* turn this feature off, the driver can write the value xFFFF to the +* lower word of this instruction (in the same way that the other +* parameters are used). Likewise, a value of 0xF800 (2047) would +* cause an interrupt to be generated for every frame, because all +* standard Ethernet frames are <= 2047 bytes in length. 
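+*
+* Worked example (illustrative, using the defaults described above and the
+* defines that follow): with INTDELAY = 0x600 (1536), BUNDLEMAX = 6 and a
+* BUNDLESMALL mask of 0xFF80, a 64-byte TCP ACK is indicated immediately,
+* while a stream of full-sized frames is indicated after six frames have
+* been bundled or the dead-man timer expires, whichever happens first.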
+*************************************************************************/ + +/* if you wish to disable the ucode functionality, while maintaining the + * workarounds it provides, set the following defines to: + * BUNDLESMALL 0 + * BUNDLEMAX 1 + * INTDELAY 1 + */ +#define BUNDLESMALL 1 +#define BUNDLEMAX (u16)6 +#define INTDELAY (u16)1536 /* 0x600 */ + +/* Initialize firmware */ +static const struct firmware *e100_request_firmware(struct nic *nic) +{ + const char *fw_name; + const struct firmware *fw = nic->fw; + u8 timer, bundle, min_size; + int err = 0; + bool required = false; + + /* do not load u-code for ICH devices */ + if (nic->flags & ich) + return NULL; + + /* Search for ucode match against h/w revision + * + * Based on comments in the source code for the FreeBSD fxp + * driver, the FIRMWARE_D102E ucode includes both CPUSaver and + * + * "fixes for bugs in the B-step hardware (specifically, bugs + * with Inline Receive)." + * + * So we must fail if it cannot be loaded. + * + * The other microcode files are only required for the optional + * CPUSaver feature. Nice to have, but no reason to fail. + */ + if (nic->mac == mac_82559_D101M) { + fw_name = FIRMWARE_D101M; + } else if (nic->mac == mac_82559_D101S) { + fw_name = FIRMWARE_D101S; + } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { + fw_name = FIRMWARE_D102E; + required = true; + } else { /* No ucode on other devices */ + return NULL; + } + + /* If the firmware has not previously been loaded, request a pointer + * to it. If it was previously loaded, we are reinitializing the + * adapter, possibly in a resume from hibernate, in which case + * request_firmware() cannot be used. + */ + if (!fw) + err = request_firmware(&fw, fw_name, &nic->pdev->dev); + + if (err) { + if (required) { + netif_err(nic, probe, nic->netdev, + "Failed to load firmware \"%s\": %d\n", + fw_name, err); + return ERR_PTR(err); + } else { + netif_info(nic, probe, nic->netdev, + "CPUSaver disabled. Needs \"%s\": %d\n", + fw_name, err); + return NULL; + } + } + + /* Firmware should be precisely UCODE_SIZE (words) plus three bytes + indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ + if (fw->size != UCODE_SIZE * 4 + 3) { + netif_err(nic, probe, nic->netdev, + "Firmware \"%s\" has wrong size %zu\n", + fw_name, fw->size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || + min_size >= UCODE_SIZE) { + netif_err(nic, probe, nic->netdev, + "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", + fw_name, timer, bundle, min_size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* OK, firmware is validated and ready to use. Save a pointer + * to it in the nic */ + nic->fw = fw; + return fw; +} + +static int e100_setup_ucode(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + const struct firmware *fw = (void *)skb; + u8 timer, bundle, min_size; + + /* It's not a real skb; we just abused the fact that e100_exec_cb + will pass it through to here... 
*/ + cb->skb = NULL; + + /* firmware is stored as little endian already */ + memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + /* Insert user-tunable settings in cb->u.ucode */ + cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); + cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); + cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); + + cb->command = cpu_to_le16(cb_ucode | cb_el); + return 0; +} + +static inline int e100_load_ucode_wait(struct nic *nic) +{ + const struct firmware *fw; + int err = 0, counter = 50; + struct cb *cb = nic->cb_to_clean; + + fw = e100_request_firmware(nic); + /* If it's NULL, then no ucode is required */ + if (IS_ERR_OR_NULL(fw)) + return PTR_ERR_OR_ZERO(fw); + + if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) + netif_err(nic, probe, nic->netdev, + "ucode cmd failed with error %d\n", err); + + /* must restart cuc */ + nic->cuc_cmd = cuc_start; + + /* wait for completion */ + e100_write_flush(nic); + udelay(10); + + /* wait for possibly (ouch) 500ms */ + while (!(cb->status & cpu_to_le16(cb_complete))) { + msleep(10); + if (!--counter) break; + } + + /* ack any interrupts, something could have been set */ + iowrite8(~0, &nic->csr->scb.stat_ack); + + /* if the command failed, or is not OK, notify and return */ + if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { + netif_err(nic, probe, nic->netdev, "ucode load failed\n"); + err = -EPERM; + } + + return err; +} + +static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_iaaddr); + memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); + return 0; +} + +static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_dump); + cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + + offsetof(struct mem, dump_buf)); + return 0; +} + +static int e100_phy_check_without_mii(struct nic *nic) +{ + u8 phy_type; + int without_mii; + + phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; + + switch (phy_type) { + case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ + case I82503: /* Non-MII PHY; UNTESTED! */ + case S80C24: /* Non-MII PHY; tested and working */ + /* paragraph from the FreeBSD driver, "FXP_PHY_80C24": + * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter + * doesn't have a programming interface of any sort. The + * media is sensed automatically based on how the link partner + * is configured. This is, in essence, manual configuration. + */ + netif_info(nic, probe, nic->netdev, + "found MII-less i82503 or 80c24 or other PHY\n"); + + nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; + nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ + + /* these might be needed for certain MII-less cards... 
+ * nic->flags |= ich; + * nic->flags |= ich_10h_workaround; */ + + without_mii = 1; + break; + default: + without_mii = 0; + break; + } + return without_mii; +} + +#define NCONFIG_AUTO_SWITCH 0x0080 +#define MII_NSC_CONG MII_RESV1 +#define NSC_CONG_ENABLE 0x0100 +#define NSC_CONG_TXREADY 0x0400 +static int e100_phy_init(struct nic *nic) +{ + struct net_device *netdev = nic->netdev; + u32 addr; + u16 bmcr, stat, id_lo, id_hi, cong; + + /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ + for (addr = 0; addr < 32; addr++) { + nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) + break; + } + if (addr == 32) { + /* uhoh, no PHY detected: check whether we seem to be some + * weird, rare variant which is *known* to not have any MII. + * But do this AFTER MII checking only, since this does + * lookup of EEPROM values which may easily be unreliable. */ + if (e100_phy_check_without_mii(nic)) + return 0; /* simply return and hope for the best */ + else { + /* for unknown cases log a fatal error */ + netif_err(nic, hw, nic->netdev, + "Failed to locate any known PHY, aborting\n"); + return -EAGAIN; + } + } else + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy_addr = %d\n", nic->mii.phy_id); + + /* Get phy ID */ + id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); + id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); + nic->phy = (u32)id_hi << 16 | (u32)id_lo; + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy ID = 0x%08X\n", nic->phy); + + /* Select the phy and isolate the rest */ + for (addr = 0; addr < 32; addr++) { + if (addr != nic->mii.phy_id) { + mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); + } else if (nic->phy != phy_82552_v) { + bmcr = mdio_read(netdev, addr, MII_BMCR); + mdio_write(netdev, addr, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } + } + /* + * Workaround for 82552: + * Clear the ISOLATE bit on selected phy_id last (mirrored on all + * other phy_id's) using bmcr value from addr discovery loop above. + */ + if (nic->phy == phy_82552_v) + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* Handle National tx phys */ +#define NCS_PHY_MODEL_MASK 0xFFF0FFFF + if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { + /* Disable congestion control */ + cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); + cong |= NSC_CONG_TXREADY; + cong &= ~NSC_CONG_ENABLE; + mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); + } + + if (nic->phy == phy_82552_v) { + u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); + + /* assign special tweaked mdio_ctrl() function */ + nic->mdio_ctrl = mdio_ctrl_phy_82552_v; + + /* Workaround Si not advertising flow-control during autoneg */ + advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); + + /* Reset for the above changes to take effect */ + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + bmcr |= BMCR_RESET; + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); + } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && + (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && + (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { + /* enable/disable MDI/MDI-X auto-switching. 
*/ + mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, + nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); + } + + return 0; +} + +static int e100_hw_init(struct nic *nic) +{ + int err = 0; + + e100_hw_reset(nic); + + netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); + if ((err = e100_self_test(nic))) + return err; + + if ((err = e100_phy_init(nic))) + return err; + if ((err = e100_exec_cmd(nic, cuc_load_base, 0))) + return err; + if ((err = e100_exec_cmd(nic, ruc_load_base, 0))) + return err; + if ((err = e100_load_ucode_wait(nic))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_configure))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_addr, + nic->dma_addr + offsetof(struct mem, stats)))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0))) + return err; + + e100_disable_irq(nic); + + return 0; +} + +static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct net_device *netdev = nic->netdev; + struct netdev_hw_addr *ha; + u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); + + cb->command = cpu_to_le16(cb_multi); + cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == count) + break; + memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, + ETH_ALEN); + } + return 0; +} + +static void e100_set_multicast_list(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "mc_count=%d, flags=0x%04X\n", + netdev_mc_count(netdev), netdev->flags); + + if (netdev->flags & IFF_PROMISC) + nic->flags |= promiscuous; + else + nic->flags &= ~promiscuous; + + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS) + nic->flags |= multicast_all; + else + nic->flags &= ~multicast_all; + + e100_exec_cb(nic, NULL, e100_configure); + e100_exec_cb(nic, NULL, e100_multi); +} + +static void e100_update_stats(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct net_device_stats *ns = &dev->stats; + struct stats *s = &nic->mem->stats; + __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : + (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : + &s->complete; + + /* Device's stats reporting may take several microseconds to + * complete, so we're always waiting for results of the + * previous command. 
*/ + + if (*complete == cpu_to_le32(cuc_dump_reset_complete)) { + *complete = 0; + nic->tx_frames = le32_to_cpu(s->tx_good_frames); + nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); + ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); + ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); + ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); + ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); + ns->collisions += nic->tx_collisions; + ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + + le32_to_cpu(s->tx_lost_crs); + nic->rx_short_frame_errors += + le32_to_cpu(s->rx_short_frame_errors); + ns->rx_length_errors = nic->rx_short_frame_errors + + nic->rx_over_length_errors; + ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); + ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); + ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); + ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + + le32_to_cpu(s->rx_alignment_errors) + + le32_to_cpu(s->rx_short_frame_errors) + + le32_to_cpu(s->rx_cdt_errors); + nic->tx_deferred += le32_to_cpu(s->tx_deferred); + nic->tx_single_collisions += + le32_to_cpu(s->tx_single_collisions); + nic->tx_multiple_collisions += + le32_to_cpu(s->tx_multiple_collisions); + if (nic->mac >= mac_82558_D101_A4) { + nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); + nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); + nic->rx_fc_unsupported += + le32_to_cpu(s->fc_rcv_unsupported); + if (nic->mac >= mac_82559_D101M) { + nic->tx_tco_frames += + le16_to_cpu(s->xmt_tco_frames); + nic->rx_tco_frames += + le16_to_cpu(s->rcv_tco_frames); + } + } + } + + + if (e100_exec_cmd(nic, cuc_dump_reset, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_dump_reset failed\n"); +} + +static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) +{ + /* Adjust inter-frame-spacing (IFS) between two transmits if + * we're getting collisions on a half-duplex connection. */ + + if (duplex == DUPLEX_HALF) { + u32 prev = nic->adaptive_ifs; + u32 min_frames = (speed == SPEED_100) ? 1000 : 100; + + if ((nic->tx_frames / 32 < nic->tx_collisions) && + (nic->tx_frames > min_frames)) { + if (nic->adaptive_ifs < 60) + nic->adaptive_ifs += 5; + } else if (nic->tx_frames < min_frames) { + if (nic->adaptive_ifs >= 5) + nic->adaptive_ifs -= 5; + } + if (nic->adaptive_ifs != prev) + e100_exec_cb(nic, NULL, e100_configure); + } +} + +static void e100_watchdog_impl(struct nic *nic) +{ + struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; + u32 speed; + + if (nic->ecdev) { + ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0); + return; + } + + netif_printk(nic, timer, KERN_DEBUG, nic->netdev, + "right now = %ld\n", jiffies); + + /* mii library handles link maintenance tasks */ + + mii_ethtool_gset(&nic->mii, &cmd); + speed = ethtool_cmd_speed(&cmd); + + if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", + speed == SPEED_100 ? 100 : 10, + cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); + } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Down\n"); + } + + mii_check_link(&nic->mii); + + /* Software generated interrupt to recover from (rare) Rx + * allocation failure. 
+ * Unfortunately have to use a spinlock to not re-enable interrupts + * accidentally, due to hardware that shares a register between the + * interrupt mask bit and the SW Interrupt generation bit */ + spin_lock_irq(&nic->cmd_lock); + iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irq(&nic->cmd_lock); + + e100_update_stats(nic); + e100_adjust_adaptive_ifs(nic, speed, cmd.duplex); + + if (nic->mac <= mac_82557_D100_C) + /* Issue a multicast command to workaround a 557 lock up */ + e100_set_multicast_list(nic->netdev); + + if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) + /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ + nic->flags |= ich_10h_workaround; + else + nic->flags &= ~ich_10h_workaround; +} + +static void e100_watchdog(struct timer_list *t) +{ + struct nic *nic = from_timer(nic, t, watchdog); + + e100_watchdog_impl(nic); + + mod_timer(&nic->watchdog, + round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); +} + +static int e100_xmit_prepare(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + dma_addr_t dma_addr; + cb->command = nic->tx_command; + + dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + /* If we can't map the skb, have the upper layer try later */ + if (dma_mapping_error(&nic->pdev->dev, dma_addr)) + return -ENOMEM; + + /* + * Use the last 4 bytes of the SKB payload packet as the CRC, used for + * testing, ie sending frames with bad CRC. + */ + if (unlikely(skb->no_fcs)) + cb->command |= cpu_to_le16(cb_tx_nc); + else + cb->command &= ~cpu_to_le16(cb_tx_nc); + + /* interrupt every 16 packets regardless of delay */ + if ((nic->cbs_avail & ~15) == nic->cbs_avail) + cb->command |= cpu_to_le16(cb_i); + cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); + cb->u.tcb.tcb_byte_count = 0; + cb->u.tcb.threshold = nic->tx_threshold; + cb->u.tcb.tbd_count = 1; + cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); + cb->u.tcb.tbd.size = cpu_to_le16(skb->len); + skb_tx_timestamp(skb); + return 0; +} + +static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + if (nic->flags & ich_10h_workaround) { + /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. + Issue a NOP command followed by a 1us delay before + issuing the Tx command. */ + if (e100_exec_cmd(nic, cuc_nop, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_nop failed\n"); + udelay(1); + } + + err = e100_exec_cb(nic, skb, e100_xmit_prepare); + + switch (err) { + case -ENOSPC: + /* We queued the skb, but now we're out of space. */ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "No space for CB\n"); + if (!nic->ecdev) + netif_stop_queue(netdev); + break; + case -ENOMEM: + /* This is a hard error - log it. 
*/ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "Out of Tx resources, returning skb\n"); + if (!nic->ecdev) + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +static int e100_tx_clean(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct cb *cb; + int tx_cleaned = 0; + + if (!nic->ecdev) + spin_lock(&nic->cb_lock); + + /* Clean CBs marked complete */ + for (cb = nic->cb_to_clean; + cb->status & cpu_to_le16(cb_complete); + cb = nic->cb_to_clean = cb->next) { + dma_rmb(); /* read skb after status */ + netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, + "cb[%d]->status = 0x%04X\n", + (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), + cb->status); + + if (likely(cb->skb != NULL)) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += cb->skb->len; + + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); + if (!nic->ecdev) + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + tx_cleaned = 1; + } + cb->status = 0; + nic->cbs_avail++; + } + + if (!nic->ecdev) { + spin_unlock(&nic->cb_lock); + + /* Recover from running out of Tx resources in xmit_frame */ + if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) + netif_wake_queue(nic->netdev); + } + + return tx_cleaned; +} + +static void e100_clean_cbs(struct nic *nic) +{ + if (nic->cbs) { + while (nic->cbs_avail != nic->params.cbs.count) { + struct cb *cb = nic->cb_to_clean; + if (cb->skb) { + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); + if (!nic->ecdev) + dev_kfree_skb(cb->skb); + } + nic->cb_to_clean = nic->cb_to_clean->next; + nic->cbs_avail++; + } + dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); + nic->cbs = NULL; + nic->cbs_avail = 0; + } + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = + nic->cbs; +} + +static int e100_alloc_cbs(struct nic *nic) +{ + struct cb *cb; + unsigned int i, count = nic->params.cbs.count; + + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; + nic->cbs_avail = 0; + + nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL, + &nic->cbs_dma_addr); + if (!nic->cbs) + return -ENOMEM; + + for (cb = nic->cbs, i = 0; i < count; cb++, i++) { + cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; + cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; + + cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); + cb->link = cpu_to_le32(nic->cbs_dma_addr + + ((i+1) % count) * sizeof(struct cb)); + } + + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; + nic->cbs_avail = count; + + return 0; +} + +static inline void e100_start_receiver(struct nic *nic, struct rx *rx) +{ + if (!nic->rxs) return; + if (RU_SUSPENDED != nic->ru_running) return; + + /* handle init time starts */ + if (!rx) rx = nic->rxs; + + /* (Re)start RU if suspended or idle and RFA is non-NULL */ + if (rx->skb) { + e100_exec_cmd(nic, ruc_start, rx->dma_addr); + nic->ru_running = RU_RUNNING; + } +} + +#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) +{ + if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) + return -ENOMEM; + + /* Init, and map the RFD. 
*/ + skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { + dev_kfree_skb_any(rx->skb); + rx->skb = NULL; + rx->dma_addr = 0; + return -ENOMEM; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + dma_sync_single_for_device(&nic->pdev->dev, + rx->prev->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + } + + return 0; +} + +static int e100_rx_indicate(struct nic *nic, struct rx *rx, + unsigned int *work_done, unsigned int work_to_do) +{ + struct net_device *dev = nic->netdev; + struct sk_buff *skb = rx->skb; + struct rfd *rfd = (struct rfd *)skb->data; + u16 rfd_status, actual_size; + u16 fcs_pad = 0; + + if (unlikely(work_done && *work_done >= work_to_do)) + return -EAGAIN; + + /* Need to sync before taking a peek at cb_complete bit */ + dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); + rfd_status = le16_to_cpu(rfd->status); + + netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, + "status=0x%04X\n", rfd_status); + dma_rmb(); /* read size after status bit */ + + /* If data isn't ready, nothing to indicate */ + if (unlikely(!(rfd_status & cb_complete))) { + /* If the next buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling + * interrupts */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), + DMA_FROM_DEVICE); + return -ENODATA; + } + + /* Get actual data size */ + if (unlikely(dev->features & NETIF_F_RXFCS)) + fcs_pad = 4; + actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; + if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) + actual_size = RFD_BUF_LEN - sizeof(struct rfd); + + /* Get data */ + dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); + + /* If this buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling interrupts. + * This can happen when the RU sees the size change but also sees + * the el bit set. */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) { + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + } + + if (!nic->ecdev) { + /* Pull off the RFD and put the actual data (minus eth hdr) */ + skb_reserve(skb, sizeof(struct rfd)); + skb_put(skb, actual_size); + skb->protocol = eth_type_trans(skb, nic->netdev); + } + + /* If we are receiving all frames, then don't bother + * checking for errors. + */ + if (unlikely(dev->features & NETIF_F_RXALL)) { + if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) + /* Received oversized frame, but keep it. 
*/ + nic->rx_over_length_errors++; + goto process_skb; + } + + if (unlikely(!(rfd_status & cb_ok))) { + if (!nic->ecdev) { + /* Don't indicate if hardware indicates errors */ + dev_kfree_skb_any(skb); + } + } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) { + /* Don't indicate oversized frames */ + nic->rx_over_length_errors++; + if (!nic->ecdev) { + dev_kfree_skb_any(skb); + } + } else { +process_skb: + dev->stats.rx_packets++; + dev->stats.rx_bytes += (actual_size - fcs_pad); + if (nic->ecdev) { + ecdev_receive(nic->ecdev, + skb->data + sizeof(struct rfd), actual_size - fcs_pad); + + // No need to detect link status as + // long as frames are received: Reset watchdog. + if (ecdev_get_link(nic->ecdev)) { + nic->ec_watchdog_jiffies = jiffies; + } + } else { + netif_receive_skb(skb); + } + if (work_done) + (*work_done)++; + } + + if (nic->ecdev) { + // make receive frame descriptior usable again + memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = dma_map_single(&nic->pdev->dev, skb->data, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { + rx->dma_addr = 0; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *) rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + dma_sync_single_for_device(&nic->pdev->dev, rx->prev->dma_addr, + sizeof(struct rfd), DMA_TO_DEVICE); + } + } else { + rx->skb = NULL; + } + + return 0; +} + +static void e100_rx_clean(struct nic *nic, unsigned int *work_done, + unsigned int work_to_do) +{ + struct rx *rx; + int restart_required = 0, err = 0; + struct rx *old_before_last_rx, *new_before_last_rx; + struct rfd *old_before_last_rfd, *new_before_last_rfd; + + /* Indicate newly arrived packets */ + for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { + err = e100_rx_indicate(nic, rx, work_done, work_to_do); + /* Hit quota or no more to clean */ + if (-EAGAIN == err || -ENODATA == err) + break; + } + + + /* On EAGAIN, hit quota so have more work to do, restart once + * cleanup is complete. + * Else, are we already rnr? then pay attention!!! this ensures that + * the state machine progression never allows a start with a + * partially cleaned list, avoiding a race between hardware + * and rx_to_clean when in NAPI mode */ + if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) + restart_required = 1; + + old_before_last_rx = nic->rx_to_use->prev->prev; + old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; + + if (!nic->ecdev) { + /* Alloc new skbs to refill list */ + for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { + if(unlikely(e100_rx_alloc_skb(nic, rx))) + break; /* Better luck next time (see watchdog) */ + } + } + + new_before_last_rx = nic->rx_to_use->prev->prev; + if (new_before_last_rx != old_before_last_rx) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer + * without worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this + * buffer. + * When the hardware hits the before last buffer with el-bit + * and size of 0, it will RNR interrupt, the RUS will go into + * the No Resources state. It will not complete nor write to + * this buffer. 
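+	 *
+	 * Descriptive note on the ordering below: the new stopping point is
+	 * armed first (size 0 plus the EL bit) and synced to the device, and
+	 * only then is the EL bit cleared and the size restored on the old
+	 * stopping point, so hardware never sees a usable RFD whose link
+	 * field is still being updated.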
*/ + new_before_last_rfd = + (struct rfd *)new_before_last_rx->skb->data; + new_before_last_rfd->size = 0; + new_before_last_rfd->command |= cpu_to_le16(cb_el); + dma_sync_single_for_device(&nic->pdev->dev, + new_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + + /* Now that we have a new stopping point, we can clear the old + * stopping point. We must sync twice to get the proper + * ordering on the hardware side of things. */ + old_before_last_rfd->command &= ~cpu_to_le16(cb_el); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + + ETH_FCS_LEN); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + } + + if (restart_required) { + // ack the rnr? + iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); + e100_start_receiver(nic, nic->rx_to_clean); + if (work_done) + (*work_done)++; + } +} + +static void e100_rx_clean_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + + nic->ru_running = RU_UNINITIALIZED; + + if (nic->rxs) { + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + if (rx->skb) { + dma_unmap_single(&nic->pdev->dev, + rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); + dev_kfree_skb(rx->skb); + } + } + kfree(nic->rxs); + nic->rxs = NULL; + } + + nic->rx_to_use = nic->rx_to_clean = NULL; +} + +static int e100_rx_alloc_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + struct rfd *before_last; + + nic->rx_to_use = nic->rx_to_clean = NULL; + nic->ru_running = RU_UNINITIALIZED; + + if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL))) + return -ENOMEM; + + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; + rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; + if (e100_rx_alloc_skb(nic, rx)) { + e100_rx_clean_list(nic); + return -ENOMEM; + } + } + + if (!nic->ecdev) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer without + * worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this buffer. + * When the hardware hits the before last buffer with el-bit and size + * of 0, it will RNR interrupt, the RU will go into the No Resources + * state. It will not complete nor write to this buffer. 
*/ + rx = nic->rxs->prev->prev; + before_last = (struct rfd *)rx->skb->data; + before_last->command |= cpu_to_le16(cb_el); + before_last->size = 0; + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); + } + + nic->rx_to_use = nic->rx_to_clean = nic->rxs; + nic->ru_running = RU_SUSPENDED; + + return 0; +} + +static irqreturn_t e100_intr(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct nic *nic = netdev_priv(netdev); + u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); + + netif_printk(nic, intr, KERN_DEBUG, nic->netdev, + "stat_ack = 0x%02X\n", stat_ack); + + if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ + stat_ack == stat_ack_not_present) /* Hardware is ejected */ + return IRQ_NONE; + + /* Ack interrupt(s) */ + iowrite8(stat_ack, &nic->csr->scb.stat_ack); + + /* We hit Receive No Resource (RNR); restart RU after cleaning */ + if (stat_ack & stat_ack_rnr) + nic->ru_running = RU_SUSPENDED; + + if (!nic->ecdev && likely(napi_schedule_prep(&nic->napi))) { + e100_disable_irq(nic); + __napi_schedule(&nic->napi); + } + + return IRQ_HANDLED; +} + +void e100_ec_poll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_rx_clean(nic, NULL, 100); + e100_tx_clean(nic); + + if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) { + e100_watchdog_impl(nic); + nic->ec_watchdog_jiffies = jiffies; + } +} + + +static int e100_poll(struct napi_struct *napi, int budget) +{ + struct nic *nic = container_of(napi, struct nic, napi); + unsigned int work_done = 0; + + e100_rx_clean(nic, &work_done, budget); + e100_tx_clean(nic); + + /* If budget fully consumed, continue polling */ + if (work_done == budget) + return budget; + + /* only re-enable interrupt if stack agrees polling is really done */ + if (likely(napi_complete_done(napi, work_done))) + e100_enable_irq(nic); + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void e100_netpoll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_disable_irq(nic); + e100_intr(nic->pdev->irq, netdev); + e100_tx_clean(nic); + e100_enable_irq(nic); +} +#endif + +static int e100_set_mac_address(struct net_device *netdev, void *p) +{ + struct nic *nic = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, addr->sa_data); + e100_exec_cb(nic, NULL, e100_setup_iaaddr); + + return 0; +} + +static int e100_asf(struct nic *nic) +{ + /* ASF can be enabled from eeprom */ + return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && + (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && + !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && + ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); +} + +static int e100_up(struct nic *nic) +{ + int err; + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_rx_clean_list; + if ((err = e100_hw_init(nic))) + goto err_clean_cbs; + e100_set_multicast_list(nic->netdev); + e100_start_receiver(nic, NULL); + if (!nic->ecdev) { + mod_timer(&nic->watchdog, jiffies); + } + if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, + nic->netdev->name, nic->netdev))) + goto err_no_irq; + if (!nic->ecdev) { + netif_wake_queue(nic->netdev); + napi_enable(&nic->napi); + /* enable ints _after_ enabling poll, preventing a race between + * disable ints+schedule */ + e100_enable_irq(nic); + } + return 0; + +err_no_irq: + if 
(!nic->ecdev) + del_timer_sync(&nic->watchdog); +err_clean_cbs: + e100_clean_cbs(nic); +err_rx_clean_list: + e100_rx_clean_list(nic); + return err; +} + +static void e100_down(struct nic *nic) +{ + if (!nic->ecdev) { + /* wait here for poll to complete */ + napi_disable(&nic->napi); + netif_stop_queue(nic->netdev); + } + e100_hw_reset(nic); + free_irq(nic->pdev->irq, nic->netdev); + if (!nic->ecdev) { + del_timer_sync(&nic->watchdog); + netif_carrier_off(nic->netdev); + } + e100_clean_cbs(nic); + e100_rx_clean_list(nic); +} + +static void e100_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct nic *nic = netdev_priv(netdev); + + /* Reset outside of interrupt context, to avoid request_irq + * in interrupt context */ + schedule_work(&nic->tx_timeout_task); +} + +static void e100_tx_timeout_task(struct work_struct *work) +{ + struct nic *nic = container_of(work, struct nic, tx_timeout_task); + struct net_device *netdev = nic->netdev; + + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); + + rtnl_lock(); + if (netif_running(netdev)) { + e100_down(netdev_priv(netdev)); + e100_up(netdev_priv(netdev)); + } + rtnl_unlock(); +} + +static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) +{ + int err; + struct sk_buff *skb; + + /* Use driver resources to perform internal MAC or PHY + * loopback test. A single packet is prepared and transmitted + * in loopback mode, and the test passes if the received + * packet compares byte-for-byte to the transmitted packet. */ + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_clean_rx; + + /* ICH PHY loopback is broken so do MAC loopback instead */ + if (nic->flags & ich && loopback_mode == lb_phy) + loopback_mode = lb_mac; + + nic->loopback = loopback_mode; + if ((err = e100_hw_init(nic))) + goto err_loopback_none; + + if (loopback_mode == lb_phy) + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, + BMCR_LOOPBACK); + + e100_start_receiver(nic, NULL); + + if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { + err = -ENOMEM; + goto err_loopback_none; + } + skb_put(skb, ETH_DATA_LEN); + memset(skb->data, 0xFF, ETH_DATA_LEN); + e100_xmit_frame(skb, nic->netdev); + + msleep(10); + + dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + + if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), + skb->data, ETH_DATA_LEN)) + err = -EAGAIN; + +err_loopback_none: + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); + nic->loopback = lb_none; + e100_clean_cbs(nic); + e100_hw_reset(nic); +err_clean_rx: + e100_rx_clean_list(nic); + return err; +} + +#define MII_LED_CONTROL 0x1B +#define E100_82552_LED_OVERRIDE 0x19 +#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ +#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ + +static int e100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + + mii_ethtool_get_link_ksettings(&nic->mii, cmd); + + return 0; +} + +static int e100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); + err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); + e100_exec_cb(nic, NULL, e100_configure); + + return err; +} + +static void e100_get_drvinfo(struct net_device *netdev, + 
struct ethtool_drvinfo *info) +{ + struct nic *nic = netdev_priv(netdev); + strscpy(info->driver, DRV_NAME, sizeof(info->driver)); + strscpy(info->bus_info, pci_name(nic->pdev), + sizeof(info->bus_info)); +} + +#define E100_PHY_REGS 0x1D +static int e100_get_regs_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + /* We know the number of registers, and the size of the dump buffer. + * Calculate the total size in bytes. + */ + return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); +} + +static void e100_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nic *nic = netdev_priv(netdev); + u32 *buff = p; + int i; + + regs->version = (1 << 24) | nic->pdev->revision; + buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | + ioread8(&nic->csr->scb.cmd_lo) << 16 | + ioread16(&nic->csr->scb.status); + for (i = 0; i < E100_PHY_REGS; i++) + /* Note that we read the registers in reverse order. This + * ordering is the ABI apparently used by ethtool and other + * applications. + */ + buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, + E100_PHY_REGS - 1 - i); + memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); + e100_exec_cb(nic, NULL, e100_dump); + msleep(10); + memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, + sizeof(nic->mem->dump_buf)); +} + +static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; + wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0; +} + +static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + + if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || + !device_can_wakeup(&nic->pdev->dev)) + return -EOPNOTSUPP; + + if (wol->wolopts) + nic->flags |= wol_magic; + else + nic->flags &= ~wol_magic; + + device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); + + e100_exec_cb(nic, NULL, e100_configure); + + return 0; +} + +static u32 e100_get_msglevel(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->msg_enable; +} + +static void e100_set_msglevel(struct net_device *netdev, u32 value) +{ + struct nic *nic = netdev_priv(netdev); + nic->msg_enable = value; +} + +static int e100_nway_reset(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_nway_restart(&nic->mii); +} + +static u32 e100_get_link(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_link_ok(&nic->mii); +} + +static int e100_get_eeprom_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->eeprom_wc << 1; +} + +#define E100_EEPROM_MAGIC 0x1234 +static int e100_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + eeprom->magic = E100_EEPROM_MAGIC; + memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); + + return 0; +} + +static int e100_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + if (eeprom->magic != E100_EEPROM_MAGIC) + return -EINVAL; + + memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); + + return e100_eeprom_save(nic, eeprom->offset >> 1, + (eeprom->len >> 1) + 1); +} + +static void e100_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + 
struct netlink_ext_ack *extack) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + ring->rx_max_pending = rfds->max; + ring->tx_max_pending = cbs->max; + ring->rx_pending = rfds->count; + ring->tx_pending = cbs->count; +} + +static int e100_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(netdev)) + e100_down(nic); + rfds->count = max(ring->rx_pending, rfds->min); + rfds->count = min(rfds->count, rfds->max); + cbs->count = max(ring->tx_pending, cbs->min); + cbs->count = min(cbs->count, cbs->max); + netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", + rfds->count, cbs->count); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} + +static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)", + "Eeprom test (on/offline)", + "Self test (offline)", + "Mac loopback (offline)", + "Phy loopback (offline)", +}; +#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test) + +static void e100_diag_test(struct net_device *netdev, + struct ethtool_test *test, u64 *data) +{ + struct ethtool_cmd cmd; + struct nic *nic = netdev_priv(netdev); + int i; + + memset(data, 0, E100_TEST_LEN * sizeof(u64)); + data[0] = !mii_link_ok(&nic->mii); + data[1] = e100_eeprom_load(nic); + if (test->flags & ETH_TEST_FL_OFFLINE) { + + /* save speed, duplex & autoneg settings */ + mii_ethtool_gset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_down(nic); + data[2] = e100_self_test(nic); + data[3] = e100_loopback_test(nic, lb_mac); + data[4] = e100_loopback_test(nic, lb_phy); + + /* restore speed, duplex & autoneg settings */ + mii_ethtool_sset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_up(nic); + } + for (i = 0; i < E100_TEST_LEN; i++) + test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; + + msleep_interruptible(4 * 1000); +} + +static int e100_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct nic *nic = netdev_priv(netdev); + enum led_state { + led_on = 0x01, + led_off = 0x04, + led_on_559 = 0x05, + led_on_557 = 0x07, + }; + u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : + MII_LED_CONTROL; + u16 leds = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; + + case ETHTOOL_ID_ON: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : + (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; + break; + + case ETHTOOL_ID_OFF: + leds = (nic->phy == phy_82552_v) ? 
E100_82552_LED_OFF : led_off; + break; + + case ETHTOOL_ID_INACTIVE: + break; + } + + mdio_write(netdev, nic->mii.phy_id, led_reg, leds); + return 0; +} + +static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + /* device-specific stats */ + "tx_deferred", "tx_single_collisions", "tx_multi_collisions", + "tx_flow_control_pause", "rx_flow_control_pause", + "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", + "rx_short_frame_errors", "rx_over_length_errors", +}; +#define E100_NET_STATS_LEN 21 +#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) + +static int e100_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E100_TEST_LEN; + case ETH_SS_STATS: + return E100_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e100_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nic *nic = netdev_priv(netdev); + int i; + + for (i = 0; i < E100_NET_STATS_LEN; i++) + data[i] = ((unsigned long *)&netdev->stats)[i]; + + data[i++] = nic->tx_deferred; + data[i++] = nic->tx_single_collisions; + data[i++] = nic->tx_multiple_collisions; + data[i++] = nic->tx_fc_pause; + data[i++] = nic->rx_fc_pause; + data[i++] = nic->rx_fc_unsupported; + data[i++] = nic->tx_tco_frames; + data[i++] = nic->rx_tco_frames; + data[i++] = nic->rx_short_frame_errors; + data[i++] = nic->rx_over_length_errors; +} + +static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e100_gstrings_test, sizeof(e100_gstrings_test)); + break; + case ETH_SS_STATS: + memcpy(data, e100_gstrings_stats, sizeof(e100_gstrings_stats)); + break; + } +} + +static const struct ethtool_ops e100_ethtool_ops = { + .get_drvinfo = e100_get_drvinfo, + .get_regs_len = e100_get_regs_len, + .get_regs = e100_get_regs, + .get_wol = e100_get_wol, + .set_wol = e100_set_wol, + .get_msglevel = e100_get_msglevel, + .set_msglevel = e100_set_msglevel, + .nway_reset = e100_nway_reset, + .get_link = e100_get_link, + .get_eeprom_len = e100_get_eeprom_len, + .get_eeprom = e100_get_eeprom, + .set_eeprom = e100_set_eeprom, + .get_ringparam = e100_get_ringparam, + .set_ringparam = e100_set_ringparam, + .self_test = e100_diag_test, + .get_strings = e100_get_strings, + .set_phys_id = e100_set_phys_id, + .get_ethtool_stats = e100_get_ethtool_stats, + .get_sset_count = e100_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e100_get_link_ksettings, + .set_link_ksettings = e100_set_link_ksettings, +}; + +static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct nic *nic = netdev_priv(netdev); + + return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); +} + +static int e100_alloc(struct nic *nic) +{ + nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem), + &nic->dma_addr, GFP_KERNEL); + return nic->mem ? 
0 : -ENOMEM; +} + +static void e100_free(struct nic *nic) +{ + if (nic->mem) { + dma_free_coherent(&nic->pdev->dev, sizeof(struct mem), + nic->mem, nic->dma_addr); + nic->mem = NULL; + } +} + +static int e100_open(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err = 0; + + if (!nic->ecdev) + netif_carrier_off(netdev); + if ((err = e100_up(nic))) + netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); + return err; +} + +static int e100_close(struct net_device *netdev) +{ + e100_down(netdev_priv(netdev)); + return 0; +} + +static int e100_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct nic *nic = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + e100_exec_cb(nic, NULL, e100_configure); + return 1; +} + +static const struct net_device_ops e100_netdev_ops = { + .ndo_open = e100_open, + .ndo_stop = e100_close, + .ndo_start_xmit = e100_xmit_frame, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = e100_set_multicast_list, + .ndo_set_mac_address = e100_set_mac_address, + .ndo_eth_ioctl = e100_do_ioctl, + .ndo_tx_timeout = e100_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e100_netpoll, +#endif + .ndo_set_features = e100_set_features, +}; + +static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct nic *nic; + int err; + + if (!(netdev = alloc_etherdev(sizeof(struct nic)))) + return -ENOMEM; + + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; + + netdev->netdev_ops = &e100_netdev_ops; + netdev->ethtool_ops = &e100_ethtool_ops; + netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + nic = netdev_priv(netdev); + netif_napi_add_weight(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); + nic->netdev = netdev; + nic->pdev = pdev; + nic->msg_enable = (1 << debug) - 1; + nic->mdio_ctrl = mdio_ctrl_hw; + pci_set_drvdata(pdev, netdev); + + if ((err = pci_enable_device(pdev))) { + netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); + goto err_out_free_dev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + if ((err = pci_request_regions(pdev, DRV_NAME))) { + netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable_pdev; + } + + if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { + netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + if (use_io) + netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); + + nic->csr = pci_iomap(pdev, (use_io ? 
1 : 0), sizeof(struct csr)); + if (!nic->csr) { + netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + if (ent->driver_data) + nic->flags |= ich; + else + nic->flags &= ~ich; + + e100_get_defaults(nic); + + /* D100 MAC doesn't allow rx of vlan packets with normal MTU */ + if (nic->mac < mac_82558_D101_A4) + netdev->features |= NETIF_F_VLAN_CHALLENGED; + + /* locks must be initialized before calling hw_reset */ + spin_lock_init(&nic->cb_lock); + spin_lock_init(&nic->cmd_lock); + spin_lock_init(&nic->mdio_lock); + + /* Reset the device before pci_set_master() in case device is in some + * funky state and has an interrupt pending - hint: we don't have the + * interrupt handler registered yet. */ + e100_hw_reset(nic); + + pci_set_master(pdev); + + timer_setup(&nic->watchdog, e100_watchdog, 0); + + INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); + + if ((err = e100_alloc(nic))) { + netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); + goto err_out_iounmap; + } + + if ((err = e100_eeprom_load(nic))) + goto err_out_free; + + e100_phy_init(nic); + + eth_hw_addr_set(netdev, (u8 *)nic->eeprom); + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!eeprom_bad_csum_allow) { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); + err = -EAGAIN; + goto err_out_free; + } else { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); + } + } + + /* Wol magic packet can be enabled from eeprom */ + if ((nic->mac >= mac_82558_D101_A4) && + (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { + nic->flags |= wol_magic; + device_set_wakeup_enable(&pdev->dev, true); + } + + /* ack any pending wake events, disable PME */ + pci_pme_active(pdev, false); + + // offer device to EtherCAT master module + nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE); + + if (!nic->ecdev) { + strcpy(netdev->name, "eth%d"); + if ((err = register_netdev(netdev))) { + netif_err(nic, probe, nic->netdev, + "Cannot register net device, aborting\n"); + goto err_out_free; + } + } + + nic->cbs_pool = dma_pool_create(netdev->name, + &nic->pdev->dev, + nic->params.cbs.max * sizeof(struct cb), + sizeof(u32), + 0); + if (!nic->cbs_pool) { + netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); + err = -ENOMEM; + goto err_out_pool; + } + netif_info(nic, probe, nic->netdev, + "addr 0x%llx, irq %d, MAC addr %pM\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), + pdev->irq, netdev->dev_addr); + + if (nic->ecdev) { + err = ecdev_open(nic->ecdev); + if (err) { + ecdev_withdraw(nic->ecdev); + goto err_out_free; + } + } + + return 0; + +err_out_pool: + unregister_netdev(netdev); +err_out_free: + e100_free(nic); +err_out_iounmap: + pci_iounmap(pdev, nic->csr); +err_out_free_res: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out_free_dev: + free_netdev(netdev); + return err; +} + +static void e100_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netdev) { + struct nic *nic = netdev_priv(netdev); + if (nic->ecdev) { + ecdev_close(nic->ecdev); + ecdev_withdraw(nic->ecdev); + } else { + unregister_netdev(netdev); + } + + e100_free(nic); + pci_iounmap(pdev, nic->csr); + dma_pool_destroy(nic->cbs_pool); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ +#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ +#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ +static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + e100_down(nic); + + if ((nic->flags & wol_magic) | e100_asf(nic)) { + /* enable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, smartspeed | + E100_82552_REV_ANEG | E100_82552_ANEG_NOW); + } + *enable_wake = true; + } else { + *enable_wake = false; + } + + pci_disable_device(pdev); +} + +static int __e100_power_off(struct pci_dev *pdev, bool wake) +{ + if (wake) + return pci_prepare_to_sleep(pdev); + + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int __maybe_unused e100_suspend(struct device *dev_d) +{ + bool wake; + + __e100_shutdown(to_pci_dev(dev_d), &wake); + + return 0; +} + +static int __maybe_unused e100_resume(struct device *dev_d) +{ + struct net_device *netdev = dev_get_drvdata(dev_d); + struct nic *nic = netdev_priv(netdev); + int err; + + err = pci_enable_device(to_pci_dev(dev_d)); + if (err) { + netdev_err(netdev, "Resume cannot enable PCI device, aborting\n"); + return err; + } + pci_set_master(to_pci_dev(dev_d)); + + /* disable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, + smartspeed & ~(E100_82552_REV_ANEG)); + } + + if (netif_running(netdev)) + e100_up(nic); + + netif_device_attach(netdev); + + return 0; +} + +static void e100_shutdown(struct pci_dev *pdev) +{ + bool wake; + __e100_shutdown(pdev, &wake); + if (system_state == SYSTEM_POWER_OFF) + __e100_power_off(pdev, wake); +} + +/* ------------------ PCI Error Recovery infrastructure -------------- */ +/** + * e100_io_error_detected - called when PCI error is detected. 
+ * @pdev: Pointer to PCI device + * @state: The current pci connection state + */ +static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (nic->ecdev) + return -EBUSY; + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e100_down(nic); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e100_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch. + */ +static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (nic->ecdev) + return -EBUSY; + + if (pci_enable_device(pdev)) { + pr_err("Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + /* Only one device per card can do a reset */ + if (0 != PCI_FUNC(pdev->devfn)) + return PCI_ERS_RESULT_RECOVERED; + e100_hw_reset(nic); + e100_phy_init(nic); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e100_io_resume - resume normal operations + * @pdev: Pointer to PCI device + * + * Resume normal operations after an error recovery + * sequence has been completed. + */ +static void e100_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, PCI_D0, 0); + + if (!nic->ecdev) + netif_device_attach(netdev); + if (nic->ecdev || netif_running(netdev)) { + e100_open(netdev); + if (!nic->ecdev) + mod_timer(&nic->watchdog, jiffies); + } +} + +static const struct pci_error_handlers e100_err_handler = { + .error_detected = e100_io_error_detected, + .slot_reset = e100_io_slot_reset, + .resume = e100_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume); + +static struct pci_driver e100_driver = { + .name = DRV_NAME, + .id_table = e100_id_table, + .probe = e100_probe, + .remove = e100_remove, + + /* Power Management hooks */ + .driver.pm = &e100_pm_ops, + + .shutdown = e100_shutdown, + .err_handler = &e100_err_handler, +}; + +static int __init e100_init_module(void) +{ + if (((1 << debug) - 1) & NETIF_MSG_DRV) { + pr_info("%s\n", DRV_DESCRIPTION); + pr_info("%s\n", DRV_COPYRIGHT); + } + return pci_register_driver(&e100_driver); +} + +static void __exit e100_cleanup_module(void) +{ + printk(KERN_INFO DRV_NAME " cleaning up module...\n"); + pci_unregister_driver(&e100_driver); + printk(KERN_INFO DRV_NAME " module cleaned up.\n"); +} + +module_init(e100_init_module); +module_exit(e100_cleanup_module); diff --git a/devices/e100-6.1-orig.c b/devices/e100-6.1-orig.c new file mode 100644 index 00000000..d3fdc290 --- /dev/null +++ b/devices/e100-6.1-orig.c @@ -0,0 +1,3196 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* + * e100.c: Intel(R) PRO/100 ethernet driver + * + * (Re)written 2003 by scott.feldman@intel.com. Based loosely on + * original e100 driver, but better described as a munging of + * e100, e1000, eepro100, tg3, 8139cp, and other drivers. 
+ * + * References: + * Intel 8255x 10/100 Mbps Ethernet Controller Family, + * Open Source Software Developers Manual, + * http://sourceforge.net/projects/e1000 + * + * + * Theory of Operation + * + * I. General + * + * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet + * controller family, which includes the 82557, 82558, 82559, 82550, + * 82551, and 82562 devices. 82558 and greater controllers + * integrate the Intel 82555 PHY. The controllers are used in + * server and client network interface cards, as well as in + * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx + * configurations. 8255x supports a 32-bit linear addressing + * mode and operates at 33Mhz PCI clock rate. + * + * II. Driver Operation + * + * Memory-mapped mode is used exclusively to access the device's + * shared-memory structure, the Control/Status Registers (CSR). All + * setup, configuration, and control of the device, including queuing + * of Tx, Rx, and configuration commands is through the CSR. + * cmd_lock serializes accesses to the CSR command register. cb_lock + * protects the shared Command Block List (CBL). + * + * 8255x is highly MII-compliant and all access to the PHY go + * through the Management Data Interface (MDI). Consequently, the + * driver leverages the mii.c library shared with other MII-compliant + * devices. + * + * Big- and Little-Endian byte order as well as 32- and 64-bit + * archs are supported. Weak-ordered memory and non-cache-coherent + * archs are supported. + * + * III. Transmit + * + * A Tx skb is mapped and hangs off of a TCB. TCBs are linked + * together in a fixed-size ring (CBL) thus forming the flexible mode + * memory structure. A TCB marked with the suspend-bit indicates + * the end of the ring. The last TCB processed suspends the + * controller, and the controller can be restarted by issue a CU + * resume command to continue from the suspend point, or a CU start + * command to start at a given position in the ring. + * + * Non-Tx commands (config, multicast setup, etc) are linked + * into the CBL ring along with Tx commands. The common structure + * used for both Tx and non-Tx commands is the Command Block (CB). + * + * cb_to_use is the next CB to use for queuing a command; cb_to_clean + * is the next CB to check for completion; cb_to_send is the first + * CB to start on in case of a previous failure to resume. CB clean + * up happens in interrupt context in response to a CU interrupt. + * cbs_avail keeps track of number of free CB resources available. + * + * Hardware padding of short packets to minimum packet size is + * enabled. 82557 pads with 7Eh, while the later controllers pad + * with 00h. + * + * IV. Receive + * + * The Receive Frame Area (RFA) comprises a ring of Receive Frame + * Descriptors (RFD) + data buffer, thus forming the simplified mode + * memory structure. Rx skbs are allocated to contain both the RFD + * and the data buffer, but the RFD is pulled off before the skb is + * indicated. The data buffer is aligned such that encapsulated + * protocol headers are u32-aligned. Since the RFD is part of the + * mapped shared memory, and completion status is contained within + * the RFD, the RFD must be dma_sync'ed to maintain a consistent + * view from software and hardware. + * + * In order to keep updates to the RFD link field from colliding with + * hardware writes to mark packets complete, we use the feature that + * hardware will not write to a size 0 descriptor and mark the previous + * packet as end-of-list (EL). 
After updating the link, we remove EL + * and only then restore the size such that hardware may use the + * previous-to-end RFD. + * + * Under typical operation, the receive unit (RU) is start once, + * and the controller happily fills RFDs as frames arrive. If + * replacement RFDs cannot be allocated, or the RU goes non-active, + * the RU must be restarted. Frame arrival generates an interrupt, + * and Rx indication and re-allocation happen in the same context, + * therefore no locking is required. A software-generated interrupt + * is generated from the watchdog to recover from a failed allocation + * scenario where all Rx resources have been indicated and none re- + * placed. + * + * V. Miscellaneous + * + * VLAN offloading of tagging, stripping and filtering is not + * supported, but driver will accommodate the extra 4-byte VLAN tag + * for processing by upper layers. Tx/Rx Checksum offloading is not + * supported. Tx Scatter/Gather is not supported. Jumbo Frames is + * not supported (hardware limitation). + * + * MagicPacket(tm) WoL support is enabled/disabled via ethtool. + * + * Thanks to JC (jchapman@katalix.com) for helping with + * testing/troubleshooting the development driver. + * + * TODO: + * o several entry points race with dev->close + * o check for tx-no-resources/stop Q races with tx clean/wake Q + * + * FIXES: + * 2005/12/02 - Michael O'Donnell + * - Stratus87247: protect MDI control register manipulations + * 2009/06/01 - Andreas Mohr + * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/hardirq.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/mii.h> +#include <linux/if_vlan.h> +#include <linux/skbuff.h> +#include <linux/ethtool.h> +#include <linux/string.h> +#include <linux/firmware.h> +#include <linux/rtnetlink.h> +#include <asm/unaligned.h> + + +#define DRV_NAME "e100" +#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" +#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" + +#define E100_WATCHDOG_PERIOD (2 * HZ) +#define E100_NAPI_WEIGHT 16 + +#define FIRMWARE_D101M "e100/d101m_ucode.bin" +#define FIRMWARE_D101S "e100/d101s_ucode.bin" +#define FIRMWARE_D102E "e100/d102e_ucode.bin" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE(FIRMWARE_D101M); +MODULE_FIRMWARE(FIRMWARE_D101S); +MODULE_FIRMWARE(FIRMWARE_D102E); + +static int debug = 3; +static int eeprom_bad_csum_allow = 0; +static int use_io = 0; +module_param(debug, int, 0); +module_param(eeprom_bad_csum_allow, int, 0); +module_param(use_io, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); +MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); + +#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ + PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ + PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } +static const struct pci_device_id e100_id_table[] = { + INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1032, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1033, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1034, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1038, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1039, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103A, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103B, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103C, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103D, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103E,
4), + INTEL_8255X_ETHERNET_DEVICE(0x1050, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1051, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1052, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1053, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1054, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1055, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1056, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1057, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1059, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1064, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1065, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1066, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1067, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1068, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), + INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), + INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), + INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), + INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), + INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), + { 0, } +}; +MODULE_DEVICE_TABLE(pci, e100_id_table); + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 0xFF, +}; + +enum phy { + phy_100a = 0x000003E0, + phy_100c = 0x035002A8, + phy_82555_tx = 0x015002A8, + phy_nsc_tx = 0x5C002000, + phy_82562_et = 0x033002A8, + phy_82562_em = 0x032002A8, + phy_82562_ek = 0x031002A8, + phy_82562_eh = 0x017002A8, + phy_82552_v = 0xd061004d, + phy_unknown = 0xFFFFFFFF, +}; + +/* CSR (Control/Status Registers) */ +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 0x08, + rus_ready = 0x10, + rus_mask = 0x3C, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0x00, + stat_ack_sw_gen = 0x04, + stat_ack_rnr = 0x10, + stat_ack_cu_idle = 0x20, + stat_ack_frame_rx = 0x40, + stat_ack_cu_cmd_done = 0x80, + stat_ack_not_present = 0xFF, + stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), + stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), +}; + +enum scb_cmd_hi { + irq_mask_none = 0x00, + irq_mask_all = 0x01, + irq_sw_gen = 0x02, +}; + +enum scb_cmd_lo { + cuc_nop = 0x00, + ruc_start = 0x01, + ruc_load_base = 0x06, + cuc_start = 0x10, + cuc_resume = 0x20, + cuc_dump_addr = 0x40, + cuc_dump_stats = 0x50, + cuc_load_base = 0x60, + cuc_dump_reset = 0x70, +}; + +enum cuc_dump { + cuc_dump_complete = 0x0000A005, + cuc_dump_reset_complete = 0x0000A007, +}; + +enum port { + software_reset = 0x0000, + selftest = 0x0001, + selective_reset = 0x0002, +}; + +enum eeprom_ctrl_lo { + eesk = 0x01, + eecs = 0x02, + eedi = 0x04, + eedo = 0x08, +}; + +enum mdi_ctrl { + mdi_write = 0x04000000, + mdi_read = 0x08000000, + mdi_ready = 0x10000000, +}; + +enum eeprom_op { + op_write = 0x05, + op_read = 0x06, + op_ewds = 0x10, + op_ewen = 0x13, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 0x03, + eeprom_phy_iface = 0x06, + eeprom_id 
= 0x0A, + eeprom_config_asf = 0x0D, + eeprom_smbus_addr = 0x90, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 0x0080, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB, + I82553C, + I82503, + DP83840, + S80C240, + S80C24, + I82555, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 0x0020, +}; + +enum eeprom_config_asf { + eeprom_asf = 0x8000, + eeprom_gcl = 0x4000, +}; + +enum cb_status { + cb_complete = 0x8000, + cb_ok = 0x2000, +}; + +/* + * cb_command - Command Block flags + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory + */ +enum cb_command { + cb_nop = 0x0000, + cb_iaaddr = 0x0001, + cb_config = 0x0002, + cb_multi = 0x0003, + cb_tx = 0x0004, + cb_ucode = 0x0005, + cb_dump = 0x0006, + cb_tx_sf = 0x0008, + cb_tx_nc = 0x0010, + cb_cid = 0x1f00, + cb_i = 0x2000, + cb_s = 0x4000, + cb_el = 0x8000, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next, *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +#if defined(__BIG_ENDIAN_BITFIELD) +#define X(a,b) b,a +#else +#define X(a,b) a,b +#endif +struct config { +/*0*/ u8 X(byte_count:6, pad0:2); +/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); +/*2*/ u8 adaptive_ifs; +/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), + term_write_cache_line:1), pad3:4); +/*4*/ u8 X(rx_dma_max_count:7, pad4:1); +/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); +/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), + tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), + rx_save_overruns : 1), rx_save_bad_frames : 1); +/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), + pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), + tx_dynamic_tbd:1); +/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); +/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), + link_status_wake:1), arp_wake:1), mcmatch_wake:1); +/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), + loopback:2); +/*11*/ u8 X(linear_priority:3, pad11:5); +/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); +/*13*/ u8 ip_addr_lo; +/*14*/ u8 ip_addr_hi; +/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), + wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), + pad15_2:1), crs_or_cdt:1); +/*16*/ u8 fc_delay_lo; +/*17*/ u8 fc_delay_hi; +/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), + rx_long_ok:1), fc_priority_threshold:3), pad18:1); +/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), + fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), + full_duplex_force:1), full_duplex_pin:1); +/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1); +/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); +/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); + u8 pad_d102[9]; +}; + +#define E100_MAX_MULTICAST_ADDRS 64 +struct multi { + __le16 count; + u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; +}; + +/* Important: keep total struct u32-aligned */ +#define UCODE_SIZE 134 +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[ETH_ALEN]; + __le32 ucode[UCODE_SIZE]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next, 
*prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, lb_mac = 1, lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames, tx_max_collisions, tx_late_collisions, + tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, + tx_multiple_collisions, tx_total_collisions; + __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors, + rx_resource_errors, rx_overrun_errors, rx_cdt_errors, + rx_short_frame_errors; + __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; + __le16 xmt_tco_frames, rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + /* Begin: frequently used values: keep adjacent for cache effect */ + u32 msg_enable ____cacheline_aligned; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data); + + struct rx *rxs ____cacheline_aligned; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + + spinlock_t cb_lock ____cacheline_aligned; + spinlock_t cmd_lock; + struct csr __iomem *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + /* End: frequently used values: keep adjacent for cache effect */ + + enum { + ich = (1 << 0), + promiscuous = (1 << 1), + multicast_all = (1 << 2), + wol_magic = (1 << 3), + ich_10h_workaround = (1 << 4), + } flags ____cacheline_aligned; + + enum mac mac; + enum phy phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + + struct mem *mem; + dma_addr_t dma_addr; + + struct dma_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + + u16 eeprom_wc; + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; +}; + +static inline void e100_write_flush(struct nic *nic) +{ + /* Flush previous PCI writes through intermediate bridges + * by doing a benign read */ + (void)ioread8(&nic->csr->scb.status); +} + +static void e100_enable_irq(struct nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_disable_irq(struct nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_hw_reset(struct nic *nic) +{ + /* Put CU and RU into idle with a selective reset to get + * device off of PCI bus */ + iowrite32(selective_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Now fully reset device */ + iowrite32(software_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Mask off our interrupt line - it's unmasked after reset */ + 
e100_disable_irq(nic); +} + +static int e100_self_test(struct nic *nic) +{ + u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); + + /* Passing the self-test is a pretty good indication + * that the device can DMA to/from host memory */ + + nic->mem->selftest.signature = 0; + nic->mem->selftest.result = 0xFFFFFFFF; + + iowrite32(selftest | dma_addr, &nic->csr->port); + e100_write_flush(nic); + /* Wait 10 msec for self-test to complete */ + msleep(10); + + /* Interrupts are enabled after self-test */ + e100_disable_irq(nic); + + /* Check results of self-test */ + if (nic->mem->selftest.result != 0) { + netif_err(nic, hw, nic->netdev, + "Self-test failed: result=0x%08X\n", + nic->mem->selftest.result); + return -ETIMEDOUT; + } + if (nic->mem->selftest.signature == 0) { + netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data) +{ + u32 cmd_addr_data[3]; + u8 ctrl; + int i, j; + + /* Three cmds: write/erase enable, write data, write/erase disable */ + cmd_addr_data[0] = op_ewen << (addr_len - 2); + cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | + le16_to_cpu(data); + cmd_addr_data[2] = op_ewds << (addr_len - 2); + + /* Bit-bang cmds to write word to eeprom */ + for (j = 0; j < 3; j++) { + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data[j] & (1 << i)) ? + eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } + /* Wait 10 msec for cmd to complete */ + msleep(10); + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } +}; + +/* General technique stolen from the eepro100 driver - very clever */ +static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) +{ + u32 cmd_addr_data; + u16 data = 0; + u8 ctrl; + int i; + + cmd_addr_data = ((op_read << *addr_len) | addr) << 16; + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Bit-bang to read word from eeprom */ + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Eeprom drives a dummy zero to EEDO after receiving + * complete address. Use this to adjust addr_len. */ + ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); + if (!(ctrl & eedo) && i > 16) { + *addr_len -= (i - 16); + i = 17; + } + + data = (data << 1) | (ctrl & eedo ? 
1 : 0); + } + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + return cpu_to_le16(data); +}; + +/* Load entire EEPROM image into driver cache and validate checksum */ +static int e100_eeprom_load(struct nic *nic) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + for (addr = 0; addr < nic->eeprom_wc; addr++) { + nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); + if (addr < nic->eeprom_wc - 1) + checksum += le16_to_cpu(nic->eeprom[addr]); + } + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { + netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); + if (!eeprom_bad_csum_allow) + return -EAGAIN; + } + + return 0; +} + +/* Save (portion of) driver EEPROM cache to device and update checksum */ +static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + if (start + count >= nic->eeprom_wc) + return -EINVAL; + + for (addr = start; addr < start + count; addr++) + e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + for (addr = 0; addr < nic->eeprom_wc - 1; addr++) + checksum += le16_to_cpu(nic->eeprom[addr]); + nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); + e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, + nic->eeprom[nic->eeprom_wc - 1]); + + return 0; +} + +#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ +#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ +static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) +{ + unsigned long flags; + unsigned int i; + int err = 0; + + spin_lock_irqsave(&nic->cmd_lock, flags); + + /* Previous command is accepted when SCB clears */ + for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { + if (likely(!ioread8(&nic->csr->scb.cmd_lo))) + break; + cpu_relax(); + if (unlikely(i > E100_WAIT_SCB_FAST)) + udelay(5); + } + if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) { + err = -EAGAIN; + goto err_unlock; + } + + if (unlikely(cmd != cuc_resume)) + iowrite32(dma_addr, &nic->csr->scb.gen_ptr); + iowrite8(cmd, &nic->csr->scb.cmd_lo); + +err_unlock: + spin_unlock_irqrestore(&nic->cmd_lock, flags); + + return err; +} + +static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, + int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) +{ + struct cb *cb; + unsigned long flags; + int err; + + spin_lock_irqsave(&nic->cb_lock, flags); + + if (unlikely(!nic->cbs_avail)) { + err = -ENOMEM; + goto err_unlock; + } + + cb = nic->cb_to_use; + nic->cb_to_use = cb->next; + nic->cbs_avail--; + cb->skb = skb; + + err = cb_prepare(nic, cb, skb); + if (err) + goto err_unlock; + + if (unlikely(!nic->cbs_avail)) + err = -ENOSPC; + + + /* Order is important otherwise we'll be in a race with h/w: + * set S-bit in current first, then clear S-bit in previous. 
*/ + cb->command |= cpu_to_le16(cb_s); + dma_wmb(); + cb->prev->command &= cpu_to_le16(~cb_s); + + while (nic->cb_to_send != nic->cb_to_use) { + if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, + nic->cb_to_send->dma_addr))) { + /* Ok, here's where things get sticky. It's + * possible that we can't schedule the command + * because the controller is too busy, so + * let's just queue the command and try again + * when another command is scheduled. */ + if (err == -ENOSPC) { + //request a reset + schedule_work(&nic->tx_timeout_task); + } + break; + } else { + nic->cuc_cmd = cuc_resume; + nic->cb_to_send = nic->cb_to_send->next; + } + } + +err_unlock: + spin_unlock_irqrestore(&nic->cb_lock, flags); + + return err; +} + +static int mdio_read(struct net_device *netdev, int addr, int reg) +{ + struct nic *nic = netdev_priv(netdev); + return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); +} + +static void mdio_write(struct net_device *netdev, int addr, int reg, int data) +{ + struct nic *nic = netdev_priv(netdev); + + nic->mdio_ctrl(nic, addr, mdi_write, reg, data); +} + +/* the standard mdio_ctrl() function for usual MII-compliant hardware */ +static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) +{ + u32 data_out = 0; + unsigned int i; + unsigned long flags; + + + /* + * Stratus87247: we shouldn't be writing the MDI control + * register until the Ready bit shows True. Also, since + * manipulation of the MDI control registers is a multi-step + * procedure it should be done under lock. + */ + spin_lock_irqsave(&nic->mdio_lock, flags); + for (i = 100; i; --i) { + if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) + break; + udelay(20); + } + if (unlikely(!i)) { + netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); + spin_unlock_irqrestore(&nic->mdio_lock, flags); + return 0; /* No way to indicate timeout error */ + } + iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); + + for (i = 0; i < 100; i++) { + udelay(20); + if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) + break; + } + spin_unlock_irqrestore(&nic->mdio_lock, flags); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data, data_out); + return (u16)data_out; +} + +/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */ +static u16 mdio_ctrl_phy_82552_v(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + if ((reg == MII_BMCR) && (dir == mdi_write)) { + if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) { + u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, + MII_ADVERTISE); + + /* + * Workaround Si issue where sometimes the part will not + * autoneg to 100Mbps even when advertised. + */ + if (advert & ADVERTISE_100FULL) + data |= BMCR_SPEED100 | BMCR_FULLDPLX; + else if (advert & ADVERTISE_100HALF) + data |= BMCR_SPEED100; + } + } + return mdio_ctrl_hw(nic, addr, dir, reg, data); +} + +/* Fully software-emulated mdio_ctrl() function for cards without + * MII-compliant PHYs. + * For now, this is mainly geared towards 80c24 support; in case of further + * requirements for other types (i82503, ...?) either extend this mechanism + * or split it, whichever is cleaner. 
+ */ +static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + /* might need to allocate a netdev_priv'ed register array eventually + * to be able to record state changes, but for now + * some fully hardcoded register handling ought to be ok I guess. */ + + if (dir == mdi_read) { + switch (reg) { + case MII_BMCR: + /* Auto-negotiation, right? */ + return BMCR_ANENABLE | + BMCR_FULLDPLX; + case MII_BMSR: + return BMSR_LSTATUS /* for mii_link_ok() */ | + BMSR_ANEGCAPABLE | + BMSR_10FULL; + case MII_ADVERTISE: + /* 80c24 is a "combo card" PHY, right? */ + return ADVERTISE_10HALF | + ADVERTISE_10FULL; + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } else { + switch (reg) { + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } +} +static inline int e100_phy_supports_mii(struct nic *nic) +{ + /* for now, just check it by comparing whether we + are using MII software emulation. + */ + return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); +} + +static void e100_get_defaults(struct nic *nic) +{ + struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; + struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; + + /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ + nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision; + if (nic->mac == mac_unknown) + nic->mac = mac_82557_D100_A; + + nic->params.rfds = rfds; + nic->params.cbs = cbs; + + /* Quadwords to DMA into FIFO before starting frame transmit */ + nic->tx_threshold = 0xE0; + + /* no interrupt for every tx completion, delay = 256us if not 557 */ + nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | + ((nic->mac >= mac_82558_D101_A4) ? 
cb_cid : cb_i)); + + /* Template for a freshly allocated RFD */ + nic->blank_rfd.command = 0; + nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); + nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); + + /* MII setup */ + nic->mii.phy_id_mask = 0x1F; + nic->mii.reg_num_mask = 0x1F; + nic->mii.dev = nic->netdev; + nic->mii.mdio_read = mdio_read; + nic->mii.mdio_write = mdio_write; +} + +static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct config *config = &cb->u.config; + u8 *c = (u8 *)config; + struct net_device *netdev = nic->netdev; + + cb->command = cpu_to_le16(cb_config); + + memset(config, 0, sizeof(struct config)); + + config->byte_count = 0x16; /* bytes in this struct */ + config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ + config->direct_rx_dma = 0x1; /* reserved */ + config->standard_tcb = 0x1; /* 1=standard, 0=extended */ + config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ + config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ + config->tx_underrun_retry = 0x3; /* # of underrun retries */ + if (e100_phy_supports_mii(nic)) + config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ + config->pad10 = 0x6; + config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ + config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ + config->ifs = 0x6; /* x16 = inter frame spacing */ + config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ + config->pad15_1 = 0x1; + config->pad15_2 = 0x1; + config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ + config->fc_delay_hi = 0x40; /* time delay for fc frame */ + config->tx_padding = 0x1; /* 1=pad short frames */ + config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ + config->pad18 = 0x1; + config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ + config->pad20_1 = 0x1F; + config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ + config->pad21_1 = 0x5; + + config->adaptive_ifs = nic->adaptive_ifs; + config->loopback = nic->loopback; + + if (nic->mii.force_media && nic->mii.full_duplex) + config->full_duplex_force = 0x1; /* 1=force, 0=auto */ + + if (nic->flags & promiscuous || nic->loopback) { + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + config->promiscuous_mode = 0x1; /* 1=on, 0=off */ + } + + if (unlikely(netdev->features & NETIF_F_RXFCS)) + config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ + + if (nic->flags & multicast_all) + config->multicast_all = 0x1; /* 1=accept, 0=no */ + + /* disable WoL when up */ + if (netif_running(nic->netdev) || !(nic->flags & wol_magic)) + config->magic_packet_disable = 0x1; /* 1=off, 0=on */ + + if (nic->mac >= mac_82558_D101_A4) { + config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ + config->mwi_enable = 0x1; /* 1=enable, 0=disable */ + config->standard_tcb = 0x0; /* 1=standard, 0=extended */ + config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ + if (nic->mac >= mac_82559_D101M) { + config->tno_intr = 0x1; /* TCO stats enable */ + /* Enable TCO in extended config */ + if (nic->mac >= mac_82551_10) { + config->byte_count = 0x20; /* extended bytes */ + config->rx_d102_mode = 0x1; /* GMRC for TCO */ + } + } else { + config->standard_stat_counter = 0x0; + } + } + + if (netdev->features & NETIF_F_RXALL) { + config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + } + + netif_printk(nic, 
hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", + c + 0); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", + c + 8); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", + c + 16); + return 0; +} + +/************************************************************************* +* CPUSaver parameters +* +* All CPUSaver parameters are 16-bit literals that are part of a +* "move immediate value" instruction. By changing the value of +* the literal in the instruction before the code is loaded, the +* driver can change the algorithm. +* +* INTDELAY - This loads the dead-man timer with its initial value. +* When this timer expires the interrupt is asserted, and the +* timer is reset each time a new packet is received. (see +* BUNDLEMAX below to set the limit on number of chained packets) +* The current default is 0x600 or 1536. Experiments show that +* the value should probably stay within the 0x200 - 0x1000. +* +* BUNDLEMAX - +* This sets the maximum number of frames that will be bundled. In +* some situations, such as the TCP windowing algorithm, it may be +* better to limit the growth of the bundle size than let it go as +* high as it can, because that could cause too much added latency. +* The default is six, because this is the number of packets in the +* default TCP window size. A value of 1 would make CPUSaver indicate +* an interrupt for every frame received. If you do not want to put +* a limit on the bundle size, set this value to xFFFF. +* +* BUNDLESMALL - +* This contains a bit-mask describing the minimum size frame that +* will be bundled. The default masks the lower 7 bits, which means +* that any frame less than 128 bytes in length will not be bundled, +* but will instead immediately generate an interrupt. This does +* not affect the current bundle in any way. Any frame that is 128 +* bytes or large will be bundled normally. This feature is meant +* to provide immediate indication of ACK frames in a TCP environment. +* Customers were seeing poor performance when a machine with CPUSaver +* enabled was sending but not receiving. The delay introduced when +* the ACKs were received was enough to reduce total throughput, because +* the sender would sit idle until the ACK was finally seen. +* +* The current default is 0xFF80, which masks out the lower 7 bits. +* This means that any frame which is x7F (127) bytes or smaller +* will cause an immediate interrupt. Because this value must be a +* bit mask, there are only a few valid values that can be used. To +* turn this feature off, the driver can write the value xFFFF to the +* lower word of this instruction (in the same way that the other +* parameters are used). Likewise, a value of 0xF800 (2047) would +* cause an interrupt to be generated for every frame, because all +* standard Ethernet frames are <= 2047 bytes in length. 
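+*
+* [Note added in editing] With the defaults chosen below (INTDELAY = 0x600,
+* BUNDLEMAX = 6, BUNDLESMALL = 1, i.e. a mask of 0xFFFF), small frames are
+* bundled like any others, at most six received frames are chained per
+* interrupt, and the dead-man timer, initialised to 1536 and reset on each
+* received frame, bounds how long a partial bundle can wait.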
+*************************************************************************/ + +/* if you wish to disable the ucode functionality, while maintaining the + * workarounds it provides, set the following defines to: + * BUNDLESMALL 0 + * BUNDLEMAX 1 + * INTDELAY 1 + */ +#define BUNDLESMALL 1 +#define BUNDLEMAX (u16)6 +#define INTDELAY (u16)1536 /* 0x600 */ + +/* Initialize firmware */ +static const struct firmware *e100_request_firmware(struct nic *nic) +{ + const char *fw_name; + const struct firmware *fw = nic->fw; + u8 timer, bundle, min_size; + int err = 0; + bool required = false; + + /* do not load u-code for ICH devices */ + if (nic->flags & ich) + return NULL; + + /* Search for ucode match against h/w revision + * + * Based on comments in the source code for the FreeBSD fxp + * driver, the FIRMWARE_D102E ucode includes both CPUSaver and + * + * "fixes for bugs in the B-step hardware (specifically, bugs + * with Inline Receive)." + * + * So we must fail if it cannot be loaded. + * + * The other microcode files are only required for the optional + * CPUSaver feature. Nice to have, but no reason to fail. + */ + if (nic->mac == mac_82559_D101M) { + fw_name = FIRMWARE_D101M; + } else if (nic->mac == mac_82559_D101S) { + fw_name = FIRMWARE_D101S; + } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { + fw_name = FIRMWARE_D102E; + required = true; + } else { /* No ucode on other devices */ + return NULL; + } + + /* If the firmware has not previously been loaded, request a pointer + * to it. If it was previously loaded, we are reinitializing the + * adapter, possibly in a resume from hibernate, in which case + * request_firmware() cannot be used. + */ + if (!fw) + err = request_firmware(&fw, fw_name, &nic->pdev->dev); + + if (err) { + if (required) { + netif_err(nic, probe, nic->netdev, + "Failed to load firmware \"%s\": %d\n", + fw_name, err); + return ERR_PTR(err); + } else { + netif_info(nic, probe, nic->netdev, + "CPUSaver disabled. Needs \"%s\": %d\n", + fw_name, err); + return NULL; + } + } + + /* Firmware should be precisely UCODE_SIZE (words) plus three bytes + indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ + if (fw->size != UCODE_SIZE * 4 + 3) { + netif_err(nic, probe, nic->netdev, + "Firmware \"%s\" has wrong size %zu\n", + fw_name, fw->size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || + min_size >= UCODE_SIZE) { + netif_err(nic, probe, nic->netdev, + "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", + fw_name, timer, bundle, min_size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* OK, firmware is validated and ready to use. Save a pointer + * to it in the nic */ + nic->fw = fw; + return fw; +} + +static int e100_setup_ucode(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + const struct firmware *fw = (void *)skb; + u8 timer, bundle, min_size; + + /* It's not a real skb; we just abused the fact that e100_exec_cb + will pass it through to here... 
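+ (Note added in editing: the three bytes after the UCODE_SIZE words of the
+ firmware blob, read in e100_request_firmware() above, give the word
+ offsets inside the microcode image at which the INTDELAY, BUNDLEMAX and
+ BUNDLESMALL literals below are patched in.)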
*/ + cb->skb = NULL; + + /* firmware is stored as little endian already */ + memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + /* Insert user-tunable settings in cb->u.ucode */ + cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); + cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); + cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); + + cb->command = cpu_to_le16(cb_ucode | cb_el); + return 0; +} + +static inline int e100_load_ucode_wait(struct nic *nic) +{ + const struct firmware *fw; + int err = 0, counter = 50; + struct cb *cb = nic->cb_to_clean; + + fw = e100_request_firmware(nic); + /* If it's NULL, then no ucode is required */ + if (IS_ERR_OR_NULL(fw)) + return PTR_ERR_OR_ZERO(fw); + + if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) + netif_err(nic, probe, nic->netdev, + "ucode cmd failed with error %d\n", err); + + /* must restart cuc */ + nic->cuc_cmd = cuc_start; + + /* wait for completion */ + e100_write_flush(nic); + udelay(10); + + /* wait for possibly (ouch) 500ms */ + while (!(cb->status & cpu_to_le16(cb_complete))) { + msleep(10); + if (!--counter) break; + } + + /* ack any interrupts, something could have been set */ + iowrite8(~0, &nic->csr->scb.stat_ack); + + /* if the command failed, or is not OK, notify and return */ + if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { + netif_err(nic, probe, nic->netdev, "ucode load failed\n"); + err = -EPERM; + } + + return err; +} + +static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_iaaddr); + memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); + return 0; +} + +static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_dump); + cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + + offsetof(struct mem, dump_buf)); + return 0; +} + +static int e100_phy_check_without_mii(struct nic *nic) +{ + u8 phy_type; + int without_mii; + + phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; + + switch (phy_type) { + case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ + case I82503: /* Non-MII PHY; UNTESTED! */ + case S80C24: /* Non-MII PHY; tested and working */ + /* paragraph from the FreeBSD driver, "FXP_PHY_80C24": + * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter + * doesn't have a programming interface of any sort. The + * media is sensed automatically based on how the link partner + * is configured. This is, in essence, manual configuration. + */ + netif_info(nic, probe, nic->netdev, + "found MII-less i82503 or 80c24 or other PHY\n"); + + nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; + nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ + + /* these might be needed for certain MII-less cards... 
+ * nic->flags |= ich; + * nic->flags |= ich_10h_workaround; */ + + without_mii = 1; + break; + default: + without_mii = 0; + break; + } + return without_mii; +} + +#define NCONFIG_AUTO_SWITCH 0x0080 +#define MII_NSC_CONG MII_RESV1 +#define NSC_CONG_ENABLE 0x0100 +#define NSC_CONG_TXREADY 0x0400 +static int e100_phy_init(struct nic *nic) +{ + struct net_device *netdev = nic->netdev; + u32 addr; + u16 bmcr, stat, id_lo, id_hi, cong; + + /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ + for (addr = 0; addr < 32; addr++) { + nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) + break; + } + if (addr == 32) { + /* uhoh, no PHY detected: check whether we seem to be some + * weird, rare variant which is *known* to not have any MII. + * But do this AFTER MII checking only, since this does + * lookup of EEPROM values which may easily be unreliable. */ + if (e100_phy_check_without_mii(nic)) + return 0; /* simply return and hope for the best */ + else { + /* for unknown cases log a fatal error */ + netif_err(nic, hw, nic->netdev, + "Failed to locate any known PHY, aborting\n"); + return -EAGAIN; + } + } else + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy_addr = %d\n", nic->mii.phy_id); + + /* Get phy ID */ + id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); + id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); + nic->phy = (u32)id_hi << 16 | (u32)id_lo; + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy ID = 0x%08X\n", nic->phy); + + /* Select the phy and isolate the rest */ + for (addr = 0; addr < 32; addr++) { + if (addr != nic->mii.phy_id) { + mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); + } else if (nic->phy != phy_82552_v) { + bmcr = mdio_read(netdev, addr, MII_BMCR); + mdio_write(netdev, addr, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } + } + /* + * Workaround for 82552: + * Clear the ISOLATE bit on selected phy_id last (mirrored on all + * other phy_id's) using bmcr value from addr discovery loop above. + */ + if (nic->phy == phy_82552_v) + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* Handle National tx phys */ +#define NCS_PHY_MODEL_MASK 0xFFF0FFFF + if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { + /* Disable congestion control */ + cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); + cong |= NSC_CONG_TXREADY; + cong &= ~NSC_CONG_ENABLE; + mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); + } + + if (nic->phy == phy_82552_v) { + u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); + + /* assign special tweaked mdio_ctrl() function */ + nic->mdio_ctrl = mdio_ctrl_phy_82552_v; + + /* Workaround Si not advertising flow-control during autoneg */ + advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); + + /* Reset for the above changes to take effect */ + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + bmcr |= BMCR_RESET; + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); + } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && + (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && + (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { + /* enable/disable MDI/MDI-X auto-switching. 
*/ + mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, + nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); + } + + return 0; +} + +static int e100_hw_init(struct nic *nic) +{ + int err = 0; + + e100_hw_reset(nic); + + netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); + if ((err = e100_self_test(nic))) + return err; + + if ((err = e100_phy_init(nic))) + return err; + if ((err = e100_exec_cmd(nic, cuc_load_base, 0))) + return err; + if ((err = e100_exec_cmd(nic, ruc_load_base, 0))) + return err; + if ((err = e100_load_ucode_wait(nic))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_configure))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_addr, + nic->dma_addr + offsetof(struct mem, stats)))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0))) + return err; + + e100_disable_irq(nic); + + return 0; +} + +static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct net_device *netdev = nic->netdev; + struct netdev_hw_addr *ha; + u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); + + cb->command = cpu_to_le16(cb_multi); + cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == count) + break; + memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, + ETH_ALEN); + } + return 0; +} + +static void e100_set_multicast_list(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "mc_count=%d, flags=0x%04X\n", + netdev_mc_count(netdev), netdev->flags); + + if (netdev->flags & IFF_PROMISC) + nic->flags |= promiscuous; + else + nic->flags &= ~promiscuous; + + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS) + nic->flags |= multicast_all; + else + nic->flags &= ~multicast_all; + + e100_exec_cb(nic, NULL, e100_configure); + e100_exec_cb(nic, NULL, e100_multi); +} + +static void e100_update_stats(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct net_device_stats *ns = &dev->stats; + struct stats *s = &nic->mem->stats; + __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : + (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : + &s->complete; + + /* Device's stats reporting may take several microseconds to + * complete, so we're always waiting for results of the + * previous command. 
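+ * (Note added in editing: the device writes cuc_dump_reset_complete into
+ * the completion word selected above once a previously issued
+ * cuc_dump_reset has finished, so the block below only folds in counters
+ * from a dump that is known to be complete before requesting the next
+ * one.)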
*/ + + if (*complete == cpu_to_le32(cuc_dump_reset_complete)) { + *complete = 0; + nic->tx_frames = le32_to_cpu(s->tx_good_frames); + nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); + ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); + ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); + ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); + ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); + ns->collisions += nic->tx_collisions; + ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + + le32_to_cpu(s->tx_lost_crs); + nic->rx_short_frame_errors += + le32_to_cpu(s->rx_short_frame_errors); + ns->rx_length_errors = nic->rx_short_frame_errors + + nic->rx_over_length_errors; + ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); + ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); + ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); + ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + + le32_to_cpu(s->rx_alignment_errors) + + le32_to_cpu(s->rx_short_frame_errors) + + le32_to_cpu(s->rx_cdt_errors); + nic->tx_deferred += le32_to_cpu(s->tx_deferred); + nic->tx_single_collisions += + le32_to_cpu(s->tx_single_collisions); + nic->tx_multiple_collisions += + le32_to_cpu(s->tx_multiple_collisions); + if (nic->mac >= mac_82558_D101_A4) { + nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); + nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); + nic->rx_fc_unsupported += + le32_to_cpu(s->fc_rcv_unsupported); + if (nic->mac >= mac_82559_D101M) { + nic->tx_tco_frames += + le16_to_cpu(s->xmt_tco_frames); + nic->rx_tco_frames += + le16_to_cpu(s->rcv_tco_frames); + } + } + } + + + if (e100_exec_cmd(nic, cuc_dump_reset, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_dump_reset failed\n"); +} + +static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) +{ + /* Adjust inter-frame-spacing (IFS) between two transmits if + * we're getting collisions on a half-duplex connection. */ + + if (duplex == DUPLEX_HALF) { + u32 prev = nic->adaptive_ifs; + u32 min_frames = (speed == SPEED_100) ? 1000 : 100; + + if ((nic->tx_frames / 32 < nic->tx_collisions) && + (nic->tx_frames > min_frames)) { + if (nic->adaptive_ifs < 60) + nic->adaptive_ifs += 5; + } else if (nic->tx_frames < min_frames) { + if (nic->adaptive_ifs >= 5) + nic->adaptive_ifs -= 5; + } + if (nic->adaptive_ifs != prev) + e100_exec_cb(nic, NULL, e100_configure); + } +} + +static void e100_watchdog(struct timer_list *t) +{ + struct nic *nic = from_timer(nic, t, watchdog); + struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; + u32 speed; + + netif_printk(nic, timer, KERN_DEBUG, nic->netdev, + "right now = %ld\n", jiffies); + + /* mii library handles link maintenance tasks */ + + mii_ethtool_gset(&nic->mii, &cmd); + speed = ethtool_cmd_speed(&cmd); + + if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", + speed == SPEED_100 ? 100 : 10, + cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); + } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Down\n"); + } + + mii_check_link(&nic->mii); + + /* Software generated interrupt to recover from (rare) Rx + * allocation failure. 
+ * Unfortunately have to use a spinlock to not re-enable interrupts + * accidentally, due to hardware that shares a register between the + * interrupt mask bit and the SW Interrupt generation bit */ + spin_lock_irq(&nic->cmd_lock); + iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irq(&nic->cmd_lock); + + e100_update_stats(nic); + e100_adjust_adaptive_ifs(nic, speed, cmd.duplex); + + if (nic->mac <= mac_82557_D100_C) + /* Issue a multicast command to workaround a 557 lock up */ + e100_set_multicast_list(nic->netdev); + + if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) + /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ + nic->flags |= ich_10h_workaround; + else + nic->flags &= ~ich_10h_workaround; + + mod_timer(&nic->watchdog, + round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); +} + +static int e100_xmit_prepare(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + dma_addr_t dma_addr; + cb->command = nic->tx_command; + + dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + /* If we can't map the skb, have the upper layer try later */ + if (dma_mapping_error(&nic->pdev->dev, dma_addr)) + return -ENOMEM; + + /* + * Use the last 4 bytes of the SKB payload packet as the CRC, used for + * testing, ie sending frames with bad CRC. + */ + if (unlikely(skb->no_fcs)) + cb->command |= cpu_to_le16(cb_tx_nc); + else + cb->command &= ~cpu_to_le16(cb_tx_nc); + + /* interrupt every 16 packets regardless of delay */ + if ((nic->cbs_avail & ~15) == nic->cbs_avail) + cb->command |= cpu_to_le16(cb_i); + cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); + cb->u.tcb.tcb_byte_count = 0; + cb->u.tcb.threshold = nic->tx_threshold; + cb->u.tcb.tbd_count = 1; + cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); + cb->u.tcb.tbd.size = cpu_to_le16(skb->len); + skb_tx_timestamp(skb); + return 0; +} + +static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + if (nic->flags & ich_10h_workaround) { + /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. + Issue a NOP command followed by a 1us delay before + issuing the Tx command. */ + if (e100_exec_cmd(nic, cuc_nop, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_nop failed\n"); + udelay(1); + } + + err = e100_exec_cb(nic, skb, e100_xmit_prepare); + + switch (err) { + case -ENOSPC: + /* We queued the skb, but now we're out of space. */ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "No space for CB\n"); + netif_stop_queue(netdev); + break; + case -ENOMEM: + /* This is a hard error - log it. 
*/ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "Out of Tx resources, returning skb\n"); + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +static int e100_tx_clean(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct cb *cb; + int tx_cleaned = 0; + + spin_lock(&nic->cb_lock); + + /* Clean CBs marked complete */ + for (cb = nic->cb_to_clean; + cb->status & cpu_to_le16(cb_complete); + cb = nic->cb_to_clean = cb->next) { + dma_rmb(); /* read skb after status */ + netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, + "cb[%d]->status = 0x%04X\n", + (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), + cb->status); + + if (likely(cb->skb != NULL)) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += cb->skb->len; + + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + tx_cleaned = 1; + } + cb->status = 0; + nic->cbs_avail++; + } + + spin_unlock(&nic->cb_lock); + + /* Recover from running out of Tx resources in xmit_frame */ + if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) + netif_wake_queue(nic->netdev); + + return tx_cleaned; +} + +static void e100_clean_cbs(struct nic *nic) +{ + if (nic->cbs) { + while (nic->cbs_avail != nic->params.cbs.count) { + struct cb *cb = nic->cb_to_clean; + if (cb->skb) { + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); + dev_kfree_skb(cb->skb); + } + nic->cb_to_clean = nic->cb_to_clean->next; + nic->cbs_avail++; + } + dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); + nic->cbs = NULL; + nic->cbs_avail = 0; + } + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = + nic->cbs; +} + +static int e100_alloc_cbs(struct nic *nic) +{ + struct cb *cb; + unsigned int i, count = nic->params.cbs.count; + + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; + nic->cbs_avail = 0; + + nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL, + &nic->cbs_dma_addr); + if (!nic->cbs) + return -ENOMEM; + + for (cb = nic->cbs, i = 0; i < count; cb++, i++) { + cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; + cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; + + cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); + cb->link = cpu_to_le32(nic->cbs_dma_addr + + ((i+1) % count) * sizeof(struct cb)); + } + + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; + nic->cbs_avail = count; + + return 0; +} + +static inline void e100_start_receiver(struct nic *nic, struct rx *rx) +{ + if (!nic->rxs) return; + if (RU_SUSPENDED != nic->ru_running) return; + + /* handle init time starts */ + if (!rx) rx = nic->rxs; + + /* (Re)start RU if suspended or idle and RFA is non-NULL */ + if (rx->skb) { + e100_exec_cmd(nic, ruc_start, rx->dma_addr); + nic->ru_running = RU_RUNNING; + } +} + +#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) +{ + if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) + return -ENOMEM; + + /* Init, and map the RFD. 
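+ * (Note added in editing: each fresh receive skb is seeded with blank_rfd
+ * and mapped DMA_BIDIRECTIONAL because both the driver and the controller
+ * update the RFD header in place afterwards.)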
*/ + skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { + dev_kfree_skb_any(rx->skb); + rx->skb = NULL; + rx->dma_addr = 0; + return -ENOMEM; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + dma_sync_single_for_device(&nic->pdev->dev, + rx->prev->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + } + + return 0; +} + +static int e100_rx_indicate(struct nic *nic, struct rx *rx, + unsigned int *work_done, unsigned int work_to_do) +{ + struct net_device *dev = nic->netdev; + struct sk_buff *skb = rx->skb; + struct rfd *rfd = (struct rfd *)skb->data; + u16 rfd_status, actual_size; + u16 fcs_pad = 0; + + if (unlikely(work_done && *work_done >= work_to_do)) + return -EAGAIN; + + /* Need to sync before taking a peek at cb_complete bit */ + dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); + rfd_status = le16_to_cpu(rfd->status); + + netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, + "status=0x%04X\n", rfd_status); + dma_rmb(); /* read size after status bit */ + + /* If data isn't ready, nothing to indicate */ + if (unlikely(!(rfd_status & cb_complete))) { + /* If the next buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling + * interrupts */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), + DMA_FROM_DEVICE); + return -ENODATA; + } + + /* Get actual data size */ + if (unlikely(dev->features & NETIF_F_RXFCS)) + fcs_pad = 4; + actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; + if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) + actual_size = RFD_BUF_LEN - sizeof(struct rfd); + + /* Get data */ + dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); + + /* If this buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling interrupts. + * This can happen when the RU sees the size change but also sees + * the el bit set. */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) { + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + } + + /* Pull off the RFD and put the actual data (minus eth hdr) */ + skb_reserve(skb, sizeof(struct rfd)); + skb_put(skb, actual_size); + skb->protocol = eth_type_trans(skb, nic->netdev); + + /* If we are receiving all frames, then don't bother + * checking for errors. + */ + if (unlikely(dev->features & NETIF_F_RXALL)) { + if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) + /* Received oversized frame, but keep it. 
*/ + nic->rx_over_length_errors++; + goto process_skb; + } + + if (unlikely(!(rfd_status & cb_ok))) { + /* Don't indicate if hardware indicates errors */ + dev_kfree_skb_any(skb); + } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) { + /* Don't indicate oversized frames */ + nic->rx_over_length_errors++; + dev_kfree_skb_any(skb); + } else { +process_skb: + dev->stats.rx_packets++; + dev->stats.rx_bytes += (actual_size - fcs_pad); + netif_receive_skb(skb); + if (work_done) + (*work_done)++; + } + + rx->skb = NULL; + + return 0; +} + +static void e100_rx_clean(struct nic *nic, unsigned int *work_done, + unsigned int work_to_do) +{ + struct rx *rx; + int restart_required = 0, err = 0; + struct rx *old_before_last_rx, *new_before_last_rx; + struct rfd *old_before_last_rfd, *new_before_last_rfd; + + /* Indicate newly arrived packets */ + for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { + err = e100_rx_indicate(nic, rx, work_done, work_to_do); + /* Hit quota or no more to clean */ + if (-EAGAIN == err || -ENODATA == err) + break; + } + + + /* On EAGAIN, hit quota so have more work to do, restart once + * cleanup is complete. + * Else, are we already rnr? then pay attention!!! this ensures that + * the state machine progression never allows a start with a + * partially cleaned list, avoiding a race between hardware + * and rx_to_clean when in NAPI mode */ + if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) + restart_required = 1; + + old_before_last_rx = nic->rx_to_use->prev->prev; + old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; + + /* Alloc new skbs to refill list */ + for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { + if (unlikely(e100_rx_alloc_skb(nic, rx))) + break; /* Better luck next time (see watchdog) */ + } + + new_before_last_rx = nic->rx_to_use->prev->prev; + if (new_before_last_rx != old_before_last_rx) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer + * without worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this + * buffer. + * When the hardware hits the before last buffer with el-bit + * and size of 0, it will RNR interrupt, the RUS will go into + * the No Resources state. It will not complete nor write to + * this buffer. */ + new_before_last_rfd = + (struct rfd *)new_before_last_rx->skb->data; + new_before_last_rfd->size = 0; + new_before_last_rfd->command |= cpu_to_le16(cb_el); + dma_sync_single_for_device(&nic->pdev->dev, + new_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + + /* Now that we have a new stopping point, we can clear the old + * stopping point. We must sync twice to get the proper + * ordering on the hardware side of things. */ + old_before_last_rfd->command &= ~cpu_to_le16(cb_el); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + + ETH_FCS_LEN); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + } + + if (restart_required) { + // ack the rnr? 
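+ // (Note added in editing: writing stat_ack_rnr back to the STAT/ACK
+ // register acknowledges the pending receive-not-ready event before the
+ // receive unit is restarted below.)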
+ iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); + e100_start_receiver(nic, nic->rx_to_clean); + if (work_done) + (*work_done)++; + } +} + +static void e100_rx_clean_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + + nic->ru_running = RU_UNINITIALIZED; + + if (nic->rxs) { + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + if (rx->skb) { + dma_unmap_single(&nic->pdev->dev, + rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); + dev_kfree_skb(rx->skb); + } + } + kfree(nic->rxs); + nic->rxs = NULL; + } + + nic->rx_to_use = nic->rx_to_clean = NULL; +} + +static int e100_rx_alloc_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + struct rfd *before_last; + + nic->rx_to_use = nic->rx_to_clean = NULL; + nic->ru_running = RU_UNINITIALIZED; + + if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL))) + return -ENOMEM; + + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; + rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; + if (e100_rx_alloc_skb(nic, rx)) { + e100_rx_clean_list(nic); + return -ENOMEM; + } + } + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer without + * worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this buffer. + * When the hardware hits the before last buffer with el-bit and size + * of 0, it will RNR interrupt, the RU will go into the No Resources + * state. It will not complete nor write to this buffer. */ + rx = nic->rxs->prev->prev; + before_last = (struct rfd *)rx->skb->data; + before_last->command |= cpu_to_le16(cb_el); + before_last->size = 0; + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); + + nic->rx_to_use = nic->rx_to_clean = nic->rxs; + nic->ru_running = RU_SUSPENDED; + + return 0; +} + +static irqreturn_t e100_intr(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct nic *nic = netdev_priv(netdev); + u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); + + netif_printk(nic, intr, KERN_DEBUG, nic->netdev, + "stat_ack = 0x%02X\n", stat_ack); + + if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ + stat_ack == stat_ack_not_present) /* Hardware is ejected */ + return IRQ_NONE; + + /* Ack interrupt(s) */ + iowrite8(stat_ack, &nic->csr->scb.stat_ack); + + /* We hit Receive No Resource (RNR); restart RU after cleaning */ + if (stat_ack & stat_ack_rnr) + nic->ru_running = RU_SUSPENDED; + + if (likely(napi_schedule_prep(&nic->napi))) { + e100_disable_irq(nic); + __napi_schedule(&nic->napi); + } + + return IRQ_HANDLED; +} + +static int e100_poll(struct napi_struct *napi, int budget) +{ + struct nic *nic = container_of(napi, struct nic, napi); + unsigned int work_done = 0; + + e100_rx_clean(nic, &work_done, budget); + e100_tx_clean(nic); + + /* If budget fully consumed, continue polling */ + if (work_done == budget) + return budget; + + /* only re-enable interrupt if stack agrees polling is really done */ + if (likely(napi_complete_done(napi, work_done))) + e100_enable_irq(nic); + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void e100_netpoll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_disable_irq(nic); + e100_intr(nic->pdev->irq, netdev); + e100_tx_clean(nic); + e100_enable_irq(nic); +} +#endif + +static int e100_set_mac_address(struct net_device *netdev, void *p) +{ + 
struct nic *nic = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, addr->sa_data); + e100_exec_cb(nic, NULL, e100_setup_iaaddr); + + return 0; +} + +static int e100_asf(struct nic *nic) +{ + /* ASF can be enabled from eeprom */ + return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && + (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && + !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && + ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); +} + +static int e100_up(struct nic *nic) +{ + int err; + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_rx_clean_list; + if ((err = e100_hw_init(nic))) + goto err_clean_cbs; + e100_set_multicast_list(nic->netdev); + e100_start_receiver(nic, NULL); + mod_timer(&nic->watchdog, jiffies); + if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, + nic->netdev->name, nic->netdev))) + goto err_no_irq; + netif_wake_queue(nic->netdev); + napi_enable(&nic->napi); + /* enable ints _after_ enabling poll, preventing a race between + * disable ints+schedule */ + e100_enable_irq(nic); + return 0; + +err_no_irq: + del_timer_sync(&nic->watchdog); +err_clean_cbs: + e100_clean_cbs(nic); +err_rx_clean_list: + e100_rx_clean_list(nic); + return err; +} + +static void e100_down(struct nic *nic) +{ + /* wait here for poll to complete */ + napi_disable(&nic->napi); + netif_stop_queue(nic->netdev); + e100_hw_reset(nic); + free_irq(nic->pdev->irq, nic->netdev); + del_timer_sync(&nic->watchdog); + netif_carrier_off(nic->netdev); + e100_clean_cbs(nic); + e100_rx_clean_list(nic); +} + +static void e100_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct nic *nic = netdev_priv(netdev); + + /* Reset outside of interrupt context, to avoid request_irq + * in interrupt context */ + schedule_work(&nic->tx_timeout_task); +} + +static void e100_tx_timeout_task(struct work_struct *work) +{ + struct nic *nic = container_of(work, struct nic, tx_timeout_task); + struct net_device *netdev = nic->netdev; + + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); + + rtnl_lock(); + if (netif_running(netdev)) { + e100_down(netdev_priv(netdev)); + e100_up(netdev_priv(netdev)); + } + rtnl_unlock(); +} + +static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) +{ + int err; + struct sk_buff *skb; + + /* Use driver resources to perform internal MAC or PHY + * loopback test. A single packet is prepared and transmitted + * in loopback mode, and the test passes if the received + * packet compares byte-for-byte to the transmitted packet. 
*/ + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_clean_rx; + + /* ICH PHY loopback is broken so do MAC loopback instead */ + if (nic->flags & ich && loopback_mode == lb_phy) + loopback_mode = lb_mac; + + nic->loopback = loopback_mode; + if ((err = e100_hw_init(nic))) + goto err_loopback_none; + + if (loopback_mode == lb_phy) + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, + BMCR_LOOPBACK); + + e100_start_receiver(nic, NULL); + + if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { + err = -ENOMEM; + goto err_loopback_none; + } + skb_put(skb, ETH_DATA_LEN); + memset(skb->data, 0xFF, ETH_DATA_LEN); + e100_xmit_frame(skb, nic->netdev); + + msleep(10); + + dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + + if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), + skb->data, ETH_DATA_LEN)) + err = -EAGAIN; + +err_loopback_none: + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); + nic->loopback = lb_none; + e100_clean_cbs(nic); + e100_hw_reset(nic); +err_clean_rx: + e100_rx_clean_list(nic); + return err; +} + +#define MII_LED_CONTROL 0x1B +#define E100_82552_LED_OVERRIDE 0x19 +#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ +#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ + +static int e100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + + mii_ethtool_get_link_ksettings(&nic->mii, cmd); + + return 0; +} + +static int e100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); + err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); + e100_exec_cb(nic, NULL, e100_configure); + + return err; +} + +static void e100_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct nic *nic = netdev_priv(netdev); + strscpy(info->driver, DRV_NAME, sizeof(info->driver)); + strscpy(info->bus_info, pci_name(nic->pdev), + sizeof(info->bus_info)); +} + +#define E100_PHY_REGS 0x1D +static int e100_get_regs_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + /* We know the number of registers, and the size of the dump buffer. + * Calculate the total size in bytes. + */ + return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); +} + +static void e100_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nic *nic = netdev_priv(netdev); + u32 *buff = p; + int i; + + regs->version = (1 << 24) | nic->pdev->revision; + buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | + ioread8(&nic->csr->scb.cmd_lo) << 16 | + ioread16(&nic->csr->scb.status); + for (i = 0; i < E100_PHY_REGS; i++) + /* Note that we read the registers in reverse order. This + * ordering is the ABI apparently used by ethtool and other + * applications. + */ + buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, + E100_PHY_REGS - 1 - i); + memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); + e100_exec_cb(nic, NULL, e100_dump); + msleep(10); + memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, + sizeof(nic->mem->dump_buf)); +} + +static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; + wol->wolopts = (nic->flags & wol_magic) ? 
WAKE_MAGIC : 0; +} + +static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + + if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || + !device_can_wakeup(&nic->pdev->dev)) + return -EOPNOTSUPP; + + if (wol->wolopts) + nic->flags |= wol_magic; + else + nic->flags &= ~wol_magic; + + device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); + + e100_exec_cb(nic, NULL, e100_configure); + + return 0; +} + +static u32 e100_get_msglevel(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->msg_enable; +} + +static void e100_set_msglevel(struct net_device *netdev, u32 value) +{ + struct nic *nic = netdev_priv(netdev); + nic->msg_enable = value; +} + +static int e100_nway_reset(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_nway_restart(&nic->mii); +} + +static u32 e100_get_link(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_link_ok(&nic->mii); +} + +static int e100_get_eeprom_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->eeprom_wc << 1; +} + +#define E100_EEPROM_MAGIC 0x1234 +static int e100_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + eeprom->magic = E100_EEPROM_MAGIC; + memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); + + return 0; +} + +static int e100_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + if (eeprom->magic != E100_EEPROM_MAGIC) + return -EINVAL; + + memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); + + return e100_eeprom_save(nic, eeprom->offset >> 1, + (eeprom->len >> 1) + 1); +} + +static void e100_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + ring->rx_max_pending = rfds->max; + ring->tx_max_pending = cbs->max; + ring->rx_pending = rfds->count; + ring->tx_pending = cbs->count; +} + +static int e100_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(netdev)) + e100_down(nic); + rfds->count = max(ring->rx_pending, rfds->min); + rfds->count = min(rfds->count, rfds->max); + cbs->count = max(ring->tx_pending, cbs->min); + cbs->count = min(cbs->count, cbs->max); + netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", + rfds->count, cbs->count); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} + +static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)", + "Eeprom test (on/offline)", + "Self test (offline)", + "Mac loopback (offline)", + "Phy loopback (offline)", +}; +#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test) + +static void e100_diag_test(struct net_device *netdev, + struct ethtool_test *test, u64 *data) +{ + struct ethtool_cmd cmd; + struct nic *nic = netdev_priv(netdev); + int i; + + memset(data, 0, E100_TEST_LEN * 
sizeof(u64)); + data[0] = !mii_link_ok(&nic->mii); + data[1] = e100_eeprom_load(nic); + if (test->flags & ETH_TEST_FL_OFFLINE) { + + /* save speed, duplex & autoneg settings */ + mii_ethtool_gset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_down(nic); + data[2] = e100_self_test(nic); + data[3] = e100_loopback_test(nic, lb_mac); + data[4] = e100_loopback_test(nic, lb_phy); + + /* restore speed, duplex & autoneg settings */ + mii_ethtool_sset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_up(nic); + } + for (i = 0; i < E100_TEST_LEN; i++) + test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; + + msleep_interruptible(4 * 1000); +} + +static int e100_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct nic *nic = netdev_priv(netdev); + enum led_state { + led_on = 0x01, + led_off = 0x04, + led_on_559 = 0x05, + led_on_557 = 0x07, + }; + u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : + MII_LED_CONTROL; + u16 leds = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; + + case ETHTOOL_ID_ON: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : + (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; + break; + + case ETHTOOL_ID_OFF: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off; + break; + + case ETHTOOL_ID_INACTIVE: + break; + } + + mdio_write(netdev, nic->mii.phy_id, led_reg, leds); + return 0; +} + +static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + /* device-specific stats */ + "tx_deferred", "tx_single_collisions", "tx_multi_collisions", + "tx_flow_control_pause", "rx_flow_control_pause", + "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", + "rx_short_frame_errors", "rx_over_length_errors", +}; +#define E100_NET_STATS_LEN 21 +#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) + +static int e100_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E100_TEST_LEN; + case ETH_SS_STATS: + return E100_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e100_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nic *nic = netdev_priv(netdev); + int i; + + for (i = 0; i < E100_NET_STATS_LEN; i++) + data[i] = ((unsigned long *)&netdev->stats)[i]; + + data[i++] = nic->tx_deferred; + data[i++] = nic->tx_single_collisions; + data[i++] = nic->tx_multiple_collisions; + data[i++] = nic->tx_fc_pause; + data[i++] = nic->rx_fc_pause; + data[i++] = nic->rx_fc_unsupported; + data[i++] = nic->tx_tco_frames; + data[i++] = nic->rx_tco_frames; + data[i++] = nic->rx_short_frame_errors; + data[i++] = nic->rx_over_length_errors; +} + +static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e100_gstrings_test, sizeof(e100_gstrings_test)); + break; + case ETH_SS_STATS: + memcpy(data, e100_gstrings_stats, sizeof(e100_gstrings_stats)); + break; + } +} + +static const struct ethtool_ops e100_ethtool_ops = { + .get_drvinfo = e100_get_drvinfo, + .get_regs_len = e100_get_regs_len, + .get_regs = e100_get_regs, + .get_wol = e100_get_wol, + .set_wol 
= e100_set_wol, + .get_msglevel = e100_get_msglevel, + .set_msglevel = e100_set_msglevel, + .nway_reset = e100_nway_reset, + .get_link = e100_get_link, + .get_eeprom_len = e100_get_eeprom_len, + .get_eeprom = e100_get_eeprom, + .set_eeprom = e100_set_eeprom, + .get_ringparam = e100_get_ringparam, + .set_ringparam = e100_set_ringparam, + .self_test = e100_diag_test, + .get_strings = e100_get_strings, + .set_phys_id = e100_set_phys_id, + .get_ethtool_stats = e100_get_ethtool_stats, + .get_sset_count = e100_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e100_get_link_ksettings, + .set_link_ksettings = e100_set_link_ksettings, +}; + +static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct nic *nic = netdev_priv(netdev); + + return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); +} + +static int e100_alloc(struct nic *nic) +{ + nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem), + &nic->dma_addr, GFP_KERNEL); + return nic->mem ? 0 : -ENOMEM; +} + +static void e100_free(struct nic *nic) +{ + if (nic->mem) { + dma_free_coherent(&nic->pdev->dev, sizeof(struct mem), + nic->mem, nic->dma_addr); + nic->mem = NULL; + } +} + +static int e100_open(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err = 0; + + netif_carrier_off(netdev); + if ((err = e100_up(nic))) + netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); + return err; +} + +static int e100_close(struct net_device *netdev) +{ + e100_down(netdev_priv(netdev)); + return 0; +} + +static int e100_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct nic *nic = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + e100_exec_cb(nic, NULL, e100_configure); + return 1; +} + +static const struct net_device_ops e100_netdev_ops = { + .ndo_open = e100_open, + .ndo_stop = e100_close, + .ndo_start_xmit = e100_xmit_frame, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = e100_set_multicast_list, + .ndo_set_mac_address = e100_set_mac_address, + .ndo_eth_ioctl = e100_do_ioctl, + .ndo_tx_timeout = e100_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e100_netpoll, +#endif + .ndo_set_features = e100_set_features, +}; + +static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct nic *nic; + int err; + + if (!(netdev = alloc_etherdev(sizeof(struct nic)))) + return -ENOMEM; + + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; + + netdev->netdev_ops = &e100_netdev_ops; + netdev->ethtool_ops = &e100_ethtool_ops; + netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + nic = netdev_priv(netdev); + netif_napi_add_weight(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); + nic->netdev = netdev; + nic->pdev = pdev; + nic->msg_enable = (1 << debug) - 1; + nic->mdio_ctrl = mdio_ctrl_hw; + pci_set_drvdata(pdev, netdev); + + if ((err = pci_enable_device(pdev))) { + netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); + goto err_out_free_dev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); + err = -ENODEV; + goto err_out_disable_pdev; 
+ } + + if ((err = pci_request_regions(pdev, DRV_NAME))) { + netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable_pdev; + } + + if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { + netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + if (use_io) + netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); + + nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr)); + if (!nic->csr) { + netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + if (ent->driver_data) + nic->flags |= ich; + else + nic->flags &= ~ich; + + e100_get_defaults(nic); + + /* D100 MAC doesn't allow rx of vlan packets with normal MTU */ + if (nic->mac < mac_82558_D101_A4) + netdev->features |= NETIF_F_VLAN_CHALLENGED; + + /* locks must be initialized before calling hw_reset */ + spin_lock_init(&nic->cb_lock); + spin_lock_init(&nic->cmd_lock); + spin_lock_init(&nic->mdio_lock); + + /* Reset the device before pci_set_master() in case device is in some + * funky state and has an interrupt pending - hint: we don't have the + * interrupt handler registered yet. */ + e100_hw_reset(nic); + + pci_set_master(pdev); + + timer_setup(&nic->watchdog, e100_watchdog, 0); + + INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); + + if ((err = e100_alloc(nic))) { + netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); + goto err_out_iounmap; + } + + if ((err = e100_eeprom_load(nic))) + goto err_out_free; + + e100_phy_init(nic); + + eth_hw_addr_set(netdev, (u8 *)nic->eeprom); + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!eeprom_bad_csum_allow) { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); + err = -EAGAIN; + goto err_out_free; + } else { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); + } + } + + /* Wol magic packet can be enabled from eeprom */ + if ((nic->mac >= mac_82558_D101_A4) && + (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { + nic->flags |= wol_magic; + device_set_wakeup_enable(&pdev->dev, true); + } + + /* ack any pending wake events, disable PME */ + pci_pme_active(pdev, false); + + strcpy(netdev->name, "eth%d"); + if ((err = register_netdev(netdev))) { + netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n"); + goto err_out_free; + } + nic->cbs_pool = dma_pool_create(netdev->name, + &nic->pdev->dev, + nic->params.cbs.max * sizeof(struct cb), + sizeof(u32), + 0); + if (!nic->cbs_pool) { + netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); + err = -ENOMEM; + goto err_out_pool; + } + netif_info(nic, probe, nic->netdev, + "addr 0x%llx, irq %d, MAC addr %pM\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), + pdev->irq, netdev->dev_addr); + + return 0; + +err_out_pool: + unregister_netdev(netdev); +err_out_free: + e100_free(nic); +err_out_iounmap: + pci_iounmap(pdev, nic->csr); +err_out_free_res: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out_free_dev: + free_netdev(netdev); + return err; +} + +static void e100_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netdev) { + struct nic *nic = netdev_priv(netdev); + unregister_netdev(netdev); + e100_free(nic); + pci_iounmap(pdev, nic->csr); + dma_pool_destroy(nic->cbs_pool); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ +#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ +#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ +static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + e100_down(nic); + + if ((nic->flags & wol_magic) | e100_asf(nic)) { + /* enable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, smartspeed | + E100_82552_REV_ANEG | E100_82552_ANEG_NOW); + } + *enable_wake = true; + } else { + *enable_wake = false; + } + + pci_disable_device(pdev); +} + +static int __e100_power_off(struct pci_dev *pdev, bool wake) +{ + if (wake) + return pci_prepare_to_sleep(pdev); + + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int __maybe_unused e100_suspend(struct device *dev_d) +{ + bool wake; + + __e100_shutdown(to_pci_dev(dev_d), &wake); + + return 0; +} + +static int __maybe_unused e100_resume(struct device *dev_d) +{ + struct net_device *netdev = dev_get_drvdata(dev_d); + struct nic *nic = netdev_priv(netdev); + int err; + + err = pci_enable_device(to_pci_dev(dev_d)); + if (err) { + netdev_err(netdev, "Resume cannot enable PCI device, aborting\n"); + return err; + } + pci_set_master(to_pci_dev(dev_d)); + + /* disable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, + smartspeed & ~(E100_82552_REV_ANEG)); + } + + if (netif_running(netdev)) + e100_up(nic); + + netif_device_attach(netdev); + + return 0; +} + +static void e100_shutdown(struct pci_dev *pdev) +{ + bool wake; + __e100_shutdown(pdev, &wake); + if (system_state == SYSTEM_POWER_OFF) + __e100_power_off(pdev, wake); +} + +/* ------------------ PCI Error Recovery infrastructure -------------- */ +/** + * e100_io_error_detected - called when PCI error is detected. + * @pdev: Pointer to PCI device + * @state: The current pci connection state + */ +static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e100_down(nic); + pci_disable_device(pdev); + + /* Request a slot reset. 
*/ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e100_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch. + */ +static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + pr_err("Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + /* Only one device per card can do a reset */ + if (0 != PCI_FUNC(pdev->devfn)) + return PCI_ERS_RESULT_RECOVERED; + e100_hw_reset(nic); + e100_phy_init(nic); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e100_io_resume - resume normal operations + * @pdev: Pointer to PCI device + * + * Resume normal operations after an error recovery + * sequence has been completed. + */ +static void e100_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, PCI_D0, 0); + + netif_device_attach(netdev); + if (netif_running(netdev)) { + e100_open(netdev); + mod_timer(&nic->watchdog, jiffies); + } +} + +static const struct pci_error_handlers e100_err_handler = { + .error_detected = e100_io_error_detected, + .slot_reset = e100_io_slot_reset, + .resume = e100_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume); + +static struct pci_driver e100_driver = { + .name = DRV_NAME, + .id_table = e100_id_table, + .probe = e100_probe, + .remove = e100_remove, + + /* Power Management hooks */ + .driver.pm = &e100_pm_ops, + + .shutdown = e100_shutdown, + .err_handler = &e100_err_handler, +}; + +static int __init e100_init_module(void) +{ + if (((1 << debug) - 1) & NETIF_MSG_DRV) { + pr_info("%s\n", DRV_DESCRIPTION); + pr_info("%s\n", DRV_COPYRIGHT); + } + return pci_register_driver(&e100_driver); +} + +static void __exit e100_cleanup_module(void) +{ + pci_unregister_driver(&e100_driver); +} + +module_init(e100_init_module); +module_exit(e100_cleanup_module); diff --git a/devices/e100-6.4-ethercat.c b/devices/e100-6.4-ethercat.c new file mode 100644 index 00000000..3aaaabb2 --- /dev/null +++ b/devices/e100-6.4-ethercat.c @@ -0,0 +1,3381 @@ +/****************************************************************************** + * + * $Id$ + * + * Copyright (C) 2007-2012 Florian Pose, Ingenieurgemeinschaft IgH + * + * This file is part of the IgH EtherCAT Master. + * + * The IgH EtherCAT Master is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + * The IgH EtherCAT Master is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with the IgH EtherCAT Master; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * --- + * + * The license mentioned above concerns the source code only. Using the + * EtherCAT technology and brand is only permitted in compliance with the + * industrial property and similar rights of Beckhoff Automation GmbH. 
+ * + * --- + * + * vim: noexpandtab + * + *****************************************************************************/ + +/** + \file + EtherCAT driver for e100-compatible NICs. +*/ + +/* Former documentation: */ + +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* + * e100.c: Intel(R) PRO/100 ethernet driver + * + * (Re)written 2003 by scott.feldman@intel.com. Based loosely on + * original e100 driver, but better described as a munging of + * e100, e1000, eepro100, tg3, 8139cp, and other drivers. + * + * References: + * Intel 8255x 10/100 Mbps Ethernet Controller Family, + * Open Source Software Developers Manual, + * http://sourceforge.net/projects/e1000 + * + * + * Theory of Operation + * + * I. General + * + * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet + * controller family, which includes the 82557, 82558, 82559, 82550, + * 82551, and 82562 devices. 82558 and greater controllers + * integrate the Intel 82555 PHY. The controllers are used in + * server and client network interface cards, as well as in + * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx + * configurations. 8255x supports a 32-bit linear addressing + * mode and operates at 33Mhz PCI clock rate. + * + * II. Driver Operation + * + * Memory-mapped mode is used exclusively to access the device's + * shared-memory structure, the Control/Status Registers (CSR). All + * setup, configuration, and control of the device, including queuing + * of Tx, Rx, and configuration commands is through the CSR. + * cmd_lock serializes accesses to the CSR command register. cb_lock + * protects the shared Command Block List (CBL). + * + * 8255x is highly MII-compliant and all access to the PHY go + * through the Management Data Interface (MDI). Consequently, the + * driver leverages the mii.c library shared with other MII-compliant + * devices. + * + * Big- and Little-Endian byte order as well as 32- and 64-bit + * archs are supported. Weak-ordered memory and non-cache-coherent + * archs are supported. + * + * III. Transmit + * + * A Tx skb is mapped and hangs off of a TCB. TCBs are linked + * together in a fixed-size ring (CBL) thus forming the flexible mode + * memory structure. A TCB marked with the suspend-bit indicates + * the end of the ring. The last TCB processed suspends the + * controller, and the controller can be restarted by issue a CU + * resume command to continue from the suspend point, or a CU start + * command to start at a given position in the ring. + * + * Non-Tx commands (config, multicast setup, etc) are linked + * into the CBL ring along with Tx commands. The common structure + * used for both Tx and non-Tx commands is the Command Block (CB). + * + * cb_to_use is the next CB to use for queuing a command; cb_to_clean + * is the next CB to check for completion; cb_to_send is the first + * CB to start on in case of a previous failure to resume. CB clean + * up happens in interrupt context in response to a CU interrupt. + * cbs_avail keeps track of number of free CB resources available. + * + * Hardware padding of short packets to minimum packet size is + * enabled. 82557 pads with 7Eh, while the later controllers pad + * with 00h. + * + * IV. Receive + * + * The Receive Frame Area (RFA) comprises a ring of Receive Frame + * Descriptors (RFD) + data buffer, thus forming the simplified mode + * memory structure. Rx skbs are allocated to contain both the RFD + * and the data buffer, but the RFD is pulled off before the skb is + * indicated. 
The data buffer is aligned such that encapsulated + * protocol headers are u32-aligned. Since the RFD is part of the + * mapped shared memory, and completion status is contained within + * the RFD, the RFD must be dma_sync'ed to maintain a consistent + * view from software and hardware. + * + * In order to keep updates to the RFD link field from colliding with + * hardware writes to mark packets complete, we use the feature that + * hardware will not write to a size 0 descriptor and mark the previous + * packet as end-of-list (EL). After updating the link, we remove EL + * and only then restore the size such that hardware may use the + * previous-to-end RFD. + * + * Under typical operation, the receive unit (RU) is start once, + * and the controller happily fills RFDs as frames arrive. If + * replacement RFDs cannot be allocated, or the RU goes non-active, + * the RU must be restarted. Frame arrival generates an interrupt, + * and Rx indication and re-allocation happen in the same context, + * therefore no locking is required. A software-generated interrupt + * is generated from the watchdog to recover from a failed allocation + * scenario where all Rx resources have been indicated and none re- + * placed. + * + * V. Miscellaneous + * + * VLAN offloading of tagging, stripping and filtering is not + * supported, but driver will accommodate the extra 4-byte VLAN tag + * for processing by upper layers. Tx/Rx Checksum offloading is not + * supported. Tx Scatter/Gather is not supported. Jumbo Frames is + * not supported (hardware limitation). + * + * MagicPacket(tm) WoL support is enabled/disabled via ethtool. + * + * Thanks to JC (jchapman@katalix.com) for helping with + * testing/troubleshooting the development driver. + * + * TODO: + * o several entry points race with dev->close + * o check for tx-no-resources/stop Q races with tx clean/wake Q + * + * FIXES: + * 2005/12/02 - Michael O'Donnell + * - Stratus87247: protect MDI control register manipulations + * 2009/06/01 - Andreas Mohr + * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/hardirq.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/mii.h> +#include <linux/if_vlan.h> +#include <linux/skbuff.h> +#include <linux/ethtool.h> +#include <linux/string.h> +#include <linux/firmware.h> +#include <linux/rtnetlink.h> +#include <net/ip6_checksum.h> + +// EtherCAT includes +#include "../globals.h" +#include "ecdev.h" + +#define DRV_NAME "ec_e100" +#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver, EtherCAT enabled" +#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" + +#define E100_WATCHDOG_PERIOD (2 * HZ) +#define E100_NAPI_WEIGHT 16 + +#define FIRMWARE_D101M "e100/d101m_ucode.bin" +#define FIRMWARE_D101S "e100/d101s_ucode.bin" +#define FIRMWARE_D102E "e100/d102e_ucode.bin" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE(FIRMWARE_D101M); +MODULE_FIRMWARE(FIRMWARE_D101S); +MODULE_FIRMWARE(FIRMWARE_D102E); + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR("Florian Pose "); +MODULE_LICENSE("GPL"); + +void e100_ec_poll(struct net_device *); + +static int debug = 3; +static int eeprom_bad_csum_allow = 0; +static int use_io = 0; +module_param(debug, int, 0); +module_param(eeprom_bad_csum_allow, int, 0); +module_param(use_io, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); +MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
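/*
 * Editor's illustrative sketch -- not part of the patch above.  The
 * "Theory of Operation" comment describes how commands are chained on
 * the Command Block List (CBL): the new tail CB is marked with the
 * suspend (S) bit first, the S-bit of the previous CB is cleared so the
 * controller may run into the new command, and a CU resume is then
 * issued.  The user-space model below shows only that ordering; all
 * demo_* names are hypothetical.  The driver itself does this in
 * e100_exec_cb() under cb_lock, with a dma_wmb() between the two writes
 * and e100_exec_cmd(nic, cuc_resume, ...) to restart the CU.
 */
#include <stdint.h>

#define DEMO_CB_S 0x4000u                 /* suspend bit, cf. cb_s in enum cb_command */

struct demo_cb {
	uint16_t command;                 /* command word carrying the S-bit */
	struct demo_cb *prev;             /* previous CB in the ring */
};

/* Append one command at the ring tail: suspend the new CB first so the
 * controller can never run past an unfinished descriptor, then release
 * the previous CB. */
static void demo_queue_command(struct demo_cb *tail, uint16_t command)
{
	tail->command = (uint16_t)(command | DEMO_CB_S); /* 1. new CB ends the list */
	/* (driver: dma_wmb() orders the two stores for the hardware) */
	tail->prev->command &= (uint16_t)~DEMO_CB_S;     /* 2. hw may continue here */
	/* 3. driver then issues a CU resume to restart command processing */
}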
+ +#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ + PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ + PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } +static const struct pci_device_id e100_id_table[] = { + INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1032, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1033, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1034, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1038, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1039, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103A, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103B, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103C, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103D, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103E, 4), + INTEL_8255X_ETHERNET_DEVICE(0x1050, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1051, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1052, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1053, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1054, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1055, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1056, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1057, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1059, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1064, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1065, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1066, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1067, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1068, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), + INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), + INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), + INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), + INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), + INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), + { 0, } +}; + +// prevent from being loaded automatically +//MODULE_DEVICE_TABLE(pci, e100_id_table); + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 0xFF, +}; + +enum phy { + phy_100a = 0x000003E0, + phy_100c = 0x035002A8, + phy_82555_tx = 0x015002A8, + phy_nsc_tx = 0x5C002000, + phy_82562_et = 0x033002A8, + phy_82562_em = 0x032002A8, + phy_82562_ek = 0x031002A8, + phy_82562_eh = 0x017002A8, + phy_82552_v = 0xd061004d, + phy_unknown = 0xFFFFFFFF, +}; + +/* CSR (Control/Status Registers) */ +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 0x08, + rus_ready = 0x10, + rus_mask = 0x3C, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0x00, + stat_ack_sw_gen = 0x04, + stat_ack_rnr = 0x10, + stat_ack_cu_idle = 0x20, + stat_ack_frame_rx = 0x40, + stat_ack_cu_cmd_done = 0x80, + stat_ack_not_present = 0xFF, + stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), + stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), +}; + +enum scb_cmd_hi { + irq_mask_none = 0x00, + irq_mask_all = 0x01, + 
irq_sw_gen = 0x02, +}; + +enum scb_cmd_lo { + cuc_nop = 0x00, + ruc_start = 0x01, + ruc_load_base = 0x06, + cuc_start = 0x10, + cuc_resume = 0x20, + cuc_dump_addr = 0x40, + cuc_dump_stats = 0x50, + cuc_load_base = 0x60, + cuc_dump_reset = 0x70, +}; + +enum cuc_dump { + cuc_dump_complete = 0x0000A005, + cuc_dump_reset_complete = 0x0000A007, +}; + +enum port { + software_reset = 0x0000, + selftest = 0x0001, + selective_reset = 0x0002, +}; + +enum eeprom_ctrl_lo { + eesk = 0x01, + eecs = 0x02, + eedi = 0x04, + eedo = 0x08, +}; + +enum mdi_ctrl { + mdi_write = 0x04000000, + mdi_read = 0x08000000, + mdi_ready = 0x10000000, +}; + +enum eeprom_op { + op_write = 0x05, + op_read = 0x06, + op_ewds = 0x10, + op_ewen = 0x13, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 0x03, + eeprom_phy_iface = 0x06, + eeprom_id = 0x0A, + eeprom_config_asf = 0x0D, + eeprom_smbus_addr = 0x90, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 0x0080, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB, + I82553C, + I82503, + DP83840, + S80C240, + S80C24, + I82555, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 0x0020, +}; + +enum eeprom_config_asf { + eeprom_asf = 0x8000, + eeprom_gcl = 0x4000, +}; + +enum cb_status { + cb_complete = 0x8000, + cb_ok = 0x2000, +}; + +/* + * cb_command - Command Block flags + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory + */ +enum cb_command { + cb_nop = 0x0000, + cb_iaaddr = 0x0001, + cb_config = 0x0002, + cb_multi = 0x0003, + cb_tx = 0x0004, + cb_ucode = 0x0005, + cb_dump = 0x0006, + cb_tx_sf = 0x0008, + cb_tx_nc = 0x0010, + cb_cid = 0x1f00, + cb_i = 0x2000, + cb_s = 0x4000, + cb_el = 0x8000, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next, *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +#if defined(__BIG_ENDIAN_BITFIELD) +#define X(a,b) b,a +#else +#define X(a,b) a,b +#endif +struct config { +/*0*/ u8 X(byte_count:6, pad0:2); +/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); +/*2*/ u8 adaptive_ifs; +/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), + term_write_cache_line:1), pad3:4); +/*4*/ u8 X(rx_dma_max_count:7, pad4:1); +/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); +/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), + tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), + rx_save_overruns : 1), rx_save_bad_frames : 1); +/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), + pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), + tx_dynamic_tbd:1); +/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); +/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), + link_status_wake:1), arp_wake:1), mcmatch_wake:1); +/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), + loopback:2); +/*11*/ u8 X(linear_priority:3, pad11:5); +/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); +/*13*/ u8 ip_addr_lo; +/*14*/ u8 ip_addr_hi; +/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), + wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), + pad15_2:1), crs_or_cdt:1); +/*16*/ u8 fc_delay_lo; +/*17*/ u8 fc_delay_hi; +/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), + rx_long_ok:1), fc_priority_threshold:3), pad18:1); +/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), + fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), + full_duplex_force:1), 
full_duplex_pin:1); +/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1); +/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); +/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); + u8 pad_d102[9]; +}; + +#define E100_MAX_MULTICAST_ADDRS 64 +struct multi { + __le16 count; + u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; +}; + +/* Important: keep total struct u32-aligned */ +#define UCODE_SIZE 134 +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[ETH_ALEN]; + __le32 ucode[UCODE_SIZE]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next, *prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, lb_mac = 1, lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames, tx_max_collisions, tx_late_collisions, + tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, + tx_multiple_collisions, tx_total_collisions; + __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors, + rx_resource_errors, rx_overrun_errors, rx_cdt_errors, + rx_short_frame_errors; + __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; + __le16 xmt_tco_frames, rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + /* Begin: frequently used values: keep adjacent for cache effect */ + u32 msg_enable ____cacheline_aligned; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data); + + struct rx *rxs ____cacheline_aligned; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + + spinlock_t cb_lock ____cacheline_aligned; + spinlock_t cmd_lock; + struct csr __iomem *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + /* End: frequently used values: keep adjacent for cache effect */ + + enum { + ich = (1 << 0), + promiscuous = (1 << 1), + multicast_all = (1 << 2), + wol_magic = (1 << 3), + ich_10h_workaround = (1 << 4), + } flags ____cacheline_aligned; + + enum mac mac; + enum phy phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + + struct mem *mem; + dma_addr_t dma_addr; + + struct dma_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + + u16 eeprom_wc; + + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +static inline void e100_write_flush(struct nic *nic) +{ + /* Flush previous PCI writes through intermediate bridges + * by doing a benign read */ + (void)ioread8(&nic->csr->scb.status); +} + +static void 
e100_enable_irq(struct nic *nic) +{ + unsigned long flags; + + if (nic->ecdev) + return; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_disable_irq(struct nic *nic) +{ + unsigned long flags = 0; + + if (!nic->ecdev) + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_hw_reset(struct nic *nic) +{ + /* Put CU and RU into idle with a selective reset to get + * device off of PCI bus */ + iowrite32(selective_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Now fully reset device */ + iowrite32(software_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Mask off our interrupt line - it's unmasked after reset */ + e100_disable_irq(nic); +} + +static int e100_self_test(struct nic *nic) +{ + u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); + + /* Passing the self-test is a pretty good indication + * that the device can DMA to/from host memory */ + + nic->mem->selftest.signature = 0; + nic->mem->selftest.result = 0xFFFFFFFF; + + iowrite32(selftest | dma_addr, &nic->csr->port); + e100_write_flush(nic); + /* Wait 10 msec for self-test to complete */ + msleep(10); + + /* Interrupts are enabled after self-test */ + e100_disable_irq(nic); + + /* Check results of self-test */ + if (nic->mem->selftest.result != 0) { + netif_err(nic, hw, nic->netdev, + "Self-test failed: result=0x%08X\n", + nic->mem->selftest.result); + return -ETIMEDOUT; + } + if (nic->mem->selftest.signature == 0) { + netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data) +{ + u32 cmd_addr_data[3]; + u8 ctrl; + int i, j; + + /* Three cmds: write/erase enable, write data, write/erase disable */ + cmd_addr_data[0] = op_ewen << (addr_len - 2); + cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | + le16_to_cpu(data); + cmd_addr_data[2] = op_ewds << (addr_len - 2); + + /* Bit-bang cmds to write word to eeprom */ + for (j = 0; j < 3; j++) { + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data[j] & (1 << i)) ? + eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } + /* Wait 10 msec for cmd to complete */ + msleep(10); + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } +}; + +/* General technique stolen from the eepro100 driver - very clever */ +static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) +{ + u32 cmd_addr_data; + u16 data = 0; + u8 ctrl; + int i; + + cmd_addr_data = ((op_read << *addr_len) | addr) << 16; + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Bit-bang to read word from eeprom */ + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data & (1 << i)) ? 
eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Eeprom drives a dummy zero to EEDO after receiving + * complete address. Use this to adjust addr_len. */ + ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); + if (!(ctrl & eedo) && i > 16) { + *addr_len -= (i - 16); + i = 17; + } + + data = (data << 1) | (ctrl & eedo ? 1 : 0); + } + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + return cpu_to_le16(data); +}; + +/* Load entire EEPROM image into driver cache and validate checksum */ +static int e100_eeprom_load(struct nic *nic) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + for (addr = 0; addr < nic->eeprom_wc; addr++) { + nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); + if (addr < nic->eeprom_wc - 1) + checksum += le16_to_cpu(nic->eeprom[addr]); + } + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { + netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); + if (!eeprom_bad_csum_allow) + return -EAGAIN; + } + + return 0; +} + +/* Save (portion of) driver EEPROM cache to device and update checksum */ +static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + if (start + count >= nic->eeprom_wc) + return -EINVAL; + + for (addr = start; addr < start + count; addr++) + e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + for (addr = 0; addr < nic->eeprom_wc - 1; addr++) + checksum += le16_to_cpu(nic->eeprom[addr]); + nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); + e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, + nic->eeprom[nic->eeprom_wc - 1]); + + return 0; +} + +#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! 
*/ +#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ +static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) +{ + unsigned long flags = 0; + unsigned int i; + int err = 0; + + if (!nic->ecdev) + spin_lock_irqsave(&nic->cmd_lock, flags); + + /* Previous command is accepted when SCB clears */ + for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { + if (likely(!ioread8(&nic->csr->scb.cmd_lo))) + break; + cpu_relax(); + if (unlikely(i > E100_WAIT_SCB_FAST)) + udelay(5); + } + if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) { + err = -EAGAIN; + goto err_unlock; + } + + if (unlikely(cmd != cuc_resume)) + iowrite32(dma_addr, &nic->csr->scb.gen_ptr); + iowrite8(cmd, &nic->csr->scb.cmd_lo); + +err_unlock: + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->cmd_lock, flags); + + return err; +} + +static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, + int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) +{ + struct cb *cb; + unsigned long flags; + int err; + + if (!nic->ecdev) { + spin_lock_irqsave(&nic->cb_lock, flags); + } + + if (unlikely(!nic->cbs_avail)) { + err = -ENOMEM; + goto err_unlock; + } + + cb = nic->cb_to_use; + nic->cb_to_use = cb->next; + nic->cbs_avail--; + cb->skb = skb; + + err = cb_prepare(nic, cb, skb); + if (err) + goto err_unlock; + + if (unlikely(!nic->cbs_avail)) + err = -ENOSPC; + + + /* Order is important otherwise we'll be in a race with h/w: + * set S-bit in current first, then clear S-bit in previous. */ + cb->command |= cpu_to_le16(cb_s); + dma_wmb(); + cb->prev->command &= cpu_to_le16(~cb_s); + + while (nic->cb_to_send != nic->cb_to_use) { + if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, + nic->cb_to_send->dma_addr))) { + /* Ok, here's where things get sticky. It's + * possible that we can't schedule the command + * because the controller is too busy, so + * let's just queue the command and try again + * when another command is scheduled. */ + if (err == -ENOSPC) { + //request a reset + schedule_work(&nic->tx_timeout_task); + } + break; + } else { + nic->cuc_cmd = cuc_resume; + nic->cb_to_send = nic->cb_to_send->next; + } + } + +err_unlock: + if (!nic->ecdev) { + spin_unlock_irqrestore(&nic->cb_lock, flags); + } + + return err; +} + +static int mdio_read(struct net_device *netdev, int addr, int reg) +{ + struct nic *nic = netdev_priv(netdev); + return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); +} + +static void mdio_write(struct net_device *netdev, int addr, int reg, int data) +{ + struct nic *nic = netdev_priv(netdev); + + nic->mdio_ctrl(nic, addr, mdi_write, reg, data); +} + +/* the standard mdio_ctrl() function for usual MII-compliant hardware */ +static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) +{ + u32 data_out = 0; + unsigned int i; + unsigned long flags = 0; + + + /* + * Stratus87247: we shouldn't be writing the MDI control + * register until the Ready bit shows True. Also, since + * manipulation of the MDI control registers is a multi-step + * procedure it should be done under lock. 
+ */ + if (!nic->ecdev) + spin_lock_irqsave(&nic->mdio_lock, flags); + for (i = 100; i; --i) { + if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) + break; + udelay(20); + } + if (unlikely(!i)) { + netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->mdio_lock, flags); + return 0; /* No way to indicate timeout error */ + } + iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); + + for (i = 0; i < 100; i++) { + udelay(20); + if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) + break; + } + if (!nic->ecdev) + spin_unlock_irqrestore(&nic->mdio_lock, flags); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data, data_out); + return (u16)data_out; +} + +/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */ +static u16 mdio_ctrl_phy_82552_v(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + if ((reg == MII_BMCR) && (dir == mdi_write)) { + if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) { + u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, + MII_ADVERTISE); + + /* + * Workaround Si issue where sometimes the part will not + * autoneg to 100Mbps even when advertised. + */ + if (advert & ADVERTISE_100FULL) + data |= BMCR_SPEED100 | BMCR_FULLDPLX; + else if (advert & ADVERTISE_100HALF) + data |= BMCR_SPEED100; + } + } + return mdio_ctrl_hw(nic, addr, dir, reg, data); +} + +/* Fully software-emulated mdio_ctrl() function for cards without + * MII-compliant PHYs. + * For now, this is mainly geared towards 80c24 support; in case of further + * requirements for other types (i82503, ...?) either extend this mechanism + * or split it, whichever is cleaner. + */ +static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + /* might need to allocate a netdev_priv'ed register array eventually + * to be able to record state changes, but for now + * some fully hardcoded register handling ought to be ok I guess. */ + + if (dir == mdi_read) { + switch (reg) { + case MII_BMCR: + /* Auto-negotiation, right? */ + return BMCR_ANENABLE | + BMCR_FULLDPLX; + case MII_BMSR: + return BMSR_LSTATUS /* for mii_link_ok() */ | + BMSR_ANEGCAPABLE | + BMSR_10FULL; + case MII_ADVERTISE: + /* 80c24 is a "combo card" PHY, right? */ + return ADVERTISE_10HALF | + ADVERTISE_10FULL; + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } else { + switch (reg) { + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } +} +static inline int e100_phy_supports_mii(struct nic *nic) +{ + /* for now, just check it by comparing whether we + are using MII software emulation. + */ + return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); +} + +static void e100_get_defaults(struct nic *nic) +{ + struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; + struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; + + /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ + nic->mac = (nic->flags & ich) ? 
mac_82559_D101M : nic->pdev->revision; + if (nic->mac == mac_unknown) + nic->mac = mac_82557_D100_A; + + nic->params.rfds = rfds; + nic->params.cbs = cbs; + + /* Quadwords to DMA into FIFO before starting frame transmit */ + nic->tx_threshold = 0xE0; + + /* no interrupt for every tx completion, delay = 256us if not 557 */ + nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | + ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); + + /* Template for a freshly allocated RFD */ + nic->blank_rfd.command = 0; + nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); + nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); + + /* MII setup */ + nic->mii.phy_id_mask = 0x1F; + nic->mii.reg_num_mask = 0x1F; + nic->mii.dev = nic->netdev; + nic->mii.mdio_read = mdio_read; + nic->mii.mdio_write = mdio_write; +} + +static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct config *config = &cb->u.config; + u8 *c = (u8 *)config; + struct net_device *netdev = nic->netdev; + + cb->command = cpu_to_le16(cb_config); + + memset(config, 0, sizeof(struct config)); + + config->byte_count = 0x16; /* bytes in this struct */ + config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ + config->direct_rx_dma = 0x1; /* reserved */ + config->standard_tcb = 0x1; /* 1=standard, 0=extended */ + config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ + config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ + config->tx_underrun_retry = 0x3; /* # of underrun retries */ + if (e100_phy_supports_mii(nic)) + config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ + config->pad10 = 0x6; + config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ + config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ + config->ifs = 0x6; /* x16 = inter frame spacing */ + config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ + config->pad15_1 = 0x1; + config->pad15_2 = 0x1; + config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ + config->fc_delay_hi = 0x40; /* time delay for fc frame */ + config->tx_padding = 0x1; /* 1=pad short frames */ + config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ + config->pad18 = 0x1; + config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ + config->pad20_1 = 0x1F; + config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ + config->pad21_1 = 0x5; + + config->adaptive_ifs = nic->adaptive_ifs; + config->loopback = nic->loopback; + + if (nic->mii.force_media && nic->mii.full_duplex) + config->full_duplex_force = 0x1; /* 1=force, 0=auto */ + + if (nic->flags & promiscuous || nic->loopback) { + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + config->promiscuous_mode = 0x1; /* 1=on, 0=off */ + } + + if (unlikely(netdev->features & NETIF_F_RXFCS)) + config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ + + if (nic->flags & multicast_all) + config->multicast_all = 0x1; /* 1=accept, 0=no */ + + /* disable WoL when up */ + if (nic->ecdev || + (netif_running(nic->netdev) || !(nic->flags & wol_magic))) + config->magic_packet_disable = 0x1; /* 1=off, 0=on */ + + if (nic->mac >= mac_82558_D101_A4) { + config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ + config->mwi_enable = 0x1; /* 1=enable, 0=disable */ + config->standard_tcb = 0x0; /* 1=standard, 0=extended */ + config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ + if (nic->mac >= mac_82559_D101M) { + config->tno_intr = 0x1; /* TCO stats enable */ + /* Enable TCO in extended config */ + if (nic->mac >= 
mac_82551_10) { + config->byte_count = 0x20; /* extended bytes */ + config->rx_d102_mode = 0x1; /* GMRC for TCO */ + } + } else { + config->standard_stat_counter = 0x0; + } + } + + if (netdev->features & NETIF_F_RXALL) { + config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + } + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", + c + 0); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", + c + 8); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", + c + 16); + return 0; +} + +/************************************************************************* +* CPUSaver parameters +* +* All CPUSaver parameters are 16-bit literals that are part of a +* "move immediate value" instruction. By changing the value of +* the literal in the instruction before the code is loaded, the +* driver can change the algorithm. +* +* INTDELAY - This loads the dead-man timer with its initial value. +* When this timer expires the interrupt is asserted, and the +* timer is reset each time a new packet is received. (see +* BUNDLEMAX below to set the limit on number of chained packets) +* The current default is 0x600 or 1536. Experiments show that +* the value should probably stay within the 0x200 - 0x1000. +* +* BUNDLEMAX - +* This sets the maximum number of frames that will be bundled. In +* some situations, such as the TCP windowing algorithm, it may be +* better to limit the growth of the bundle size than let it go as +* high as it can, because that could cause too much added latency. +* The default is six, because this is the number of packets in the +* default TCP window size. A value of 1 would make CPUSaver indicate +* an interrupt for every frame received. If you do not want to put +* a limit on the bundle size, set this value to xFFFF. +* +* BUNDLESMALL - +* This contains a bit-mask describing the minimum size frame that +* will be bundled. The default masks the lower 7 bits, which means +* that any frame less than 128 bytes in length will not be bundled, +* but will instead immediately generate an interrupt. This does +* not affect the current bundle in any way. Any frame that is 128 +* bytes or large will be bundled normally. This feature is meant +* to provide immediate indication of ACK frames in a TCP environment. +* Customers were seeing poor performance when a machine with CPUSaver +* enabled was sending but not receiving. The delay introduced when +* the ACKs were received was enough to reduce total throughput, because +* the sender would sit idle until the ACK was finally seen. +* +* The current default is 0xFF80, which masks out the lower 7 bits. +* This means that any frame which is x7F (127) bytes or smaller +* will cause an immediate interrupt. Because this value must be a +* bit mask, there are only a few valid values that can be used. To +* turn this feature off, the driver can write the value xFFFF to the +* lower word of this instruction (in the same way that the other +* parameters are used). Likewise, a value of 0xF800 (2047) would +* cause an interrupt to be generated for every frame, because all +* standard Ethernet frames are <= 2047 bytes in length. 
+*************************************************************************/ + +/* if you wish to disable the ucode functionality, while maintaining the + * workarounds it provides, set the following defines to: + * BUNDLESMALL 0 + * BUNDLEMAX 1 + * INTDELAY 1 + */ +#define BUNDLESMALL 1 +#define BUNDLEMAX (u16)6 +#define INTDELAY (u16)1536 /* 0x600 */ + +/* Initialize firmware */ +static const struct firmware *e100_request_firmware(struct nic *nic) +{ + const char *fw_name; + const struct firmware *fw = nic->fw; + u8 timer, bundle, min_size; + int err = 0; + bool required = false; + + /* do not load u-code for ICH devices */ + if (nic->flags & ich) + return NULL; + + /* Search for ucode match against h/w revision + * + * Based on comments in the source code for the FreeBSD fxp + * driver, the FIRMWARE_D102E ucode includes both CPUSaver and + * + * "fixes for bugs in the B-step hardware (specifically, bugs + * with Inline Receive)." + * + * So we must fail if it cannot be loaded. + * + * The other microcode files are only required for the optional + * CPUSaver feature. Nice to have, but no reason to fail. + */ + if (nic->mac == mac_82559_D101M) { + fw_name = FIRMWARE_D101M; + } else if (nic->mac == mac_82559_D101S) { + fw_name = FIRMWARE_D101S; + } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { + fw_name = FIRMWARE_D102E; + required = true; + } else { /* No ucode on other devices */ + return NULL; + } + + /* If the firmware has not previously been loaded, request a pointer + * to it. If it was previously loaded, we are reinitializing the + * adapter, possibly in a resume from hibernate, in which case + * request_firmware() cannot be used. + */ + if (!fw) + err = request_firmware(&fw, fw_name, &nic->pdev->dev); + + if (err) { + if (required) { + netif_err(nic, probe, nic->netdev, + "Failed to load firmware \"%s\": %d\n", + fw_name, err); + return ERR_PTR(err); + } else { + netif_info(nic, probe, nic->netdev, + "CPUSaver disabled. Needs \"%s\": %d\n", + fw_name, err); + return NULL; + } + } + + /* Firmware should be precisely UCODE_SIZE (words) plus three bytes + indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ + if (fw->size != UCODE_SIZE * 4 + 3) { + netif_err(nic, probe, nic->netdev, + "Firmware \"%s\" has wrong size %zu\n", + fw_name, fw->size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || + min_size >= UCODE_SIZE) { + netif_err(nic, probe, nic->netdev, + "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", + fw_name, timer, bundle, min_size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* OK, firmware is validated and ready to use. Save a pointer + * to it in the nic */ + nic->fw = fw; + return fw; +} + +static int e100_setup_ucode(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + const struct firmware *fw = (void *)skb; + u8 timer, bundle, min_size; + + /* It's not a real skb; we just abused the fact that e100_exec_cb + will pass it through to here... 
*/ + cb->skb = NULL; + + /* firmware is stored as little endian already */ + memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + /* Insert user-tunable settings in cb->u.ucode */ + cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); + cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); + cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); + + cb->command = cpu_to_le16(cb_ucode | cb_el); + return 0; +} + +static inline int e100_load_ucode_wait(struct nic *nic) +{ + const struct firmware *fw; + int err = 0, counter = 50; + struct cb *cb = nic->cb_to_clean; + + fw = e100_request_firmware(nic); + /* If it's NULL, then no ucode is required */ + if (IS_ERR_OR_NULL(fw)) + return PTR_ERR_OR_ZERO(fw); + + if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) + netif_err(nic, probe, nic->netdev, + "ucode cmd failed with error %d\n", err); + + /* must restart cuc */ + nic->cuc_cmd = cuc_start; + + /* wait for completion */ + e100_write_flush(nic); + udelay(10); + + /* wait for possibly (ouch) 500ms */ + while (!(cb->status & cpu_to_le16(cb_complete))) { + msleep(10); + if (!--counter) break; + } + + /* ack any interrupts, something could have been set */ + iowrite8(~0, &nic->csr->scb.stat_ack); + + /* if the command failed, or is not OK, notify and return */ + if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { + netif_err(nic, probe, nic->netdev, "ucode load failed\n"); + err = -EPERM; + } + + return err; +} + +static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_iaaddr); + memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); + return 0; +} + +static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_dump); + cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + + offsetof(struct mem, dump_buf)); + return 0; +} + +static int e100_phy_check_without_mii(struct nic *nic) +{ + u8 phy_type; + int without_mii; + + phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; + + switch (phy_type) { + case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ + case I82503: /* Non-MII PHY; UNTESTED! */ + case S80C24: /* Non-MII PHY; tested and working */ + /* paragraph from the FreeBSD driver, "FXP_PHY_80C24": + * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter + * doesn't have a programming interface of any sort. The + * media is sensed automatically based on how the link partner + * is configured. This is, in essence, manual configuration. + */ + netif_info(nic, probe, nic->netdev, + "found MII-less i82503 or 80c24 or other PHY\n"); + + nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; + nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ + + /* these might be needed for certain MII-less cards... 
+ * nic->flags |= ich; + * nic->flags |= ich_10h_workaround; */ + + without_mii = 1; + break; + default: + without_mii = 0; + break; + } + return without_mii; +} + +#define NCONFIG_AUTO_SWITCH 0x0080 +#define MII_NSC_CONG MII_RESV1 +#define NSC_CONG_ENABLE 0x0100 +#define NSC_CONG_TXREADY 0x0400 +static int e100_phy_init(struct nic *nic) +{ + struct net_device *netdev = nic->netdev; + u32 addr; + u16 bmcr, stat, id_lo, id_hi, cong; + + /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ + for (addr = 0; addr < 32; addr++) { + nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) + break; + } + if (addr == 32) { + /* uhoh, no PHY detected: check whether we seem to be some + * weird, rare variant which is *known* to not have any MII. + * But do this AFTER MII checking only, since this does + * lookup of EEPROM values which may easily be unreliable. */ + if (e100_phy_check_without_mii(nic)) + return 0; /* simply return and hope for the best */ + else { + /* for unknown cases log a fatal error */ + netif_err(nic, hw, nic->netdev, + "Failed to locate any known PHY, aborting\n"); + return -EAGAIN; + } + } else + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy_addr = %d\n", nic->mii.phy_id); + + /* Get phy ID */ + id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); + id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); + nic->phy = (u32)id_hi << 16 | (u32)id_lo; + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy ID = 0x%08X\n", nic->phy); + + /* Select the phy and isolate the rest */ + for (addr = 0; addr < 32; addr++) { + if (addr != nic->mii.phy_id) { + mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); + } else if (nic->phy != phy_82552_v) { + bmcr = mdio_read(netdev, addr, MII_BMCR); + mdio_write(netdev, addr, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } + } + /* + * Workaround for 82552: + * Clear the ISOLATE bit on selected phy_id last (mirrored on all + * other phy_id's) using bmcr value from addr discovery loop above. + */ + if (nic->phy == phy_82552_v) + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* Handle National tx phys */ +#define NCS_PHY_MODEL_MASK 0xFFF0FFFF + if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { + /* Disable congestion control */ + cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); + cong |= NSC_CONG_TXREADY; + cong &= ~NSC_CONG_ENABLE; + mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); + } + + if (nic->phy == phy_82552_v) { + u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); + + /* assign special tweaked mdio_ctrl() function */ + nic->mdio_ctrl = mdio_ctrl_phy_82552_v; + + /* Workaround Si not advertising flow-control during autoneg */ + advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); + + /* Reset for the above changes to take effect */ + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + bmcr |= BMCR_RESET; + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); + } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && + (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && + (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { + /* enable/disable MDI/MDI-X auto-switching. 
*/ + mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, + nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); + } + + return 0; +} + +static int e100_hw_init(struct nic *nic) +{ + int err = 0; + + e100_hw_reset(nic); + + netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); + if ((err = e100_self_test(nic))) + return err; + + if ((err = e100_phy_init(nic))) + return err; + if ((err = e100_exec_cmd(nic, cuc_load_base, 0))) + return err; + if ((err = e100_exec_cmd(nic, ruc_load_base, 0))) + return err; + if ((err = e100_load_ucode_wait(nic))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_configure))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_addr, + nic->dma_addr + offsetof(struct mem, stats)))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0))) + return err; + + e100_disable_irq(nic); + + return 0; +} + +static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct net_device *netdev = nic->netdev; + struct netdev_hw_addr *ha; + u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); + + cb->command = cpu_to_le16(cb_multi); + cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == count) + break; + memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, + ETH_ALEN); + } + return 0; +} + +static void e100_set_multicast_list(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "mc_count=%d, flags=0x%04X\n", + netdev_mc_count(netdev), netdev->flags); + + if (netdev->flags & IFF_PROMISC) + nic->flags |= promiscuous; + else + nic->flags &= ~promiscuous; + + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS) + nic->flags |= multicast_all; + else + nic->flags &= ~multicast_all; + + e100_exec_cb(nic, NULL, e100_configure); + e100_exec_cb(nic, NULL, e100_multi); +} + +static void e100_update_stats(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct net_device_stats *ns = &dev->stats; + struct stats *s = &nic->mem->stats; + __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : + (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : + &s->complete; + + /* Device's stats reporting may take several microseconds to + * complete, so we're always waiting for results of the + * previous command. 
*/ + + if (*complete == cpu_to_le32(cuc_dump_reset_complete)) { + *complete = 0; + nic->tx_frames = le32_to_cpu(s->tx_good_frames); + nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); + ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); + ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); + ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); + ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); + ns->collisions += nic->tx_collisions; + ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + + le32_to_cpu(s->tx_lost_crs); + nic->rx_short_frame_errors += + le32_to_cpu(s->rx_short_frame_errors); + ns->rx_length_errors = nic->rx_short_frame_errors + + nic->rx_over_length_errors; + ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); + ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); + ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); + ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + + le32_to_cpu(s->rx_alignment_errors) + + le32_to_cpu(s->rx_short_frame_errors) + + le32_to_cpu(s->rx_cdt_errors); + nic->tx_deferred += le32_to_cpu(s->tx_deferred); + nic->tx_single_collisions += + le32_to_cpu(s->tx_single_collisions); + nic->tx_multiple_collisions += + le32_to_cpu(s->tx_multiple_collisions); + if (nic->mac >= mac_82558_D101_A4) { + nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); + nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); + nic->rx_fc_unsupported += + le32_to_cpu(s->fc_rcv_unsupported); + if (nic->mac >= mac_82559_D101M) { + nic->tx_tco_frames += + le16_to_cpu(s->xmt_tco_frames); + nic->rx_tco_frames += + le16_to_cpu(s->rcv_tco_frames); + } + } + } + + + if (e100_exec_cmd(nic, cuc_dump_reset, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_dump_reset failed\n"); +} + +static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) +{ + /* Adjust inter-frame-spacing (IFS) between two transmits if + * we're getting collisions on a half-duplex connection. */ + + if (duplex == DUPLEX_HALF) { + u32 prev = nic->adaptive_ifs; + u32 min_frames = (speed == SPEED_100) ? 1000 : 100; + + if ((nic->tx_frames / 32 < nic->tx_collisions) && + (nic->tx_frames > min_frames)) { + if (nic->adaptive_ifs < 60) + nic->adaptive_ifs += 5; + } else if (nic->tx_frames < min_frames) { + if (nic->adaptive_ifs >= 5) + nic->adaptive_ifs -= 5; + } + if (nic->adaptive_ifs != prev) + e100_exec_cb(nic, NULL, e100_configure); + } +} + +static void e100_watchdog_impl(struct nic *nic) +{ + struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; + u32 speed; + + if (nic->ecdev) { + ecdev_set_link(nic->ecdev, mii_link_ok(&nic->mii) ? 1 : 0); + return; + } + + netif_printk(nic, timer, KERN_DEBUG, nic->netdev, + "right now = %ld\n", jiffies); + + /* mii library handles link maintenance tasks */ + + mii_ethtool_gset(&nic->mii, &cmd); + speed = ethtool_cmd_speed(&cmd); + + if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", + speed == SPEED_100 ? 100 : 10, + cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); + } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Down\n"); + } + + mii_check_link(&nic->mii); + + /* Software generated interrupt to recover from (rare) Rx + * allocation failure. 
+ * Unfortunately have to use a spinlock to not re-enable interrupts + * accidentally, due to hardware that shares a register between the + * interrupt mask bit and the SW Interrupt generation bit */ + spin_lock_irq(&nic->cmd_lock); + iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irq(&nic->cmd_lock); + + e100_update_stats(nic); + e100_adjust_adaptive_ifs(nic, speed, cmd.duplex); + + if (nic->mac <= mac_82557_D100_C) + /* Issue a multicast command to workaround a 557 lock up */ + e100_set_multicast_list(nic->netdev); + + if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) + /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ + nic->flags |= ich_10h_workaround; + else + nic->flags &= ~ich_10h_workaround; +} + +static void e100_watchdog(struct timer_list *t) +{ + struct nic *nic = from_timer(nic, t, watchdog); + + e100_watchdog_impl(nic); + + mod_timer(&nic->watchdog, + round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); +} + +static int e100_xmit_prepare(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + dma_addr_t dma_addr; + cb->command = nic->tx_command; + + dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + /* If we can't map the skb, have the upper layer try later */ + if (dma_mapping_error(&nic->pdev->dev, dma_addr)) + return -ENOMEM; + + /* + * Use the last 4 bytes of the SKB payload packet as the CRC, used for + * testing, ie sending frames with bad CRC. + */ + if (unlikely(skb->no_fcs)) + cb->command |= cpu_to_le16(cb_tx_nc); + else + cb->command &= ~cpu_to_le16(cb_tx_nc); + + /* interrupt every 16 packets regardless of delay */ + if ((nic->cbs_avail & ~15) == nic->cbs_avail) + cb->command |= cpu_to_le16(cb_i); + cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); + cb->u.tcb.tcb_byte_count = 0; + cb->u.tcb.threshold = nic->tx_threshold; + cb->u.tcb.tbd_count = 1; + cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); + cb->u.tcb.tbd.size = cpu_to_le16(skb->len); + skb_tx_timestamp(skb); + return 0; +} + +static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + if (nic->flags & ich_10h_workaround) { + /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. + Issue a NOP command followed by a 1us delay before + issuing the Tx command. */ + if (e100_exec_cmd(nic, cuc_nop, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_nop failed\n"); + udelay(1); + } + + err = e100_exec_cb(nic, skb, e100_xmit_prepare); + + switch (err) { + case -ENOSPC: + /* We queued the skb, but now we're out of space. */ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "No space for CB\n"); + if (!nic->ecdev) + netif_stop_queue(netdev); + break; + case -ENOMEM: + /* This is a hard error - log it. 
*/ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "Out of Tx resources, returning skb\n"); + if (!nic->ecdev) + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +static int e100_tx_clean(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct cb *cb; + int tx_cleaned = 0; + + if (!nic->ecdev) + spin_lock(&nic->cb_lock); + + /* Clean CBs marked complete */ + for (cb = nic->cb_to_clean; + cb->status & cpu_to_le16(cb_complete); + cb = nic->cb_to_clean = cb->next) { + dma_rmb(); /* read skb after status */ + netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, + "cb[%d]->status = 0x%04X\n", + (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), + cb->status); + + if (likely(cb->skb != NULL)) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += cb->skb->len; + + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); + if (!nic->ecdev) + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + tx_cleaned = 1; + } + cb->status = 0; + nic->cbs_avail++; + } + + if (!nic->ecdev) { + spin_unlock(&nic->cb_lock); + + /* Recover from running out of Tx resources in xmit_frame */ + if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) + netif_wake_queue(nic->netdev); + } + + return tx_cleaned; +} + +static void e100_clean_cbs(struct nic *nic) +{ + if (nic->cbs) { + while (nic->cbs_avail != nic->params.cbs.count) { + struct cb *cb = nic->cb_to_clean; + if (cb->skb) { + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); + if (!nic->ecdev) + dev_kfree_skb(cb->skb); + } + nic->cb_to_clean = nic->cb_to_clean->next; + nic->cbs_avail++; + } + dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); + nic->cbs = NULL; + nic->cbs_avail = 0; + } + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = + nic->cbs; +} + +static int e100_alloc_cbs(struct nic *nic) +{ + struct cb *cb; + unsigned int i, count = nic->params.cbs.count; + + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; + nic->cbs_avail = 0; + + nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL, + &nic->cbs_dma_addr); + if (!nic->cbs) + return -ENOMEM; + + for (cb = nic->cbs, i = 0; i < count; cb++, i++) { + cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; + cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; + + cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); + cb->link = cpu_to_le32(nic->cbs_dma_addr + + ((i+1) % count) * sizeof(struct cb)); + } + + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; + nic->cbs_avail = count; + + return 0; +} + +static inline void e100_start_receiver(struct nic *nic, struct rx *rx) +{ + if (!nic->rxs) return; + if (RU_SUSPENDED != nic->ru_running) return; + + /* handle init time starts */ + if (!rx) rx = nic->rxs; + + /* (Re)start RU if suspended or idle and RFA is non-NULL */ + if (rx->skb) { + e100_exec_cmd(nic, ruc_start, rx->dma_addr); + nic->ru_running = RU_RUNNING; + } +} + +#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) +{ + if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) + return -ENOMEM; + + /* Init, and map the RFD. 
*/ + skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { + dev_kfree_skb_any(rx->skb); + rx->skb = NULL; + rx->dma_addr = 0; + return -ENOMEM; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + dma_sync_single_for_device(&nic->pdev->dev, + rx->prev->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + } + + return 0; +} + +static int e100_rx_indicate(struct nic *nic, struct rx *rx, + unsigned int *work_done, unsigned int work_to_do) +{ + struct net_device *dev = nic->netdev; + struct sk_buff *skb = rx->skb; + struct rfd *rfd = (struct rfd *)skb->data; + u16 rfd_status, actual_size; + u16 fcs_pad = 0; + + if (unlikely(work_done && *work_done >= work_to_do)) + return -EAGAIN; + + /* Need to sync before taking a peek at cb_complete bit */ + dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); + rfd_status = le16_to_cpu(rfd->status); + + netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, + "status=0x%04X\n", rfd_status); + dma_rmb(); /* read size after status bit */ + + /* If data isn't ready, nothing to indicate */ + if (unlikely(!(rfd_status & cb_complete))) { + /* If the next buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling + * interrupts */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), + DMA_FROM_DEVICE); + return -ENODATA; + } + + /* Get actual data size */ + if (unlikely(dev->features & NETIF_F_RXFCS)) + fcs_pad = 4; + actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; + if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) + actual_size = RFD_BUF_LEN - sizeof(struct rfd); + + /* Get data */ + dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); + + /* If this buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling interrupts. + * This can happen when the RU sees the size change but also sees + * the el bit set. */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) { + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + } + + if (!nic->ecdev) { + /* Pull off the RFD and put the actual data (minus eth hdr) */ + skb_reserve(skb, sizeof(struct rfd)); + skb_put(skb, actual_size); + skb->protocol = eth_type_trans(skb, nic->netdev); + } + + /* If we are receiving all frames, then don't bother + * checking for errors. + */ + if (unlikely(dev->features & NETIF_F_RXALL)) { + if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) + /* Received oversized frame, but keep it. 
*/ + nic->rx_over_length_errors++; + goto process_skb; + } + + if (unlikely(!(rfd_status & cb_ok))) { + if (!nic->ecdev) { + /* Don't indicate if hardware indicates errors */ + dev_kfree_skb_any(skb); + } + } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) { + /* Don't indicate oversized frames */ + nic->rx_over_length_errors++; + if (!nic->ecdev) { + dev_kfree_skb_any(skb); + } + } else { +process_skb: + dev->stats.rx_packets++; + dev->stats.rx_bytes += (actual_size - fcs_pad); + if (nic->ecdev) { + ecdev_receive(nic->ecdev, + skb->data + sizeof(struct rfd), actual_size - fcs_pad); + + // No need to detect link status as + // long as frames are received: Reset watchdog. + if (ecdev_get_link(nic->ecdev)) { + nic->ec_watchdog_jiffies = jiffies; + } + } else { + netif_receive_skb(skb); + } + if (work_done) + (*work_done)++; + } + + if (nic->ecdev) { + // make receive frame descriptor usable again + memcpy(skb->data, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = dma_map_single(&nic->pdev->dev, skb->data, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { + rx->dma_addr = 0; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *) rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + dma_sync_single_for_device(&nic->pdev->dev, rx->prev->dma_addr, + sizeof(struct rfd), DMA_TO_DEVICE); + } + } else { + rx->skb = NULL; + } + + return 0; +} + +static void e100_rx_clean(struct nic *nic, unsigned int *work_done, + unsigned int work_to_do) +{ + struct rx *rx; + int restart_required = 0, err = 0; + struct rx *old_before_last_rx, *new_before_last_rx; + struct rfd *old_before_last_rfd, *new_before_last_rfd; + + /* Indicate newly arrived packets */ + for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { + err = e100_rx_indicate(nic, rx, work_done, work_to_do); + /* Hit quota or no more to clean */ + if (-EAGAIN == err || -ENODATA == err) + break; + } + + + /* On EAGAIN, hit quota so have more work to do, restart once + * cleanup is complete. + * Else, are we already rnr? then pay attention!!! this ensures that + * the state machine progression never allows a start with a + * partially cleaned list, avoiding a race between hardware + * and rx_to_clean when in NAPI mode */ + if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) + restart_required = 1; + + old_before_last_rx = nic->rx_to_use->prev->prev; + old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; + + if (!nic->ecdev) { + /* Alloc new skbs to refill list */ + for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { + if(unlikely(e100_rx_alloc_skb(nic, rx))) + break; /* Better luck next time (see watchdog) */ + } + } + + new_before_last_rx = nic->rx_to_use->prev->prev; + if (new_before_last_rx != old_before_last_rx) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer + * without worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this + * buffer. + * When the hardware hits the before last buffer with el-bit + * and size of 0, it will RNR interrupt, the RUS will go into + * the No Resources state. It will not complete nor write to + * this buffer. 
*/ + new_before_last_rfd = + (struct rfd *)new_before_last_rx->skb->data; + new_before_last_rfd->size = 0; + new_before_last_rfd->command |= cpu_to_le16(cb_el); + dma_sync_single_for_device(&nic->pdev->dev, + new_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + + /* Now that we have a new stopping point, we can clear the old + * stopping point. We must sync twice to get the proper + * ordering on the hardware side of things. */ + old_before_last_rfd->command &= ~cpu_to_le16(cb_el); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + + ETH_FCS_LEN); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + } + + if (restart_required) { + // ack the rnr? + iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); + e100_start_receiver(nic, nic->rx_to_clean); + if (work_done) + (*work_done)++; + } +} + +static void e100_rx_clean_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + + nic->ru_running = RU_UNINITIALIZED; + + if (nic->rxs) { + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + if (rx->skb) { + dma_unmap_single(&nic->pdev->dev, + rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); + dev_kfree_skb(rx->skb); + } + } + kfree(nic->rxs); + nic->rxs = NULL; + } + + nic->rx_to_use = nic->rx_to_clean = NULL; +} + +static int e100_rx_alloc_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + struct rfd *before_last; + + nic->rx_to_use = nic->rx_to_clean = NULL; + nic->ru_running = RU_UNINITIALIZED; + + if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL))) + return -ENOMEM; + + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; + rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; + if (e100_rx_alloc_skb(nic, rx)) { + e100_rx_clean_list(nic); + return -ENOMEM; + } + } + + if (!nic->ecdev) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer without + * worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this buffer. + * When the hardware hits the before last buffer with el-bit and size + * of 0, it will RNR interrupt, the RU will go into the No Resources + * state. It will not complete nor write to this buffer. 
*/ + rx = nic->rxs->prev->prev; + before_last = (struct rfd *)rx->skb->data; + before_last->command |= cpu_to_le16(cb_el); + before_last->size = 0; + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); + } + + nic->rx_to_use = nic->rx_to_clean = nic->rxs; + nic->ru_running = RU_SUSPENDED; + + return 0; +} + +static irqreturn_t e100_intr(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct nic *nic = netdev_priv(netdev); + u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); + + netif_printk(nic, intr, KERN_DEBUG, nic->netdev, + "stat_ack = 0x%02X\n", stat_ack); + + if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ + stat_ack == stat_ack_not_present) /* Hardware is ejected */ + return IRQ_NONE; + + /* Ack interrupt(s) */ + iowrite8(stat_ack, &nic->csr->scb.stat_ack); + + /* We hit Receive No Resource (RNR); restart RU after cleaning */ + if (stat_ack & stat_ack_rnr) + nic->ru_running = RU_SUSPENDED; + + if (!nic->ecdev && likely(napi_schedule_prep(&nic->napi))) { + e100_disable_irq(nic); + __napi_schedule(&nic->napi); + } + + return IRQ_HANDLED; +} + +void e100_ec_poll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_rx_clean(nic, NULL, 100); + e100_tx_clean(nic); + + if (jiffies - nic->ec_watchdog_jiffies >= 2 * HZ) { + e100_watchdog_impl(nic); + nic->ec_watchdog_jiffies = jiffies; + } +} + + +static int e100_poll(struct napi_struct *napi, int budget) +{ + struct nic *nic = container_of(napi, struct nic, napi); + unsigned int work_done = 0; + + e100_rx_clean(nic, &work_done, budget); + e100_tx_clean(nic); + + /* If budget fully consumed, continue polling */ + if (work_done == budget) + return budget; + + /* only re-enable interrupt if stack agrees polling is really done */ + if (likely(napi_complete_done(napi, work_done))) + e100_enable_irq(nic); + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void e100_netpoll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_disable_irq(nic); + e100_intr(nic->pdev->irq, netdev); + e100_tx_clean(nic); + e100_enable_irq(nic); +} +#endif + +static int e100_set_mac_address(struct net_device *netdev, void *p) +{ + struct nic *nic = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, addr->sa_data); + e100_exec_cb(nic, NULL, e100_setup_iaaddr); + + return 0; +} + +static int e100_asf(struct nic *nic) +{ + /* ASF can be enabled from eeprom */ + return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && + (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && + !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && + ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); +} + +static int e100_up(struct nic *nic) +{ + int err; + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_rx_clean_list; + if ((err = e100_hw_init(nic))) + goto err_clean_cbs; + e100_set_multicast_list(nic->netdev); + e100_start_receiver(nic, NULL); + if (!nic->ecdev) { + mod_timer(&nic->watchdog, jiffies); + } + if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, + nic->netdev->name, nic->netdev))) + goto err_no_irq; + if (!nic->ecdev) { + netif_wake_queue(nic->netdev); + napi_enable(&nic->napi); + /* enable ints _after_ enabling poll, preventing a race between + * disable ints+schedule */ + e100_enable_irq(nic); + } + return 0; + +err_no_irq: + if 
(!nic->ecdev) + del_timer_sync(&nic->watchdog); +err_clean_cbs: + e100_clean_cbs(nic); +err_rx_clean_list: + e100_rx_clean_list(nic); + return err; +} + +static void e100_down(struct nic *nic) +{ + if (!nic->ecdev) { + /* wait here for poll to complete */ + napi_disable(&nic->napi); + netif_stop_queue(nic->netdev); + } + e100_hw_reset(nic); + free_irq(nic->pdev->irq, nic->netdev); + if (!nic->ecdev) { + del_timer_sync(&nic->watchdog); + netif_carrier_off(nic->netdev); + } + e100_clean_cbs(nic); + e100_rx_clean_list(nic); +} + +static void e100_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct nic *nic = netdev_priv(netdev); + + /* Reset outside of interrupt context, to avoid request_irq + * in interrupt context */ + schedule_work(&nic->tx_timeout_task); +} + +static void e100_tx_timeout_task(struct work_struct *work) +{ + struct nic *nic = container_of(work, struct nic, tx_timeout_task); + struct net_device *netdev = nic->netdev; + + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); + + rtnl_lock(); + if (netif_running(netdev)) { + e100_down(netdev_priv(netdev)); + e100_up(netdev_priv(netdev)); + } + rtnl_unlock(); +} + +static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) +{ + int err; + struct sk_buff *skb; + + /* Use driver resources to perform internal MAC or PHY + * loopback test. A single packet is prepared and transmitted + * in loopback mode, and the test passes if the received + * packet compares byte-for-byte to the transmitted packet. */ + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_clean_rx; + + /* ICH PHY loopback is broken so do MAC loopback instead */ + if (nic->flags & ich && loopback_mode == lb_phy) + loopback_mode = lb_mac; + + nic->loopback = loopback_mode; + if ((err = e100_hw_init(nic))) + goto err_loopback_none; + + if (loopback_mode == lb_phy) + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, + BMCR_LOOPBACK); + + e100_start_receiver(nic, NULL); + + if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { + err = -ENOMEM; + goto err_loopback_none; + } + skb_put(skb, ETH_DATA_LEN); + memset(skb->data, 0xFF, ETH_DATA_LEN); + e100_xmit_frame(skb, nic->netdev); + + msleep(10); + + dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + + if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), + skb->data, ETH_DATA_LEN)) + err = -EAGAIN; + +err_loopback_none: + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); + nic->loopback = lb_none; + e100_clean_cbs(nic); + e100_hw_reset(nic); +err_clean_rx: + e100_rx_clean_list(nic); + return err; +} + +#define MII_LED_CONTROL 0x1B +#define E100_82552_LED_OVERRIDE 0x19 +#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ +#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ + +static int e100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + + mii_ethtool_get_link_ksettings(&nic->mii, cmd); + + return 0; +} + +static int e100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); + err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); + e100_exec_cb(nic, NULL, e100_configure); + + return err; +} + +static void e100_get_drvinfo(struct net_device *netdev, + 
struct ethtool_drvinfo *info) +{ + struct nic *nic = netdev_priv(netdev); + strscpy(info->driver, DRV_NAME, sizeof(info->driver)); + strscpy(info->bus_info, pci_name(nic->pdev), + sizeof(info->bus_info)); +} + +#define E100_PHY_REGS 0x1D +static int e100_get_regs_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + /* We know the number of registers, and the size of the dump buffer. + * Calculate the total size in bytes. + */ + return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); +} + +static void e100_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nic *nic = netdev_priv(netdev); + u32 *buff = p; + int i; + + regs->version = (1 << 24) | nic->pdev->revision; + buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | + ioread8(&nic->csr->scb.cmd_lo) << 16 | + ioread16(&nic->csr->scb.status); + for (i = 0; i < E100_PHY_REGS; i++) + /* Note that we read the registers in reverse order. This + * ordering is the ABI apparently used by ethtool and other + * applications. + */ + buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, + E100_PHY_REGS - 1 - i); + memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); + e100_exec_cb(nic, NULL, e100_dump); + msleep(10); + memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, + sizeof(nic->mem->dump_buf)); +} + +static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; + wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0; +} + +static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + + if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || + !device_can_wakeup(&nic->pdev->dev)) + return -EOPNOTSUPP; + + if (wol->wolopts) + nic->flags |= wol_magic; + else + nic->flags &= ~wol_magic; + + device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); + + e100_exec_cb(nic, NULL, e100_configure); + + return 0; +} + +static u32 e100_get_msglevel(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->msg_enable; +} + +static void e100_set_msglevel(struct net_device *netdev, u32 value) +{ + struct nic *nic = netdev_priv(netdev); + nic->msg_enable = value; +} + +static int e100_nway_reset(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_nway_restart(&nic->mii); +} + +static u32 e100_get_link(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_link_ok(&nic->mii); +} + +static int e100_get_eeprom_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->eeprom_wc << 1; +} + +#define E100_EEPROM_MAGIC 0x1234 +static int e100_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + eeprom->magic = E100_EEPROM_MAGIC; + memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); + + return 0; +} + +static int e100_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + if (eeprom->magic != E100_EEPROM_MAGIC) + return -EINVAL; + + memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); + + return e100_eeprom_save(nic, eeprom->offset >> 1, + (eeprom->len >> 1) + 1); +} + +static void e100_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + 
struct netlink_ext_ack *extack) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + ring->rx_max_pending = rfds->max; + ring->tx_max_pending = cbs->max; + ring->rx_pending = rfds->count; + ring->tx_pending = cbs->count; +} + +static int e100_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(netdev)) + e100_down(nic); + rfds->count = max(ring->rx_pending, rfds->min); + rfds->count = min(rfds->count, rfds->max); + cbs->count = max(ring->tx_pending, cbs->min); + cbs->count = min(cbs->count, cbs->max); + netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", + rfds->count, cbs->count); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} + +static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)", + "Eeprom test (on/offline)", + "Self test (offline)", + "Mac loopback (offline)", + "Phy loopback (offline)", +}; +#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test) + +static void e100_diag_test(struct net_device *netdev, + struct ethtool_test *test, u64 *data) +{ + struct ethtool_cmd cmd; + struct nic *nic = netdev_priv(netdev); + int i; + + memset(data, 0, E100_TEST_LEN * sizeof(u64)); + data[0] = !mii_link_ok(&nic->mii); + data[1] = e100_eeprom_load(nic); + if (test->flags & ETH_TEST_FL_OFFLINE) { + + /* save speed, duplex & autoneg settings */ + mii_ethtool_gset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_down(nic); + data[2] = e100_self_test(nic); + data[3] = e100_loopback_test(nic, lb_mac); + data[4] = e100_loopback_test(nic, lb_phy); + + /* restore speed, duplex & autoneg settings */ + mii_ethtool_sset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_up(nic); + } + for (i = 0; i < E100_TEST_LEN; i++) + test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; + + msleep_interruptible(4 * 1000); +} + +static int e100_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct nic *nic = netdev_priv(netdev); + enum led_state { + led_on = 0x01, + led_off = 0x04, + led_on_559 = 0x05, + led_on_557 = 0x07, + }; + u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : + MII_LED_CONTROL; + u16 leds = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; + + case ETHTOOL_ID_ON: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : + (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; + break; + + case ETHTOOL_ID_OFF: + leds = (nic->phy == phy_82552_v) ? 
E100_82552_LED_OFF : led_off; + break; + + case ETHTOOL_ID_INACTIVE: + break; + } + + mdio_write(netdev, nic->mii.phy_id, led_reg, leds); + return 0; +} + +static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + /* device-specific stats */ + "tx_deferred", "tx_single_collisions", "tx_multi_collisions", + "tx_flow_control_pause", "rx_flow_control_pause", + "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", + "rx_short_frame_errors", "rx_over_length_errors", +}; +#define E100_NET_STATS_LEN 21 +#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) + +static int e100_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E100_TEST_LEN; + case ETH_SS_STATS: + return E100_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e100_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nic *nic = netdev_priv(netdev); + int i; + + for (i = 0; i < E100_NET_STATS_LEN; i++) + data[i] = ((unsigned long *)&netdev->stats)[i]; + + data[i++] = nic->tx_deferred; + data[i++] = nic->tx_single_collisions; + data[i++] = nic->tx_multiple_collisions; + data[i++] = nic->tx_fc_pause; + data[i++] = nic->rx_fc_pause; + data[i++] = nic->rx_fc_unsupported; + data[i++] = nic->tx_tco_frames; + data[i++] = nic->rx_tco_frames; + data[i++] = nic->rx_short_frame_errors; + data[i++] = nic->rx_over_length_errors; +} + +static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e100_gstrings_test, sizeof(e100_gstrings_test)); + break; + case ETH_SS_STATS: + memcpy(data, e100_gstrings_stats, sizeof(e100_gstrings_stats)); + break; + } +} + +static const struct ethtool_ops e100_ethtool_ops = { + .get_drvinfo = e100_get_drvinfo, + .get_regs_len = e100_get_regs_len, + .get_regs = e100_get_regs, + .get_wol = e100_get_wol, + .set_wol = e100_set_wol, + .get_msglevel = e100_get_msglevel, + .set_msglevel = e100_set_msglevel, + .nway_reset = e100_nway_reset, + .get_link = e100_get_link, + .get_eeprom_len = e100_get_eeprom_len, + .get_eeprom = e100_get_eeprom, + .set_eeprom = e100_set_eeprom, + .get_ringparam = e100_get_ringparam, + .set_ringparam = e100_set_ringparam, + .self_test = e100_diag_test, + .get_strings = e100_get_strings, + .set_phys_id = e100_set_phys_id, + .get_ethtool_stats = e100_get_ethtool_stats, + .get_sset_count = e100_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e100_get_link_ksettings, + .set_link_ksettings = e100_set_link_ksettings, +}; + +static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct nic *nic = netdev_priv(netdev); + + return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); +} + +static int e100_alloc(struct nic *nic) +{ + nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem), + &nic->dma_addr, GFP_KERNEL); + return nic->mem ? 
0 : -ENOMEM; +} + +static void e100_free(struct nic *nic) +{ + if (nic->mem) { + dma_free_coherent(&nic->pdev->dev, sizeof(struct mem), + nic->mem, nic->dma_addr); + nic->mem = NULL; + } +} + +static int e100_open(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err = 0; + + if (!nic->ecdev) + netif_carrier_off(netdev); + if ((err = e100_up(nic))) + netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); + return err; +} + +static int e100_close(struct net_device *netdev) +{ + e100_down(netdev_priv(netdev)); + return 0; +} + +static int e100_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct nic *nic = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + e100_exec_cb(nic, NULL, e100_configure); + return 1; +} + +static const struct net_device_ops e100_netdev_ops = { + .ndo_open = e100_open, + .ndo_stop = e100_close, + .ndo_start_xmit = e100_xmit_frame, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = e100_set_multicast_list, + .ndo_set_mac_address = e100_set_mac_address, + .ndo_eth_ioctl = e100_do_ioctl, + .ndo_tx_timeout = e100_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e100_netpoll, +#endif + .ndo_set_features = e100_set_features, +}; + +static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct nic *nic; + int err; + + if (!(netdev = alloc_etherdev(sizeof(struct nic)))) + return -ENOMEM; + + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; + + netdev->netdev_ops = &e100_netdev_ops; + netdev->ethtool_ops = &e100_ethtool_ops; + netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + nic = netdev_priv(netdev); + netif_napi_add_weight(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); + nic->netdev = netdev; + nic->pdev = pdev; + nic->msg_enable = (1 << debug) - 1; + nic->mdio_ctrl = mdio_ctrl_hw; + pci_set_drvdata(pdev, netdev); + + if ((err = pci_enable_device(pdev))) { + netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); + goto err_out_free_dev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + if ((err = pci_request_regions(pdev, DRV_NAME))) { + netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable_pdev; + } + + if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { + netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + if (use_io) + netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); + + nic->csr = pci_iomap(pdev, (use_io ? 
1 : 0), sizeof(struct csr)); + if (!nic->csr) { + netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + if (ent->driver_data) + nic->flags |= ich; + else + nic->flags &= ~ich; + + e100_get_defaults(nic); + + /* D100 MAC doesn't allow rx of vlan packets with normal MTU */ + if (nic->mac < mac_82558_D101_A4) + netdev->features |= NETIF_F_VLAN_CHALLENGED; + + /* locks must be initialized before calling hw_reset */ + spin_lock_init(&nic->cb_lock); + spin_lock_init(&nic->cmd_lock); + spin_lock_init(&nic->mdio_lock); + + /* Reset the device before pci_set_master() in case device is in some + * funky state and has an interrupt pending - hint: we don't have the + * interrupt handler registered yet. */ + e100_hw_reset(nic); + + pci_set_master(pdev); + + timer_setup(&nic->watchdog, e100_watchdog, 0); + + INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); + + if ((err = e100_alloc(nic))) { + netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); + goto err_out_iounmap; + } + + if ((err = e100_eeprom_load(nic))) + goto err_out_free; + + e100_phy_init(nic); + + eth_hw_addr_set(netdev, (u8 *)nic->eeprom); + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!eeprom_bad_csum_allow) { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); + err = -EAGAIN; + goto err_out_free; + } else { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); + } + } + + /* Wol magic packet can be enabled from eeprom */ + if ((nic->mac >= mac_82558_D101_A4) && + (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { + nic->flags |= wol_magic; + device_set_wakeup_enable(&pdev->dev, true); + } + + /* ack any pending wake events, disable PME */ + pci_pme_active(pdev, false); + + // offer device to EtherCAT master module + nic->ecdev = ecdev_offer(netdev, e100_ec_poll, THIS_MODULE); + + if (!nic->ecdev) { + strcpy(netdev->name, "eth%d"); + if ((err = register_netdev(netdev))) { + netif_err(nic, probe, nic->netdev, + "Cannot register net device, aborting\n"); + goto err_out_free; + } + } + + nic->cbs_pool = dma_pool_create(netdev->name, + &nic->pdev->dev, + nic->params.cbs.max * sizeof(struct cb), + sizeof(u32), + 0); + if (!nic->cbs_pool) { + netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); + err = -ENOMEM; + goto err_out_pool; + } + netif_info(nic, probe, nic->netdev, + "addr 0x%llx, irq %d, MAC addr %pM\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), + pdev->irq, netdev->dev_addr); + + if (nic->ecdev) { + err = ecdev_open(nic->ecdev); + if (err) { + ecdev_withdraw(nic->ecdev); + goto err_out_free; + } + } + + return 0; + +err_out_pool: + unregister_netdev(netdev); +err_out_free: + e100_free(nic); +err_out_iounmap: + pci_iounmap(pdev, nic->csr); +err_out_free_res: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out_free_dev: + free_netdev(netdev); + return err; +} + +static void e100_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netdev) { + struct nic *nic = netdev_priv(netdev); + if (nic->ecdev) { + ecdev_close(nic->ecdev); + ecdev_withdraw(nic->ecdev); + } else { + unregister_netdev(netdev); + } + + e100_free(nic); + pci_iounmap(pdev, nic->csr); + dma_pool_destroy(nic->cbs_pool); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ +#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ +#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ +static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + e100_down(nic); + + if ((nic->flags & wol_magic) | e100_asf(nic)) { + /* enable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, smartspeed | + E100_82552_REV_ANEG | E100_82552_ANEG_NOW); + } + *enable_wake = true; + } else { + *enable_wake = false; + } + + pci_disable_device(pdev); +} + +static int __e100_power_off(struct pci_dev *pdev, bool wake) +{ + if (wake) + return pci_prepare_to_sleep(pdev); + + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int __maybe_unused e100_suspend(struct device *dev_d) +{ + bool wake; + + __e100_shutdown(to_pci_dev(dev_d), &wake); + + return 0; +} + +static int __maybe_unused e100_resume(struct device *dev_d) +{ + struct net_device *netdev = dev_get_drvdata(dev_d); + struct nic *nic = netdev_priv(netdev); + int err; + + err = pci_enable_device(to_pci_dev(dev_d)); + if (err) { + netdev_err(netdev, "Resume cannot enable PCI device, aborting\n"); + return err; + } + pci_set_master(to_pci_dev(dev_d)); + + /* disable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, + smartspeed & ~(E100_82552_REV_ANEG)); + } + + if (netif_running(netdev)) + e100_up(nic); + + netif_device_attach(netdev); + + return 0; +} + +static void e100_shutdown(struct pci_dev *pdev) +{ + bool wake; + __e100_shutdown(pdev, &wake); + if (system_state == SYSTEM_POWER_OFF) + __e100_power_off(pdev, wake); +} + +/* ------------------ PCI Error Recovery infrastructure -------------- */ +/** + * e100_io_error_detected - called when PCI error is detected. 
+ * @pdev: Pointer to PCI device + * @state: The current pci connection state + */ +static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (nic->ecdev) + return -EBUSY; + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e100_down(nic); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e100_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch. + */ +static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (nic->ecdev) + return -EBUSY; + + if (pci_enable_device(pdev)) { + pr_err("Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + /* Only one device per card can do a reset */ + if (0 != PCI_FUNC(pdev->devfn)) + return PCI_ERS_RESULT_RECOVERED; + e100_hw_reset(nic); + e100_phy_init(nic); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e100_io_resume - resume normal operations + * @pdev: Pointer to PCI device + * + * Resume normal operations after an error recovery + * sequence has been completed. + */ +static void e100_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, PCI_D0, 0); + + if (!nic->ecdev) + netif_device_attach(netdev); + if (nic->ecdev || netif_running(netdev)) { + e100_open(netdev); + if (!nic->ecdev) + mod_timer(&nic->watchdog, jiffies); + } +} + +static const struct pci_error_handlers e100_err_handler = { + .error_detected = e100_io_error_detected, + .slot_reset = e100_io_slot_reset, + .resume = e100_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume); + +static struct pci_driver e100_driver = { + .name = DRV_NAME, + .id_table = e100_id_table, + .probe = e100_probe, + .remove = e100_remove, + + /* Power Management hooks */ + .driver.pm = &e100_pm_ops, + + .shutdown = e100_shutdown, + .err_handler = &e100_err_handler, +}; + +static int __init e100_init_module(void) +{ + if (((1 << debug) - 1) & NETIF_MSG_DRV) { + pr_info("%s\n", DRV_DESCRIPTION); + pr_info("%s\n", DRV_COPYRIGHT); + } + return pci_register_driver(&e100_driver); +} + +static void __exit e100_cleanup_module(void) +{ + printk(KERN_INFO DRV_NAME " cleaning up module...\n"); + pci_unregister_driver(&e100_driver); + printk(KERN_INFO DRV_NAME " module cleaned up.\n"); +} + +module_init(e100_init_module); +module_exit(e100_cleanup_module); diff --git a/devices/e100-6.4-orig.c b/devices/e100-6.4-orig.c new file mode 100644 index 00000000..d3fdc290 --- /dev/null +++ b/devices/e100-6.4-orig.c @@ -0,0 +1,3196 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* + * e100.c: Intel(R) PRO/100 ethernet driver + * + * (Re)written 2003 by scott.feldman@intel.com. Based loosely on + * original e100 driver, but better described as a munging of + * e100, e1000, eepro100, tg3, 8139cp, and other drivers. 
+ * + * References: + * Intel 8255x 10/100 Mbps Ethernet Controller Family, + * Open Source Software Developers Manual, + * http://sourceforge.net/projects/e1000 + * + * + * Theory of Operation + * + * I. General + * + * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet + * controller family, which includes the 82557, 82558, 82559, 82550, + * 82551, and 82562 devices. 82558 and greater controllers + * integrate the Intel 82555 PHY. The controllers are used in + * server and client network interface cards, as well as in + * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx + * configurations. 8255x supports a 32-bit linear addressing + * mode and operates at 33Mhz PCI clock rate. + * + * II. Driver Operation + * + * Memory-mapped mode is used exclusively to access the device's + * shared-memory structure, the Control/Status Registers (CSR). All + * setup, configuration, and control of the device, including queuing + * of Tx, Rx, and configuration commands is through the CSR. + * cmd_lock serializes accesses to the CSR command register. cb_lock + * protects the shared Command Block List (CBL). + * + * 8255x is highly MII-compliant and all access to the PHY go + * through the Management Data Interface (MDI). Consequently, the + * driver leverages the mii.c library shared with other MII-compliant + * devices. + * + * Big- and Little-Endian byte order as well as 32- and 64-bit + * archs are supported. Weak-ordered memory and non-cache-coherent + * archs are supported. + * + * III. Transmit + * + * A Tx skb is mapped and hangs off of a TCB. TCBs are linked + * together in a fixed-size ring (CBL) thus forming the flexible mode + * memory structure. A TCB marked with the suspend-bit indicates + * the end of the ring. The last TCB processed suspends the + * controller, and the controller can be restarted by issue a CU + * resume command to continue from the suspend point, or a CU start + * command to start at a given position in the ring. + * + * Non-Tx commands (config, multicast setup, etc) are linked + * into the CBL ring along with Tx commands. The common structure + * used for both Tx and non-Tx commands is the Command Block (CB). + * + * cb_to_use is the next CB to use for queuing a command; cb_to_clean + * is the next CB to check for completion; cb_to_send is the first + * CB to start on in case of a previous failure to resume. CB clean + * up happens in interrupt context in response to a CU interrupt. + * cbs_avail keeps track of number of free CB resources available. + * + * Hardware padding of short packets to minimum packet size is + * enabled. 82557 pads with 7Eh, while the later controllers pad + * with 00h. + * + * IV. Receive + * + * The Receive Frame Area (RFA) comprises a ring of Receive Frame + * Descriptors (RFD) + data buffer, thus forming the simplified mode + * memory structure. Rx skbs are allocated to contain both the RFD + * and the data buffer, but the RFD is pulled off before the skb is + * indicated. The data buffer is aligned such that encapsulated + * protocol headers are u32-aligned. Since the RFD is part of the + * mapped shared memory, and completion status is contained within + * the RFD, the RFD must be dma_sync'ed to maintain a consistent + * view from software and hardware. + * + * In order to keep updates to the RFD link field from colliding with + * hardware writes to mark packets complete, we use the feature that + * hardware will not write to a size 0 descriptor and mark the previous + * packet as end-of-list (EL). 
After updating the link, we remove EL + * and only then restore the size such that hardware may use the + * previous-to-end RFD. + * + * Under typical operation, the receive unit (RU) is start once, + * and the controller happily fills RFDs as frames arrive. If + * replacement RFDs cannot be allocated, or the RU goes non-active, + * the RU must be restarted. Frame arrival generates an interrupt, + * and Rx indication and re-allocation happen in the same context, + * therefore no locking is required. A software-generated interrupt + * is generated from the watchdog to recover from a failed allocation + * scenario where all Rx resources have been indicated and none re- + * placed. + * + * V. Miscellaneous + * + * VLAN offloading of tagging, stripping and filtering is not + * supported, but driver will accommodate the extra 4-byte VLAN tag + * for processing by upper layers. Tx/Rx Checksum offloading is not + * supported. Tx Scatter/Gather is not supported. Jumbo Frames is + * not supported (hardware limitation). + * + * MagicPacket(tm) WoL support is enabled/disabled via ethtool. + * + * Thanks to JC (jchapman@katalix.com) for helping with + * testing/troubleshooting the development driver. + * + * TODO: + * o several entry points race with dev->close + * o check for tx-no-resources/stop Q races with tx clean/wake Q + * + * FIXES: + * 2005/12/02 - Michael O'Donnell + * - Stratus87247: protect MDI control register manipulations + * 2009/06/01 - Andreas Mohr + * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define DRV_NAME "e100" +#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" +#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" + +#define E100_WATCHDOG_PERIOD (2 * HZ) +#define E100_NAPI_WEIGHT 16 + +#define FIRMWARE_D101M "e100/d101m_ucode.bin" +#define FIRMWARE_D101S "e100/d101s_ucode.bin" +#define FIRMWARE_D102E "e100/d102e_ucode.bin" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE(FIRMWARE_D101M); +MODULE_FIRMWARE(FIRMWARE_D101S); +MODULE_FIRMWARE(FIRMWARE_D102E); + +static int debug = 3; +static int eeprom_bad_csum_allow = 0; +static int use_io = 0; +module_param(debug, int, 0); +module_param(eeprom_bad_csum_allow, int, 0); +module_param(use_io, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); +MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); + +#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ + PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ + PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } +static const struct pci_device_id e100_id_table[] = { + INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1032, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1033, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1034, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1038, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1039, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103A, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103B, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103C, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103D, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103E, 
4), + INTEL_8255X_ETHERNET_DEVICE(0x1050, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1051, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1052, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1053, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1054, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1055, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1056, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1057, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1059, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1064, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1065, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1066, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1067, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1068, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), + INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), + INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), + INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), + INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), + INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), + { 0, } +}; +MODULE_DEVICE_TABLE(pci, e100_id_table); + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 0xFF, +}; + +enum phy { + phy_100a = 0x000003E0, + phy_100c = 0x035002A8, + phy_82555_tx = 0x015002A8, + phy_nsc_tx = 0x5C002000, + phy_82562_et = 0x033002A8, + phy_82562_em = 0x032002A8, + phy_82562_ek = 0x031002A8, + phy_82562_eh = 0x017002A8, + phy_82552_v = 0xd061004d, + phy_unknown = 0xFFFFFFFF, +}; + +/* CSR (Control/Status Registers) */ +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 0x08, + rus_ready = 0x10, + rus_mask = 0x3C, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0x00, + stat_ack_sw_gen = 0x04, + stat_ack_rnr = 0x10, + stat_ack_cu_idle = 0x20, + stat_ack_frame_rx = 0x40, + stat_ack_cu_cmd_done = 0x80, + stat_ack_not_present = 0xFF, + stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), + stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), +}; + +enum scb_cmd_hi { + irq_mask_none = 0x00, + irq_mask_all = 0x01, + irq_sw_gen = 0x02, +}; + +enum scb_cmd_lo { + cuc_nop = 0x00, + ruc_start = 0x01, + ruc_load_base = 0x06, + cuc_start = 0x10, + cuc_resume = 0x20, + cuc_dump_addr = 0x40, + cuc_dump_stats = 0x50, + cuc_load_base = 0x60, + cuc_dump_reset = 0x70, +}; + +enum cuc_dump { + cuc_dump_complete = 0x0000A005, + cuc_dump_reset_complete = 0x0000A007, +}; + +enum port { + software_reset = 0x0000, + selftest = 0x0001, + selective_reset = 0x0002, +}; + +enum eeprom_ctrl_lo { + eesk = 0x01, + eecs = 0x02, + eedi = 0x04, + eedo = 0x08, +}; + +enum mdi_ctrl { + mdi_write = 0x04000000, + mdi_read = 0x08000000, + mdi_ready = 0x10000000, +}; + +enum eeprom_op { + op_write = 0x05, + op_read = 0x06, + op_ewds = 0x10, + op_ewen = 0x13, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 0x03, + eeprom_phy_iface = 0x06, + eeprom_id 
= 0x0A, + eeprom_config_asf = 0x0D, + eeprom_smbus_addr = 0x90, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 0x0080, +}; + +enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB, + I82553C, + I82503, + DP83840, + S80C240, + S80C24, + I82555, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 0x0020, +}; + +enum eeprom_config_asf { + eeprom_asf = 0x8000, + eeprom_gcl = 0x4000, +}; + +enum cb_status { + cb_complete = 0x8000, + cb_ok = 0x2000, +}; + +/* + * cb_command - Command Block flags + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory + */ +enum cb_command { + cb_nop = 0x0000, + cb_iaaddr = 0x0001, + cb_config = 0x0002, + cb_multi = 0x0003, + cb_tx = 0x0004, + cb_ucode = 0x0005, + cb_dump = 0x0006, + cb_tx_sf = 0x0008, + cb_tx_nc = 0x0010, + cb_cid = 0x1f00, + cb_i = 0x2000, + cb_s = 0x4000, + cb_el = 0x8000, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next, *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +#if defined(__BIG_ENDIAN_BITFIELD) +#define X(a,b) b,a +#else +#define X(a,b) a,b +#endif +struct config { +/*0*/ u8 X(byte_count:6, pad0:2); +/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); +/*2*/ u8 adaptive_ifs; +/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), + term_write_cache_line:1), pad3:4); +/*4*/ u8 X(rx_dma_max_count:7, pad4:1); +/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); +/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), + tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), + rx_save_overruns : 1), rx_save_bad_frames : 1); +/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), + pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), + tx_dynamic_tbd:1); +/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); +/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), + link_status_wake:1), arp_wake:1), mcmatch_wake:1); +/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), + loopback:2); +/*11*/ u8 X(linear_priority:3, pad11:5); +/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); +/*13*/ u8 ip_addr_lo; +/*14*/ u8 ip_addr_hi; +/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), + wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), + pad15_2:1), crs_or_cdt:1); +/*16*/ u8 fc_delay_lo; +/*17*/ u8 fc_delay_hi; +/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), + rx_long_ok:1), fc_priority_threshold:3), pad18:1); +/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), + fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), + full_duplex_force:1), full_duplex_pin:1); +/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1); +/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); +/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); + u8 pad_d102[9]; +}; + +#define E100_MAX_MULTICAST_ADDRS 64 +struct multi { + __le16 count; + u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; +}; + +/* Important: keep total struct u32-aligned */ +#define UCODE_SIZE 134 +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[ETH_ALEN]; + __le32 ucode[UCODE_SIZE]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next, 
*prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, lb_mac = 1, lb_phy = 3, +}; + +struct stats { + __le32 tx_good_frames, tx_max_collisions, tx_late_collisions, + tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, + tx_multiple_collisions, tx_total_collisions; + __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors, + rx_resource_errors, rx_overrun_errors, rx_cdt_errors, + rx_short_frame_errors; + __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; + __le16 xmt_tco_frames, rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + /* Begin: frequently used values: keep adjacent for cache effect */ + u32 msg_enable ____cacheline_aligned; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data); + + struct rx *rxs ____cacheline_aligned; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + + spinlock_t cb_lock ____cacheline_aligned; + spinlock_t cmd_lock; + struct csr __iomem *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + /* End: frequently used values: keep adjacent for cache effect */ + + enum { + ich = (1 << 0), + promiscuous = (1 << 1), + multicast_all = (1 << 2), + wol_magic = (1 << 3), + ich_10h_workaround = (1 << 4), + } flags ____cacheline_aligned; + + enum mac mac; + enum phy phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + + struct mem *mem; + dma_addr_t dma_addr; + + struct dma_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + + u16 eeprom_wc; + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; +}; + +static inline void e100_write_flush(struct nic *nic) +{ + /* Flush previous PCI writes through intermediate bridges + * by doing a benign read */ + (void)ioread8(&nic->csr->scb.status); +} + +static void e100_enable_irq(struct nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_disable_irq(struct nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_hw_reset(struct nic *nic) +{ + /* Put CU and RU into idle with a selective reset to get + * device off of PCI bus */ + iowrite32(selective_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Now fully reset device */ + iowrite32(software_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Mask off our interrupt line - it's unmasked after reset */ + 
e100_disable_irq(nic); +} + +static int e100_self_test(struct nic *nic) +{ + u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); + + /* Passing the self-test is a pretty good indication + * that the device can DMA to/from host memory */ + + nic->mem->selftest.signature = 0; + nic->mem->selftest.result = 0xFFFFFFFF; + + iowrite32(selftest | dma_addr, &nic->csr->port); + e100_write_flush(nic); + /* Wait 10 msec for self-test to complete */ + msleep(10); + + /* Interrupts are enabled after self-test */ + e100_disable_irq(nic); + + /* Check results of self-test */ + if (nic->mem->selftest.result != 0) { + netif_err(nic, hw, nic->netdev, + "Self-test failed: result=0x%08X\n", + nic->mem->selftest.result); + return -ETIMEDOUT; + } + if (nic->mem->selftest.signature == 0) { + netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data) +{ + u32 cmd_addr_data[3]; + u8 ctrl; + int i, j; + + /* Three cmds: write/erase enable, write data, write/erase disable */ + cmd_addr_data[0] = op_ewen << (addr_len - 2); + cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | + le16_to_cpu(data); + cmd_addr_data[2] = op_ewds << (addr_len - 2); + + /* Bit-bang cmds to write word to eeprom */ + for (j = 0; j < 3; j++) { + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data[j] & (1 << i)) ? + eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } + /* Wait 10 msec for cmd to complete */ + msleep(10); + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } +}; + +/* General technique stolen from the eepro100 driver - very clever */ +static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) +{ + u32 cmd_addr_data; + u16 data = 0; + u8 ctrl; + int i; + + cmd_addr_data = ((op_read << *addr_len) | addr) << 16; + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Bit-bang to read word from eeprom */ + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Eeprom drives a dummy zero to EEDO after receiving + * complete address. Use this to adjust addr_len. */ + ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); + if (!(ctrl & eedo) && i > 16) { + *addr_len -= (i - 16); + i = 17; + } + + data = (data << 1) | (ctrl & eedo ? 
1 : 0); + } + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + return cpu_to_le16(data); +}; + +/* Load entire EEPROM image into driver cache and validate checksum */ +static int e100_eeprom_load(struct nic *nic) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + for (addr = 0; addr < nic->eeprom_wc; addr++) { + nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); + if (addr < nic->eeprom_wc - 1) + checksum += le16_to_cpu(nic->eeprom[addr]); + } + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { + netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); + if (!eeprom_bad_csum_allow) + return -EAGAIN; + } + + return 0; +} + +/* Save (portion of) driver EEPROM cache to device and update checksum */ +static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + if (start + count >= nic->eeprom_wc) + return -EINVAL; + + for (addr = start; addr < start + count; addr++) + e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + for (addr = 0; addr < nic->eeprom_wc - 1; addr++) + checksum += le16_to_cpu(nic->eeprom[addr]); + nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); + e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, + nic->eeprom[nic->eeprom_wc - 1]); + + return 0; +} + +#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ +#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ +static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) +{ + unsigned long flags; + unsigned int i; + int err = 0; + + spin_lock_irqsave(&nic->cmd_lock, flags); + + /* Previous command is accepted when SCB clears */ + for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { + if (likely(!ioread8(&nic->csr->scb.cmd_lo))) + break; + cpu_relax(); + if (unlikely(i > E100_WAIT_SCB_FAST)) + udelay(5); + } + if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) { + err = -EAGAIN; + goto err_unlock; + } + + if (unlikely(cmd != cuc_resume)) + iowrite32(dma_addr, &nic->csr->scb.gen_ptr); + iowrite8(cmd, &nic->csr->scb.cmd_lo); + +err_unlock: + spin_unlock_irqrestore(&nic->cmd_lock, flags); + + return err; +} + +static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, + int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) +{ + struct cb *cb; + unsigned long flags; + int err; + + spin_lock_irqsave(&nic->cb_lock, flags); + + if (unlikely(!nic->cbs_avail)) { + err = -ENOMEM; + goto err_unlock; + } + + cb = nic->cb_to_use; + nic->cb_to_use = cb->next; + nic->cbs_avail--; + cb->skb = skb; + + err = cb_prepare(nic, cb, skb); + if (err) + goto err_unlock; + + if (unlikely(!nic->cbs_avail)) + err = -ENOSPC; + + + /* Order is important otherwise we'll be in a race with h/w: + * set S-bit in current first, then clear S-bit in previous. 
*/ + cb->command |= cpu_to_le16(cb_s); + dma_wmb(); + cb->prev->command &= cpu_to_le16(~cb_s); + + while (nic->cb_to_send != nic->cb_to_use) { + if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, + nic->cb_to_send->dma_addr))) { + /* Ok, here's where things get sticky. It's + * possible that we can't schedule the command + * because the controller is too busy, so + * let's just queue the command and try again + * when another command is scheduled. */ + if (err == -ENOSPC) { + //request a reset + schedule_work(&nic->tx_timeout_task); + } + break; + } else { + nic->cuc_cmd = cuc_resume; + nic->cb_to_send = nic->cb_to_send->next; + } + } + +err_unlock: + spin_unlock_irqrestore(&nic->cb_lock, flags); + + return err; +} + +static int mdio_read(struct net_device *netdev, int addr, int reg) +{ + struct nic *nic = netdev_priv(netdev); + return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); +} + +static void mdio_write(struct net_device *netdev, int addr, int reg, int data) +{ + struct nic *nic = netdev_priv(netdev); + + nic->mdio_ctrl(nic, addr, mdi_write, reg, data); +} + +/* the standard mdio_ctrl() function for usual MII-compliant hardware */ +static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) +{ + u32 data_out = 0; + unsigned int i; + unsigned long flags; + + + /* + * Stratus87247: we shouldn't be writing the MDI control + * register until the Ready bit shows True. Also, since + * manipulation of the MDI control registers is a multi-step + * procedure it should be done under lock. + */ + spin_lock_irqsave(&nic->mdio_lock, flags); + for (i = 100; i; --i) { + if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) + break; + udelay(20); + } + if (unlikely(!i)) { + netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); + spin_unlock_irqrestore(&nic->mdio_lock, flags); + return 0; /* No way to indicate timeout error */ + } + iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); + + for (i = 0; i < 100; i++) { + udelay(20); + if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) + break; + } + spin_unlock_irqrestore(&nic->mdio_lock, flags); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data, data_out); + return (u16)data_out; +} + +/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */ +static u16 mdio_ctrl_phy_82552_v(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + if ((reg == MII_BMCR) && (dir == mdi_write)) { + if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) { + u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, + MII_ADVERTISE); + + /* + * Workaround Si issue where sometimes the part will not + * autoneg to 100Mbps even when advertised. + */ + if (advert & ADVERTISE_100FULL) + data |= BMCR_SPEED100 | BMCR_FULLDPLX; + else if (advert & ADVERTISE_100HALF) + data |= BMCR_SPEED100; + } + } + return mdio_ctrl_hw(nic, addr, dir, reg, data); +} + +/* Fully software-emulated mdio_ctrl() function for cards without + * MII-compliant PHYs. + * For now, this is mainly geared towards 80c24 support; in case of further + * requirements for other types (i82503, ...?) either extend this mechanism + * or split it, whichever is cleaner. 
+ */ +static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + /* might need to allocate a netdev_priv'ed register array eventually + * to be able to record state changes, but for now + * some fully hardcoded register handling ought to be ok I guess. */ + + if (dir == mdi_read) { + switch (reg) { + case MII_BMCR: + /* Auto-negotiation, right? */ + return BMCR_ANENABLE | + BMCR_FULLDPLX; + case MII_BMSR: + return BMSR_LSTATUS /* for mii_link_ok() */ | + BMSR_ANEGCAPABLE | + BMSR_10FULL; + case MII_ADVERTISE: + /* 80c24 is a "combo card" PHY, right? */ + return ADVERTISE_10HALF | + ADVERTISE_10FULL; + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } else { + switch (reg) { + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } +} +static inline int e100_phy_supports_mii(struct nic *nic) +{ + /* for now, just check it by comparing whether we + are using MII software emulation. + */ + return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); +} + +static void e100_get_defaults(struct nic *nic) +{ + struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; + struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; + + /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ + nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision; + if (nic->mac == mac_unknown) + nic->mac = mac_82557_D100_A; + + nic->params.rfds = rfds; + nic->params.cbs = cbs; + + /* Quadwords to DMA into FIFO before starting frame transmit */ + nic->tx_threshold = 0xE0; + + /* no interrupt for every tx completion, delay = 256us if not 557 */ + nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | + ((nic->mac >= mac_82558_D101_A4) ? 
cb_cid : cb_i)); + + /* Template for a freshly allocated RFD */ + nic->blank_rfd.command = 0; + nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); + nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); + + /* MII setup */ + nic->mii.phy_id_mask = 0x1F; + nic->mii.reg_num_mask = 0x1F; + nic->mii.dev = nic->netdev; + nic->mii.mdio_read = mdio_read; + nic->mii.mdio_write = mdio_write; +} + +static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct config *config = &cb->u.config; + u8 *c = (u8 *)config; + struct net_device *netdev = nic->netdev; + + cb->command = cpu_to_le16(cb_config); + + memset(config, 0, sizeof(struct config)); + + config->byte_count = 0x16; /* bytes in this struct */ + config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ + config->direct_rx_dma = 0x1; /* reserved */ + config->standard_tcb = 0x1; /* 1=standard, 0=extended */ + config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ + config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ + config->tx_underrun_retry = 0x3; /* # of underrun retries */ + if (e100_phy_supports_mii(nic)) + config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ + config->pad10 = 0x6; + config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ + config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ + config->ifs = 0x6; /* x16 = inter frame spacing */ + config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ + config->pad15_1 = 0x1; + config->pad15_2 = 0x1; + config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ + config->fc_delay_hi = 0x40; /* time delay for fc frame */ + config->tx_padding = 0x1; /* 1=pad short frames */ + config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ + config->pad18 = 0x1; + config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ + config->pad20_1 = 0x1F; + config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ + config->pad21_1 = 0x5; + + config->adaptive_ifs = nic->adaptive_ifs; + config->loopback = nic->loopback; + + if (nic->mii.force_media && nic->mii.full_duplex) + config->full_duplex_force = 0x1; /* 1=force, 0=auto */ + + if (nic->flags & promiscuous || nic->loopback) { + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + config->promiscuous_mode = 0x1; /* 1=on, 0=off */ + } + + if (unlikely(netdev->features & NETIF_F_RXFCS)) + config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ + + if (nic->flags & multicast_all) + config->multicast_all = 0x1; /* 1=accept, 0=no */ + + /* disable WoL when up */ + if (netif_running(nic->netdev) || !(nic->flags & wol_magic)) + config->magic_packet_disable = 0x1; /* 1=off, 0=on */ + + if (nic->mac >= mac_82558_D101_A4) { + config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ + config->mwi_enable = 0x1; /* 1=enable, 0=disable */ + config->standard_tcb = 0x0; /* 1=standard, 0=extended */ + config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ + if (nic->mac >= mac_82559_D101M) { + config->tno_intr = 0x1; /* TCO stats enable */ + /* Enable TCO in extended config */ + if (nic->mac >= mac_82551_10) { + config->byte_count = 0x20; /* extended bytes */ + config->rx_d102_mode = 0x1; /* GMRC for TCO */ + } + } else { + config->standard_stat_counter = 0x0; + } + } + + if (netdev->features & NETIF_F_RXALL) { + config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + } + + netif_printk(nic, 
hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", + c + 0); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", + c + 8); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", + c + 16); + return 0; +} + +/************************************************************************* +* CPUSaver parameters +* +* All CPUSaver parameters are 16-bit literals that are part of a +* "move immediate value" instruction. By changing the value of +* the literal in the instruction before the code is loaded, the +* driver can change the algorithm. +* +* INTDELAY - This loads the dead-man timer with its initial value. +* When this timer expires the interrupt is asserted, and the +* timer is reset each time a new packet is received. (see +* BUNDLEMAX below to set the limit on number of chained packets) +* The current default is 0x600 or 1536. Experiments show that +* the value should probably stay within the 0x200 - 0x1000. +* +* BUNDLEMAX - +* This sets the maximum number of frames that will be bundled. In +* some situations, such as the TCP windowing algorithm, it may be +* better to limit the growth of the bundle size than let it go as +* high as it can, because that could cause too much added latency. +* The default is six, because this is the number of packets in the +* default TCP window size. A value of 1 would make CPUSaver indicate +* an interrupt for every frame received. If you do not want to put +* a limit on the bundle size, set this value to xFFFF. +* +* BUNDLESMALL - +* This contains a bit-mask describing the minimum size frame that +* will be bundled. The default masks the lower 7 bits, which means +* that any frame less than 128 bytes in length will not be bundled, +* but will instead immediately generate an interrupt. This does +* not affect the current bundle in any way. Any frame that is 128 +* bytes or large will be bundled normally. This feature is meant +* to provide immediate indication of ACK frames in a TCP environment. +* Customers were seeing poor performance when a machine with CPUSaver +* enabled was sending but not receiving. The delay introduced when +* the ACKs were received was enough to reduce total throughput, because +* the sender would sit idle until the ACK was finally seen. +* +* The current default is 0xFF80, which masks out the lower 7 bits. +* This means that any frame which is x7F (127) bytes or smaller +* will cause an immediate interrupt. Because this value must be a +* bit mask, there are only a few valid values that can be used. To +* turn this feature off, the driver can write the value xFFFF to the +* lower word of this instruction (in the same way that the other +* parameters are used). Likewise, a value of 0xF800 (2047) would +* cause an interrupt to be generated for every frame, because all +* standard Ethernet frames are <= 2047 bytes in length. 
+*************************************************************************/ + +/* if you wish to disable the ucode functionality, while maintaining the + * workarounds it provides, set the following defines to: + * BUNDLESMALL 0 + * BUNDLEMAX 1 + * INTDELAY 1 + */ +#define BUNDLESMALL 1 +#define BUNDLEMAX (u16)6 +#define INTDELAY (u16)1536 /* 0x600 */ + +/* Initialize firmware */ +static const struct firmware *e100_request_firmware(struct nic *nic) +{ + const char *fw_name; + const struct firmware *fw = nic->fw; + u8 timer, bundle, min_size; + int err = 0; + bool required = false; + + /* do not load u-code for ICH devices */ + if (nic->flags & ich) + return NULL; + + /* Search for ucode match against h/w revision + * + * Based on comments in the source code for the FreeBSD fxp + * driver, the FIRMWARE_D102E ucode includes both CPUSaver and + * + * "fixes for bugs in the B-step hardware (specifically, bugs + * with Inline Receive)." + * + * So we must fail if it cannot be loaded. + * + * The other microcode files are only required for the optional + * CPUSaver feature. Nice to have, but no reason to fail. + */ + if (nic->mac == mac_82559_D101M) { + fw_name = FIRMWARE_D101M; + } else if (nic->mac == mac_82559_D101S) { + fw_name = FIRMWARE_D101S; + } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { + fw_name = FIRMWARE_D102E; + required = true; + } else { /* No ucode on other devices */ + return NULL; + } + + /* If the firmware has not previously been loaded, request a pointer + * to it. If it was previously loaded, we are reinitializing the + * adapter, possibly in a resume from hibernate, in which case + * request_firmware() cannot be used. + */ + if (!fw) + err = request_firmware(&fw, fw_name, &nic->pdev->dev); + + if (err) { + if (required) { + netif_err(nic, probe, nic->netdev, + "Failed to load firmware \"%s\": %d\n", + fw_name, err); + return ERR_PTR(err); + } else { + netif_info(nic, probe, nic->netdev, + "CPUSaver disabled. Needs \"%s\": %d\n", + fw_name, err); + return NULL; + } + } + + /* Firmware should be precisely UCODE_SIZE (words) plus three bytes + indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ + if (fw->size != UCODE_SIZE * 4 + 3) { + netif_err(nic, probe, nic->netdev, + "Firmware \"%s\" has wrong size %zu\n", + fw_name, fw->size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || + min_size >= UCODE_SIZE) { + netif_err(nic, probe, nic->netdev, + "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", + fw_name, timer, bundle, min_size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* OK, firmware is validated and ready to use. Save a pointer + * to it in the nic */ + nic->fw = fw; + return fw; +} + +static int e100_setup_ucode(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + const struct firmware *fw = (void *)skb; + u8 timer, bundle, min_size; + + /* It's not a real skb; we just abused the fact that e100_exec_cb + will pass it through to here... 
*/ + cb->skb = NULL; + + /* firmware is stored as little endian already */ + memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + /* Insert user-tunable settings in cb->u.ucode */ + cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); + cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); + cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); + + cb->command = cpu_to_le16(cb_ucode | cb_el); + return 0; +} + +static inline int e100_load_ucode_wait(struct nic *nic) +{ + const struct firmware *fw; + int err = 0, counter = 50; + struct cb *cb = nic->cb_to_clean; + + fw = e100_request_firmware(nic); + /* If it's NULL, then no ucode is required */ + if (IS_ERR_OR_NULL(fw)) + return PTR_ERR_OR_ZERO(fw); + + if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) + netif_err(nic, probe, nic->netdev, + "ucode cmd failed with error %d\n", err); + + /* must restart cuc */ + nic->cuc_cmd = cuc_start; + + /* wait for completion */ + e100_write_flush(nic); + udelay(10); + + /* wait for possibly (ouch) 500ms */ + while (!(cb->status & cpu_to_le16(cb_complete))) { + msleep(10); + if (!--counter) break; + } + + /* ack any interrupts, something could have been set */ + iowrite8(~0, &nic->csr->scb.stat_ack); + + /* if the command failed, or is not OK, notify and return */ + if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { + netif_err(nic, probe, nic->netdev, "ucode load failed\n"); + err = -EPERM; + } + + return err; +} + +static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_iaaddr); + memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); + return 0; +} + +static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_dump); + cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + + offsetof(struct mem, dump_buf)); + return 0; +} + +static int e100_phy_check_without_mii(struct nic *nic) +{ + u8 phy_type; + int without_mii; + + phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; + + switch (phy_type) { + case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ + case I82503: /* Non-MII PHY; UNTESTED! */ + case S80C24: /* Non-MII PHY; tested and working */ + /* paragraph from the FreeBSD driver, "FXP_PHY_80C24": + * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter + * doesn't have a programming interface of any sort. The + * media is sensed automatically based on how the link partner + * is configured. This is, in essence, manual configuration. + */ + netif_info(nic, probe, nic->netdev, + "found MII-less i82503 or 80c24 or other PHY\n"); + + nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; + nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ + + /* these might be needed for certain MII-less cards... 
+ * nic->flags |= ich; + * nic->flags |= ich_10h_workaround; */ + + without_mii = 1; + break; + default: + without_mii = 0; + break; + } + return without_mii; +} + +#define NCONFIG_AUTO_SWITCH 0x0080 +#define MII_NSC_CONG MII_RESV1 +#define NSC_CONG_ENABLE 0x0100 +#define NSC_CONG_TXREADY 0x0400 +static int e100_phy_init(struct nic *nic) +{ + struct net_device *netdev = nic->netdev; + u32 addr; + u16 bmcr, stat, id_lo, id_hi, cong; + + /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ + for (addr = 0; addr < 32; addr++) { + nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) + break; + } + if (addr == 32) { + /* uhoh, no PHY detected: check whether we seem to be some + * weird, rare variant which is *known* to not have any MII. + * But do this AFTER MII checking only, since this does + * lookup of EEPROM values which may easily be unreliable. */ + if (e100_phy_check_without_mii(nic)) + return 0; /* simply return and hope for the best */ + else { + /* for unknown cases log a fatal error */ + netif_err(nic, hw, nic->netdev, + "Failed to locate any known PHY, aborting\n"); + return -EAGAIN; + } + } else + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy_addr = %d\n", nic->mii.phy_id); + + /* Get phy ID */ + id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); + id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); + nic->phy = (u32)id_hi << 16 | (u32)id_lo; + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy ID = 0x%08X\n", nic->phy); + + /* Select the phy and isolate the rest */ + for (addr = 0; addr < 32; addr++) { + if (addr != nic->mii.phy_id) { + mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); + } else if (nic->phy != phy_82552_v) { + bmcr = mdio_read(netdev, addr, MII_BMCR); + mdio_write(netdev, addr, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } + } + /* + * Workaround for 82552: + * Clear the ISOLATE bit on selected phy_id last (mirrored on all + * other phy_id's) using bmcr value from addr discovery loop above. + */ + if (nic->phy == phy_82552_v) + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* Handle National tx phys */ +#define NCS_PHY_MODEL_MASK 0xFFF0FFFF + if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { + /* Disable congestion control */ + cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); + cong |= NSC_CONG_TXREADY; + cong &= ~NSC_CONG_ENABLE; + mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); + } + + if (nic->phy == phy_82552_v) { + u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); + + /* assign special tweaked mdio_ctrl() function */ + nic->mdio_ctrl = mdio_ctrl_phy_82552_v; + + /* Workaround Si not advertising flow-control during autoneg */ + advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); + + /* Reset for the above changes to take effect */ + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + bmcr |= BMCR_RESET; + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); + } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && + (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && + (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { + /* enable/disable MDI/MDI-X auto-switching. 
*/ + mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, + nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); + } + + return 0; +} + +static int e100_hw_init(struct nic *nic) +{ + int err = 0; + + e100_hw_reset(nic); + + netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); + if ((err = e100_self_test(nic))) + return err; + + if ((err = e100_phy_init(nic))) + return err; + if ((err = e100_exec_cmd(nic, cuc_load_base, 0))) + return err; + if ((err = e100_exec_cmd(nic, ruc_load_base, 0))) + return err; + if ((err = e100_load_ucode_wait(nic))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_configure))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_addr, + nic->dma_addr + offsetof(struct mem, stats)))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0))) + return err; + + e100_disable_irq(nic); + + return 0; +} + +static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct net_device *netdev = nic->netdev; + struct netdev_hw_addr *ha; + u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); + + cb->command = cpu_to_le16(cb_multi); + cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == count) + break; + memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, + ETH_ALEN); + } + return 0; +} + +static void e100_set_multicast_list(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "mc_count=%d, flags=0x%04X\n", + netdev_mc_count(netdev), netdev->flags); + + if (netdev->flags & IFF_PROMISC) + nic->flags |= promiscuous; + else + nic->flags &= ~promiscuous; + + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS) + nic->flags |= multicast_all; + else + nic->flags &= ~multicast_all; + + e100_exec_cb(nic, NULL, e100_configure); + e100_exec_cb(nic, NULL, e100_multi); +} + +static void e100_update_stats(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct net_device_stats *ns = &dev->stats; + struct stats *s = &nic->mem->stats; + __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : + (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : + &s->complete; + + /* Device's stats reporting may take several microseconds to + * complete, so we're always waiting for results of the + * previous command. 
*/ + + if (*complete == cpu_to_le32(cuc_dump_reset_complete)) { + *complete = 0; + nic->tx_frames = le32_to_cpu(s->tx_good_frames); + nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); + ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); + ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); + ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); + ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); + ns->collisions += nic->tx_collisions; + ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + + le32_to_cpu(s->tx_lost_crs); + nic->rx_short_frame_errors += + le32_to_cpu(s->rx_short_frame_errors); + ns->rx_length_errors = nic->rx_short_frame_errors + + nic->rx_over_length_errors; + ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); + ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); + ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); + ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + + le32_to_cpu(s->rx_alignment_errors) + + le32_to_cpu(s->rx_short_frame_errors) + + le32_to_cpu(s->rx_cdt_errors); + nic->tx_deferred += le32_to_cpu(s->tx_deferred); + nic->tx_single_collisions += + le32_to_cpu(s->tx_single_collisions); + nic->tx_multiple_collisions += + le32_to_cpu(s->tx_multiple_collisions); + if (nic->mac >= mac_82558_D101_A4) { + nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); + nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); + nic->rx_fc_unsupported += + le32_to_cpu(s->fc_rcv_unsupported); + if (nic->mac >= mac_82559_D101M) { + nic->tx_tco_frames += + le16_to_cpu(s->xmt_tco_frames); + nic->rx_tco_frames += + le16_to_cpu(s->rcv_tco_frames); + } + } + } + + + if (e100_exec_cmd(nic, cuc_dump_reset, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_dump_reset failed\n"); +} + +static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) +{ + /* Adjust inter-frame-spacing (IFS) between two transmits if + * we're getting collisions on a half-duplex connection. */ + + if (duplex == DUPLEX_HALF) { + u32 prev = nic->adaptive_ifs; + u32 min_frames = (speed == SPEED_100) ? 1000 : 100; + + if ((nic->tx_frames / 32 < nic->tx_collisions) && + (nic->tx_frames > min_frames)) { + if (nic->adaptive_ifs < 60) + nic->adaptive_ifs += 5; + } else if (nic->tx_frames < min_frames) { + if (nic->adaptive_ifs >= 5) + nic->adaptive_ifs -= 5; + } + if (nic->adaptive_ifs != prev) + e100_exec_cb(nic, NULL, e100_configure); + } +} + +static void e100_watchdog(struct timer_list *t) +{ + struct nic *nic = from_timer(nic, t, watchdog); + struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; + u32 speed; + + netif_printk(nic, timer, KERN_DEBUG, nic->netdev, + "right now = %ld\n", jiffies); + + /* mii library handles link maintenance tasks */ + + mii_ethtool_gset(&nic->mii, &cmd); + speed = ethtool_cmd_speed(&cmd); + + if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", + speed == SPEED_100 ? 100 : 10, + cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); + } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Down\n"); + } + + mii_check_link(&nic->mii); + + /* Software generated interrupt to recover from (rare) Rx + * allocation failure. 
+ * Unfortunately have to use a spinlock to not re-enable interrupts + * accidentally, due to hardware that shares a register between the + * interrupt mask bit and the SW Interrupt generation bit */ + spin_lock_irq(&nic->cmd_lock); + iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irq(&nic->cmd_lock); + + e100_update_stats(nic); + e100_adjust_adaptive_ifs(nic, speed, cmd.duplex); + + if (nic->mac <= mac_82557_D100_C) + /* Issue a multicast command to workaround a 557 lock up */ + e100_set_multicast_list(nic->netdev); + + if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) + /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ + nic->flags |= ich_10h_workaround; + else + nic->flags &= ~ich_10h_workaround; + + mod_timer(&nic->watchdog, + round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); +} + +static int e100_xmit_prepare(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + dma_addr_t dma_addr; + cb->command = nic->tx_command; + + dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + /* If we can't map the skb, have the upper layer try later */ + if (dma_mapping_error(&nic->pdev->dev, dma_addr)) + return -ENOMEM; + + /* + * Use the last 4 bytes of the SKB payload packet as the CRC, used for + * testing, ie sending frames with bad CRC. + */ + if (unlikely(skb->no_fcs)) + cb->command |= cpu_to_le16(cb_tx_nc); + else + cb->command &= ~cpu_to_le16(cb_tx_nc); + + /* interrupt every 16 packets regardless of delay */ + if ((nic->cbs_avail & ~15) == nic->cbs_avail) + cb->command |= cpu_to_le16(cb_i); + cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); + cb->u.tcb.tcb_byte_count = 0; + cb->u.tcb.threshold = nic->tx_threshold; + cb->u.tcb.tbd_count = 1; + cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); + cb->u.tcb.tbd.size = cpu_to_le16(skb->len); + skb_tx_timestamp(skb); + return 0; +} + +static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + if (nic->flags & ich_10h_workaround) { + /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. + Issue a NOP command followed by a 1us delay before + issuing the Tx command. */ + if (e100_exec_cmd(nic, cuc_nop, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_nop failed\n"); + udelay(1); + } + + err = e100_exec_cb(nic, skb, e100_xmit_prepare); + + switch (err) { + case -ENOSPC: + /* We queued the skb, but now we're out of space. */ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "No space for CB\n"); + netif_stop_queue(netdev); + break; + case -ENOMEM: + /* This is a hard error - log it. 
*/ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "Out of Tx resources, returning skb\n"); + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +static int e100_tx_clean(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct cb *cb; + int tx_cleaned = 0; + + spin_lock(&nic->cb_lock); + + /* Clean CBs marked complete */ + for (cb = nic->cb_to_clean; + cb->status & cpu_to_le16(cb_complete); + cb = nic->cb_to_clean = cb->next) { + dma_rmb(); /* read skb after status */ + netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, + "cb[%d]->status = 0x%04X\n", + (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), + cb->status); + + if (likely(cb->skb != NULL)) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += cb->skb->len; + + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + tx_cleaned = 1; + } + cb->status = 0; + nic->cbs_avail++; + } + + spin_unlock(&nic->cb_lock); + + /* Recover from running out of Tx resources in xmit_frame */ + if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) + netif_wake_queue(nic->netdev); + + return tx_cleaned; +} + +static void e100_clean_cbs(struct nic *nic) +{ + if (nic->cbs) { + while (nic->cbs_avail != nic->params.cbs.count) { + struct cb *cb = nic->cb_to_clean; + if (cb->skb) { + dma_unmap_single(&nic->pdev->dev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + DMA_TO_DEVICE); + dev_kfree_skb(cb->skb); + } + nic->cb_to_clean = nic->cb_to_clean->next; + nic->cbs_avail++; + } + dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); + nic->cbs = NULL; + nic->cbs_avail = 0; + } + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = + nic->cbs; +} + +static int e100_alloc_cbs(struct nic *nic) +{ + struct cb *cb; + unsigned int i, count = nic->params.cbs.count; + + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; + nic->cbs_avail = 0; + + nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL, + &nic->cbs_dma_addr); + if (!nic->cbs) + return -ENOMEM; + + for (cb = nic->cbs, i = 0; i < count; cb++, i++) { + cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; + cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; + + cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); + cb->link = cpu_to_le32(nic->cbs_dma_addr + + ((i+1) % count) * sizeof(struct cb)); + } + + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; + nic->cbs_avail = count; + + return 0; +} + +static inline void e100_start_receiver(struct nic *nic, struct rx *rx) +{ + if (!nic->rxs) return; + if (RU_SUSPENDED != nic->ru_running) return; + + /* handle init time starts */ + if (!rx) rx = nic->rxs; + + /* (Re)start RU if suspended or idle and RFA is non-NULL */ + if (rx->skb) { + e100_exec_cmd(nic, ruc_start, rx->dma_addr); + nic->ru_running = RU_RUNNING; + } +} + +#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) +{ + if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) + return -ENOMEM; + + /* Init, and map the RFD. 
*/ + skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { + dev_kfree_skb_any(rx->skb); + rx->skb = NULL; + rx->dma_addr = 0; + return -ENOMEM; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + dma_sync_single_for_device(&nic->pdev->dev, + rx->prev->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + } + + return 0; +} + +static int e100_rx_indicate(struct nic *nic, struct rx *rx, + unsigned int *work_done, unsigned int work_to_do) +{ + struct net_device *dev = nic->netdev; + struct sk_buff *skb = rx->skb; + struct rfd *rfd = (struct rfd *)skb->data; + u16 rfd_status, actual_size; + u16 fcs_pad = 0; + + if (unlikely(work_done && *work_done >= work_to_do)) + return -EAGAIN; + + /* Need to sync before taking a peek at cb_complete bit */ + dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); + rfd_status = le16_to_cpu(rfd->status); + + netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, + "status=0x%04X\n", rfd_status); + dma_rmb(); /* read size after status bit */ + + /* If data isn't ready, nothing to indicate */ + if (unlikely(!(rfd_status & cb_complete))) { + /* If the next buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling + * interrupts */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), + DMA_FROM_DEVICE); + return -ENODATA; + } + + /* Get actual data size */ + if (unlikely(dev->features & NETIF_F_RXFCS)) + fcs_pad = 4; + actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; + if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) + actual_size = RFD_BUF_LEN - sizeof(struct rfd); + + /* Get data */ + dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); + + /* If this buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling interrupts. + * This can happen when the RU sees the size change but also sees + * the el bit set. */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) { + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + } + + /* Pull off the RFD and put the actual data (minus eth hdr) */ + skb_reserve(skb, sizeof(struct rfd)); + skb_put(skb, actual_size); + skb->protocol = eth_type_trans(skb, nic->netdev); + + /* If we are receiving all frames, then don't bother + * checking for errors. + */ + if (unlikely(dev->features & NETIF_F_RXALL)) { + if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) + /* Received oversized frame, but keep it. 
*/ + nic->rx_over_length_errors++; + goto process_skb; + } + + if (unlikely(!(rfd_status & cb_ok))) { + /* Don't indicate if hardware indicates errors */ + dev_kfree_skb_any(skb); + } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) { + /* Don't indicate oversized frames */ + nic->rx_over_length_errors++; + dev_kfree_skb_any(skb); + } else { +process_skb: + dev->stats.rx_packets++; + dev->stats.rx_bytes += (actual_size - fcs_pad); + netif_receive_skb(skb); + if (work_done) + (*work_done)++; + } + + rx->skb = NULL; + + return 0; +} + +static void e100_rx_clean(struct nic *nic, unsigned int *work_done, + unsigned int work_to_do) +{ + struct rx *rx; + int restart_required = 0, err = 0; + struct rx *old_before_last_rx, *new_before_last_rx; + struct rfd *old_before_last_rfd, *new_before_last_rfd; + + /* Indicate newly arrived packets */ + for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { + err = e100_rx_indicate(nic, rx, work_done, work_to_do); + /* Hit quota or no more to clean */ + if (-EAGAIN == err || -ENODATA == err) + break; + } + + + /* On EAGAIN, hit quota so have more work to do, restart once + * cleanup is complete. + * Else, are we already rnr? then pay attention!!! this ensures that + * the state machine progression never allows a start with a + * partially cleaned list, avoiding a race between hardware + * and rx_to_clean when in NAPI mode */ + if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) + restart_required = 1; + + old_before_last_rx = nic->rx_to_use->prev->prev; + old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; + + /* Alloc new skbs to refill list */ + for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { + if (unlikely(e100_rx_alloc_skb(nic, rx))) + break; /* Better luck next time (see watchdog) */ + } + + new_before_last_rx = nic->rx_to_use->prev->prev; + if (new_before_last_rx != old_before_last_rx) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer + * without worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this + * buffer. + * When the hardware hits the before last buffer with el-bit + * and size of 0, it will RNR interrupt, the RUS will go into + * the No Resources state. It will not complete nor write to + * this buffer. */ + new_before_last_rfd = + (struct rfd *)new_before_last_rx->skb->data; + new_before_last_rfd->size = 0; + new_before_last_rfd->command |= cpu_to_le16(cb_el); + dma_sync_single_for_device(&nic->pdev->dev, + new_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + + /* Now that we have a new stopping point, we can clear the old + * stopping point. We must sync twice to get the proper + * ordering on the hardware side of things. */ + old_before_last_rfd->command &= ~cpu_to_le16(cb_el); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + + ETH_FCS_LEN); + dma_sync_single_for_device(&nic->pdev->dev, + old_before_last_rx->dma_addr, + sizeof(struct rfd), + DMA_BIDIRECTIONAL); + } + + if (restart_required) { + // ack the rnr? 
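+		/* Writing the stat_ack_rnr bit to the SCB stat/ack register
+		 * acknowledges the pending receive-no-resources interrupt, so
+		 * the receive unit can safely be restarted below. */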
+ iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); + e100_start_receiver(nic, nic->rx_to_clean); + if (work_done) + (*work_done)++; + } +} + +static void e100_rx_clean_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + + nic->ru_running = RU_UNINITIALIZED; + + if (nic->rxs) { + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + if (rx->skb) { + dma_unmap_single(&nic->pdev->dev, + rx->dma_addr, RFD_BUF_LEN, + DMA_BIDIRECTIONAL); + dev_kfree_skb(rx->skb); + } + } + kfree(nic->rxs); + nic->rxs = NULL; + } + + nic->rx_to_use = nic->rx_to_clean = NULL; +} + +static int e100_rx_alloc_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + struct rfd *before_last; + + nic->rx_to_use = nic->rx_to_clean = NULL; + nic->ru_running = RU_UNINITIALIZED; + + if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL))) + return -ENOMEM; + + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; + rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; + if (e100_rx_alloc_skb(nic, rx)) { + e100_rx_clean_list(nic); + return -ENOMEM; + } + } + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer without + * worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this buffer. + * When the hardware hits the before last buffer with el-bit and size + * of 0, it will RNR interrupt, the RU will go into the No Resources + * state. It will not complete nor write to this buffer. */ + rx = nic->rxs->prev->prev; + before_last = (struct rfd *)rx->skb->data; + before_last->command |= cpu_to_le16(cb_el); + before_last->size = 0; + dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, + sizeof(struct rfd), DMA_BIDIRECTIONAL); + + nic->rx_to_use = nic->rx_to_clean = nic->rxs; + nic->ru_running = RU_SUSPENDED; + + return 0; +} + +static irqreturn_t e100_intr(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct nic *nic = netdev_priv(netdev); + u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); + + netif_printk(nic, intr, KERN_DEBUG, nic->netdev, + "stat_ack = 0x%02X\n", stat_ack); + + if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ + stat_ack == stat_ack_not_present) /* Hardware is ejected */ + return IRQ_NONE; + + /* Ack interrupt(s) */ + iowrite8(stat_ack, &nic->csr->scb.stat_ack); + + /* We hit Receive No Resource (RNR); restart RU after cleaning */ + if (stat_ack & stat_ack_rnr) + nic->ru_running = RU_SUSPENDED; + + if (likely(napi_schedule_prep(&nic->napi))) { + e100_disable_irq(nic); + __napi_schedule(&nic->napi); + } + + return IRQ_HANDLED; +} + +static int e100_poll(struct napi_struct *napi, int budget) +{ + struct nic *nic = container_of(napi, struct nic, napi); + unsigned int work_done = 0; + + e100_rx_clean(nic, &work_done, budget); + e100_tx_clean(nic); + + /* If budget fully consumed, continue polling */ + if (work_done == budget) + return budget; + + /* only re-enable interrupt if stack agrees polling is really done */ + if (likely(napi_complete_done(napi, work_done))) + e100_enable_irq(nic); + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void e100_netpoll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_disable_irq(nic); + e100_intr(nic->pdev->irq, netdev); + e100_tx_clean(nic); + e100_enable_irq(nic); +} +#endif + +static int e100_set_mac_address(struct net_device *netdev, void *p) +{ + 
struct nic *nic = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, addr->sa_data); + e100_exec_cb(nic, NULL, e100_setup_iaaddr); + + return 0; +} + +static int e100_asf(struct nic *nic) +{ + /* ASF can be enabled from eeprom */ + return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && + (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && + !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && + ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); +} + +static int e100_up(struct nic *nic) +{ + int err; + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_rx_clean_list; + if ((err = e100_hw_init(nic))) + goto err_clean_cbs; + e100_set_multicast_list(nic->netdev); + e100_start_receiver(nic, NULL); + mod_timer(&nic->watchdog, jiffies); + if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, + nic->netdev->name, nic->netdev))) + goto err_no_irq; + netif_wake_queue(nic->netdev); + napi_enable(&nic->napi); + /* enable ints _after_ enabling poll, preventing a race between + * disable ints+schedule */ + e100_enable_irq(nic); + return 0; + +err_no_irq: + del_timer_sync(&nic->watchdog); +err_clean_cbs: + e100_clean_cbs(nic); +err_rx_clean_list: + e100_rx_clean_list(nic); + return err; +} + +static void e100_down(struct nic *nic) +{ + /* wait here for poll to complete */ + napi_disable(&nic->napi); + netif_stop_queue(nic->netdev); + e100_hw_reset(nic); + free_irq(nic->pdev->irq, nic->netdev); + del_timer_sync(&nic->watchdog); + netif_carrier_off(nic->netdev); + e100_clean_cbs(nic); + e100_rx_clean_list(nic); +} + +static void e100_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct nic *nic = netdev_priv(netdev); + + /* Reset outside of interrupt context, to avoid request_irq + * in interrupt context */ + schedule_work(&nic->tx_timeout_task); +} + +static void e100_tx_timeout_task(struct work_struct *work) +{ + struct nic *nic = container_of(work, struct nic, tx_timeout_task); + struct net_device *netdev = nic->netdev; + + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); + + rtnl_lock(); + if (netif_running(netdev)) { + e100_down(netdev_priv(netdev)); + e100_up(netdev_priv(netdev)); + } + rtnl_unlock(); +} + +static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) +{ + int err; + struct sk_buff *skb; + + /* Use driver resources to perform internal MAC or PHY + * loopback test. A single packet is prepared and transmitted + * in loopback mode, and the test passes if the received + * packet compares byte-for-byte to the transmitted packet. 
*/ + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_clean_rx; + + /* ICH PHY loopback is broken so do MAC loopback instead */ + if (nic->flags & ich && loopback_mode == lb_phy) + loopback_mode = lb_mac; + + nic->loopback = loopback_mode; + if ((err = e100_hw_init(nic))) + goto err_loopback_none; + + if (loopback_mode == lb_phy) + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, + BMCR_LOOPBACK); + + e100_start_receiver(nic, NULL); + + if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { + err = -ENOMEM; + goto err_loopback_none; + } + skb_put(skb, ETH_DATA_LEN); + memset(skb->data, 0xFF, ETH_DATA_LEN); + e100_xmit_frame(skb, nic->netdev); + + msleep(10); + + dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr, + RFD_BUF_LEN, DMA_BIDIRECTIONAL); + + if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), + skb->data, ETH_DATA_LEN)) + err = -EAGAIN; + +err_loopback_none: + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); + nic->loopback = lb_none; + e100_clean_cbs(nic); + e100_hw_reset(nic); +err_clean_rx: + e100_rx_clean_list(nic); + return err; +} + +#define MII_LED_CONTROL 0x1B +#define E100_82552_LED_OVERRIDE 0x19 +#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ +#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ + +static int e100_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + + mii_ethtool_get_link_ksettings(&nic->mii, cmd); + + return 0; +} + +static int e100_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); + err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); + e100_exec_cb(nic, NULL, e100_configure); + + return err; +} + +static void e100_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct nic *nic = netdev_priv(netdev); + strscpy(info->driver, DRV_NAME, sizeof(info->driver)); + strscpy(info->bus_info, pci_name(nic->pdev), + sizeof(info->bus_info)); +} + +#define E100_PHY_REGS 0x1D +static int e100_get_regs_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + /* We know the number of registers, and the size of the dump buffer. + * Calculate the total size in bytes. + */ + return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); +} + +static void e100_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nic *nic = netdev_priv(netdev); + u32 *buff = p; + int i; + + regs->version = (1 << 24) | nic->pdev->revision; + buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | + ioread8(&nic->csr->scb.cmd_lo) << 16 | + ioread16(&nic->csr->scb.status); + for (i = 0; i < E100_PHY_REGS; i++) + /* Note that we read the registers in reverse order. This + * ordering is the ABI apparently used by ethtool and other + * applications. + */ + buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, + E100_PHY_REGS - 1 - i); + memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); + e100_exec_cb(nic, NULL, e100_dump); + msleep(10); + memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, + sizeof(nic->mem->dump_buf)); +} + +static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; + wol->wolopts = (nic->flags & wol_magic) ? 
WAKE_MAGIC : 0; +} + +static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + + if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || + !device_can_wakeup(&nic->pdev->dev)) + return -EOPNOTSUPP; + + if (wol->wolopts) + nic->flags |= wol_magic; + else + nic->flags &= ~wol_magic; + + device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); + + e100_exec_cb(nic, NULL, e100_configure); + + return 0; +} + +static u32 e100_get_msglevel(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->msg_enable; +} + +static void e100_set_msglevel(struct net_device *netdev, u32 value) +{ + struct nic *nic = netdev_priv(netdev); + nic->msg_enable = value; +} + +static int e100_nway_reset(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_nway_restart(&nic->mii); +} + +static u32 e100_get_link(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_link_ok(&nic->mii); +} + +static int e100_get_eeprom_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->eeprom_wc << 1; +} + +#define E100_EEPROM_MAGIC 0x1234 +static int e100_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + eeprom->magic = E100_EEPROM_MAGIC; + memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); + + return 0; +} + +static int e100_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + if (eeprom->magic != E100_EEPROM_MAGIC) + return -EINVAL; + + memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); + + return e100_eeprom_save(nic, eeprom->offset >> 1, + (eeprom->len >> 1) + 1); +} + +static void e100_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + ring->rx_max_pending = rfds->max; + ring->tx_max_pending = cbs->max; + ring->rx_pending = rfds->count; + ring->tx_pending = cbs->count; +} + +static int e100_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(netdev)) + e100_down(nic); + rfds->count = max(ring->rx_pending, rfds->min); + rfds->count = min(rfds->count, rfds->max); + cbs->count = max(ring->tx_pending, cbs->min); + cbs->count = min(cbs->count, cbs->max); + netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", + rfds->count, cbs->count); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} + +static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)", + "Eeprom test (on/offline)", + "Self test (offline)", + "Mac loopback (offline)", + "Phy loopback (offline)", +}; +#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test) + +static void e100_diag_test(struct net_device *netdev, + struct ethtool_test *test, u64 *data) +{ + struct ethtool_cmd cmd; + struct nic *nic = netdev_priv(netdev); + int i; + + memset(data, 0, E100_TEST_LEN * 
sizeof(u64)); + data[0] = !mii_link_ok(&nic->mii); + data[1] = e100_eeprom_load(nic); + if (test->flags & ETH_TEST_FL_OFFLINE) { + + /* save speed, duplex & autoneg settings */ + mii_ethtool_gset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_down(nic); + data[2] = e100_self_test(nic); + data[3] = e100_loopback_test(nic, lb_mac); + data[4] = e100_loopback_test(nic, lb_phy); + + /* restore speed, duplex & autoneg settings */ + mii_ethtool_sset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_up(nic); + } + for (i = 0; i < E100_TEST_LEN; i++) + test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; + + msleep_interruptible(4 * 1000); +} + +static int e100_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct nic *nic = netdev_priv(netdev); + enum led_state { + led_on = 0x01, + led_off = 0x04, + led_on_559 = 0x05, + led_on_557 = 0x07, + }; + u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : + MII_LED_CONTROL; + u16 leds = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; + + case ETHTOOL_ID_ON: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : + (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; + break; + + case ETHTOOL_ID_OFF: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off; + break; + + case ETHTOOL_ID_INACTIVE: + break; + } + + mdio_write(netdev, nic->mii.phy_id, led_reg, leds); + return 0; +} + +static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + /* device-specific stats */ + "tx_deferred", "tx_single_collisions", "tx_multi_collisions", + "tx_flow_control_pause", "rx_flow_control_pause", + "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", + "rx_short_frame_errors", "rx_over_length_errors", +}; +#define E100_NET_STATS_LEN 21 +#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) + +static int e100_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E100_TEST_LEN; + case ETH_SS_STATS: + return E100_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e100_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nic *nic = netdev_priv(netdev); + int i; + + for (i = 0; i < E100_NET_STATS_LEN; i++) + data[i] = ((unsigned long *)&netdev->stats)[i]; + + data[i++] = nic->tx_deferred; + data[i++] = nic->tx_single_collisions; + data[i++] = nic->tx_multiple_collisions; + data[i++] = nic->tx_fc_pause; + data[i++] = nic->rx_fc_pause; + data[i++] = nic->rx_fc_unsupported; + data[i++] = nic->tx_tco_frames; + data[i++] = nic->rx_tco_frames; + data[i++] = nic->rx_short_frame_errors; + data[i++] = nic->rx_over_length_errors; +} + +static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e100_gstrings_test, sizeof(e100_gstrings_test)); + break; + case ETH_SS_STATS: + memcpy(data, e100_gstrings_stats, sizeof(e100_gstrings_stats)); + break; + } +} + +static const struct ethtool_ops e100_ethtool_ops = { + .get_drvinfo = e100_get_drvinfo, + .get_regs_len = e100_get_regs_len, + .get_regs = e100_get_regs, + .get_wol = e100_get_wol, + .set_wol 
= e100_set_wol, + .get_msglevel = e100_get_msglevel, + .set_msglevel = e100_set_msglevel, + .nway_reset = e100_nway_reset, + .get_link = e100_get_link, + .get_eeprom_len = e100_get_eeprom_len, + .get_eeprom = e100_get_eeprom, + .set_eeprom = e100_set_eeprom, + .get_ringparam = e100_get_ringparam, + .set_ringparam = e100_set_ringparam, + .self_test = e100_diag_test, + .get_strings = e100_get_strings, + .set_phys_id = e100_set_phys_id, + .get_ethtool_stats = e100_get_ethtool_stats, + .get_sset_count = e100_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e100_get_link_ksettings, + .set_link_ksettings = e100_set_link_ksettings, +}; + +static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct nic *nic = netdev_priv(netdev); + + return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); +} + +static int e100_alloc(struct nic *nic) +{ + nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem), + &nic->dma_addr, GFP_KERNEL); + return nic->mem ? 0 : -ENOMEM; +} + +static void e100_free(struct nic *nic) +{ + if (nic->mem) { + dma_free_coherent(&nic->pdev->dev, sizeof(struct mem), + nic->mem, nic->dma_addr); + nic->mem = NULL; + } +} + +static int e100_open(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err = 0; + + netif_carrier_off(netdev); + if ((err = e100_up(nic))) + netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); + return err; +} + +static int e100_close(struct net_device *netdev) +{ + e100_down(netdev_priv(netdev)); + return 0; +} + +static int e100_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct nic *nic = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + e100_exec_cb(nic, NULL, e100_configure); + return 1; +} + +static const struct net_device_ops e100_netdev_ops = { + .ndo_open = e100_open, + .ndo_stop = e100_close, + .ndo_start_xmit = e100_xmit_frame, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = e100_set_multicast_list, + .ndo_set_mac_address = e100_set_mac_address, + .ndo_eth_ioctl = e100_do_ioctl, + .ndo_tx_timeout = e100_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e100_netpoll, +#endif + .ndo_set_features = e100_set_features, +}; + +static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct nic *nic; + int err; + + if (!(netdev = alloc_etherdev(sizeof(struct nic)))) + return -ENOMEM; + + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; + + netdev->netdev_ops = &e100_netdev_ops; + netdev->ethtool_ops = &e100_ethtool_ops; + netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + nic = netdev_priv(netdev); + netif_napi_add_weight(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); + nic->netdev = netdev; + nic->pdev = pdev; + nic->msg_enable = (1 << debug) - 1; + nic->mdio_ctrl = mdio_ctrl_hw; + pci_set_drvdata(pdev, netdev); + + if ((err = pci_enable_device(pdev))) { + netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); + goto err_out_free_dev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); + err = -ENODEV; + goto err_out_disable_pdev; 
+ } + + if ((err = pci_request_regions(pdev, DRV_NAME))) { + netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable_pdev; + } + + if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { + netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + if (use_io) + netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); + + nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr)); + if (!nic->csr) { + netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + if (ent->driver_data) + nic->flags |= ich; + else + nic->flags &= ~ich; + + e100_get_defaults(nic); + + /* D100 MAC doesn't allow rx of vlan packets with normal MTU */ + if (nic->mac < mac_82558_D101_A4) + netdev->features |= NETIF_F_VLAN_CHALLENGED; + + /* locks must be initialized before calling hw_reset */ + spin_lock_init(&nic->cb_lock); + spin_lock_init(&nic->cmd_lock); + spin_lock_init(&nic->mdio_lock); + + /* Reset the device before pci_set_master() in case device is in some + * funky state and has an interrupt pending - hint: we don't have the + * interrupt handler registered yet. */ + e100_hw_reset(nic); + + pci_set_master(pdev); + + timer_setup(&nic->watchdog, e100_watchdog, 0); + + INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); + + if ((err = e100_alloc(nic))) { + netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); + goto err_out_iounmap; + } + + if ((err = e100_eeprom_load(nic))) + goto err_out_free; + + e100_phy_init(nic); + + eth_hw_addr_set(netdev, (u8 *)nic->eeprom); + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!eeprom_bad_csum_allow) { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); + err = -EAGAIN; + goto err_out_free; + } else { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); + } + } + + /* Wol magic packet can be enabled from eeprom */ + if ((nic->mac >= mac_82558_D101_A4) && + (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { + nic->flags |= wol_magic; + device_set_wakeup_enable(&pdev->dev, true); + } + + /* ack any pending wake events, disable PME */ + pci_pme_active(pdev, false); + + strcpy(netdev->name, "eth%d"); + if ((err = register_netdev(netdev))) { + netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n"); + goto err_out_free; + } + nic->cbs_pool = dma_pool_create(netdev->name, + &nic->pdev->dev, + nic->params.cbs.max * sizeof(struct cb), + sizeof(u32), + 0); + if (!nic->cbs_pool) { + netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); + err = -ENOMEM; + goto err_out_pool; + } + netif_info(nic, probe, nic->netdev, + "addr 0x%llx, irq %d, MAC addr %pM\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), + pdev->irq, netdev->dev_addr); + + return 0; + +err_out_pool: + unregister_netdev(netdev); +err_out_free: + e100_free(nic); +err_out_iounmap: + pci_iounmap(pdev, nic->csr); +err_out_free_res: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out_free_dev: + free_netdev(netdev); + return err; +} + +static void e100_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netdev) { + struct nic *nic = netdev_priv(netdev); + unregister_netdev(netdev); + e100_free(nic); + pci_iounmap(pdev, nic->csr); + dma_pool_destroy(nic->cbs_pool); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ +#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ +#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ +static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + e100_down(nic); + + if ((nic->flags & wol_magic) | e100_asf(nic)) { + /* enable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, smartspeed | + E100_82552_REV_ANEG | E100_82552_ANEG_NOW); + } + *enable_wake = true; + } else { + *enable_wake = false; + } + + pci_disable_device(pdev); +} + +static int __e100_power_off(struct pci_dev *pdev, bool wake) +{ + if (wake) + return pci_prepare_to_sleep(pdev); + + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int __maybe_unused e100_suspend(struct device *dev_d) +{ + bool wake; + + __e100_shutdown(to_pci_dev(dev_d), &wake); + + return 0; +} + +static int __maybe_unused e100_resume(struct device *dev_d) +{ + struct net_device *netdev = dev_get_drvdata(dev_d); + struct nic *nic = netdev_priv(netdev); + int err; + + err = pci_enable_device(to_pci_dev(dev_d)); + if (err) { + netdev_err(netdev, "Resume cannot enable PCI device, aborting\n"); + return err; + } + pci_set_master(to_pci_dev(dev_d)); + + /* disable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, + smartspeed & ~(E100_82552_REV_ANEG)); + } + + if (netif_running(netdev)) + e100_up(nic); + + netif_device_attach(netdev); + + return 0; +} + +static void e100_shutdown(struct pci_dev *pdev) +{ + bool wake; + __e100_shutdown(pdev, &wake); + if (system_state == SYSTEM_POWER_OFF) + __e100_power_off(pdev, wake); +} + +/* ------------------ PCI Error Recovery infrastructure -------------- */ +/** + * e100_io_error_detected - called when PCI error is detected. + * @pdev: Pointer to PCI device + * @state: The current pci connection state + */ +static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e100_down(nic); + pci_disable_device(pdev); + + /* Request a slot reset. 
*/ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e100_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch. + */ +static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + pr_err("Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + /* Only one device per card can do a reset */ + if (0 != PCI_FUNC(pdev->devfn)) + return PCI_ERS_RESULT_RECOVERED; + e100_hw_reset(nic); + e100_phy_init(nic); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e100_io_resume - resume normal operations + * @pdev: Pointer to PCI device + * + * Resume normal operations after an error recovery + * sequence has been completed. + */ +static void e100_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, PCI_D0, 0); + + netif_device_attach(netdev); + if (netif_running(netdev)) { + e100_open(netdev); + mod_timer(&nic->watchdog, jiffies); + } +} + +static const struct pci_error_handlers e100_err_handler = { + .error_detected = e100_io_error_detected, + .slot_reset = e100_io_slot_reset, + .resume = e100_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume); + +static struct pci_driver e100_driver = { + .name = DRV_NAME, + .id_table = e100_id_table, + .probe = e100_probe, + .remove = e100_remove, + + /* Power Management hooks */ + .driver.pm = &e100_pm_ops, + + .shutdown = e100_shutdown, + .err_handler = &e100_err_handler, +}; + +static int __init e100_init_module(void) +{ + if (((1 << debug) - 1) & NETIF_MSG_DRV) { + pr_info("%s\n", DRV_DESCRIPTION); + pr_info("%s\n", DRV_COPYRIGHT); + } + return pci_register_driver(&e100_driver); +} + +static void __exit e100_cleanup_module(void) +{ + pci_unregister_driver(&e100_driver); +} + +module_init(e100_init_module); +module_exit(e100_cleanup_module); diff --git a/devices/e1000/Makefile.am b/devices/e1000/Makefile.am index 5c5513dc..763f68c2 100644 --- a/devices/e1000/Makefile.am +++ b/devices/e1000/Makefile.am @@ -76,6 +76,14 @@ EXTRA_DIST = \ e1000-3.8-orig.h \ e1000-4.4-ethercat.h \ e1000-4.4-orig.h \ + e1000-5.10-ethercat.h \ + e1000-5.10-orig.h \ + e1000-5.14-ethercat.h \ + e1000-5.14-orig.h \ + e1000-5.15-ethercat.h \ + e1000-5.15-orig.h \ + e1000-6.1-ethercat.h \ + e1000-6.1-orig.h \ e1000_ethtool-2.6.13-ethercat.c \ e1000_ethtool-2.6.13-orig.c \ e1000_ethtool-2.6.18-ethercat.c \ @@ -122,6 +130,14 @@ EXTRA_DIST = \ e1000_ethtool-3.8-orig.c \ e1000_ethtool-4.4-ethercat.c \ e1000_ethtool-4.4-orig.c \ + e1000_ethtool-5.10-ethercat.c \ + e1000_ethtool-5.10-orig.c \ + e1000_ethtool-5.14-ethercat.c \ + e1000_ethtool-5.14-orig.c \ + e1000_ethtool-5.15-ethercat.c \ + e1000_ethtool-5.15-orig.c \ + e1000_ethtool-6.1-ethercat.c \ + e1000_ethtool-6.1-orig.c \ e1000_hw-2.6.13-ethercat.c \ e1000_hw-2.6.13-ethercat.h \ e1000_hw-2.6.13-orig.c \ @@ -214,6 +230,22 @@ EXTRA_DIST = \ e1000_hw-4.4-ethercat.h \ e1000_hw-4.4-orig.c \ e1000_hw-4.4-orig.h \ + e1000_hw-5.10-ethercat.c \ + e1000_hw-5.10-ethercat.h \ + e1000_hw-5.10-orig.c \ + e1000_hw-5.10-orig.h \ + e1000_hw-5.14-ethercat.c \ + e1000_hw-5.14-ethercat.h \ + e1000_hw-5.14-orig.c \ + e1000_hw-5.14-orig.h \ + e1000_hw-5.15-ethercat.c \ + e1000_hw-5.15-ethercat.h \ + 
e1000_hw-5.15-orig.c \ + e1000_hw-5.15-orig.h \ + e1000_hw-6.1-ethercat.c \ + e1000_hw-6.1-ethercat.h \ + e1000_hw-6.1-orig.c \ + e1000_hw-6.1-orig.h \ e1000_main-2.6.13-ethercat.c \ e1000_main-2.6.13-orig.c \ e1000_main-2.6.18-ethercat.c \ @@ -260,6 +292,14 @@ EXTRA_DIST = \ e1000_main-3.8-orig.c \ e1000_main-4.4-ethercat.c \ e1000_main-4.4-orig.c \ + e1000_main-5.10-ethercat.c \ + e1000_main-5.10-orig.c \ + e1000_main-5.14-ethercat.c \ + e1000_main-5.14-orig.c \ + e1000_main-5.15-ethercat.c \ + e1000_main-5.15-orig.c \ + e1000_main-6.1-ethercat.c \ + e1000_main-6.1-orig.c \ e1000_osdep-2.6.13-ethercat.h \ e1000_osdep-2.6.13-orig.h \ e1000_osdep-2.6.18-ethercat.h \ @@ -306,6 +346,14 @@ EXTRA_DIST = \ e1000_osdep-3.8-orig.h \ e1000_osdep-4.4-ethercat.h \ e1000_osdep-4.4-orig.h \ + e1000_osdep-5.10-ethercat.h \ + e1000_osdep-5.10-orig.h \ + e1000_osdep-5.14-ethercat.h \ + e1000_osdep-5.14-orig.h \ + e1000_osdep-5.15-ethercat.h \ + e1000_osdep-5.15-orig.h \ + e1000_osdep-6.1-ethercat.h \ + e1000_osdep-6.1-orig.h \ e1000_param-2.6.13-ethercat.c \ e1000_param-2.6.13-orig.c \ e1000_param-2.6.18-ethercat.c \ @@ -351,6 +399,14 @@ EXTRA_DIST = \ e1000_param-3.8-ethercat.c \ e1000_param-3.8-orig.c \ e1000_param-4.4-ethercat.c \ - e1000_param-4.4-orig.c + e1000_param-4.4-orig.c \ + e1000_param-5.10-ethercat.c \ + e1000_param-5.10-orig.c \ + e1000_param-5.14-ethercat.c \ + e1000_param-5.14-orig.c \ + e1000_param-5.15-ethercat.c \ + e1000_param-5.15-orig.c \ + e1000_param-6.1-ethercat.c \ + e1000_param-6.1-orig.c #------------------------------------------------------------------------------ diff --git a/devices/e1000/e1000-5.10-ethercat.h b/devices/e1000/e1000-5.10-ethercat.h new file mode 100644 index 00000000..87301858 --- /dev/null +++ b/devices/e1000/e1000-5.10-ethercat.h @@ -0,0 +1,357 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ecdev.h" + + +#define BAR_0 0 +#define BAR_1 1 + +#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +struct e1000_adapter; + +#include "e1000_hw-5.10-ethercat.h" + +#define E1000_MAX_INTR 10 + +/* + * Count for polling __E1000_RESET condition every 10-20msec. 
+ */ +#define E1000_CHECK_RESET_COUNT 50 + +/* TX/RX descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 256 +#define E1000_MIN_TXD 48 +#define E1000_MAX_82544_TXD 4096 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 256 +#define E1000_MIN_RXD 48 +#define E1000_MAX_82544_RXD 4096 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* Supported Rx Buffer Sizes */ +#define E1000_RXBUFFER_128 128 /* Used for packet split */ +#define E1000_RXBUFFER_256 256 /* Used for packet split */ +#define E1000_RXBUFFER_512 512 +#define E1000_RXBUFFER_1024 1024 +#define E1000_RXBUFFER_2048 2048 +#define E1000_RXBUFFER_4096 4096 +#define E1000_RXBUFFER_8192 8192 +#define E1000_RXBUFFER_16384 16384 + +/* SmartSpeed delimiters */ +#define E1000_SMARTSPEED_DOWNSHIFT 3 +#define E1000_SMARTSPEED_MAX 15 + +/* Packet Buffer allocations */ +#define E1000_PBA_BYTES_SHIFT 0xA +#define E1000_TX_HEAD_ADDR_SHIFT 7 +#define E1000_PBA_TX_MASK 0xFFFF0000 + +/* Flow Control Watermarks */ +#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ +#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ + +#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define E1000_TX_QUEUE_WAKE 16 +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_82544_APM 0x0004 +#define E1000_EEPROM_APME 0x0400 + +#ifndef E1000_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define E1000_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define E1000_MNG_VLAN_NONE (-1) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + bool mapped_as_page; + unsigned short segs; + unsigned int bytecount; +}; + +struct e1000_rx_buffer { + union { + struct page *page; /* jumbo: alloc_page */ + u8 *data; /* else, netdev_alloc_frag */ + } rxbuf; + dma_addr_t dma; +}; + +struct e1000_tx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_tx_buffer *buffer_info; + + u16 tdh; + u16 tdt; + bool last_tx_tso; +}; + +struct e1000_rx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_rx_buffer *buffer_info; + struct sk_buff *rx_skb_top; + + /* cpu for rx queue */ + 
int cpu; + + u16 rdh; + u16 rdt; +}; + +#define E1000_DESC_UNUSED(R) \ +({ \ + unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \ + unsigned int use = READ_ONCE((R)->next_to_use); \ + (clean > use ? 0 : (R)->count) + clean - use - 1; \ +}) + +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +/* board specific private data structure */ + +struct e1000_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 mng_vlan_id; + u32 bd_number; + u32 rx_buffer_len; + u32 wol; + u32 smartspeed; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + spinlock_t stats_lock; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + u8 fc_autoneg; + + /* TX */ + struct e1000_tx_ring *tx_ring; /* One per active queue */ + unsigned int restart_queue; + u32 txd_cmd; + u32 tx_int_delay; + u32 tx_abs_int_delay; + u32 gotcl; + u64 gotcl_old; + u64 tpt_old; + u64 colc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u8 tx_timeout_factor; + atomic_t tx_fifo_stall; + bool pcix_82544; + bool detect_tx_hung; + bool dump_buffers; + + /* RX */ + bool (*clean_rx)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); + void (*alloc_rx_buf)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); + struct e1000_rx_ring *rx_ring; /* One per active queue */ + struct napi_struct napi; + + int num_tx_queues; + int num_rx_queues; + + u64 hw_csum_err; + u64 hw_csum_good; + u32 alloc_rx_buff_failed; + u32 rx_int_delay; + u32 rx_abs_int_delay; + bool rx_csum; + u32 gorcl; + u64 gorcl_old; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + u32 test_icr; + struct e1000_tx_ring test_tx_ring; + struct e1000_rx_ring test_rx_ring; + + int msg_enable; + + /* to not mess up cache alignment, always add to the bottom */ + bool tso_force; + bool smart_power_down; /* phy smart power down */ + bool quad_port_a; + unsigned long flags; + u32 eeprom_wol; + + /* for ioport free */ + int bars; + int need_ioport; + + bool discarding; + + struct work_struct reset_task; + struct delayed_work watchdog_task; + struct delayed_work fifo_stall_task; + struct delayed_work phy_info_task; + + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN, + __E1000_DISABLED +}; + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); +#define e_dbg(format, arg...) \ + netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) 
\ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_notice(msglvl, format, arg...) \ + netif_notice(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(&adapter->pdev->dev, format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg) + +extern char e1000_driver_name[]; + +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_update_stats(struct e1000_adapter *adapter); +bool e1000_has_link(struct e1000_adapter *adapter); +void e1000_power_up_phy(struct e1000_adapter *); +void e1000_set_ethtool_ops(struct net_device *netdev); +void e1000_check_options(struct e1000_adapter *adapter); +char *e1000_get_hw_dev_name(struct e1000_hw *hw); + +#endif /* _E1000_H_ */ diff --git a/devices/e1000/e1000-5.10-orig.h b/devices/e1000/e1000-5.10-orig.h new file mode 100644 index 00000000..4817eb13 --- /dev/null +++ b/devices/e1000/e1000-5.10-orig.h @@ -0,0 +1,352 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BAR_0 0 +#define BAR_1 1 + +#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +struct e1000_adapter; + +#include "e1000_hw.h" + +#define E1000_MAX_INTR 10 + +/* + * Count for polling __E1000_RESET condition every 10-20msec. 
+ */ +#define E1000_CHECK_RESET_COUNT 50 + +/* TX/RX descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 256 +#define E1000_MIN_TXD 48 +#define E1000_MAX_82544_TXD 4096 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 256 +#define E1000_MIN_RXD 48 +#define E1000_MAX_82544_RXD 4096 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* Supported Rx Buffer Sizes */ +#define E1000_RXBUFFER_128 128 /* Used for packet split */ +#define E1000_RXBUFFER_256 256 /* Used for packet split */ +#define E1000_RXBUFFER_512 512 +#define E1000_RXBUFFER_1024 1024 +#define E1000_RXBUFFER_2048 2048 +#define E1000_RXBUFFER_4096 4096 +#define E1000_RXBUFFER_8192 8192 +#define E1000_RXBUFFER_16384 16384 + +/* SmartSpeed delimiters */ +#define E1000_SMARTSPEED_DOWNSHIFT 3 +#define E1000_SMARTSPEED_MAX 15 + +/* Packet Buffer allocations */ +#define E1000_PBA_BYTES_SHIFT 0xA +#define E1000_TX_HEAD_ADDR_SHIFT 7 +#define E1000_PBA_TX_MASK 0xFFFF0000 + +/* Flow Control Watermarks */ +#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ +#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ + +#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define E1000_TX_QUEUE_WAKE 16 +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_82544_APM 0x0004 +#define E1000_EEPROM_APME 0x0400 + +#ifndef E1000_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define E1000_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define E1000_MNG_VLAN_NONE (-1) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + bool mapped_as_page; + unsigned short segs; + unsigned int bytecount; +}; + +struct e1000_rx_buffer { + union { + struct page *page; /* jumbo: alloc_page */ + u8 *data; /* else, netdev_alloc_frag */ + } rxbuf; + dma_addr_t dma; +}; + +struct e1000_tx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_tx_buffer *buffer_info; + + u16 tdh; + u16 tdt; + bool last_tx_tso; +}; + +struct e1000_rx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_rx_buffer *buffer_info; + struct sk_buff *rx_skb_top; + + /* cpu for rx queue */ + 
int cpu; + + u16 rdh; + u16 rdt; +}; + +#define E1000_DESC_UNUSED(R) \ +({ \ + unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \ + unsigned int use = READ_ONCE((R)->next_to_use); \ + (clean > use ? 0 : (R)->count) + clean - use - 1; \ +}) + +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +/* board specific private data structure */ + +struct e1000_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 mng_vlan_id; + u32 bd_number; + u32 rx_buffer_len; + u32 wol; + u32 smartspeed; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + spinlock_t stats_lock; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + u8 fc_autoneg; + + /* TX */ + struct e1000_tx_ring *tx_ring; /* One per active queue */ + unsigned int restart_queue; + u32 txd_cmd; + u32 tx_int_delay; + u32 tx_abs_int_delay; + u32 gotcl; + u64 gotcl_old; + u64 tpt_old; + u64 colc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u8 tx_timeout_factor; + atomic_t tx_fifo_stall; + bool pcix_82544; + bool detect_tx_hung; + bool dump_buffers; + + /* RX */ + bool (*clean_rx)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); + void (*alloc_rx_buf)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); + struct e1000_rx_ring *rx_ring; /* One per active queue */ + struct napi_struct napi; + + int num_tx_queues; + int num_rx_queues; + + u64 hw_csum_err; + u64 hw_csum_good; + u32 alloc_rx_buff_failed; + u32 rx_int_delay; + u32 rx_abs_int_delay; + bool rx_csum; + u32 gorcl; + u64 gorcl_old; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + u32 test_icr; + struct e1000_tx_ring test_tx_ring; + struct e1000_rx_ring test_rx_ring; + + int msg_enable; + + /* to not mess up cache alignment, always add to the bottom */ + bool tso_force; + bool smart_power_down; /* phy smart power down */ + bool quad_port_a; + unsigned long flags; + u32 eeprom_wol; + + /* for ioport free */ + int bars; + int need_ioport; + + bool discarding; + + struct work_struct reset_task; + struct delayed_work watchdog_task; + struct delayed_work fifo_stall_task; + struct delayed_work phy_info_task; +}; + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN, + __E1000_DISABLED +}; + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); +#define e_dbg(format, arg...) \ + netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_notice(msglvl, format, arg...) 
\ + netif_notice(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(&adapter->pdev->dev, format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg) + +extern char e1000_driver_name[]; + +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_update_stats(struct e1000_adapter *adapter); +bool e1000_has_link(struct e1000_adapter *adapter); +void e1000_power_up_phy(struct e1000_adapter *); +void e1000_set_ethtool_ops(struct net_device *netdev); +void e1000_check_options(struct e1000_adapter *adapter); +char *e1000_get_hw_dev_name(struct e1000_hw *hw); + +#endif /* _E1000_H_ */ diff --git a/devices/e1000/e1000-5.14-ethercat.h b/devices/e1000/e1000-5.14-ethercat.h new file mode 100644 index 00000000..023fee7b --- /dev/null +++ b/devices/e1000/e1000-5.14-ethercat.h @@ -0,0 +1,357 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ecdev.h" + + +#define BAR_0 0 +#define BAR_1 1 + +#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +struct e1000_adapter; + +#include "e1000_hw-5.14-ethercat.h" + +#define E1000_MAX_INTR 10 + +/* + * Count for polling __E1000_RESET condition every 10-20msec. 
+ */ +#define E1000_CHECK_RESET_COUNT 50 + +/* TX/RX descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 256 +#define E1000_MIN_TXD 48 +#define E1000_MAX_82544_TXD 4096 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 256 +#define E1000_MIN_RXD 48 +#define E1000_MAX_82544_RXD 4096 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* Supported Rx Buffer Sizes */ +#define E1000_RXBUFFER_128 128 /* Used for packet split */ +#define E1000_RXBUFFER_256 256 /* Used for packet split */ +#define E1000_RXBUFFER_512 512 +#define E1000_RXBUFFER_1024 1024 +#define E1000_RXBUFFER_2048 2048 +#define E1000_RXBUFFER_4096 4096 +#define E1000_RXBUFFER_8192 8192 +#define E1000_RXBUFFER_16384 16384 + +/* SmartSpeed delimiters */ +#define E1000_SMARTSPEED_DOWNSHIFT 3 +#define E1000_SMARTSPEED_MAX 15 + +/* Packet Buffer allocations */ +#define E1000_PBA_BYTES_SHIFT 0xA +#define E1000_TX_HEAD_ADDR_SHIFT 7 +#define E1000_PBA_TX_MASK 0xFFFF0000 + +/* Flow Control Watermarks */ +#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ +#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ + +#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define E1000_TX_QUEUE_WAKE 16 +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_82544_APM 0x0004 +#define E1000_EEPROM_APME 0x0400 + +#ifndef E1000_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define E1000_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define E1000_MNG_VLAN_NONE (-1) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + bool mapped_as_page; + unsigned short segs; + unsigned int bytecount; +}; + +struct e1000_rx_buffer { + union { + struct page *page; /* jumbo: alloc_page */ + u8 *data; /* else, netdev_alloc_frag */ + } rxbuf; + dma_addr_t dma; +}; + +struct e1000_tx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_tx_buffer *buffer_info; + + u16 tdh; + u16 tdt; + bool last_tx_tso; +}; + +struct e1000_rx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_rx_buffer *buffer_info; + struct sk_buff *rx_skb_top; + + /* cpu for rx queue */ + 
int cpu; + + u16 rdh; + u16 rdt; +}; + +#define E1000_DESC_UNUSED(R) \ +({ \ + unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \ + unsigned int use = READ_ONCE((R)->next_to_use); \ + (clean > use ? 0 : (R)->count) + clean - use - 1; \ +}) + +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +/* board specific private data structure */ + +struct e1000_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 mng_vlan_id; + u32 bd_number; + u32 rx_buffer_len; + u32 wol; + u32 smartspeed; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + spinlock_t stats_lock; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + u8 fc_autoneg; + + /* TX */ + struct e1000_tx_ring *tx_ring; /* One per active queue */ + unsigned int restart_queue; + u32 txd_cmd; + u32 tx_int_delay; + u32 tx_abs_int_delay; + u32 gotcl; + u64 gotcl_old; + u64 tpt_old; + u64 colc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u8 tx_timeout_factor; + atomic_t tx_fifo_stall; + bool pcix_82544; + bool detect_tx_hung; + bool dump_buffers; + + /* RX */ + bool (*clean_rx)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); + void (*alloc_rx_buf)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); + struct e1000_rx_ring *rx_ring; /* One per active queue */ + struct napi_struct napi; + + int num_tx_queues; + int num_rx_queues; + + u64 hw_csum_err; + u64 hw_csum_good; + u32 alloc_rx_buff_failed; + u32 rx_int_delay; + u32 rx_abs_int_delay; + bool rx_csum; + u32 gorcl; + u64 gorcl_old; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + u32 test_icr; + struct e1000_tx_ring test_tx_ring; + struct e1000_rx_ring test_rx_ring; + + int msg_enable; + + /* to not mess up cache alignment, always add to the bottom */ + bool tso_force; + bool smart_power_down; /* phy smart power down */ + bool quad_port_a; + unsigned long flags; + u32 eeprom_wol; + + /* for ioport free */ + int bars; + int need_ioport; + + bool discarding; + + struct work_struct reset_task; + struct delayed_work watchdog_task; + struct delayed_work fifo_stall_task; + struct delayed_work phy_info_task; + + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN, + __E1000_DISABLED +}; + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); +#define e_dbg(format, arg...) \ + netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) 
\ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_notice(msglvl, format, arg...) \ + netif_notice(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(&adapter->pdev->dev, format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg) + +extern char e1000_driver_name[]; + +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_update_stats(struct e1000_adapter *adapter); +bool e1000_has_link(struct e1000_adapter *adapter); +void e1000_power_up_phy(struct e1000_adapter *); +void e1000_set_ethtool_ops(struct net_device *netdev); +void e1000_check_options(struct e1000_adapter *adapter); +char *e1000_get_hw_dev_name(struct e1000_hw *hw); + +#endif /* _E1000_H_ */ diff --git a/devices/e1000/e1000-5.14-orig.h b/devices/e1000/e1000-5.14-orig.h new file mode 100644 index 00000000..4817eb13 --- /dev/null +++ b/devices/e1000/e1000-5.14-orig.h @@ -0,0 +1,352 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BAR_0 0 +#define BAR_1 1 + +#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +struct e1000_adapter; + +#include "e1000_hw.h" + +#define E1000_MAX_INTR 10 + +/* + * Count for polling __E1000_RESET condition every 10-20msec. 
+ */ +#define E1000_CHECK_RESET_COUNT 50 + +/* TX/RX descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 256 +#define E1000_MIN_TXD 48 +#define E1000_MAX_82544_TXD 4096 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 256 +#define E1000_MIN_RXD 48 +#define E1000_MAX_82544_RXD 4096 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* Supported Rx Buffer Sizes */ +#define E1000_RXBUFFER_128 128 /* Used for packet split */ +#define E1000_RXBUFFER_256 256 /* Used for packet split */ +#define E1000_RXBUFFER_512 512 +#define E1000_RXBUFFER_1024 1024 +#define E1000_RXBUFFER_2048 2048 +#define E1000_RXBUFFER_4096 4096 +#define E1000_RXBUFFER_8192 8192 +#define E1000_RXBUFFER_16384 16384 + +/* SmartSpeed delimiters */ +#define E1000_SMARTSPEED_DOWNSHIFT 3 +#define E1000_SMARTSPEED_MAX 15 + +/* Packet Buffer allocations */ +#define E1000_PBA_BYTES_SHIFT 0xA +#define E1000_TX_HEAD_ADDR_SHIFT 7 +#define E1000_PBA_TX_MASK 0xFFFF0000 + +/* Flow Control Watermarks */ +#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ +#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ + +#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define E1000_TX_QUEUE_WAKE 16 +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_82544_APM 0x0004 +#define E1000_EEPROM_APME 0x0400 + +#ifndef E1000_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define E1000_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define E1000_MNG_VLAN_NONE (-1) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + bool mapped_as_page; + unsigned short segs; + unsigned int bytecount; +}; + +struct e1000_rx_buffer { + union { + struct page *page; /* jumbo: alloc_page */ + u8 *data; /* else, netdev_alloc_frag */ + } rxbuf; + dma_addr_t dma; +}; + +struct e1000_tx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_tx_buffer *buffer_info; + + u16 tdh; + u16 tdt; + bool last_tx_tso; +}; + +struct e1000_rx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_rx_buffer *buffer_info; + struct sk_buff *rx_skb_top; + + /* cpu for rx queue */ + 
int cpu; + + u16 rdh; + u16 rdt; +}; + +#define E1000_DESC_UNUSED(R) \ +({ \ + unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \ + unsigned int use = READ_ONCE((R)->next_to_use); \ + (clean > use ? 0 : (R)->count) + clean - use - 1; \ +}) + +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +/* board specific private data structure */ + +struct e1000_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 mng_vlan_id; + u32 bd_number; + u32 rx_buffer_len; + u32 wol; + u32 smartspeed; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + spinlock_t stats_lock; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + u8 fc_autoneg; + + /* TX */ + struct e1000_tx_ring *tx_ring; /* One per active queue */ + unsigned int restart_queue; + u32 txd_cmd; + u32 tx_int_delay; + u32 tx_abs_int_delay; + u32 gotcl; + u64 gotcl_old; + u64 tpt_old; + u64 colc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u8 tx_timeout_factor; + atomic_t tx_fifo_stall; + bool pcix_82544; + bool detect_tx_hung; + bool dump_buffers; + + /* RX */ + bool (*clean_rx)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); + void (*alloc_rx_buf)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); + struct e1000_rx_ring *rx_ring; /* One per active queue */ + struct napi_struct napi; + + int num_tx_queues; + int num_rx_queues; + + u64 hw_csum_err; + u64 hw_csum_good; + u32 alloc_rx_buff_failed; + u32 rx_int_delay; + u32 rx_abs_int_delay; + bool rx_csum; + u32 gorcl; + u64 gorcl_old; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + u32 test_icr; + struct e1000_tx_ring test_tx_ring; + struct e1000_rx_ring test_rx_ring; + + int msg_enable; + + /* to not mess up cache alignment, always add to the bottom */ + bool tso_force; + bool smart_power_down; /* phy smart power down */ + bool quad_port_a; + unsigned long flags; + u32 eeprom_wol; + + /* for ioport free */ + int bars; + int need_ioport; + + bool discarding; + + struct work_struct reset_task; + struct delayed_work watchdog_task; + struct delayed_work fifo_stall_task; + struct delayed_work phy_info_task; +}; + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN, + __E1000_DISABLED +}; + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); +#define e_dbg(format, arg...) \ + netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_notice(msglvl, format, arg...) 
\ + netif_notice(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(&adapter->pdev->dev, format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg) + +extern char e1000_driver_name[]; + +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_update_stats(struct e1000_adapter *adapter); +bool e1000_has_link(struct e1000_adapter *adapter); +void e1000_power_up_phy(struct e1000_adapter *); +void e1000_set_ethtool_ops(struct net_device *netdev); +void e1000_check_options(struct e1000_adapter *adapter); +char *e1000_get_hw_dev_name(struct e1000_hw *hw); + +#endif /* _E1000_H_ */ diff --git a/devices/e1000/e1000-5.15-ethercat.h b/devices/e1000/e1000-5.15-ethercat.h new file mode 100644 index 00000000..b94866b7 --- /dev/null +++ b/devices/e1000/e1000-5.15-ethercat.h @@ -0,0 +1,357 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ecdev.h" + + +#define BAR_0 0 +#define BAR_1 1 + +#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +struct e1000_adapter; + +#include "e1000_hw-5.15-ethercat.h" + +#define E1000_MAX_INTR 10 + +/* + * Count for polling __E1000_RESET condition every 10-20msec. 
+ */ +#define E1000_CHECK_RESET_COUNT 50 + +/* TX/RX descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 256 +#define E1000_MIN_TXD 48 +#define E1000_MAX_82544_TXD 4096 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 256 +#define E1000_MIN_RXD 48 +#define E1000_MAX_82544_RXD 4096 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* Supported Rx Buffer Sizes */ +#define E1000_RXBUFFER_128 128 /* Used for packet split */ +#define E1000_RXBUFFER_256 256 /* Used for packet split */ +#define E1000_RXBUFFER_512 512 +#define E1000_RXBUFFER_1024 1024 +#define E1000_RXBUFFER_2048 2048 +#define E1000_RXBUFFER_4096 4096 +#define E1000_RXBUFFER_8192 8192 +#define E1000_RXBUFFER_16384 16384 + +/* SmartSpeed delimiters */ +#define E1000_SMARTSPEED_DOWNSHIFT 3 +#define E1000_SMARTSPEED_MAX 15 + +/* Packet Buffer allocations */ +#define E1000_PBA_BYTES_SHIFT 0xA +#define E1000_TX_HEAD_ADDR_SHIFT 7 +#define E1000_PBA_TX_MASK 0xFFFF0000 + +/* Flow Control Watermarks */ +#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ +#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ + +#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define E1000_TX_QUEUE_WAKE 16 +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_82544_APM 0x0004 +#define E1000_EEPROM_APME 0x0400 + +#ifndef E1000_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define E1000_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define E1000_MNG_VLAN_NONE (-1) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + bool mapped_as_page; + unsigned short segs; + unsigned int bytecount; +}; + +struct e1000_rx_buffer { + union { + struct page *page; /* jumbo: alloc_page */ + u8 *data; /* else, netdev_alloc_frag */ + } rxbuf; + dma_addr_t dma; +}; + +struct e1000_tx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_tx_buffer *buffer_info; + + u16 tdh; + u16 tdt; + bool last_tx_tso; +}; + +struct e1000_rx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_rx_buffer *buffer_info; + struct sk_buff *rx_skb_top; + + /* cpu for rx queue */ + 
int cpu; + + u16 rdh; + u16 rdt; +}; + +#define E1000_DESC_UNUSED(R) \ +({ \ + unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \ + unsigned int use = READ_ONCE((R)->next_to_use); \ + (clean > use ? 0 : (R)->count) + clean - use - 1; \ +}) + +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +/* board specific private data structure */ + +struct e1000_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 mng_vlan_id; + u32 bd_number; + u32 rx_buffer_len; + u32 wol; + u32 smartspeed; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + spinlock_t stats_lock; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + u8 fc_autoneg; + + /* TX */ + struct e1000_tx_ring *tx_ring; /* One per active queue */ + unsigned int restart_queue; + u32 txd_cmd; + u32 tx_int_delay; + u32 tx_abs_int_delay; + u32 gotcl; + u64 gotcl_old; + u64 tpt_old; + u64 colc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u8 tx_timeout_factor; + atomic_t tx_fifo_stall; + bool pcix_82544; + bool detect_tx_hung; + bool dump_buffers; + + /* RX */ + bool (*clean_rx)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); + void (*alloc_rx_buf)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); + struct e1000_rx_ring *rx_ring; /* One per active queue */ + struct napi_struct napi; + + int num_tx_queues; + int num_rx_queues; + + u64 hw_csum_err; + u64 hw_csum_good; + u32 alloc_rx_buff_failed; + u32 rx_int_delay; + u32 rx_abs_int_delay; + bool rx_csum; + u32 gorcl; + u64 gorcl_old; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + u32 test_icr; + struct e1000_tx_ring test_tx_ring; + struct e1000_rx_ring test_rx_ring; + + int msg_enable; + + /* to not mess up cache alignment, always add to the bottom */ + bool tso_force; + bool smart_power_down; /* phy smart power down */ + bool quad_port_a; + unsigned long flags; + u32 eeprom_wol; + + /* for ioport free */ + int bars; + int need_ioport; + + bool discarding; + + struct work_struct reset_task; + struct delayed_work watchdog_task; + struct delayed_work fifo_stall_task; + struct delayed_work phy_info_task; + + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN, + __E1000_DISABLED +}; + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); +#define e_dbg(format, arg...) \ + netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) 
\ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_notice(msglvl, format, arg...) \ + netif_notice(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(&adapter->pdev->dev, format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg) + +extern char e1000_driver_name[]; + +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_update_stats(struct e1000_adapter *adapter); +bool e1000_has_link(struct e1000_adapter *adapter); +void e1000_power_up_phy(struct e1000_adapter *); +void e1000_set_ethtool_ops(struct net_device *netdev); +void e1000_check_options(struct e1000_adapter *adapter); +char *e1000_get_hw_dev_name(struct e1000_hw *hw); + +#endif /* _E1000_H_ */ diff --git a/devices/e1000/e1000-5.15-orig.h b/devices/e1000/e1000-5.15-orig.h new file mode 100644 index 00000000..4817eb13 --- /dev/null +++ b/devices/e1000/e1000-5.15-orig.h @@ -0,0 +1,352 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BAR_0 0 +#define BAR_1 1 + +#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +struct e1000_adapter; + +#include "e1000_hw.h" + +#define E1000_MAX_INTR 10 + +/* + * Count for polling __E1000_RESET condition every 10-20msec. 
+ */ +#define E1000_CHECK_RESET_COUNT 50 + +/* TX/RX descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 256 +#define E1000_MIN_TXD 48 +#define E1000_MAX_82544_TXD 4096 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 256 +#define E1000_MIN_RXD 48 +#define E1000_MAX_82544_RXD 4096 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* Supported Rx Buffer Sizes */ +#define E1000_RXBUFFER_128 128 /* Used for packet split */ +#define E1000_RXBUFFER_256 256 /* Used for packet split */ +#define E1000_RXBUFFER_512 512 +#define E1000_RXBUFFER_1024 1024 +#define E1000_RXBUFFER_2048 2048 +#define E1000_RXBUFFER_4096 4096 +#define E1000_RXBUFFER_8192 8192 +#define E1000_RXBUFFER_16384 16384 + +/* SmartSpeed delimiters */ +#define E1000_SMARTSPEED_DOWNSHIFT 3 +#define E1000_SMARTSPEED_MAX 15 + +/* Packet Buffer allocations */ +#define E1000_PBA_BYTES_SHIFT 0xA +#define E1000_TX_HEAD_ADDR_SHIFT 7 +#define E1000_PBA_TX_MASK 0xFFFF0000 + +/* Flow Control Watermarks */ +#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ +#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ + +#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define E1000_TX_QUEUE_WAKE 16 +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_82544_APM 0x0004 +#define E1000_EEPROM_APME 0x0400 + +#ifndef E1000_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define E1000_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define E1000_MNG_VLAN_NONE (-1) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + bool mapped_as_page; + unsigned short segs; + unsigned int bytecount; +}; + +struct e1000_rx_buffer { + union { + struct page *page; /* jumbo: alloc_page */ + u8 *data; /* else, netdev_alloc_frag */ + } rxbuf; + dma_addr_t dma; +}; + +struct e1000_tx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_tx_buffer *buffer_info; + + u16 tdh; + u16 tdt; + bool last_tx_tso; +}; + +struct e1000_rx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_rx_buffer *buffer_info; + struct sk_buff *rx_skb_top; + + /* cpu for rx queue */ + 
int cpu; + + u16 rdh; + u16 rdt; +}; + +#define E1000_DESC_UNUSED(R) \ +({ \ + unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \ + unsigned int use = READ_ONCE((R)->next_to_use); \ + (clean > use ? 0 : (R)->count) + clean - use - 1; \ +}) + +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +/* board specific private data structure */ + +struct e1000_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 mng_vlan_id; + u32 bd_number; + u32 rx_buffer_len; + u32 wol; + u32 smartspeed; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + spinlock_t stats_lock; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + u8 fc_autoneg; + + /* TX */ + struct e1000_tx_ring *tx_ring; /* One per active queue */ + unsigned int restart_queue; + u32 txd_cmd; + u32 tx_int_delay; + u32 tx_abs_int_delay; + u32 gotcl; + u64 gotcl_old; + u64 tpt_old; + u64 colc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u8 tx_timeout_factor; + atomic_t tx_fifo_stall; + bool pcix_82544; + bool detect_tx_hung; + bool dump_buffers; + + /* RX */ + bool (*clean_rx)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); + void (*alloc_rx_buf)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); + struct e1000_rx_ring *rx_ring; /* One per active queue */ + struct napi_struct napi; + + int num_tx_queues; + int num_rx_queues; + + u64 hw_csum_err; + u64 hw_csum_good; + u32 alloc_rx_buff_failed; + u32 rx_int_delay; + u32 rx_abs_int_delay; + bool rx_csum; + u32 gorcl; + u64 gorcl_old; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + u32 test_icr; + struct e1000_tx_ring test_tx_ring; + struct e1000_rx_ring test_rx_ring; + + int msg_enable; + + /* to not mess up cache alignment, always add to the bottom */ + bool tso_force; + bool smart_power_down; /* phy smart power down */ + bool quad_port_a; + unsigned long flags; + u32 eeprom_wol; + + /* for ioport free */ + int bars; + int need_ioport; + + bool discarding; + + struct work_struct reset_task; + struct delayed_work watchdog_task; + struct delayed_work fifo_stall_task; + struct delayed_work phy_info_task; +}; + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN, + __E1000_DISABLED +}; + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); +#define e_dbg(format, arg...) \ + netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_notice(msglvl, format, arg...) 
\ + netif_notice(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(&adapter->pdev->dev, format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg) + +extern char e1000_driver_name[]; + +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_update_stats(struct e1000_adapter *adapter); +bool e1000_has_link(struct e1000_adapter *adapter); +void e1000_power_up_phy(struct e1000_adapter *); +void e1000_set_ethtool_ops(struct net_device *netdev); +void e1000_check_options(struct e1000_adapter *adapter); +char *e1000_get_hw_dev_name(struct e1000_hw *hw); + +#endif /* _E1000_H_ */ diff --git a/devices/e1000/e1000-6.1-ethercat.h b/devices/e1000/e1000-6.1-ethercat.h new file mode 100644 index 00000000..30546b83 --- /dev/null +++ b/devices/e1000/e1000-6.1-ethercat.h @@ -0,0 +1,357 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ecdev.h" + + +#define BAR_0 0 +#define BAR_1 1 + +#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +struct e1000_adapter; + +#include "e1000_hw-6.1-ethercat.h" + +#define E1000_MAX_INTR 10 + +/* + * Count for polling __E1000_RESET condition every 10-20msec. 
+ */ +#define E1000_CHECK_RESET_COUNT 50 + +/* TX/RX descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 256 +#define E1000_MIN_TXD 48 +#define E1000_MAX_82544_TXD 4096 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 256 +#define E1000_MIN_RXD 48 +#define E1000_MAX_82544_RXD 4096 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* Supported Rx Buffer Sizes */ +#define E1000_RXBUFFER_128 128 /* Used for packet split */ +#define E1000_RXBUFFER_256 256 /* Used for packet split */ +#define E1000_RXBUFFER_512 512 +#define E1000_RXBUFFER_1024 1024 +#define E1000_RXBUFFER_2048 2048 +#define E1000_RXBUFFER_4096 4096 +#define E1000_RXBUFFER_8192 8192 +#define E1000_RXBUFFER_16384 16384 + +/* SmartSpeed delimiters */ +#define E1000_SMARTSPEED_DOWNSHIFT 3 +#define E1000_SMARTSPEED_MAX 15 + +/* Packet Buffer allocations */ +#define E1000_PBA_BYTES_SHIFT 0xA +#define E1000_TX_HEAD_ADDR_SHIFT 7 +#define E1000_PBA_TX_MASK 0xFFFF0000 + +/* Flow Control Watermarks */ +#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ +#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ + +#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define E1000_TX_QUEUE_WAKE 16 +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_82544_APM 0x0004 +#define E1000_EEPROM_APME 0x0400 + +#ifndef E1000_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define E1000_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define E1000_MNG_VLAN_NONE (-1) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + bool mapped_as_page; + unsigned short segs; + unsigned int bytecount; +}; + +struct e1000_rx_buffer { + union { + struct page *page; /* jumbo: alloc_page */ + u8 *data; /* else, netdev_alloc_frag */ + } rxbuf; + dma_addr_t dma; +}; + +struct e1000_tx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_tx_buffer *buffer_info; + + u16 tdh; + u16 tdt; + bool last_tx_tso; +}; + +struct e1000_rx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_rx_buffer *buffer_info; + struct sk_buff *rx_skb_top; + + /* cpu for rx queue */ + 
int cpu; + + u16 rdh; + u16 rdt; +}; + +#define E1000_DESC_UNUSED(R) \ +({ \ + unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \ + unsigned int use = READ_ONCE((R)->next_to_use); \ + (clean > use ? 0 : (R)->count) + clean - use - 1; \ +}) + +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +/* board specific private data structure */ + +struct e1000_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 mng_vlan_id; + u32 bd_number; + u32 rx_buffer_len; + u32 wol; + u32 smartspeed; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + spinlock_t stats_lock; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + u8 fc_autoneg; + + /* TX */ + struct e1000_tx_ring *tx_ring; /* One per active queue */ + unsigned int restart_queue; + u32 txd_cmd; + u32 tx_int_delay; + u32 tx_abs_int_delay; + u32 gotcl; + u64 gotcl_old; + u64 tpt_old; + u64 colc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u8 tx_timeout_factor; + atomic_t tx_fifo_stall; + bool pcix_82544; + bool detect_tx_hung; + bool dump_buffers; + + /* RX */ + bool (*clean_rx)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); + void (*alloc_rx_buf)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); + struct e1000_rx_ring *rx_ring; /* One per active queue */ + struct napi_struct napi; + + int num_tx_queues; + int num_rx_queues; + + u64 hw_csum_err; + u64 hw_csum_good; + u32 alloc_rx_buff_failed; + u32 rx_int_delay; + u32 rx_abs_int_delay; + bool rx_csum; + u32 gorcl; + u64 gorcl_old; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + u32 test_icr; + struct e1000_tx_ring test_tx_ring; + struct e1000_rx_ring test_rx_ring; + + int msg_enable; + + /* to not mess up cache alignment, always add to the bottom */ + bool tso_force; + bool smart_power_down; /* phy smart power down */ + bool quad_port_a; + unsigned long flags; + u32 eeprom_wol; + + /* for ioport free */ + int bars; + int need_ioport; + + bool discarding; + + struct work_struct reset_task; + struct delayed_work watchdog_task; + struct delayed_work fifo_stall_task; + struct delayed_work phy_info_task; + + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN, + __E1000_DISABLED +}; + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); +#define e_dbg(format, arg...) \ + netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) 
\ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_notice(msglvl, format, arg...) \ + netif_notice(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(&adapter->pdev->dev, format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg) + +extern char e1000_driver_name[]; + +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_update_stats(struct e1000_adapter *adapter); +bool e1000_has_link(struct e1000_adapter *adapter); +void e1000_power_up_phy(struct e1000_adapter *); +void e1000_set_ethtool_ops(struct net_device *netdev); +void e1000_check_options(struct e1000_adapter *adapter); +char *e1000_get_hw_dev_name(struct e1000_hw *hw); + +#endif /* _E1000_H_ */ diff --git a/devices/e1000/e1000-6.1-orig.h b/devices/e1000/e1000-6.1-orig.h new file mode 100644 index 00000000..4817eb13 --- /dev/null +++ b/devices/e1000/e1000-6.1-orig.h @@ -0,0 +1,352 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BAR_0 0 +#define BAR_1 1 + +#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +struct e1000_adapter; + +#include "e1000_hw.h" + +#define E1000_MAX_INTR 10 + +/* + * Count for polling __E1000_RESET condition every 10-20msec. 
+ */ +#define E1000_CHECK_RESET_COUNT 50 + +/* TX/RX descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 256 +#define E1000_MIN_TXD 48 +#define E1000_MAX_82544_TXD 4096 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 256 +#define E1000_MIN_RXD 48 +#define E1000_MAX_82544_RXD 4096 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* Supported Rx Buffer Sizes */ +#define E1000_RXBUFFER_128 128 /* Used for packet split */ +#define E1000_RXBUFFER_256 256 /* Used for packet split */ +#define E1000_RXBUFFER_512 512 +#define E1000_RXBUFFER_1024 1024 +#define E1000_RXBUFFER_2048 2048 +#define E1000_RXBUFFER_4096 4096 +#define E1000_RXBUFFER_8192 8192 +#define E1000_RXBUFFER_16384 16384 + +/* SmartSpeed delimiters */ +#define E1000_SMARTSPEED_DOWNSHIFT 3 +#define E1000_SMARTSPEED_MAX 15 + +/* Packet Buffer allocations */ +#define E1000_PBA_BYTES_SHIFT 0xA +#define E1000_TX_HEAD_ADDR_SHIFT 7 +#define E1000_PBA_TX_MASK 0xFFFF0000 + +/* Flow Control Watermarks */ +#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ +#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ + +#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define E1000_TX_QUEUE_WAKE 16 +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_82544_APM 0x0004 +#define E1000_EEPROM_APME 0x0400 + +#ifndef E1000_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define E1000_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define E1000_MNG_VLAN_NONE (-1) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + bool mapped_as_page; + unsigned short segs; + unsigned int bytecount; +}; + +struct e1000_rx_buffer { + union { + struct page *page; /* jumbo: alloc_page */ + u8 *data; /* else, netdev_alloc_frag */ + } rxbuf; + dma_addr_t dma; +}; + +struct e1000_tx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_tx_buffer *buffer_info; + + u16 tdh; + u16 tdt; + bool last_tx_tso; +}; + +struct e1000_rx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_rx_buffer *buffer_info; + struct sk_buff *rx_skb_top; + + /* cpu for rx queue */ + 
int cpu; + + u16 rdh; + u16 rdt; +}; + +#define E1000_DESC_UNUSED(R) \ +({ \ + unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \ + unsigned int use = READ_ONCE((R)->next_to_use); \ + (clean > use ? 0 : (R)->count) + clean - use - 1; \ +}) + +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +/* board specific private data structure */ + +struct e1000_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 mng_vlan_id; + u32 bd_number; + u32 rx_buffer_len; + u32 wol; + u32 smartspeed; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + spinlock_t stats_lock; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + u8 fc_autoneg; + + /* TX */ + struct e1000_tx_ring *tx_ring; /* One per active queue */ + unsigned int restart_queue; + u32 txd_cmd; + u32 tx_int_delay; + u32 tx_abs_int_delay; + u32 gotcl; + u64 gotcl_old; + u64 tpt_old; + u64 colc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u8 tx_timeout_factor; + atomic_t tx_fifo_stall; + bool pcix_82544; + bool detect_tx_hung; + bool dump_buffers; + + /* RX */ + bool (*clean_rx)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); + void (*alloc_rx_buf)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); + struct e1000_rx_ring *rx_ring; /* One per active queue */ + struct napi_struct napi; + + int num_tx_queues; + int num_rx_queues; + + u64 hw_csum_err; + u64 hw_csum_good; + u32 alloc_rx_buff_failed; + u32 rx_int_delay; + u32 rx_abs_int_delay; + bool rx_csum; + u32 gorcl; + u64 gorcl_old; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + u32 test_icr; + struct e1000_tx_ring test_tx_ring; + struct e1000_rx_ring test_rx_ring; + + int msg_enable; + + /* to not mess up cache alignment, always add to the bottom */ + bool tso_force; + bool smart_power_down; /* phy smart power down */ + bool quad_port_a; + unsigned long flags; + u32 eeprom_wol; + + /* for ioport free */ + int bars; + int need_ioport; + + bool discarding; + + struct work_struct reset_task; + struct delayed_work watchdog_task; + struct delayed_work fifo_stall_task; + struct delayed_work phy_info_task; +}; + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN, + __E1000_DISABLED +}; + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); +#define e_dbg(format, arg...) \ + netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_notice(msglvl, format, arg...) 
\ + netif_notice(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(&adapter->pdev->dev, format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg) + +extern char e1000_driver_name[]; + +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_update_stats(struct e1000_adapter *adapter); +bool e1000_has_link(struct e1000_adapter *adapter); +void e1000_power_up_phy(struct e1000_adapter *); +void e1000_set_ethtool_ops(struct net_device *netdev); +void e1000_check_options(struct e1000_adapter *adapter); +char *e1000_get_hw_dev_name(struct e1000_hw *hw); + +#endif /* _E1000_H_ */ diff --git a/devices/e1000/e1000_ethtool-5.10-ethercat.c b/devices/e1000/e1000_ethtool-5.10-ethercat.c new file mode 100644 index 00000000..32bcd8ec --- /dev/null +++ b/devices/e1000/e1000_ethtool-5.10-ethercat.c @@ -0,0 +1,1885 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* ethtool support for e1000 */ + +#include "e1000-5.10-ethercat.h" +#include +#include + +enum {NETDEV_STATS, E1000_STATS}; + +struct e1000_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#define E1000_STAT(m) E1000_STATS, \ + sizeof(((struct e1000_adapter *)0)->m), \ + offsetof(struct e1000_adapter, m) +#define E1000_NETDEV_STAT(m) NETDEV_STATS, \ + sizeof(((struct net_device *)0)->m), \ + offsetof(struct net_device, m) + +static const struct e1000_stats e1000_gstrings_stats[] = { + { "rx_packets", E1000_STAT(stats.gprc) }, + { "tx_packets", E1000_STAT(stats.gptc) }, + { "rx_bytes", E1000_STAT(stats.gorcl) }, + { "tx_bytes", E1000_STAT(stats.gotcl) }, + { "rx_broadcast", E1000_STAT(stats.bprc) }, + { "tx_broadcast", E1000_STAT(stats.bptc) }, + { "rx_multicast", E1000_STAT(stats.mprc) }, + { "tx_multicast", E1000_STAT(stats.mptc) }, + { "rx_errors", E1000_STAT(stats.rxerrc) }, + { "tx_errors", E1000_STAT(stats.txerrc) }, + { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) }, + { "multicast", E1000_STAT(stats.mprc) }, + { "collisions", E1000_STAT(stats.colc) }, + { "rx_length_errors", E1000_STAT(stats.rlerrc) }, + { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) }, + { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, + { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) }, + { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, + { "rx_missed_errors", E1000_STAT(stats.mpc) }, + { "tx_aborted_errors", E1000_STAT(stats.ecol) }, + { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, + { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) }, + { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) }, + { "tx_window_errors", E1000_STAT(stats.latecol) }, + { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, + { "tx_deferred_ok", E1000_STAT(stats.dc) }, + { 
"tx_single_coll_ok", E1000_STAT(stats.scc) }, + { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, + { "tx_timeout_count", E1000_STAT(tx_timeout_count) }, + { "tx_restart_queue", E1000_STAT(restart_queue) }, + { "rx_long_length_errors", E1000_STAT(stats.roc) }, + { "rx_short_length_errors", E1000_STAT(stats.ruc) }, + { "rx_align_errors", E1000_STAT(stats.algnerrc) }, + { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) }, + { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) }, + { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) }, + { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) }, + { "tx_flow_control_xon", E1000_STAT(stats.xontxc) }, + { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) }, + { "rx_long_byte_count", E1000_STAT(stats.gorcl) }, + { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, + { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, + { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, + { "tx_smbus", E1000_STAT(stats.mgptc) }, + { "rx_smbus", E1000_STAT(stats.mgprc) }, + { "dropped_smbus", E1000_STAT(stats.mgpdc) }, +}; + +#define E1000_QUEUE_STATS_LEN 0 +#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) +#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN) +static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; + +#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) + +static int e1000_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 supported, advertising; + + if (hw->media_type == e1000_media_type_copper) { + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + advertising = ADVERTISED_TP; + + if (hw->autoneg == 1) { + advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + advertising |= hw->autoneg_advertised; + } + + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy_addr; + } else { + supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + cmd->base.port = PORT_FIBRE; + } + + if (er32(STATUS) & E1000_STATUS_LU) { + e1000_get_speed_and_duplex(hw, &adapter->link_speed, + &adapter->link_duplex); + cmd->base.speed = adapter->link_speed; + + /* unfortunately FULL_DUPLEX != DUPLEX_FULL + * and HALF_DUPLEX != DUPLEX_HALF + */ + if (adapter->link_duplex == FULL_DUPLEX) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + cmd->base.autoneg = ((hw->media_type == e1000_media_type_fiber) || + hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 1; MDI => 0 */ + if ((hw->media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + cmd->base.eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? 
+ ETH_TP_MDI_X : ETH_TP_MDI); + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->mdix; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + + return 0; +} + +static int e1000_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (cmd->base.autoneg != AUTONEG_ENABLE)) { + e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->autoneg = 1; + if (hw->media_type == e1000_media_type_fiber) + hw->autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->autoneg_advertised = advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + } else { + u32 speed = cmd->base.speed; + /* calling this overrides forced MDI setting */ + if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) { + clear_bit(__E1000_RESETTING, &adapter->flags); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->mdix = AUTO_ALL_MODES; + else + hw->mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + clear_bit(__E1000_RESETTING, &adapter->flags); + return 0; +} + +static u32 e1000_get_link(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + adapter->hw.get_link_status = 1; + + return e1000_has_link(adapter); +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc == E1000_FC_RX_PAUSE) { + pause->rx_pause = 1; + } else if (hw->fc == E1000_FC_TX_PAUSE) { + pause->tx_pause = 1; + } else if (hw->fc == E1000_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_FULL; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_RX_PAUSE; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_TX_PAUSE; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_NONE; + + hw->original_fc = hw->fc; + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + } else + retval = ((hw->media_type == e1000_media_type_fiber) ? + e1000_setup_link(hw) : e1000_force_mac_fc(hw)); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return retval; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN); + regs_buff[9] = er32(TDH); + regs_buff[10] = er32(TDT); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */ + if (hw->phy_type == e1000_phy_igp) { + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_A); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_B); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[14] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_C); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[15] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_D); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[16] = (u32)phy_data; /* cable length */ + regs_buff[17] = 0; /* extended 10bt distance (not needed) */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + 
e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[18] = (u32)phy_data; /* cable polarity */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_PCS_INIT_REG); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[19] = (u32)phy_data; /* cable polarity */ + regs_buff[20] = 0; /* polarity correction enabled (always) */ + regs_buff[22] = 0; /* phy receive errors (unavailable) */ + regs_buff[23] = regs_buff[18]; /* mdix mode */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + } else { + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + if (hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + regs_buff[26] = er32(MANC); + } +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + return hw->eeprom.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->eeprom.type == e1000_eeprom_spi) + ret_val = e1000_read_eeprom(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_eeprom(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len 
= hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + + ret_val = e1000_write_eeprom(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum over the first part of the EEPROM if needed */ + if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG)) + e1000_update_eeprom_checksum(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, e1000_driver_name, + sizeof(drvinfo->driver)); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr = adapter->tx_ring; + struct e1000_rx_ring *rxdr = adapter->rx_ring; + + ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD : + E1000_MAX_82544_RXD; + ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD : + E1000_MAX_82544_TXD; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr, *tx_old; + struct e1000_rx_ring *rxdr, *rx_old; + int i, err; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (netif_running(adapter->netdev)) + e1000_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), + GFP_KERNEL); + if (!txdr) + goto err_alloc_tx; + + rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), + GFP_KERNEL); + if (!rxdr) + goto err_alloc_rx; + + adapter->tx_ring = txdr; + adapter->rx_ring = rxdr; + + rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); + rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ? + E1000_MAX_RXD : E1000_MAX_82544_RXD)); + rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); + txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); + txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ? 
+ E1000_MAX_TXD : E1000_MAX_82544_TXD)); + txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); + + for (i = 0; i < adapter->num_tx_queues; i++) + txdr[i].count = txdr->count; + for (i = 0; i < adapter->num_rx_queues; i++) + rxdr[i].count = rxdr->count; + + err = 0; + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* save the new, restore the old in order to free it, + * then restore the new back again + */ + + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + e1000_free_all_rx_resources(adapter); + e1000_free_all_tx_resources(adapter); + adapter->rx_ring = rxdr; + adapter->tx_ring = txdr; + err = e1000_up(adapter); + } + kfree(tx_old); + kfree(rx_old); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; + +err_setup_tx: + e1000_free_all_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rxdr); +err_alloc_rx: + kfree(txdr); +err_alloc_tx: + if (netif_running(adapter->netdev)) + e1000_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + int i; + + for (i = 0; i < ARRAY_SIZE(test); i++) { + writel(write & test[i], address); + read = readl(address); + if (read != (write & test[i] & mask)) { + e_err(drv, "pattern test reg %04X failed: " + "got 0x%08X expected 0x%08X\n", + reg, read, (write & test[i] & mask)); + *data = reg; + return true; + } + } + return false; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + + writel(write & mask, address); + read = readl(address); + if ((read & mask) != (write & mask)) { + e_err(drv, "set/check reg %04X test failed: " + "got 0x%08X expected 0x%08X\n", + reg, (read & mask), (write & mask)); + *data = reg; + return true; + } + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + u32 value, before, after; + u32 i, toggle; + struct e1000_hw *hw = &adapter->hw; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. 
+ */ + + /* there are several bits on newer hardware that are r/w */ + toggle = 0xFFFFF833; + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err(drv, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); + + REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); + + before = 0x06DFB3FE; + REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); + + if (hw->mac_type >= e1000_82543) { + REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); + value = E1000_RAR_ENTRIES; + for (i = 0; i < value; i++) { + REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), + 0x8003FFFF, 0xFFFFFFFF); + } + } else { + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); + } + + value = E1000_MC_TBL_SIZE; + for (i = 0; i < value; i++) + REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) { + *data = 1; + break; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16)EEPROM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0; + bool shared_int = true; + u32 irq = adapter->pdev->irq; + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* NOTE: we don't test MSI interrupts here, yet + * Hook up test interrupt handler just for this test + */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) + shared_int = false; + else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", (shared_int ? 
+ "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Test each interrupt */ + for (; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (txdr->desc && txdr->buffer_info) { + for (i = 0; i < txdr->count; i++) { + if (txdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + txdr->buffer_info[i].dma, + txdr->buffer_info[i].length, + DMA_TO_DEVICE); + dev_kfree_skb(txdr->buffer_info[i].skb); + } + } + + if (rxdr->desc && rxdr->buffer_info) { + for (i = 0; i < rxdr->count; i++) { + if (rxdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + rxdr->buffer_info[i].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + kfree(rxdr->buffer_info[i].rxbuf.data); + } + } + + if (txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + txdr->desc = NULL; + } + if (rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + rxdr->desc = NULL; + } + + kfree(txdr->buffer_info); + txdr->buffer_info = NULL; + kfree(rxdr->buffer_info); + rxdr->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + u32 rctl; + int i, ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!txdr->count) + txdr->count = E1000_DEFAULT_TXD; + + txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer), + GFP_KERNEL); + if (!txdr->buffer_info) { + ret_val = 1; + goto err_nomem; + } + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { + ret_val = 2; + goto err_nomem; + } + txdr->next_to_use = txdr->next_to_clean = 
0; + + ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH, ((u64)txdr->dma >> 32)); + ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc)); + ew32(TDH, 0); + ew32(TDT, 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < txdr->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); + struct sk_buff *skb; + unsigned int size = 1024; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, size); + txdr->buffer_info[i].skb = skb; + txdr->buffer_info[i].length = skb->len; + txdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RPS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rxdr->count) + rxdr->count = E1000_DEFAULT_RXD; + + rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer), + GFP_KERNEL); + if (!rxdr->buffer_info) { + ret_val = 5; + goto err_nomem; + } + + rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { + ret_val = 6; + goto err_nomem; + } + rxdr->next_to_use = rxdr->next_to_clean = 0; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF)); + ew32(RDBAH, ((u64)rxdr->dma >> 32)); + ew32(RDLEN, rxdr->size); + ew32(RDH, 0); + ew32(RDT, 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rxdr->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); + u8 *buf; + + buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN, + GFP_KERNEL); + if (!buf) { + ret_val = 7; + goto err_nomem; + } + rxdr->buffer_info[i].rxbuf.data = buf; + + rxdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, + buf + NET_SKB_PAD + NET_IP_ALIGN, + E1000_RXBUFFER_2048, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_write_phy_reg(hw, 29, 0x001F); + e1000_write_phy_reg(hw, 30, 0x8FFC); + e1000_write_phy_reg(hw, 29, 0x001A); + e1000_write_phy_reg(hw, 30, 0x8FF0); +} + +static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg; + + /* Because we reset the PHY above, we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock. This + * value defaults back to a 2.5MHz clock when the PHY is reset. 
+ */ + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_EPSCR_TX_CLK_25; + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); + + /* In addition, because of the s/w reset above, we need to enable + * CRS on TX. This must be set for both full and half duplex + * operation. + */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); +} + +static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg; + u16 phy_reg; + + /* Setup the Device Control Register for PHY loopback test. */ + + ctrl_reg = er32(CTRL); + ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ + E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + + /* Read the PHY Specific Control Register (0x10) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + + /* Clear Auto-Crossover bits in PHY Specific Control Register + * (bits 6:5). + */ + phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); + + /* Perform software reset on the PHY */ + e1000_phy_reset(hw); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + e1000_write_phy_reg(hw, PHY_CTRL, 0x8100); + + /* Wait for reset to complete. */ + udelay(500); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_phy_disable_receiver(adapter); + + /* Set the loopback bit in the PHY control register. */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + + /* Setup TX_CLK and TX_CRS one more time. */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Check Phy Configuration */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg != 0x4100) + return 9; + + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + if (phy_reg != 0x0070) + return 10; + + e1000_read_phy_reg(hw, 29, &phy_reg); + if (phy_reg != 0x001A) + return 11; + + return 0; +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u32 stat_reg = 0; + + hw->autoneg = false; + + if (hw->phy_type == e1000_phy_m88) { + /* Auto-MDI/MDIX Off */ + e1000_write_phy_reg(hw, + M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); + /* autoneg off */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); + } + + ctrl_reg = er32(CTRL); + + /* force 1000, set loopback */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (hw->media_type == e1000_media_type_copper && + hw->phy_type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + else { + /* Set the ILOS bit on the fiber Nic is half + * duplex link is detected. + */ + stat_reg = er32(STATUS); + if ((stat_reg & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy_type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg = 0; + u16 count = 0; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->media_type == e1000_media_type_copper) { + /* Attempt to setup Loopback mode on Non-integrated PHY. + * Some PHY registers get corrupted at random, so + * attempt this 10 times. + */ + while (e1000_nonintegrated_phy_loopback(adapter) && + count++ < 10); + if (count < 11) + return 0; + } + break; + + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + return e1000_integrated_phy_loopback(adapter); + default: + /* Default PHY loopback work is to read the MII + * control register and assert bit 14 (loopback mode). 
+ */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + return 0; + } + + return 8; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + return e1000_set_phy_loopback(adapter); + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->media_type == e1000_media_type_copper) { + return e1000_set_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + default: + hw->autoneg = true; + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + e1000_phy_reset(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + skb->data[frame_size / 2 + 10] = 0xBE; + skb->data[frame_size / 2 + 12] = 0xAF; +} + +static int e1000_check_lbtest_frame(const unsigned char *data, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(data + 3) == 0xFF) { + if ((*(data + frame_size / 2 + 10) == 0xBE) && + (*(data + frame_size / 2 + 12) == 0xAF)) { + return 0; + } + } + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i, j, k, l, lc, good_cnt, ret_val = 0; + unsigned long time; + + ew32(RDT, rxdr->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rxdr->count <= txdr->count) + lc = ((txdr->count / 64) * 2) + 1; + else + lc = ((rxdr->count / 64) * 2) + 1; + + k = l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame(txdr->buffer_info[i].skb, + 1024); + dma_sync_single_for_device(&pdev->dev, + txdr->buffer_info[k].dma, + txdr->buffer_info[k].length, + DMA_TO_DEVICE); + if (unlikely(++k == txdr->count)) + k = 0; + } + ew32(TDT, k); + E1000_WRITE_FLUSH(); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + dma_sync_single_for_cpu(&pdev->dev, + rxdr->buffer_info[l].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame( + rxdr->buffer_info[l].rxbuf.data + + NET_SKB_PAD + NET_IP_ALIGN, + 1024); + if (!ret_val) + good_cnt++; + if (unlikely(++l == rxdr->count)) + l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } 
while (good_cnt < 64 && time_after(time + 20, jiffies)); + + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (time_after_eq(jiffies, time + 2)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + e1000_check_for_link(hw); + if (hw->serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + e1000_check_for_link(hw); + if (hw->autoneg) /* if auto_neg is set wait for it */ + msleep(4000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->flags); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + u16 autoneg_advertised = hw->autoneg_advertised; + u8 forced_speed_duplex = hw->forced_speed_duplex; + u8 autoneg = hw->autoneg; + + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + e1000_close(netdev); + else + e1000_reset(adapter); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + /* make sure the phy is powered up */ + e1000_power_up_phy(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + hw->autoneg_advertised = autoneg_advertised; + hw->forced_speed_duplex = forced_speed_duplex; + hw->autoneg = autoneg; + + e1000_reset(adapter); + clear_bit(__E1000_TESTING, &adapter->flags); + if (if_running) + e1000_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + /* Online tests */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__E1000_TESTING, &adapter->flags); + } + 
msleep_interruptible(4 * 1000); +} + +static int e1000_wol_exclusion(struct e1000_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct e1000_hw *hw = &adapter->hw; + int retval = 1; /* fail by default */ + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_PCIE: + /* these don't support WoL at all */ + wol->supported = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events not supported on port B */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* quad port adapters only support WoL on port A */ + if (!adapter->quad_port_a) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + default: + /* dual port cards only support WoL on port A from now on + * unless it was enabled in the eeprom for port B + * so exclude FUNC_1 ports from having WoL enabled + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1 && + !adapter->eeprom_wol) { + wol->supported = 0; + break; + } + + retval = 0; + } + + return retval; +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + /* this function will set ->supported = 0 and return 1 if wol is not + * supported by this hardware + */ + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + /* apply any specific unsupported masks here */ + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* KSP3 does not support UCAST wake-ups */ + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + break; + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + if (wol->wolopts & WAKE_UCAST) { + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + e1000_setup_led(hw); + return 2; + + case ETHTOOL_ID_ON: + e1000_led_on(hw); + break; + + case ETHTOOL_ID_OFF: + e1000_led_off(hw); + break; + + case ETHTOOL_ID_INACTIVE: + e1000_cleanup_led(hw); + } + + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->hw.mac_type < e1000_82545) + return -EOPNOTSUPP; + + if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac_type < e1000_82545) + return -EOPNOTSUPP; + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr = adapter->itr_setting = 4; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + else + ew32(ITR, 0); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int i; + const struct e1000_stats *stat = e1000_gstrings_stats; + + e1000_update_stats(adapter); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) { + char *p; + + switch (stat->type) { + case NETDEV_STATS: + p = (char *)netdev + stat->stat_offset; + break; + case E1000_STATS: + p = (char *)adapter + stat->stat_offset; + break; + default: + netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n", + stat->type, i); + continue; + } + + if (stat->sizeof_stat == sizeof(u64)) + data[i] = *(u64 *)p; + else + data[i] = *(u32 *)p; + } +/* BUG_ON(i != E1000_STATS_LEN); */ +} + +static void e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, 
e1000_gstrings_test, sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = e1000_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e1000_get_link_ksettings, + .set_link_ksettings = e1000_set_link_ksettings, +}; + +void e1000_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &e1000_ethtool_ops; +} diff --git a/devices/e1000/e1000_ethtool-5.10-orig.c b/devices/e1000/e1000_ethtool-5.10-orig.c new file mode 100644 index 00000000..f976e9da --- /dev/null +++ b/devices/e1000/e1000_ethtool-5.10-orig.c @@ -0,0 +1,1885 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ + +/* ethtool support for e1000 */ + +#include "e1000.h" +#include <linux/jiffies.h> +#include <linux/uaccess.h> + +enum {NETDEV_STATS, E1000_STATS}; + +struct e1000_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#define E1000_STAT(m) E1000_STATS, \ + sizeof(((struct e1000_adapter *)0)->m), \ + offsetof(struct e1000_adapter, m) +#define E1000_NETDEV_STAT(m) NETDEV_STATS, \ + sizeof(((struct net_device *)0)->m), \ + offsetof(struct net_device, m) + +static const struct e1000_stats e1000_gstrings_stats[] = { + { "rx_packets", E1000_STAT(stats.gprc) }, + { "tx_packets", E1000_STAT(stats.gptc) }, + { "rx_bytes", E1000_STAT(stats.gorcl) }, + { "tx_bytes", E1000_STAT(stats.gotcl) }, + { "rx_broadcast", E1000_STAT(stats.bprc) }, + { "tx_broadcast", E1000_STAT(stats.bptc) }, + { "rx_multicast", E1000_STAT(stats.mprc) }, + { "tx_multicast", E1000_STAT(stats.mptc) }, + { "rx_errors", E1000_STAT(stats.rxerrc) }, + { "tx_errors", E1000_STAT(stats.txerrc) }, + { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) }, + { "multicast", E1000_STAT(stats.mprc) }, + { "collisions", E1000_STAT(stats.colc) }, + { "rx_length_errors", E1000_STAT(stats.rlerrc) }, + { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) }, + { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, + { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) }, + { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, + { "rx_missed_errors", E1000_STAT(stats.mpc) }, + { "tx_aborted_errors", E1000_STAT(stats.ecol) }, + { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, + { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) }, + { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) }, + { "tx_window_errors", E1000_STAT(stats.latecol) }, + { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, + { "tx_deferred_ok", E1000_STAT(stats.dc) }, + { "tx_single_coll_ok", E1000_STAT(stats.scc) }, + { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, + { "tx_timeout_count", E1000_STAT(tx_timeout_count) }, + { "tx_restart_queue", E1000_STAT(restart_queue) }, + { "rx_long_length_errors", E1000_STAT(stats.roc) }, + { "rx_short_length_errors", E1000_STAT(stats.ruc) }, + { "rx_align_errors", E1000_STAT(stats.algnerrc) }, + { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) }, + { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) }, + { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) }, + { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) }, + { "tx_flow_control_xon", E1000_STAT(stats.xontxc) }, + { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) }, + { "rx_long_byte_count", E1000_STAT(stats.gorcl) }, + { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, + { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, + { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, + { "tx_smbus", E1000_STAT(stats.mgptc) }, + { "rx_smbus", E1000_STAT(stats.mgprc) }, + { "dropped_smbus", E1000_STAT(stats.mgpdc) }, +}; + +#define E1000_QUEUE_STATS_LEN 0 +#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) +#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN) +static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; + +#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) + +static int e1000_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = 
&adapter->hw; + u32 supported, advertising; + + if (hw->media_type == e1000_media_type_copper) { + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + advertising = ADVERTISED_TP; + + if (hw->autoneg == 1) { + advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + advertising |= hw->autoneg_advertised; + } + + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy_addr; + } else { + supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + cmd->base.port = PORT_FIBRE; + } + + if (er32(STATUS) & E1000_STATUS_LU) { + e1000_get_speed_and_duplex(hw, &adapter->link_speed, + &adapter->link_duplex); + cmd->base.speed = adapter->link_speed; + + /* unfortunately FULL_DUPLEX != DUPLEX_FULL + * and HALF_DUPLEX != DUPLEX_HALF + */ + if (adapter->link_duplex == FULL_DUPLEX) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + cmd->base.autoneg = ((hw->media_type == e1000_media_type_fiber) || + hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 1; MDI => 0 */ + if ((hw->media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + cmd->base.eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? + ETH_TP_MDI_X : ETH_TP_MDI); + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->mdix; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + + return 0; +} + +static int e1000_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
+ */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (cmd->base.autoneg != AUTONEG_ENABLE)) { + e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->autoneg = 1; + if (hw->media_type == e1000_media_type_fiber) + hw->autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->autoneg_advertised = advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + } else { + u32 speed = cmd->base.speed; + /* calling this overrides forced MDI setting */ + if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) { + clear_bit(__E1000_RESETTING, &adapter->flags); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->mdix = AUTO_ALL_MODES; + else + hw->mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + clear_bit(__E1000_RESETTING, &adapter->flags); + return 0; +} + +static u32 e1000_get_link(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + adapter->hw.get_link_status = 1; + + return e1000_has_link(adapter); +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc == E1000_FC_RX_PAUSE) { + pause->rx_pause = 1; + } else if (hw->fc == E1000_FC_TX_PAUSE) { + pause->tx_pause = 1; + } else if (hw->fc == E1000_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_FULL; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_RX_PAUSE; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_TX_PAUSE; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_NONE; + + hw->original_fc = hw->fc; + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + } else + retval = ((hw->media_type == e1000_media_type_fiber) ? 
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw)); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return retval; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN); + regs_buff[9] = er32(TDH); + regs_buff[10] = er32(TDT); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */ + if (hw->phy_type == e1000_phy_igp) { + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_A); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_B); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[14] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_C); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[15] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_D); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[16] = (u32)phy_data; /* cable length */ + regs_buff[17] = 0; /* extended 10bt distance (not needed) */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[18] = (u32)phy_data; /* cable polarity */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_PCS_INIT_REG); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[19] = (u32)phy_data; /* cable polarity */ + regs_buff[20] = 0; /* polarity correction enabled (always) */ + regs_buff[22] = 0; /* phy receive errors (unavailable) */ + regs_buff[23] = regs_buff[18]; /* mdix mode */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + } else { + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy 
reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + if (hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + regs_buff[26] = er32(MANC); + } +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + return hw->eeprom.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->eeprom.type == e1000_eeprom_spi) + ret_val = e1000_read_eeprom(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_eeprom(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + + ret_val = e1000_write_eeprom(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum over the first 
part of the EEPROM if needed */ + if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG)) + e1000_update_eeprom_checksum(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, e1000_driver_name, + sizeof(drvinfo->driver)); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr = adapter->tx_ring; + struct e1000_rx_ring *rxdr = adapter->rx_ring; + + ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD : + E1000_MAX_82544_RXD; + ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD : + E1000_MAX_82544_TXD; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr, *tx_old; + struct e1000_rx_ring *rxdr, *rx_old; + int i, err; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (netif_running(adapter->netdev)) + e1000_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), + GFP_KERNEL); + if (!txdr) + goto err_alloc_tx; + + rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), + GFP_KERNEL); + if (!rxdr) + goto err_alloc_rx; + + adapter->tx_ring = txdr; + adapter->rx_ring = rxdr; + + rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); + rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ? + E1000_MAX_RXD : E1000_MAX_82544_RXD)); + rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); + txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); + txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ? 
+ E1000_MAX_TXD : E1000_MAX_82544_TXD)); + txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); + + for (i = 0; i < adapter->num_tx_queues; i++) + txdr[i].count = txdr->count; + for (i = 0; i < adapter->num_rx_queues; i++) + rxdr[i].count = rxdr->count; + + err = 0; + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* save the new, restore the old in order to free it, + * then restore the new back again + */ + + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + e1000_free_all_rx_resources(adapter); + e1000_free_all_tx_resources(adapter); + adapter->rx_ring = rxdr; + adapter->tx_ring = txdr; + err = e1000_up(adapter); + } + kfree(tx_old); + kfree(rx_old); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; + +err_setup_tx: + e1000_free_all_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rxdr); +err_alloc_rx: + kfree(txdr); +err_alloc_tx: + if (netif_running(adapter->netdev)) + e1000_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + int i; + + for (i = 0; i < ARRAY_SIZE(test); i++) { + writel(write & test[i], address); + read = readl(address); + if (read != (write & test[i] & mask)) { + e_err(drv, "pattern test reg %04X failed: " + "got 0x%08X expected 0x%08X\n", + reg, read, (write & test[i] & mask)); + *data = reg; + return true; + } + } + return false; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + + writel(write & mask, address); + read = readl(address); + if ((read & mask) != (write & mask)) { + e_err(drv, "set/check reg %04X test failed: " + "got 0x%08X expected 0x%08X\n", + reg, (read & mask), (write & mask)); + *data = reg; + return true; + } + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + u32 value, before, after; + u32 i, toggle; + struct e1000_hw *hw = &adapter->hw; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. 
+ */ + + /* there are several bits on newer hardware that are r/w */ + toggle = 0xFFFFF833; + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err(drv, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); + + REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); + + before = 0x06DFB3FE; + REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); + + if (hw->mac_type >= e1000_82543) { + REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); + value = E1000_RAR_ENTRIES; + for (i = 0; i < value; i++) { + REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), + 0x8003FFFF, 0xFFFFFFFF); + } + } else { + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); + } + + value = E1000_MC_TBL_SIZE; + for (i = 0; i < value; i++) + REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) { + *data = 1; + break; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16)EEPROM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0; + bool shared_int = true; + u32 irq = adapter->pdev->irq; + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* NOTE: we don't test MSI interrupts here, yet + * Hook up test interrupt handler just for this test + */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) + shared_int = false; + else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", (shared_int ? 
+ "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Test each interrupt */ + for (; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (txdr->desc && txdr->buffer_info) { + for (i = 0; i < txdr->count; i++) { + if (txdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + txdr->buffer_info[i].dma, + txdr->buffer_info[i].length, + DMA_TO_DEVICE); + dev_kfree_skb(txdr->buffer_info[i].skb); + } + } + + if (rxdr->desc && rxdr->buffer_info) { + for (i = 0; i < rxdr->count; i++) { + if (rxdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + rxdr->buffer_info[i].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + kfree(rxdr->buffer_info[i].rxbuf.data); + } + } + + if (txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + txdr->desc = NULL; + } + if (rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + rxdr->desc = NULL; + } + + kfree(txdr->buffer_info); + txdr->buffer_info = NULL; + kfree(rxdr->buffer_info); + rxdr->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + u32 rctl; + int i, ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!txdr->count) + txdr->count = E1000_DEFAULT_TXD; + + txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer), + GFP_KERNEL); + if (!txdr->buffer_info) { + ret_val = 1; + goto err_nomem; + } + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { + ret_val = 2; + goto err_nomem; + } + txdr->next_to_use = txdr->next_to_clean = 
0; + + ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH, ((u64)txdr->dma >> 32)); + ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc)); + ew32(TDH, 0); + ew32(TDT, 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < txdr->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); + struct sk_buff *skb; + unsigned int size = 1024; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, size); + txdr->buffer_info[i].skb = skb; + txdr->buffer_info[i].length = skb->len; + txdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RPS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rxdr->count) + rxdr->count = E1000_DEFAULT_RXD; + + rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer), + GFP_KERNEL); + if (!rxdr->buffer_info) { + ret_val = 5; + goto err_nomem; + } + + rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { + ret_val = 6; + goto err_nomem; + } + rxdr->next_to_use = rxdr->next_to_clean = 0; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF)); + ew32(RDBAH, ((u64)rxdr->dma >> 32)); + ew32(RDLEN, rxdr->size); + ew32(RDH, 0); + ew32(RDT, 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rxdr->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); + u8 *buf; + + buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN, + GFP_KERNEL); + if (!buf) { + ret_val = 7; + goto err_nomem; + } + rxdr->buffer_info[i].rxbuf.data = buf; + + rxdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, + buf + NET_SKB_PAD + NET_IP_ALIGN, + E1000_RXBUFFER_2048, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_write_phy_reg(hw, 29, 0x001F); + e1000_write_phy_reg(hw, 30, 0x8FFC); + e1000_write_phy_reg(hw, 29, 0x001A); + e1000_write_phy_reg(hw, 30, 0x8FF0); +} + +static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg; + + /* Because we reset the PHY above, we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock. This + * value defaults back to a 2.5MHz clock when the PHY is reset. 
+ */ + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_EPSCR_TX_CLK_25; + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); + + /* In addition, because of the s/w reset above, we need to enable + * CRS on TX. This must be set for both full and half duplex + * operation. + */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); +} + +static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg; + u16 phy_reg; + + /* Setup the Device Control Register for PHY loopback test. */ + + ctrl_reg = er32(CTRL); + ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ + E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + + /* Read the PHY Specific Control Register (0x10) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + + /* Clear Auto-Crossover bits in PHY Specific Control Register + * (bits 6:5). + */ + phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); + + /* Perform software reset on the PHY */ + e1000_phy_reset(hw); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + e1000_write_phy_reg(hw, PHY_CTRL, 0x8100); + + /* Wait for reset to complete. */ + udelay(500); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_phy_disable_receiver(adapter); + + /* Set the loopback bit in the PHY control register. */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + + /* Setup TX_CLK and TX_CRS one more time. */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Check Phy Configuration */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg != 0x4100) + return 9; + + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + if (phy_reg != 0x0070) + return 10; + + e1000_read_phy_reg(hw, 29, &phy_reg); + if (phy_reg != 0x001A) + return 11; + + return 0; +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u32 stat_reg = 0; + + hw->autoneg = false; + + if (hw->phy_type == e1000_phy_m88) { + /* Auto-MDI/MDIX Off */ + e1000_write_phy_reg(hw, + M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); + /* autoneg off */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); + } + + ctrl_reg = er32(CTRL); + + /* force 1000, set loopback */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (hw->media_type == e1000_media_type_copper && + hw->phy_type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + else { + /* Set the ILOS bit on the fiber Nic is half + * duplex link is detected. + */ + stat_reg = er32(STATUS); + if ((stat_reg & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy_type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg = 0; + u16 count = 0; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->media_type == e1000_media_type_copper) { + /* Attempt to setup Loopback mode on Non-integrated PHY. + * Some PHY registers get corrupted at random, so + * attempt this 10 times. + */ + while (e1000_nonintegrated_phy_loopback(adapter) && + count++ < 10); + if (count < 11) + return 0; + } + break; + + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + return e1000_integrated_phy_loopback(adapter); + default: + /* Default PHY loopback work is to read the MII + * control register and assert bit 14 (loopback mode). 
+ */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + return 0; + } + + return 8; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + return e1000_set_phy_loopback(adapter); + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->media_type == e1000_media_type_copper) { + return e1000_set_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + default: + hw->autoneg = true; + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + e1000_phy_reset(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + skb->data[frame_size / 2 + 10] = 0xBE; + skb->data[frame_size / 2 + 12] = 0xAF; +} + +static int e1000_check_lbtest_frame(const unsigned char *data, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(data + 3) == 0xFF) { + if ((*(data + frame_size / 2 + 10) == 0xBE) && + (*(data + frame_size / 2 + 12) == 0xAF)) { + return 0; + } + } + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i, j, k, l, lc, good_cnt, ret_val = 0; + unsigned long time; + + ew32(RDT, rxdr->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rxdr->count <= txdr->count) + lc = ((txdr->count / 64) * 2) + 1; + else + lc = ((rxdr->count / 64) * 2) + 1; + + k = l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame(txdr->buffer_info[i].skb, + 1024); + dma_sync_single_for_device(&pdev->dev, + txdr->buffer_info[k].dma, + txdr->buffer_info[k].length, + DMA_TO_DEVICE); + if (unlikely(++k == txdr->count)) + k = 0; + } + ew32(TDT, k); + E1000_WRITE_FLUSH(); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + dma_sync_single_for_cpu(&pdev->dev, + rxdr->buffer_info[l].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame( + rxdr->buffer_info[l].rxbuf.data + + NET_SKB_PAD + NET_IP_ALIGN, + 1024); + if (!ret_val) + good_cnt++; + if (unlikely(++l == rxdr->count)) + l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } 
while (good_cnt < 64 && time_after(time + 20, jiffies)); + + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (time_after_eq(jiffies, time + 2)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + e1000_check_for_link(hw); + if (hw->serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + e1000_check_for_link(hw); + if (hw->autoneg) /* if auto_neg is set wait for it */ + msleep(4000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->flags); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + u16 autoneg_advertised = hw->autoneg_advertised; + u8 forced_speed_duplex = hw->forced_speed_duplex; + u8 autoneg = hw->autoneg; + + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + e1000_close(netdev); + else + e1000_reset(adapter); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + /* make sure the phy is powered up */ + e1000_power_up_phy(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + hw->autoneg_advertised = autoneg_advertised; + hw->forced_speed_duplex = forced_speed_duplex; + hw->autoneg = autoneg; + + e1000_reset(adapter); + clear_bit(__E1000_TESTING, &adapter->flags); + if (if_running) + e1000_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + /* Online tests */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__E1000_TESTING, &adapter->flags); + } + 
msleep_interruptible(4 * 1000); +} + +static int e1000_wol_exclusion(struct e1000_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct e1000_hw *hw = &adapter->hw; + int retval = 1; /* fail by default */ + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_PCIE: + /* these don't support WoL at all */ + wol->supported = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events not supported on port B */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* quad port adapters only support WoL on port A */ + if (!adapter->quad_port_a) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + default: + /* dual port cards only support WoL on port A from now on + * unless it was enabled in the eeprom for port B + * so exclude FUNC_1 ports from having WoL enabled + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1 && + !adapter->eeprom_wol) { + wol->supported = 0; + break; + } + + retval = 0; + } + + return retval; +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + /* this function will set ->supported = 0 and return 1 if wol is not + * supported by this hardware + */ + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + /* apply any specific unsupported masks here */ + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* KSP3 does not support UCAST wake-ups */ + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + break; + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + if (wol->wolopts & WAKE_UCAST) { + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + e1000_setup_led(hw); + return 2; + + case ETHTOOL_ID_ON: + e1000_led_on(hw); + break; + + case ETHTOOL_ID_OFF: + e1000_led_off(hw); + break; + + case ETHTOOL_ID_INACTIVE: + e1000_cleanup_led(hw); + } + + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->hw.mac_type < e1000_82545) + return -EOPNOTSUPP; + + if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac_type < e1000_82545) + return -EOPNOTSUPP; + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr = adapter->itr_setting = 4; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + else + ew32(ITR, 0); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int i; + const struct e1000_stats *stat = e1000_gstrings_stats; + + e1000_update_stats(adapter); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) { + char *p; + + switch (stat->type) { + case NETDEV_STATS: + p = (char *)netdev + stat->stat_offset; + break; + case E1000_STATS: + p = (char *)adapter + stat->stat_offset; + break; + default: + netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n", + stat->type, i); + continue; + } + + if (stat->sizeof_stat == sizeof(u64)) + data[i] = *(u64 *)p; + else + data[i] = *(u32 *)p; + } +/* BUG_ON(i != E1000_STATS_LEN); */ +} + +static void e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, 
e1000_gstrings_test, sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = e1000_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e1000_get_link_ksettings, + .set_link_ksettings = e1000_set_link_ksettings, +}; + +void e1000_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &e1000_ethtool_ops; +} diff --git a/devices/e1000/e1000_ethtool-5.14-ethercat.c b/devices/e1000/e1000_ethtool-5.14-ethercat.c new file mode 100644 index 00000000..dfb4617b --- /dev/null +++ b/devices/e1000/e1000_ethtool-5.14-ethercat.c @@ -0,0 +1,1922 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. 
 */
+
+/* ethtool support for e1000 */
+
+#include "e1000-5.14-ethercat.h"
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_SUSE_KERNEL
+#include <linux/suse_version.h>
+#else
+# ifndef SUSE_VERSION
+# define SUSE_VERSION 0
+# endif
+# ifndef SUSE_PATCHLEVEL
+# define SUSE_PATCHLEVEL 0
+# endif
+#endif
+
+
+enum {NETDEV_STATS, E1000_STATS};
+
+struct e1000_stats {
+        char stat_string[ETH_GSTRING_LEN];
+        int type;
+        int sizeof_stat;
+        int stat_offset;
+};
+
+#define E1000_STAT(m) E1000_STATS, \
+                sizeof(((struct e1000_adapter *)0)->m), \
+                offsetof(struct e1000_adapter, m)
+#define E1000_NETDEV_STAT(m) NETDEV_STATS, \
+                sizeof(((struct net_device *)0)->m), \
+                offsetof(struct net_device, m)
+
+static const struct e1000_stats e1000_gstrings_stats[] = {
+        { "rx_packets", E1000_STAT(stats.gprc) },
+        { "tx_packets", E1000_STAT(stats.gptc) },
+        { "rx_bytes", E1000_STAT(stats.gorcl) },
+        { "tx_bytes", E1000_STAT(stats.gotcl) },
+        { "rx_broadcast", E1000_STAT(stats.bprc) },
+        { "tx_broadcast", E1000_STAT(stats.bptc) },
+        { "rx_multicast", E1000_STAT(stats.mprc) },
+        { "tx_multicast", E1000_STAT(stats.mptc) },
+        { "rx_errors", E1000_STAT(stats.rxerrc) },
+        { "tx_errors", E1000_STAT(stats.txerrc) },
+        { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
+        { "multicast", E1000_STAT(stats.mprc) },
+        { "collisions", E1000_STAT(stats.colc) },
+        { "rx_length_errors", E1000_STAT(stats.rlerrc) },
+        { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
+        { "rx_crc_errors", E1000_STAT(stats.crcerrs) },
+        { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
+        { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
+        { "rx_missed_errors", E1000_STAT(stats.mpc) },
+        { "tx_aborted_errors", E1000_STAT(stats.ecol) },
+        { "tx_carrier_errors", E1000_STAT(stats.tncrs) },
+        { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
+        { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
+        { "tx_window_errors", E1000_STAT(stats.latecol) },
+        { "tx_abort_late_coll", E1000_STAT(stats.latecol) },
+        { "tx_deferred_ok", E1000_STAT(stats.dc) },
+        { "tx_single_coll_ok", E1000_STAT(stats.scc) },
+        { "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+        { "tx_timeout_count", E1000_STAT(tx_timeout_count) },
+        { "tx_restart_queue", E1000_STAT(restart_queue) },
+        { "rx_long_length_errors", E1000_STAT(stats.roc) },
+        { "rx_short_length_errors", E1000_STAT(stats.ruc) },
+        { "rx_align_errors", E1000_STAT(stats.algnerrc) },
+        { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
+        { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
+        { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
+        { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
+        { "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
+        { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
+        { "rx_long_byte_count", E1000_STAT(stats.gorcl) },
+        { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
+        { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+        { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
+        { "tx_smbus", E1000_STAT(stats.mgptc) },
+        { "rx_smbus", E1000_STAT(stats.mgprc) },
+        { "dropped_smbus", E1000_STAT(stats.mgpdc) },
+};
+
+#define E1000_QUEUE_STATS_LEN 0
+#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+        "Register test (offline)", "Eeprom test (offline)",
+        "Interrupt test (offline)", "Loopback test (offline)",
+        "Link test (on/offline)"
+};
+
+#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
+
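
The two tables above are the driver's half of the ethtool string-set interface: e1000_get_sset_count() further down in this file reports the table sizes, e1000_get_strings() copies the names to userspace, and e1000_get_ethtool_stats() and e1000_diag_test() fill in values and test results in the same order as the tables. As a rough illustration of how that is consumed (not part of this patch; the interface name "eth0" and the stripped-down error handling are assumptions), a minimal userspace reader might look like this:

/* stats_dump.c: illustrative reader for the ETH_SS_STATS string set.
 * Assumptions: interface name "eth0", minimal error handling.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        const char *ifname = "eth0";    /* assumption */
        struct ethtool_sset_info *sset;
        struct ethtool_gstrings *names;
        struct ethtool_stats *vals;
        struct ifreq ifr;
        unsigned int i, n_stats;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

        /* 1) size of the ETH_SS_STATS set (answered by get_sset_count) */
        sset = calloc(1, sizeof(*sset) + sizeof(__u32));
        sset->cmd = ETHTOOL_GSSET_INFO;
        sset->sset_mask = 1ULL << ETH_SS_STATS;
        ifr.ifr_data = (char *)sset;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return 1;
        n_stats = sset->data[0];

        /* 2) names (get_strings), then 3) values (get_ethtool_stats) */
        names = calloc(1, sizeof(*names) + (size_t)n_stats * ETH_GSTRING_LEN);
        names->cmd = ETHTOOL_GSTRINGS;
        names->string_set = ETH_SS_STATS;
        names->len = n_stats;
        ifr.ifr_data = (char *)names;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return 1;

        vals = calloc(1, sizeof(*vals) + (size_t)n_stats * sizeof(__u64));
        vals->cmd = ETHTOOL_GSTATS;
        vals->n_stats = n_stats;
        ifr.ifr_data = (char *)vals;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return 1;

        for (i = 0; i < n_stats; i++)
                printf("%-32.32s %llu\n",
                       (char *)names->data + i * ETH_GSTRING_LEN,
                       (unsigned long long)vals->data[i]);
        return 0;
}

The stock `ethtool -S <iface>` and `ethtool -t <iface> offline` commands drive the same callbacks; a port claimed by the EtherCAT master is typically not usable as a regular netdev, so the sketch only illustrates the ethtool side of the interface.
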
+static int e1000_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 supported, advertising; + + if (hw->media_type == e1000_media_type_copper) { + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + advertising = ADVERTISED_TP; + + if (hw->autoneg == 1) { + advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + advertising |= hw->autoneg_advertised; + } + + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy_addr; + } else { + supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + cmd->base.port = PORT_FIBRE; + } + + if (er32(STATUS) & E1000_STATUS_LU) { + e1000_get_speed_and_duplex(hw, &adapter->link_speed, + &adapter->link_duplex); + cmd->base.speed = adapter->link_speed; + + /* unfortunately FULL_DUPLEX != DUPLEX_FULL + * and HALF_DUPLEX != DUPLEX_HALF + */ + if (adapter->link_duplex == FULL_DUPLEX) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + cmd->base.autoneg = ((hw->media_type == e1000_media_type_fiber) || + hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 1; MDI => 0 */ + if ((hw->media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + cmd->base.eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? + ETH_TP_MDI_X : ETH_TP_MDI); + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->mdix; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + + return 0; +} + +static int e1000_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
+ */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (cmd->base.autoneg != AUTONEG_ENABLE)) { + e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->autoneg = 1; + if (hw->media_type == e1000_media_type_fiber) + hw->autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->autoneg_advertised = advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + } else { + u32 speed = cmd->base.speed; + /* calling this overrides forced MDI setting */ + if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) { + clear_bit(__E1000_RESETTING, &adapter->flags); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->mdix = AUTO_ALL_MODES; + else + hw->mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + clear_bit(__E1000_RESETTING, &adapter->flags); + return 0; +} + +static u32 e1000_get_link(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + adapter->hw.get_link_status = 1; + + return e1000_has_link(adapter); +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc == E1000_FC_RX_PAUSE) { + pause->rx_pause = 1; + } else if (hw->fc == E1000_FC_TX_PAUSE) { + pause->tx_pause = 1; + } else if (hw->fc == E1000_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_FULL; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_RX_PAUSE; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_TX_PAUSE; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_NONE; + + hw->original_fc = hw->fc; + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + } else + retval = ((hw->media_type == e1000_media_type_fiber) ? 
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw)); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return retval; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN); + regs_buff[9] = er32(TDH); + regs_buff[10] = er32(TDT); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */ + if (hw->phy_type == e1000_phy_igp) { + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_A); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_B); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[14] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_C); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[15] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_D); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[16] = (u32)phy_data; /* cable length */ + regs_buff[17] = 0; /* extended 10bt distance (not needed) */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[18] = (u32)phy_data; /* cable polarity */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_PCS_INIT_REG); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[19] = (u32)phy_data; /* cable polarity */ + regs_buff[20] = 0; /* polarity correction enabled (always) */ + regs_buff[22] = 0; /* phy receive errors (unavailable) */ + regs_buff[23] = regs_buff[18]; /* mdix mode */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + } else { + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy 
reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + if (hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + regs_buff[26] = er32(MANC); + } +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + return hw->eeprom.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->eeprom.type == e1000_eeprom_spi) + ret_val = e1000_read_eeprom(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_eeprom(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = e1000_write_eeprom(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum over the first part of the 
EEPROM if needed */ + if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG)) + e1000_update_eeprom_checksum(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, e1000_driver_name, + sizeof(drvinfo->driver)); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +} + +static void e1000_get_ringparam(struct net_device *netdev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL == 5 + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr = adapter->tx_ring; + struct e1000_rx_ring *rxdr = adapter->rx_ring; + + ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD : + E1000_MAX_82544_RXD; + ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD : + E1000_MAX_82544_TXD; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; +} + +static int e1000_set_ringparam(struct net_device *netdev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL == 5 + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr, *tx_old; + struct e1000_rx_ring *rxdr, *rx_old; + int i, err; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (netif_running(adapter->netdev)) + e1000_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), + GFP_KERNEL); + if (!txdr) + goto err_alloc_tx; + + rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), + GFP_KERNEL); + if (!rxdr) + goto err_alloc_rx; + + adapter->tx_ring = txdr; + adapter->rx_ring = rxdr; + + rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); + rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ? + E1000_MAX_RXD : E1000_MAX_82544_RXD)); + rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); + txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); + txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ? 
+ E1000_MAX_TXD : E1000_MAX_82544_TXD)); + txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); + + for (i = 0; i < adapter->num_tx_queues; i++) + txdr[i].count = txdr->count; + for (i = 0; i < adapter->num_rx_queues; i++) + rxdr[i].count = rxdr->count; + + err = 0; + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* save the new, restore the old in order to free it, + * then restore the new back again + */ + + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + e1000_free_all_rx_resources(adapter); + e1000_free_all_tx_resources(adapter); + adapter->rx_ring = rxdr; + adapter->tx_ring = txdr; + err = e1000_up(adapter); + } + kfree(tx_old); + kfree(rx_old); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; + +err_setup_tx: + e1000_free_all_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rxdr); +err_alloc_rx: + kfree(txdr); +err_alloc_tx: + if (netif_running(adapter->netdev)) + e1000_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + int i; + + for (i = 0; i < ARRAY_SIZE(test); i++) { + writel(write & test[i], address); + read = readl(address); + if (read != (write & test[i] & mask)) { + e_err(drv, "pattern test reg %04X failed: " + "got 0x%08X expected 0x%08X\n", + reg, read, (write & test[i] & mask)); + *data = reg; + return true; + } + } + return false; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + + writel(write & mask, address); + read = readl(address); + if ((read & mask) != (write & mask)) { + e_err(drv, "set/check reg %04X test failed: " + "got 0x%08X expected 0x%08X\n", + reg, (read & mask), (write & mask)); + *data = reg; + return true; + } + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + u32 value, before, after; + u32 i, toggle; + struct e1000_hw *hw = &adapter->hw; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. 
+ */ + + /* there are several bits on newer hardware that are r/w */ + toggle = 0xFFFFF833; + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err(drv, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); + + REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); + + before = 0x06DFB3FE; + REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); + + if (hw->mac_type >= e1000_82543) { + REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); + value = E1000_RAR_ENTRIES; + for (i = 0; i < value; i++) { + REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), + 0x8003FFFF, 0xFFFFFFFF); + } + } else { + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); + } + + value = E1000_MC_TBL_SIZE; + for (i = 0; i < value; i++) + REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) { + *data = 1; + break; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16)EEPROM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0; + bool shared_int = true; + u32 irq = adapter->pdev->irq; + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* NOTE: we don't test MSI interrupts here, yet + * Hook up test interrupt handler just for this test + */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) + shared_int = false; + else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", (shared_int ? 
+ "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Test each interrupt */ + for (; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (txdr->desc && txdr->buffer_info) { + for (i = 0; i < txdr->count; i++) { + if (txdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + txdr->buffer_info[i].dma, + txdr->buffer_info[i].length, + DMA_TO_DEVICE); + dev_kfree_skb(txdr->buffer_info[i].skb); + } + } + + if (rxdr->desc && rxdr->buffer_info) { + for (i = 0; i < rxdr->count; i++) { + if (rxdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + rxdr->buffer_info[i].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + kfree(rxdr->buffer_info[i].rxbuf.data); + } + } + + if (txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + txdr->desc = NULL; + } + if (rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + rxdr->desc = NULL; + } + + kfree(txdr->buffer_info); + txdr->buffer_info = NULL; + kfree(rxdr->buffer_info); + rxdr->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + u32 rctl; + int i, ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!txdr->count) + txdr->count = E1000_DEFAULT_TXD; + + txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer), + GFP_KERNEL); + if (!txdr->buffer_info) { + ret_val = 1; + goto err_nomem; + } + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { + ret_val = 2; + goto err_nomem; + } + txdr->next_to_use = txdr->next_to_clean = 
0; + + ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH, ((u64)txdr->dma >> 32)); + ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc)); + ew32(TDH, 0); + ew32(TDT, 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < txdr->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); + struct sk_buff *skb; + unsigned int size = 1024; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, size); + txdr->buffer_info[i].skb = skb; + txdr->buffer_info[i].length = skb->len; + txdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RPS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rxdr->count) + rxdr->count = E1000_DEFAULT_RXD; + + rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer), + GFP_KERNEL); + if (!rxdr->buffer_info) { + ret_val = 5; + goto err_nomem; + } + + rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { + ret_val = 6; + goto err_nomem; + } + rxdr->next_to_use = rxdr->next_to_clean = 0; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF)); + ew32(RDBAH, ((u64)rxdr->dma >> 32)); + ew32(RDLEN, rxdr->size); + ew32(RDH, 0); + ew32(RDT, 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rxdr->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); + u8 *buf; + + buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN, + GFP_KERNEL); + if (!buf) { + ret_val = 7; + goto err_nomem; + } + rxdr->buffer_info[i].rxbuf.data = buf; + + rxdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, + buf + NET_SKB_PAD + NET_IP_ALIGN, + E1000_RXBUFFER_2048, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_write_phy_reg(hw, 29, 0x001F); + e1000_write_phy_reg(hw, 30, 0x8FFC); + e1000_write_phy_reg(hw, 29, 0x001A); + e1000_write_phy_reg(hw, 30, 0x8FF0); +} + +static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg; + + /* Because we reset the PHY above, we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock. This + * value defaults back to a 2.5MHz clock when the PHY is reset. 
+ */ + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_EPSCR_TX_CLK_25; + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); + + /* In addition, because of the s/w reset above, we need to enable + * CRS on TX. This must be set for both full and half duplex + * operation. + */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); +} + +static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg; + u16 phy_reg; + + /* Setup the Device Control Register for PHY loopback test. */ + + ctrl_reg = er32(CTRL); + ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ + E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + + /* Read the PHY Specific Control Register (0x10) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + + /* Clear Auto-Crossover bits in PHY Specific Control Register + * (bits 6:5). + */ + phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); + + /* Perform software reset on the PHY */ + e1000_phy_reset(hw); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + e1000_write_phy_reg(hw, PHY_CTRL, 0x8100); + + /* Wait for reset to complete. */ + udelay(500); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_phy_disable_receiver(adapter); + + /* Set the loopback bit in the PHY control register. */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + + /* Setup TX_CLK and TX_CRS one more time. */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Check Phy Configuration */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg != 0x4100) + return 9; + + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + if (phy_reg != 0x0070) + return 10; + + e1000_read_phy_reg(hw, 29, &phy_reg); + if (phy_reg != 0x001A) + return 11; + + return 0; +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u32 stat_reg = 0; + + hw->autoneg = false; + + if (hw->phy_type == e1000_phy_m88) { + /* Auto-MDI/MDIX Off */ + e1000_write_phy_reg(hw, + M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); + /* autoneg off */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); + } + + ctrl_reg = er32(CTRL); + + /* force 1000, set loopback */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (hw->media_type == e1000_media_type_copper && + hw->phy_type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + else { + /* Set the ILOS bit on the fiber Nic is half + * duplex link is detected. + */ + stat_reg = er32(STATUS); + if ((stat_reg & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy_type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg = 0; + u16 count = 0; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->media_type == e1000_media_type_copper) { + /* Attempt to setup Loopback mode on Non-integrated PHY. + * Some PHY registers get corrupted at random, so + * attempt this 10 times. + */ + while (e1000_nonintegrated_phy_loopback(adapter) && + count++ < 10); + if (count < 11) + return 0; + } + break; + + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + return e1000_integrated_phy_loopback(adapter); + default: + /* Default PHY loopback work is to read the MII + * control register and assert bit 14 (loopback mode). 
+ */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + return 0; + } + + return 8; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + return e1000_set_phy_loopback(adapter); + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->media_type == e1000_media_type_copper) { + return e1000_set_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + default: + hw->autoneg = true; + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + e1000_phy_reset(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + skb->data[frame_size / 2 + 10] = 0xBE; + skb->data[frame_size / 2 + 12] = 0xAF; +} + +static int e1000_check_lbtest_frame(const unsigned char *data, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(data + 3) == 0xFF) { + if ((*(data + frame_size / 2 + 10) == 0xBE) && + (*(data + frame_size / 2 + 12) == 0xAF)) { + return 0; + } + } + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i, j, k, l, lc, good_cnt, ret_val = 0; + unsigned long time; + + ew32(RDT, rxdr->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rxdr->count <= txdr->count) + lc = ((txdr->count / 64) * 2) + 1; + else + lc = ((rxdr->count / 64) * 2) + 1; + + k = l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame(txdr->buffer_info[i].skb, + 1024); + dma_sync_single_for_device(&pdev->dev, + txdr->buffer_info[k].dma, + txdr->buffer_info[k].length, + DMA_TO_DEVICE); + if (unlikely(++k == txdr->count)) + k = 0; + } + ew32(TDT, k); + E1000_WRITE_FLUSH(); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + dma_sync_single_for_cpu(&pdev->dev, + rxdr->buffer_info[l].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame( + rxdr->buffer_info[l].rxbuf.data + + NET_SKB_PAD + NET_IP_ALIGN, + 1024); + if (!ret_val) + good_cnt++; + if (unlikely(++l == rxdr->count)) + l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } 
while (good_cnt < 64 && time_after(time + 20, jiffies)); + + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (time_after_eq(jiffies, time + 2)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + e1000_check_for_link(hw); + if (hw->serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + e1000_check_for_link(hw); + if (hw->autoneg) /* if auto_neg is set wait for it */ + msleep(4000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->flags); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + u16 autoneg_advertised = hw->autoneg_advertised; + u8 forced_speed_duplex = hw->forced_speed_duplex; + u8 autoneg = hw->autoneg; + + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + e1000_close(netdev); + else + e1000_reset(adapter); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + /* make sure the phy is powered up */ + e1000_power_up_phy(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + hw->autoneg_advertised = autoneg_advertised; + hw->forced_speed_duplex = forced_speed_duplex; + hw->autoneg = autoneg; + + e1000_reset(adapter); + clear_bit(__E1000_TESTING, &adapter->flags); + if (if_running) + e1000_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + /* Online tests */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__E1000_TESTING, &adapter->flags); + } + 
msleep_interruptible(4 * 1000); +} + +static int e1000_wol_exclusion(struct e1000_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct e1000_hw *hw = &adapter->hw; + int retval = 1; /* fail by default */ + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_PCIE: + /* these don't support WoL at all */ + wol->supported = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events not supported on port B */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* quad port adapters only support WoL on port A */ + if (!adapter->quad_port_a) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + default: + /* dual port cards only support WoL on port A from now on + * unless it was enabled in the eeprom for port B + * so exclude FUNC_1 ports from having WoL enabled + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1 && + !adapter->eeprom_wol) { + wol->supported = 0; + break; + } + + retval = 0; + } + + return retval; +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + /* this function will set ->supported = 0 and return 1 if wol is not + * supported by this hardware + */ + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + /* apply any specific unsupported masks here */ + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* KSP3 does not support UCAST wake-ups */ + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + break; + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + if (wol->wolopts & WAKE_UCAST) { + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + e1000_setup_led(hw); + return 2; + + case ETHTOOL_ID_ON: + e1000_led_on(hw); + break; + + case ETHTOOL_ID_OFF: + e1000_led_off(hw); + break; + + case ETHTOOL_ID_INACTIVE: + e1000_cleanup_led(hw); + } + + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 4 + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ack) + +#else + struct ethtool_coalesce *ec) +#endif +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->hw.mac_type < e1000_82545) + return -EOPNOTSUPP; + + if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 4 + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ack) +#else + struct ethtool_coalesce *ec) +#endif +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac_type < e1000_82545) + return -EOPNOTSUPP; + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr = adapter->itr_setting = 4; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + else + ew32(ITR, 0); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int i; + const struct e1000_stats *stat = e1000_gstrings_stats; + + e1000_update_stats(adapter); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) { + char *p; + + switch (stat->type) { + case NETDEV_STATS: + p = (char *)netdev + stat->stat_offset; + break; + case E1000_STATS: + p = (char *)adapter + stat->stat_offset; + break; + default: + netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n", + stat->type, i); + 
continue; + } + + if (stat->sizeof_stat == sizeof(u64)) + data[i] = *(u64 *)p; + else + data[i] = *(u32 *)p; + } +/* BUG_ON(i != E1000_STATS_LEN); */ +} + +static void e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = e1000_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e1000_get_link_ksettings, + .set_link_ksettings = e1000_set_link_ksettings, +}; + +void e1000_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &e1000_ethtool_ops; +} diff --git a/devices/e1000/e1000_ethtool-5.14-orig.c b/devices/e1000/e1000_ethtool-5.14-orig.c new file mode 100644 index 00000000..3c51ee94 --- /dev/null +++ b/devices/e1000/e1000_ethtool-5.14-orig.c @@ -0,0 +1,1885 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/
+
+/* ethtool support for e1000 */
+
+#include "e1000.h"
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+
+enum {NETDEV_STATS, E1000_STATS};
+
+struct e1000_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int type;
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define E1000_STAT(m) E1000_STATS, \
+ sizeof(((struct e1000_adapter *)0)->m), \
+ offsetof(struct e1000_adapter, m)
+#define E1000_NETDEV_STAT(m) NETDEV_STATS, \
+ sizeof(((struct net_device *)0)->m), \
+ offsetof(struct net_device, m)
+
+static const struct e1000_stats e1000_gstrings_stats[] = {
+ { "rx_packets", E1000_STAT(stats.gprc) },
+ { "tx_packets", E1000_STAT(stats.gptc) },
+ { "rx_bytes", E1000_STAT(stats.gorcl) },
+ { "tx_bytes", E1000_STAT(stats.gotcl) },
+ { "rx_broadcast", E1000_STAT(stats.bprc) },
+ { "tx_broadcast", E1000_STAT(stats.bptc) },
+ { "rx_multicast", E1000_STAT(stats.mprc) },
+ { "tx_multicast", E1000_STAT(stats.mptc) },
+ { "rx_errors", E1000_STAT(stats.rxerrc) },
+ { "tx_errors", E1000_STAT(stats.txerrc) },
+ { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
+ { "multicast", E1000_STAT(stats.mprc) },
+ { "collisions", E1000_STAT(stats.colc) },
+ { "rx_length_errors", E1000_STAT(stats.rlerrc) },
+ { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
+ { "rx_crc_errors", E1000_STAT(stats.crcerrs) },
+ { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
+ { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
+ { "rx_missed_errors", E1000_STAT(stats.mpc) },
+ { "tx_aborted_errors", E1000_STAT(stats.ecol) },
+ { "tx_carrier_errors", E1000_STAT(stats.tncrs) },
+ { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
+ { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
+ { "tx_window_errors", E1000_STAT(stats.latecol) },
+ { "tx_abort_late_coll", E1000_STAT(stats.latecol) },
+ { "tx_deferred_ok", E1000_STAT(stats.dc) },
+ { "tx_single_coll_ok", E1000_STAT(stats.scc) },
+ { "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+ { "tx_timeout_count", E1000_STAT(tx_timeout_count) },
+ { "tx_restart_queue", E1000_STAT(restart_queue) },
+ { "rx_long_length_errors", E1000_STAT(stats.roc) },
+ { "rx_short_length_errors", E1000_STAT(stats.ruc) },
+ { "rx_align_errors", E1000_STAT(stats.algnerrc) },
+ { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
+ { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
+ { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
+ { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
+ { "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
+ { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
+ { "rx_long_byte_count", E1000_STAT(stats.gorcl) },
+ { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
+ { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+ { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
+ { "tx_smbus", E1000_STAT(stats.mgptc) },
+ { "rx_smbus", E1000_STAT(stats.mgprc) },
+ { "dropped_smbus", E1000_STAT(stats.mgpdc) },
+};
+
+#define E1000_QUEUE_STATS_LEN 0
+#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)", "Eeprom test (offline)",
+ "Interrupt test (offline)", "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+
+#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
+
+static int e1000_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = 
&adapter->hw; + u32 supported, advertising; + + if (hw->media_type == e1000_media_type_copper) { + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + advertising = ADVERTISED_TP; + + if (hw->autoneg == 1) { + advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + advertising |= hw->autoneg_advertised; + } + + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy_addr; + } else { + supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + cmd->base.port = PORT_FIBRE; + } + + if (er32(STATUS) & E1000_STATUS_LU) { + e1000_get_speed_and_duplex(hw, &adapter->link_speed, + &adapter->link_duplex); + cmd->base.speed = adapter->link_speed; + + /* unfortunately FULL_DUPLEX != DUPLEX_FULL + * and HALF_DUPLEX != DUPLEX_HALF + */ + if (adapter->link_duplex == FULL_DUPLEX) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + cmd->base.autoneg = ((hw->media_type == e1000_media_type_fiber) || + hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 1; MDI => 0 */ + if ((hw->media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + cmd->base.eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? + ETH_TP_MDI_X : ETH_TP_MDI); + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->mdix; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + + return 0; +} + +static int e1000_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
+ */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (cmd->base.autoneg != AUTONEG_ENABLE)) { + e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->autoneg = 1; + if (hw->media_type == e1000_media_type_fiber) + hw->autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->autoneg_advertised = advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + } else { + u32 speed = cmd->base.speed; + /* calling this overrides forced MDI setting */ + if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) { + clear_bit(__E1000_RESETTING, &adapter->flags); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->mdix = AUTO_ALL_MODES; + else + hw->mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + clear_bit(__E1000_RESETTING, &adapter->flags); + return 0; +} + +static u32 e1000_get_link(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + adapter->hw.get_link_status = 1; + + return e1000_has_link(adapter); +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc == E1000_FC_RX_PAUSE) { + pause->rx_pause = 1; + } else if (hw->fc == E1000_FC_TX_PAUSE) { + pause->tx_pause = 1; + } else if (hw->fc == E1000_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_FULL; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_RX_PAUSE; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_TX_PAUSE; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_NONE; + + hw->original_fc = hw->fc; + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + } else + retval = ((hw->media_type == e1000_media_type_fiber) ? 
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw)); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return retval; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN); + regs_buff[9] = er32(TDH); + regs_buff[10] = er32(TDT); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */ + if (hw->phy_type == e1000_phy_igp) { + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_A); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_B); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[14] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_C); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[15] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_D); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[16] = (u32)phy_data; /* cable length */ + regs_buff[17] = 0; /* extended 10bt distance (not needed) */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[18] = (u32)phy_data; /* cable polarity */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_PCS_INIT_REG); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[19] = (u32)phy_data; /* cable polarity */ + regs_buff[20] = 0; /* polarity correction enabled (always) */ + regs_buff[22] = 0; /* phy receive errors (unavailable) */ + regs_buff[23] = regs_buff[18]; /* mdix mode */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + } else { + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy 
reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + if (hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + regs_buff[26] = er32(MANC); + } +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + return hw->eeprom.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->eeprom.type == e1000_eeprom_spi) + ret_val = e1000_read_eeprom(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_eeprom(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = e1000_write_eeprom(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum over the first part of the 
EEPROM if needed */ + if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG)) + e1000_update_eeprom_checksum(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, e1000_driver_name, + sizeof(drvinfo->driver)); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr = adapter->tx_ring; + struct e1000_rx_ring *rxdr = adapter->rx_ring; + + ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD : + E1000_MAX_82544_RXD; + ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD : + E1000_MAX_82544_TXD; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr, *tx_old; + struct e1000_rx_ring *rxdr, *rx_old; + int i, err; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (netif_running(adapter->netdev)) + e1000_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), + GFP_KERNEL); + if (!txdr) + goto err_alloc_tx; + + rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), + GFP_KERNEL); + if (!rxdr) + goto err_alloc_rx; + + adapter->tx_ring = txdr; + adapter->rx_ring = rxdr; + + rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); + rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ? + E1000_MAX_RXD : E1000_MAX_82544_RXD)); + rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); + txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); + txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ? 
+ E1000_MAX_TXD : E1000_MAX_82544_TXD)); + txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); + + for (i = 0; i < adapter->num_tx_queues; i++) + txdr[i].count = txdr->count; + for (i = 0; i < adapter->num_rx_queues; i++) + rxdr[i].count = rxdr->count; + + err = 0; + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* save the new, restore the old in order to free it, + * then restore the new back again + */ + + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + e1000_free_all_rx_resources(adapter); + e1000_free_all_tx_resources(adapter); + adapter->rx_ring = rxdr; + adapter->tx_ring = txdr; + err = e1000_up(adapter); + } + kfree(tx_old); + kfree(rx_old); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; + +err_setup_tx: + e1000_free_all_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rxdr); +err_alloc_rx: + kfree(txdr); +err_alloc_tx: + if (netif_running(adapter->netdev)) + e1000_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + int i; + + for (i = 0; i < ARRAY_SIZE(test); i++) { + writel(write & test[i], address); + read = readl(address); + if (read != (write & test[i] & mask)) { + e_err(drv, "pattern test reg %04X failed: " + "got 0x%08X expected 0x%08X\n", + reg, read, (write & test[i] & mask)); + *data = reg; + return true; + } + } + return false; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + + writel(write & mask, address); + read = readl(address); + if ((read & mask) != (write & mask)) { + e_err(drv, "set/check reg %04X test failed: " + "got 0x%08X expected 0x%08X\n", + reg, (read & mask), (write & mask)); + *data = reg; + return true; + } + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + u32 value, before, after; + u32 i, toggle; + struct e1000_hw *hw = &adapter->hw; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. 
+ */ + + /* there are several bits on newer hardware that are r/w */ + toggle = 0xFFFFF833; + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err(drv, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); + + REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); + + before = 0x06DFB3FE; + REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); + + if (hw->mac_type >= e1000_82543) { + REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); + value = E1000_RAR_ENTRIES; + for (i = 0; i < value; i++) { + REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), + 0x8003FFFF, 0xFFFFFFFF); + } + } else { + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); + } + + value = E1000_MC_TBL_SIZE; + for (i = 0; i < value; i++) + REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) { + *data = 1; + break; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16)EEPROM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0; + bool shared_int = true; + u32 irq = adapter->pdev->irq; + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* NOTE: we don't test MSI interrupts here, yet + * Hook up test interrupt handler just for this test + */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) + shared_int = false; + else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", (shared_int ? 
+ "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Test each interrupt */ + for (; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (txdr->desc && txdr->buffer_info) { + for (i = 0; i < txdr->count; i++) { + if (txdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + txdr->buffer_info[i].dma, + txdr->buffer_info[i].length, + DMA_TO_DEVICE); + dev_kfree_skb(txdr->buffer_info[i].skb); + } + } + + if (rxdr->desc && rxdr->buffer_info) { + for (i = 0; i < rxdr->count; i++) { + if (rxdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + rxdr->buffer_info[i].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + kfree(rxdr->buffer_info[i].rxbuf.data); + } + } + + if (txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + txdr->desc = NULL; + } + if (rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + rxdr->desc = NULL; + } + + kfree(txdr->buffer_info); + txdr->buffer_info = NULL; + kfree(rxdr->buffer_info); + rxdr->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + u32 rctl; + int i, ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!txdr->count) + txdr->count = E1000_DEFAULT_TXD; + + txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer), + GFP_KERNEL); + if (!txdr->buffer_info) { + ret_val = 1; + goto err_nomem; + } + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { + ret_val = 2; + goto err_nomem; + } + txdr->next_to_use = txdr->next_to_clean = 
0; + + ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH, ((u64)txdr->dma >> 32)); + ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc)); + ew32(TDH, 0); + ew32(TDT, 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < txdr->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); + struct sk_buff *skb; + unsigned int size = 1024; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, size); + txdr->buffer_info[i].skb = skb; + txdr->buffer_info[i].length = skb->len; + txdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RPS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rxdr->count) + rxdr->count = E1000_DEFAULT_RXD; + + rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer), + GFP_KERNEL); + if (!rxdr->buffer_info) { + ret_val = 5; + goto err_nomem; + } + + rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { + ret_val = 6; + goto err_nomem; + } + rxdr->next_to_use = rxdr->next_to_clean = 0; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF)); + ew32(RDBAH, ((u64)rxdr->dma >> 32)); + ew32(RDLEN, rxdr->size); + ew32(RDH, 0); + ew32(RDT, 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rxdr->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); + u8 *buf; + + buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN, + GFP_KERNEL); + if (!buf) { + ret_val = 7; + goto err_nomem; + } + rxdr->buffer_info[i].rxbuf.data = buf; + + rxdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, + buf + NET_SKB_PAD + NET_IP_ALIGN, + E1000_RXBUFFER_2048, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_write_phy_reg(hw, 29, 0x001F); + e1000_write_phy_reg(hw, 30, 0x8FFC); + e1000_write_phy_reg(hw, 29, 0x001A); + e1000_write_phy_reg(hw, 30, 0x8FF0); +} + +static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg; + + /* Because we reset the PHY above, we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock. This + * value defaults back to a 2.5MHz clock when the PHY is reset. 
+ */ + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_EPSCR_TX_CLK_25; + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); + + /* In addition, because of the s/w reset above, we need to enable + * CRS on TX. This must be set for both full and half duplex + * operation. + */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); +} + +static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg; + u16 phy_reg; + + /* Setup the Device Control Register for PHY loopback test. */ + + ctrl_reg = er32(CTRL); + ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ + E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + + /* Read the PHY Specific Control Register (0x10) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + + /* Clear Auto-Crossover bits in PHY Specific Control Register + * (bits 6:5). + */ + phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); + + /* Perform software reset on the PHY */ + e1000_phy_reset(hw); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + e1000_write_phy_reg(hw, PHY_CTRL, 0x8100); + + /* Wait for reset to complete. */ + udelay(500); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_phy_disable_receiver(adapter); + + /* Set the loopback bit in the PHY control register. */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + + /* Setup TX_CLK and TX_CRS one more time. */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Check Phy Configuration */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg != 0x4100) + return 9; + + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + if (phy_reg != 0x0070) + return 10; + + e1000_read_phy_reg(hw, 29, &phy_reg); + if (phy_reg != 0x001A) + return 11; + + return 0; +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u32 stat_reg = 0; + + hw->autoneg = false; + + if (hw->phy_type == e1000_phy_m88) { + /* Auto-MDI/MDIX Off */ + e1000_write_phy_reg(hw, + M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); + /* autoneg off */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); + } + + ctrl_reg = er32(CTRL); + + /* force 1000, set loopback */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (hw->media_type == e1000_media_type_copper && + hw->phy_type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + else { + /* Set the ILOS bit on the fiber Nic is half + * duplex link is detected. + */ + stat_reg = er32(STATUS); + if ((stat_reg & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy_type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg = 0; + u16 count = 0; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->media_type == e1000_media_type_copper) { + /* Attempt to setup Loopback mode on Non-integrated PHY. + * Some PHY registers get corrupted at random, so + * attempt this 10 times. + */ + while (e1000_nonintegrated_phy_loopback(adapter) && + count++ < 10); + if (count < 11) + return 0; + } + break; + + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + return e1000_integrated_phy_loopback(adapter); + default: + /* Default PHY loopback work is to read the MII + * control register and assert bit 14 (loopback mode). 
+ */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + return 0; + } + + return 8; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + return e1000_set_phy_loopback(adapter); + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->media_type == e1000_media_type_copper) { + return e1000_set_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + default: + hw->autoneg = true; + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + e1000_phy_reset(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + skb->data[frame_size / 2 + 10] = 0xBE; + skb->data[frame_size / 2 + 12] = 0xAF; +} + +static int e1000_check_lbtest_frame(const unsigned char *data, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(data + 3) == 0xFF) { + if ((*(data + frame_size / 2 + 10) == 0xBE) && + (*(data + frame_size / 2 + 12) == 0xAF)) { + return 0; + } + } + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i, j, k, l, lc, good_cnt, ret_val = 0; + unsigned long time; + + ew32(RDT, rxdr->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rxdr->count <= txdr->count) + lc = ((txdr->count / 64) * 2) + 1; + else + lc = ((rxdr->count / 64) * 2) + 1; + + k = l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame(txdr->buffer_info[i].skb, + 1024); + dma_sync_single_for_device(&pdev->dev, + txdr->buffer_info[k].dma, + txdr->buffer_info[k].length, + DMA_TO_DEVICE); + if (unlikely(++k == txdr->count)) + k = 0; + } + ew32(TDT, k); + E1000_WRITE_FLUSH(); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + dma_sync_single_for_cpu(&pdev->dev, + rxdr->buffer_info[l].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame( + rxdr->buffer_info[l].rxbuf.data + + NET_SKB_PAD + NET_IP_ALIGN, + 1024); + if (!ret_val) + good_cnt++; + if (unlikely(++l == rxdr->count)) + l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } 
while (good_cnt < 64 && time_after(time + 20, jiffies)); + + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (time_after_eq(jiffies, time + 2)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + e1000_check_for_link(hw); + if (hw->serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + e1000_check_for_link(hw); + if (hw->autoneg) /* if auto_neg is set wait for it */ + msleep(4000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->flags); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + u16 autoneg_advertised = hw->autoneg_advertised; + u8 forced_speed_duplex = hw->forced_speed_duplex; + u8 autoneg = hw->autoneg; + + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + e1000_close(netdev); + else + e1000_reset(adapter); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + /* make sure the phy is powered up */ + e1000_power_up_phy(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + hw->autoneg_advertised = autoneg_advertised; + hw->forced_speed_duplex = forced_speed_duplex; + hw->autoneg = autoneg; + + e1000_reset(adapter); + clear_bit(__E1000_TESTING, &adapter->flags); + if (if_running) + e1000_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + /* Online tests */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__E1000_TESTING, &adapter->flags); + } + 
msleep_interruptible(4 * 1000); +} + +static int e1000_wol_exclusion(struct e1000_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct e1000_hw *hw = &adapter->hw; + int retval = 1; /* fail by default */ + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_PCIE: + /* these don't support WoL at all */ + wol->supported = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events not supported on port B */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* quad port adapters only support WoL on port A */ + if (!adapter->quad_port_a) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + default: + /* dual port cards only support WoL on port A from now on + * unless it was enabled in the eeprom for port B + * so exclude FUNC_1 ports from having WoL enabled + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1 && + !adapter->eeprom_wol) { + wol->supported = 0; + break; + } + + retval = 0; + } + + return retval; +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + /* this function will set ->supported = 0 and return 1 if wol is not + * supported by this hardware + */ + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + /* apply any specific unsupported masks here */ + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* KSP3 does not support UCAST wake-ups */ + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + break; + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + if (wol->wolopts & WAKE_UCAST) { + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + e1000_setup_led(hw); + return 2; + + case ETHTOOL_ID_ON: + e1000_led_on(hw); + break; + + case ETHTOOL_ID_OFF: + e1000_led_off(hw); + break; + + case ETHTOOL_ID_INACTIVE: + e1000_cleanup_led(hw); + } + + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->hw.mac_type < e1000_82545) + return -EOPNOTSUPP; + + if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac_type < e1000_82545) + return -EOPNOTSUPP; + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr = adapter->itr_setting = 4; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + else + ew32(ITR, 0); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int i; + const struct e1000_stats *stat = e1000_gstrings_stats; + + e1000_update_stats(adapter); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) { + char *p; + + switch (stat->type) { + case NETDEV_STATS: + p = (char *)netdev + stat->stat_offset; + break; + case E1000_STATS: + p = (char *)adapter + stat->stat_offset; + break; + default: + netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n", + stat->type, i); + continue; + } + + if (stat->sizeof_stat == sizeof(u64)) + data[i] = *(u64 *)p; + else + data[i] = *(u32 *)p; + } +/* BUG_ON(i != E1000_STATS_LEN); */ +} + +static void e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, 
e1000_gstrings_test, sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = e1000_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e1000_get_link_ksettings, + .set_link_ksettings = e1000_set_link_ksettings, +}; + +void e1000_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &e1000_ethtool_ops; +} diff --git a/devices/e1000/e1000_ethtool-5.15-ethercat.c b/devices/e1000/e1000_ethtool-5.15-ethercat.c new file mode 100644 index 00000000..f024ea1b --- /dev/null +++ b/devices/e1000/e1000_ethtool-5.15-ethercat.c @@ -0,0 +1,1889 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/
+
+/* ethtool support for e1000 */
+
+#include "e1000-5.15-ethercat.h"
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+
+enum {NETDEV_STATS, E1000_STATS};
+
+struct e1000_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int type;
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define E1000_STAT(m)		E1000_STATS, \
+				sizeof(((struct e1000_adapter *)0)->m), \
+				offsetof(struct e1000_adapter, m)
+#define E1000_NETDEV_STAT(m)	NETDEV_STATS, \
+				sizeof(((struct net_device *)0)->m), \
+				offsetof(struct net_device, m)
+
+static const struct e1000_stats e1000_gstrings_stats[] = {
+	{ "rx_packets", E1000_STAT(stats.gprc) },
+	{ "tx_packets", E1000_STAT(stats.gptc) },
+	{ "rx_bytes", E1000_STAT(stats.gorcl) },
+	{ "tx_bytes", E1000_STAT(stats.gotcl) },
+	{ "rx_broadcast", E1000_STAT(stats.bprc) },
+	{ "tx_broadcast", E1000_STAT(stats.bptc) },
+	{ "rx_multicast", E1000_STAT(stats.mprc) },
+	{ "tx_multicast", E1000_STAT(stats.mptc) },
+	{ "rx_errors", E1000_STAT(stats.rxerrc) },
+	{ "tx_errors", E1000_STAT(stats.txerrc) },
+	{ "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
+	{ "multicast", E1000_STAT(stats.mprc) },
+	{ "collisions", E1000_STAT(stats.colc) },
+	{ "rx_length_errors", E1000_STAT(stats.rlerrc) },
+	{ "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
+	{ "rx_crc_errors", E1000_STAT(stats.crcerrs) },
+	{ "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
+	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
+	{ "rx_missed_errors", E1000_STAT(stats.mpc) },
+	{ "tx_aborted_errors", E1000_STAT(stats.ecol) },
+	{ "tx_carrier_errors", E1000_STAT(stats.tncrs) },
+	{ "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
+	{ "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
+	{ "tx_window_errors", E1000_STAT(stats.latecol) },
+	{ "tx_abort_late_coll", E1000_STAT(stats.latecol) },
+	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
+	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
+	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+	{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
+	{ "tx_restart_queue", E1000_STAT(restart_queue) },
+	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
+	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
+	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
+	{ "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
+	{ "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
+	{ "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
+	{ "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
+	{ "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
+	{ "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
+	{ "rx_long_byte_count", E1000_STAT(stats.gorcl) },
+	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
+	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
+	{ "tx_smbus", E1000_STAT(stats.mgptc) },
+	{ "rx_smbus", E1000_STAT(stats.mgprc) },
+	{ "dropped_smbus", E1000_STAT(stats.mgpdc) },
+};
+
+#define E1000_QUEUE_STATS_LEN 0
+#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test (offline)", "Eeprom test (offline)",
+	"Interrupt test (offline)", "Loopback test (offline)",
+	"Link test (on/offline)"
+};
+
+#define E1000_TEST_LEN	ARRAY_SIZE(e1000_gstrings_test)
+
+static int e1000_get_link_ksettings(struct net_device *netdev,
+				    struct ethtool_link_ksettings *cmd)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw
*hw = &adapter->hw; + u32 supported, advertising; + + if (hw->media_type == e1000_media_type_copper) { + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + advertising = ADVERTISED_TP; + + if (hw->autoneg == 1) { + advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + advertising |= hw->autoneg_advertised; + } + + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy_addr; + } else { + supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + cmd->base.port = PORT_FIBRE; + } + + if (er32(STATUS) & E1000_STATUS_LU) { + e1000_get_speed_and_duplex(hw, &adapter->link_speed, + &adapter->link_duplex); + cmd->base.speed = adapter->link_speed; + + /* unfortunately FULL_DUPLEX != DUPLEX_FULL + * and HALF_DUPLEX != DUPLEX_HALF + */ + if (adapter->link_duplex == FULL_DUPLEX) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + cmd->base.autoneg = ((hw->media_type == e1000_media_type_fiber) || + hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 1; MDI => 0 */ + if ((hw->media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + cmd->base.eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? + ETH_TP_MDI_X : ETH_TP_MDI); + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->mdix; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + + return 0; +} + +static int e1000_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
+ */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (cmd->base.autoneg != AUTONEG_ENABLE)) { + e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->autoneg = 1; + if (hw->media_type == e1000_media_type_fiber) + hw->autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->autoneg_advertised = advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + } else { + u32 speed = cmd->base.speed; + /* calling this overrides forced MDI setting */ + if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) { + clear_bit(__E1000_RESETTING, &adapter->flags); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->mdix = AUTO_ALL_MODES; + else + hw->mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + clear_bit(__E1000_RESETTING, &adapter->flags); + return 0; +} + +static u32 e1000_get_link(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + adapter->hw.get_link_status = 1; + + return e1000_has_link(adapter); +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc == E1000_FC_RX_PAUSE) { + pause->rx_pause = 1; + } else if (hw->fc == E1000_FC_TX_PAUSE) { + pause->tx_pause = 1; + } else if (hw->fc == E1000_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_FULL; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_RX_PAUSE; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_TX_PAUSE; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_NONE; + + hw->original_fc = hw->fc; + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + } else + retval = ((hw->media_type == e1000_media_type_fiber) ? 
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw)); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return retval; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN); + regs_buff[9] = er32(TDH); + regs_buff[10] = er32(TDT); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */ + if (hw->phy_type == e1000_phy_igp) { + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_A); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_B); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[14] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_C); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[15] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_D); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[16] = (u32)phy_data; /* cable length */ + regs_buff[17] = 0; /* extended 10bt distance (not needed) */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[18] = (u32)phy_data; /* cable polarity */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_PCS_INIT_REG); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[19] = (u32)phy_data; /* cable polarity */ + regs_buff[20] = 0; /* polarity correction enabled (always) */ + regs_buff[22] = 0; /* phy receive errors (unavailable) */ + regs_buff[23] = regs_buff[18]; /* mdix mode */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + } else { + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy 
reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + if (hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + regs_buff[26] = er32(MANC); + } +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + return hw->eeprom.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->eeprom.type == e1000_eeprom_spi) + ret_val = e1000_read_eeprom(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_eeprom(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = e1000_write_eeprom(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum over the first part of the 
EEPROM if needed */ + if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG)) + e1000_update_eeprom_checksum(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, e1000_driver_name, + sizeof(drvinfo->driver)); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr = adapter->tx_ring; + struct e1000_rx_ring *rxdr = adapter->rx_ring; + + ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD : + E1000_MAX_82544_RXD; + ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD : + E1000_MAX_82544_TXD; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr, *tx_old; + struct e1000_rx_ring *rxdr, *rx_old; + int i, err; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (netif_running(adapter->netdev)) + e1000_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), + GFP_KERNEL); + if (!txdr) + goto err_alloc_tx; + + rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), + GFP_KERNEL); + if (!rxdr) + goto err_alloc_rx; + + adapter->tx_ring = txdr; + adapter->rx_ring = rxdr; + + rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); + rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ? + E1000_MAX_RXD : E1000_MAX_82544_RXD)); + rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); + txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); + txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ? 
+ E1000_MAX_TXD : E1000_MAX_82544_TXD)); + txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); + + for (i = 0; i < adapter->num_tx_queues; i++) + txdr[i].count = txdr->count; + for (i = 0; i < adapter->num_rx_queues; i++) + rxdr[i].count = rxdr->count; + + err = 0; + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* save the new, restore the old in order to free it, + * then restore the new back again + */ + + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + e1000_free_all_rx_resources(adapter); + e1000_free_all_tx_resources(adapter); + adapter->rx_ring = rxdr; + adapter->tx_ring = txdr; + err = e1000_up(adapter); + } + kfree(tx_old); + kfree(rx_old); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; + +err_setup_tx: + e1000_free_all_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rxdr); +err_alloc_rx: + kfree(txdr); +err_alloc_tx: + if (netif_running(adapter->netdev)) + e1000_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + int i; + + for (i = 0; i < ARRAY_SIZE(test); i++) { + writel(write & test[i], address); + read = readl(address); + if (read != (write & test[i] & mask)) { + e_err(drv, "pattern test reg %04X failed: " + "got 0x%08X expected 0x%08X\n", + reg, read, (write & test[i] & mask)); + *data = reg; + return true; + } + } + return false; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + + writel(write & mask, address); + read = readl(address); + if ((read & mask) != (write & mask)) { + e_err(drv, "set/check reg %04X test failed: " + "got 0x%08X expected 0x%08X\n", + reg, (read & mask), (write & mask)); + *data = reg; + return true; + } + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + u32 value, before, after; + u32 i, toggle; + struct e1000_hw *hw = &adapter->hw; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. 
+ */ + + /* there are several bits on newer hardware that are r/w */ + toggle = 0xFFFFF833; + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err(drv, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); + + REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); + + before = 0x06DFB3FE; + REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); + + if (hw->mac_type >= e1000_82543) { + REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); + value = E1000_RAR_ENTRIES; + for (i = 0; i < value; i++) { + REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), + 0x8003FFFF, 0xFFFFFFFF); + } + } else { + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); + } + + value = E1000_MC_TBL_SIZE; + for (i = 0; i < value; i++) + REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) { + *data = 1; + break; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16)EEPROM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0; + bool shared_int = true; + u32 irq = adapter->pdev->irq; + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* NOTE: we don't test MSI interrupts here, yet + * Hook up test interrupt handler just for this test + */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) + shared_int = false; + else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", (shared_int ? 
+ "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Test each interrupt */ + for (; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (txdr->desc && txdr->buffer_info) { + for (i = 0; i < txdr->count; i++) { + if (txdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + txdr->buffer_info[i].dma, + txdr->buffer_info[i].length, + DMA_TO_DEVICE); + dev_kfree_skb(txdr->buffer_info[i].skb); + } + } + + if (rxdr->desc && rxdr->buffer_info) { + for (i = 0; i < rxdr->count; i++) { + if (rxdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + rxdr->buffer_info[i].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + kfree(rxdr->buffer_info[i].rxbuf.data); + } + } + + if (txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + txdr->desc = NULL; + } + if (rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + rxdr->desc = NULL; + } + + kfree(txdr->buffer_info); + txdr->buffer_info = NULL; + kfree(rxdr->buffer_info); + rxdr->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + u32 rctl; + int i, ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!txdr->count) + txdr->count = E1000_DEFAULT_TXD; + + txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer), + GFP_KERNEL); + if (!txdr->buffer_info) { + ret_val = 1; + goto err_nomem; + } + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { + ret_val = 2; + goto err_nomem; + } + txdr->next_to_use = txdr->next_to_clean = 
0; + + ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH, ((u64)txdr->dma >> 32)); + ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc)); + ew32(TDH, 0); + ew32(TDT, 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < txdr->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); + struct sk_buff *skb; + unsigned int size = 1024; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, size); + txdr->buffer_info[i].skb = skb; + txdr->buffer_info[i].length = skb->len; + txdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RPS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rxdr->count) + rxdr->count = E1000_DEFAULT_RXD; + + rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer), + GFP_KERNEL); + if (!rxdr->buffer_info) { + ret_val = 5; + goto err_nomem; + } + + rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { + ret_val = 6; + goto err_nomem; + } + rxdr->next_to_use = rxdr->next_to_clean = 0; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF)); + ew32(RDBAH, ((u64)rxdr->dma >> 32)); + ew32(RDLEN, rxdr->size); + ew32(RDH, 0); + ew32(RDT, 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rxdr->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); + u8 *buf; + + buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN, + GFP_KERNEL); + if (!buf) { + ret_val = 7; + goto err_nomem; + } + rxdr->buffer_info[i].rxbuf.data = buf; + + rxdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, + buf + NET_SKB_PAD + NET_IP_ALIGN, + E1000_RXBUFFER_2048, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_write_phy_reg(hw, 29, 0x001F); + e1000_write_phy_reg(hw, 30, 0x8FFC); + e1000_write_phy_reg(hw, 29, 0x001A); + e1000_write_phy_reg(hw, 30, 0x8FF0); +} + +static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg; + + /* Because we reset the PHY above, we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock. This + * value defaults back to a 2.5MHz clock when the PHY is reset. 
+ */ + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_EPSCR_TX_CLK_25; + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); + + /* In addition, because of the s/w reset above, we need to enable + * CRS on TX. This must be set for both full and half duplex + * operation. + */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); +} + +static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg; + u16 phy_reg; + + /* Setup the Device Control Register for PHY loopback test. */ + + ctrl_reg = er32(CTRL); + ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ + E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + + /* Read the PHY Specific Control Register (0x10) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + + /* Clear Auto-Crossover bits in PHY Specific Control Register + * (bits 6:5). + */ + phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); + + /* Perform software reset on the PHY */ + e1000_phy_reset(hw); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + e1000_write_phy_reg(hw, PHY_CTRL, 0x8100); + + /* Wait for reset to complete. */ + udelay(500); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_phy_disable_receiver(adapter); + + /* Set the loopback bit in the PHY control register. */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + + /* Setup TX_CLK and TX_CRS one more time. */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Check Phy Configuration */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg != 0x4100) + return 9; + + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + if (phy_reg != 0x0070) + return 10; + + e1000_read_phy_reg(hw, 29, &phy_reg); + if (phy_reg != 0x001A) + return 11; + + return 0; +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u32 stat_reg = 0; + + hw->autoneg = false; + + if (hw->phy_type == e1000_phy_m88) { + /* Auto-MDI/MDIX Off */ + e1000_write_phy_reg(hw, + M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); + /* autoneg off */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); + } + + ctrl_reg = er32(CTRL); + + /* force 1000, set loopback */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (hw->media_type == e1000_media_type_copper && + hw->phy_type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + else { + /* Set the ILOS bit on the fiber Nic is half + * duplex link is detected. + */ + stat_reg = er32(STATUS); + if ((stat_reg & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy_type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg = 0; + u16 count = 0; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->media_type == e1000_media_type_copper) { + /* Attempt to setup Loopback mode on Non-integrated PHY. + * Some PHY registers get corrupted at random, so + * attempt this 10 times. + */ + while (e1000_nonintegrated_phy_loopback(adapter) && + count++ < 10); + if (count < 11) + return 0; + } + break; + + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + return e1000_integrated_phy_loopback(adapter); + default: + /* Default PHY loopback work is to read the MII + * control register and assert bit 14 (loopback mode). 
+ */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + return 0; + } + + return 8; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + return e1000_set_phy_loopback(adapter); + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->media_type == e1000_media_type_copper) { + return e1000_set_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + default: + hw->autoneg = true; + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + e1000_phy_reset(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + skb->data[frame_size / 2 + 10] = 0xBE; + skb->data[frame_size / 2 + 12] = 0xAF; +} + +static int e1000_check_lbtest_frame(const unsigned char *data, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(data + 3) == 0xFF) { + if ((*(data + frame_size / 2 + 10) == 0xBE) && + (*(data + frame_size / 2 + 12) == 0xAF)) { + return 0; + } + } + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i, j, k, l, lc, good_cnt, ret_val = 0; + unsigned long time; + + ew32(RDT, rxdr->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rxdr->count <= txdr->count) + lc = ((txdr->count / 64) * 2) + 1; + else + lc = ((rxdr->count / 64) * 2) + 1; + + k = l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame(txdr->buffer_info[i].skb, + 1024); + dma_sync_single_for_device(&pdev->dev, + txdr->buffer_info[k].dma, + txdr->buffer_info[k].length, + DMA_TO_DEVICE); + if (unlikely(++k == txdr->count)) + k = 0; + } + ew32(TDT, k); + E1000_WRITE_FLUSH(); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + dma_sync_single_for_cpu(&pdev->dev, + rxdr->buffer_info[l].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame( + rxdr->buffer_info[l].rxbuf.data + + NET_SKB_PAD + NET_IP_ALIGN, + 1024); + if (!ret_val) + good_cnt++; + if (unlikely(++l == rxdr->count)) + l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } 
while (good_cnt < 64 && time_after(time + 20, jiffies)); + + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (time_after_eq(jiffies, time + 2)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + e1000_check_for_link(hw); + if (hw->serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + e1000_check_for_link(hw); + if (hw->autoneg) /* if auto_neg is set wait for it */ + msleep(4000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->flags); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + u16 autoneg_advertised = hw->autoneg_advertised; + u8 forced_speed_duplex = hw->forced_speed_duplex; + u8 autoneg = hw->autoneg; + + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + e1000_close(netdev); + else + e1000_reset(adapter); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + /* make sure the phy is powered up */ + e1000_power_up_phy(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + hw->autoneg_advertised = autoneg_advertised; + hw->forced_speed_duplex = forced_speed_duplex; + hw->autoneg = autoneg; + + e1000_reset(adapter); + clear_bit(__E1000_TESTING, &adapter->flags); + if (if_running) + e1000_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + /* Online tests */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__E1000_TESTING, &adapter->flags); + } + 
msleep_interruptible(4 * 1000); +} + +static int e1000_wol_exclusion(struct e1000_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct e1000_hw *hw = &adapter->hw; + int retval = 1; /* fail by default */ + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_PCIE: + /* these don't support WoL at all */ + wol->supported = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events not supported on port B */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* quad port adapters only support WoL on port A */ + if (!adapter->quad_port_a) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + default: + /* dual port cards only support WoL on port A from now on + * unless it was enabled in the eeprom for port B + * so exclude FUNC_1 ports from having WoL enabled + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1 && + !adapter->eeprom_wol) { + wol->supported = 0; + break; + } + + retval = 0; + } + + return retval; +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + /* this function will set ->supported = 0 and return 1 if wol is not + * supported by this hardware + */ + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + /* apply any specific unsupported masks here */ + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* KSP3 does not support UCAST wake-ups */ + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + break; + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + if (wol->wolopts & WAKE_UCAST) { + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + e1000_setup_led(hw); + return 2; + + case ETHTOOL_ID_ON: + e1000_led_on(hw); + break; + + case ETHTOOL_ID_OFF: + e1000_led_off(hw); + break; + + case ETHTOOL_ID_INACTIVE: + e1000_cleanup_led(hw); + } + + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->hw.mac_type < e1000_82545) + return -EOPNOTSUPP; + + if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac_type < e1000_82545) + return -EOPNOTSUPP; + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr = adapter->itr_setting = 4; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + else + ew32(ITR, 0); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int i; + const struct e1000_stats *stat = e1000_gstrings_stats; + + e1000_update_stats(adapter); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) { + char *p; + + switch (stat->type) { + case NETDEV_STATS: + p = (char *)netdev + stat->stat_offset; + break; + case E1000_STATS: + p = (char *)adapter + stat->stat_offset; + break; + default: + netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n", + stat->type, i); + continue; + } + + if (stat->sizeof_stat == sizeof(u64)) + data[i] = *(u64 *)p; + else + data[i] = *(u32 *)p; + } +/* BUG_ON(i != E1000_STATS_LEN); */ +} + +static void 
e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = e1000_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e1000_get_link_ksettings, + .set_link_ksettings = e1000_set_link_ksettings, +}; + +void e1000_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &e1000_ethtool_ops; +} diff --git a/devices/e1000/e1000_ethtool-5.15-orig.c b/devices/e1000/e1000_ethtool-5.15-orig.c new file mode 100644 index 00000000..0a57172d --- /dev/null +++ b/devices/e1000/e1000_ethtool-5.15-orig.c @@ -0,0 +1,1889 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. 
 */
+
+/* ethtool support for e1000 */
+
+#include "e1000.h"
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+
+enum {NETDEV_STATS, E1000_STATS};
+
+struct e1000_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int type;
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define E1000_STAT(m)		E1000_STATS, \
+				sizeof(((struct e1000_adapter *)0)->m), \
+				offsetof(struct e1000_adapter, m)
+#define E1000_NETDEV_STAT(m)	NETDEV_STATS, \
+				sizeof(((struct net_device *)0)->m), \
+				offsetof(struct net_device, m)
+
+static const struct e1000_stats e1000_gstrings_stats[] = {
+	{ "rx_packets", E1000_STAT(stats.gprc) },
+	{ "tx_packets", E1000_STAT(stats.gptc) },
+	{ "rx_bytes", E1000_STAT(stats.gorcl) },
+	{ "tx_bytes", E1000_STAT(stats.gotcl) },
+	{ "rx_broadcast", E1000_STAT(stats.bprc) },
+	{ "tx_broadcast", E1000_STAT(stats.bptc) },
+	{ "rx_multicast", E1000_STAT(stats.mprc) },
+	{ "tx_multicast", E1000_STAT(stats.mptc) },
+	{ "rx_errors", E1000_STAT(stats.rxerrc) },
+	{ "tx_errors", E1000_STAT(stats.txerrc) },
+	{ "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
+	{ "multicast", E1000_STAT(stats.mprc) },
+	{ "collisions", E1000_STAT(stats.colc) },
+	{ "rx_length_errors", E1000_STAT(stats.rlerrc) },
+	{ "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
+	{ "rx_crc_errors", E1000_STAT(stats.crcerrs) },
+	{ "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
+	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
+	{ "rx_missed_errors", E1000_STAT(stats.mpc) },
+	{ "tx_aborted_errors", E1000_STAT(stats.ecol) },
+	{ "tx_carrier_errors", E1000_STAT(stats.tncrs) },
+	{ "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
+	{ "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
+	{ "tx_window_errors", E1000_STAT(stats.latecol) },
+	{ "tx_abort_late_coll", E1000_STAT(stats.latecol) },
+	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
+	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
+	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+	{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
+	{ "tx_restart_queue", E1000_STAT(restart_queue) },
+	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
+	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
+	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
+	{ "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
+	{ "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
+	{ "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
+	{ "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
+	{ "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
+	{ "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
+	{ "rx_long_byte_count", E1000_STAT(stats.gorcl) },
+	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
+	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
+	{ "tx_smbus", E1000_STAT(stats.mgptc) },
+	{ "rx_smbus", E1000_STAT(stats.mgprc) },
+	{ "dropped_smbus", E1000_STAT(stats.mgpdc) },
+};
+
+#define E1000_QUEUE_STATS_LEN 0
+#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test (offline)", "Eeprom test (offline)",
+	"Interrupt test (offline)", "Loopback test (offline)",
+	"Link test (on/offline)"
+};
+
+#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
+
+static int e1000_get_link_ksettings(struct net_device *netdev,
+				    struct ethtool_link_ksettings *cmd)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw =
&adapter->hw; + u32 supported, advertising; + + if (hw->media_type == e1000_media_type_copper) { + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + advertising = ADVERTISED_TP; + + if (hw->autoneg == 1) { + advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + advertising |= hw->autoneg_advertised; + } + + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy_addr; + } else { + supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + cmd->base.port = PORT_FIBRE; + } + + if (er32(STATUS) & E1000_STATUS_LU) { + e1000_get_speed_and_duplex(hw, &adapter->link_speed, + &adapter->link_duplex); + cmd->base.speed = adapter->link_speed; + + /* unfortunately FULL_DUPLEX != DUPLEX_FULL + * and HALF_DUPLEX != DUPLEX_HALF + */ + if (adapter->link_duplex == FULL_DUPLEX) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + cmd->base.autoneg = ((hw->media_type == e1000_media_type_fiber) || + hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 1; MDI => 0 */ + if ((hw->media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + cmd->base.eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? + ETH_TP_MDI_X : ETH_TP_MDI); + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->mdix; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + + return 0; +} + +static int e1000_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
+ */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (cmd->base.autoneg != AUTONEG_ENABLE)) { + e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->autoneg = 1; + if (hw->media_type == e1000_media_type_fiber) + hw->autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->autoneg_advertised = advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + } else { + u32 speed = cmd->base.speed; + /* calling this overrides forced MDI setting */ + if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) { + clear_bit(__E1000_RESETTING, &adapter->flags); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->mdix = AUTO_ALL_MODES; + else + hw->mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + clear_bit(__E1000_RESETTING, &adapter->flags); + return 0; +} + +static u32 e1000_get_link(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + adapter->hw.get_link_status = 1; + + return e1000_has_link(adapter); +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc == E1000_FC_RX_PAUSE) { + pause->rx_pause = 1; + } else if (hw->fc == E1000_FC_TX_PAUSE) { + pause->tx_pause = 1; + } else if (hw->fc == E1000_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_FULL; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_RX_PAUSE; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_TX_PAUSE; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_NONE; + + hw->original_fc = hw->fc; + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + } else + retval = ((hw->media_type == e1000_media_type_fiber) ? 
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw)); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return retval; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN); + regs_buff[9] = er32(TDH); + regs_buff[10] = er32(TDT); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */ + if (hw->phy_type == e1000_phy_igp) { + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_A); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_B); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[14] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_C); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[15] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_D); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[16] = (u32)phy_data; /* cable length */ + regs_buff[17] = 0; /* extended 10bt distance (not needed) */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[18] = (u32)phy_data; /* cable polarity */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_PCS_INIT_REG); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[19] = (u32)phy_data; /* cable polarity */ + regs_buff[20] = 0; /* polarity correction enabled (always) */ + regs_buff[22] = 0; /* phy receive errors (unavailable) */ + regs_buff[23] = regs_buff[18]; /* mdix mode */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + } else { + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy 
reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + if (hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + regs_buff[26] = er32(MANC); + } +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + return hw->eeprom.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->eeprom.type == e1000_eeprom_spi) + ret_val = e1000_read_eeprom(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_eeprom(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = e1000_write_eeprom(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum over the first part of the 
EEPROM if needed */ + if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG)) + e1000_update_eeprom_checksum(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, e1000_driver_name, + sizeof(drvinfo->driver)); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr = adapter->tx_ring; + struct e1000_rx_ring *rxdr = adapter->rx_ring; + + ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD : + E1000_MAX_82544_RXD; + ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD : + E1000_MAX_82544_TXD; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr, *tx_old; + struct e1000_rx_ring *rxdr, *rx_old; + int i, err; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (netif_running(adapter->netdev)) + e1000_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), + GFP_KERNEL); + if (!txdr) + goto err_alloc_tx; + + rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), + GFP_KERNEL); + if (!rxdr) + goto err_alloc_rx; + + adapter->tx_ring = txdr; + adapter->rx_ring = rxdr; + + rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); + rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ? + E1000_MAX_RXD : E1000_MAX_82544_RXD)); + rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); + txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); + txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ? 
+ E1000_MAX_TXD : E1000_MAX_82544_TXD)); + txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); + + for (i = 0; i < adapter->num_tx_queues; i++) + txdr[i].count = txdr->count; + for (i = 0; i < adapter->num_rx_queues; i++) + rxdr[i].count = rxdr->count; + + err = 0; + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* save the new, restore the old in order to free it, + * then restore the new back again + */ + + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + e1000_free_all_rx_resources(adapter); + e1000_free_all_tx_resources(adapter); + adapter->rx_ring = rxdr; + adapter->tx_ring = txdr; + err = e1000_up(adapter); + } + kfree(tx_old); + kfree(rx_old); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; + +err_setup_tx: + e1000_free_all_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rxdr); +err_alloc_rx: + kfree(txdr); +err_alloc_tx: + if (netif_running(adapter->netdev)) + e1000_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + int i; + + for (i = 0; i < ARRAY_SIZE(test); i++) { + writel(write & test[i], address); + read = readl(address); + if (read != (write & test[i] & mask)) { + e_err(drv, "pattern test reg %04X failed: " + "got 0x%08X expected 0x%08X\n", + reg, read, (write & test[i] & mask)); + *data = reg; + return true; + } + } + return false; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + + writel(write & mask, address); + read = readl(address); + if ((read & mask) != (write & mask)) { + e_err(drv, "set/check reg %04X test failed: " + "got 0x%08X expected 0x%08X\n", + reg, (read & mask), (write & mask)); + *data = reg; + return true; + } + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + u32 value, before, after; + u32 i, toggle; + struct e1000_hw *hw = &adapter->hw; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. 
+ */ + + /* there are several bits on newer hardware that are r/w */ + toggle = 0xFFFFF833; + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err(drv, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); + + REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); + + before = 0x06DFB3FE; + REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); + + if (hw->mac_type >= e1000_82543) { + REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); + value = E1000_RAR_ENTRIES; + for (i = 0; i < value; i++) { + REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), + 0x8003FFFF, 0xFFFFFFFF); + } + } else { + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); + } + + value = E1000_MC_TBL_SIZE; + for (i = 0; i < value; i++) + REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) { + *data = 1; + break; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16)EEPROM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0; + bool shared_int = true; + u32 irq = adapter->pdev->irq; + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* NOTE: we don't test MSI interrupts here, yet + * Hook up test interrupt handler just for this test + */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) + shared_int = false; + else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", (shared_int ? 
+ "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Test each interrupt */ + for (; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (txdr->desc && txdr->buffer_info) { + for (i = 0; i < txdr->count; i++) { + if (txdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + txdr->buffer_info[i].dma, + txdr->buffer_info[i].length, + DMA_TO_DEVICE); + dev_kfree_skb(txdr->buffer_info[i].skb); + } + } + + if (rxdr->desc && rxdr->buffer_info) { + for (i = 0; i < rxdr->count; i++) { + if (rxdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + rxdr->buffer_info[i].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + kfree(rxdr->buffer_info[i].rxbuf.data); + } + } + + if (txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + txdr->desc = NULL; + } + if (rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + rxdr->desc = NULL; + } + + kfree(txdr->buffer_info); + txdr->buffer_info = NULL; + kfree(rxdr->buffer_info); + rxdr->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + u32 rctl; + int i, ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!txdr->count) + txdr->count = E1000_DEFAULT_TXD; + + txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer), + GFP_KERNEL); + if (!txdr->buffer_info) { + ret_val = 1; + goto err_nomem; + } + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { + ret_val = 2; + goto err_nomem; + } + txdr->next_to_use = txdr->next_to_clean = 
0; + + ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH, ((u64)txdr->dma >> 32)); + ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc)); + ew32(TDH, 0); + ew32(TDT, 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < txdr->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); + struct sk_buff *skb; + unsigned int size = 1024; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, size); + txdr->buffer_info[i].skb = skb; + txdr->buffer_info[i].length = skb->len; + txdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RPS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rxdr->count) + rxdr->count = E1000_DEFAULT_RXD; + + rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer), + GFP_KERNEL); + if (!rxdr->buffer_info) { + ret_val = 5; + goto err_nomem; + } + + rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { + ret_val = 6; + goto err_nomem; + } + rxdr->next_to_use = rxdr->next_to_clean = 0; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF)); + ew32(RDBAH, ((u64)rxdr->dma >> 32)); + ew32(RDLEN, rxdr->size); + ew32(RDH, 0); + ew32(RDT, 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rxdr->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); + u8 *buf; + + buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN, + GFP_KERNEL); + if (!buf) { + ret_val = 7; + goto err_nomem; + } + rxdr->buffer_info[i].rxbuf.data = buf; + + rxdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, + buf + NET_SKB_PAD + NET_IP_ALIGN, + E1000_RXBUFFER_2048, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_write_phy_reg(hw, 29, 0x001F); + e1000_write_phy_reg(hw, 30, 0x8FFC); + e1000_write_phy_reg(hw, 29, 0x001A); + e1000_write_phy_reg(hw, 30, 0x8FF0); +} + +static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg; + + /* Because we reset the PHY above, we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock. This + * value defaults back to a 2.5MHz clock when the PHY is reset. 
+ */ + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_EPSCR_TX_CLK_25; + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); + + /* In addition, because of the s/w reset above, we need to enable + * CRS on TX. This must be set for both full and half duplex + * operation. + */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); +} + +static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg; + u16 phy_reg; + + /* Setup the Device Control Register for PHY loopback test. */ + + ctrl_reg = er32(CTRL); + ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ + E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + + /* Read the PHY Specific Control Register (0x10) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + + /* Clear Auto-Crossover bits in PHY Specific Control Register + * (bits 6:5). + */ + phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); + + /* Perform software reset on the PHY */ + e1000_phy_reset(hw); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + e1000_write_phy_reg(hw, PHY_CTRL, 0x8100); + + /* Wait for reset to complete. */ + udelay(500); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_phy_disable_receiver(adapter); + + /* Set the loopback bit in the PHY control register. */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + + /* Setup TX_CLK and TX_CRS one more time. */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Check Phy Configuration */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg != 0x4100) + return 9; + + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + if (phy_reg != 0x0070) + return 10; + + e1000_read_phy_reg(hw, 29, &phy_reg); + if (phy_reg != 0x001A) + return 11; + + return 0; +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u32 stat_reg = 0; + + hw->autoneg = false; + + if (hw->phy_type == e1000_phy_m88) { + /* Auto-MDI/MDIX Off */ + e1000_write_phy_reg(hw, + M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); + /* autoneg off */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); + } + + ctrl_reg = er32(CTRL); + + /* force 1000, set loopback */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (hw->media_type == e1000_media_type_copper && + hw->phy_type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + else { + /* Set the ILOS bit on the fiber Nic is half + * duplex link is detected. + */ + stat_reg = er32(STATUS); + if ((stat_reg & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy_type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg = 0; + u16 count = 0; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->media_type == e1000_media_type_copper) { + /* Attempt to setup Loopback mode on Non-integrated PHY. + * Some PHY registers get corrupted at random, so + * attempt this 10 times. + */ + while (e1000_nonintegrated_phy_loopback(adapter) && + count++ < 10); + if (count < 11) + return 0; + } + break; + + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + return e1000_integrated_phy_loopback(adapter); + default: + /* Default PHY loopback work is to read the MII + * control register and assert bit 14 (loopback mode). 
+ */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + return 0; + } + + return 8; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + return e1000_set_phy_loopback(adapter); + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->media_type == e1000_media_type_copper) { + return e1000_set_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + default: + hw->autoneg = true; + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + e1000_phy_reset(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + skb->data[frame_size / 2 + 10] = 0xBE; + skb->data[frame_size / 2 + 12] = 0xAF; +} + +static int e1000_check_lbtest_frame(const unsigned char *data, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(data + 3) == 0xFF) { + if ((*(data + frame_size / 2 + 10) == 0xBE) && + (*(data + frame_size / 2 + 12) == 0xAF)) { + return 0; + } + } + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i, j, k, l, lc, good_cnt, ret_val = 0; + unsigned long time; + + ew32(RDT, rxdr->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rxdr->count <= txdr->count) + lc = ((txdr->count / 64) * 2) + 1; + else + lc = ((rxdr->count / 64) * 2) + 1; + + k = l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame(txdr->buffer_info[i].skb, + 1024); + dma_sync_single_for_device(&pdev->dev, + txdr->buffer_info[k].dma, + txdr->buffer_info[k].length, + DMA_TO_DEVICE); + if (unlikely(++k == txdr->count)) + k = 0; + } + ew32(TDT, k); + E1000_WRITE_FLUSH(); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + dma_sync_single_for_cpu(&pdev->dev, + rxdr->buffer_info[l].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame( + rxdr->buffer_info[l].rxbuf.data + + NET_SKB_PAD + NET_IP_ALIGN, + 1024); + if (!ret_val) + good_cnt++; + if (unlikely(++l == rxdr->count)) + l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } 
while (good_cnt < 64 && time_after(time + 20, jiffies)); + + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (time_after_eq(jiffies, time + 2)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + e1000_check_for_link(hw); + if (hw->serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + e1000_check_for_link(hw); + if (hw->autoneg) /* if auto_neg is set wait for it */ + msleep(4000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->flags); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + u16 autoneg_advertised = hw->autoneg_advertised; + u8 forced_speed_duplex = hw->forced_speed_duplex; + u8 autoneg = hw->autoneg; + + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + e1000_close(netdev); + else + e1000_reset(adapter); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + /* make sure the phy is powered up */ + e1000_power_up_phy(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + hw->autoneg_advertised = autoneg_advertised; + hw->forced_speed_duplex = forced_speed_duplex; + hw->autoneg = autoneg; + + e1000_reset(adapter); + clear_bit(__E1000_TESTING, &adapter->flags); + if (if_running) + e1000_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + /* Online tests */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__E1000_TESTING, &adapter->flags); + } + 
msleep_interruptible(4 * 1000); +} + +static int e1000_wol_exclusion(struct e1000_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct e1000_hw *hw = &adapter->hw; + int retval = 1; /* fail by default */ + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_PCIE: + /* these don't support WoL at all */ + wol->supported = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events not supported on port B */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* quad port adapters only support WoL on port A */ + if (!adapter->quad_port_a) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + default: + /* dual port cards only support WoL on port A from now on + * unless it was enabled in the eeprom for port B + * so exclude FUNC_1 ports from having WoL enabled + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1 && + !adapter->eeprom_wol) { + wol->supported = 0; + break; + } + + retval = 0; + } + + return retval; +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + /* this function will set ->supported = 0 and return 1 if wol is not + * supported by this hardware + */ + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + /* apply any specific unsupported masks here */ + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* KSP3 does not support UCAST wake-ups */ + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + break; + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + if (wol->wolopts & WAKE_UCAST) { + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + e1000_setup_led(hw); + return 2; + + case ETHTOOL_ID_ON: + e1000_led_on(hw); + break; + + case ETHTOOL_ID_OFF: + e1000_led_off(hw); + break; + + case ETHTOOL_ID_INACTIVE: + e1000_cleanup_led(hw); + } + + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->hw.mac_type < e1000_82545) + return -EOPNOTSUPP; + + if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac_type < e1000_82545) + return -EOPNOTSUPP; + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr = adapter->itr_setting = 4; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + else + ew32(ITR, 0); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int i; + const struct e1000_stats *stat = e1000_gstrings_stats; + + e1000_update_stats(adapter); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) { + char *p; + + switch (stat->type) { + case NETDEV_STATS: + p = (char *)netdev + stat->stat_offset; + break; + case E1000_STATS: + p = (char *)adapter + stat->stat_offset; + break; + default: + netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n", + stat->type, i); + continue; + } + + if (stat->sizeof_stat == sizeof(u64)) + data[i] = *(u64 *)p; + else + data[i] = *(u32 *)p; + } +/* BUG_ON(i != E1000_STATS_LEN); */ +} + +static void 
e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = e1000_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e1000_get_link_ksettings, + .set_link_ksettings = e1000_set_link_ksettings, +}; + +void e1000_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &e1000_ethtool_ops; +} diff --git a/devices/e1000/e1000_ethtool-6.1-ethercat.c b/devices/e1000/e1000_ethtool-6.1-ethercat.c new file mode 100644 index 00000000..3cbb33c1 --- /dev/null +++ b/devices/e1000/e1000_ethtool-6.1-ethercat.c @@ -0,0 +1,1893 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/
+
+/* ethtool support for e1000 */
+
+#include "e1000-6.1-ethercat.h"
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+
+enum {NETDEV_STATS, E1000_STATS};
+
+struct e1000_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int type;
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define E1000_STAT(m)	E1000_STATS, \
+		sizeof(((struct e1000_adapter *)0)->m), \
+		offsetof(struct e1000_adapter, m)
+#define E1000_NETDEV_STAT(m)	NETDEV_STATS, \
+		sizeof(((struct net_device *)0)->m), \
+		offsetof(struct net_device, m)
+
+static const struct e1000_stats e1000_gstrings_stats[] = {
+	{ "rx_packets", E1000_STAT(stats.gprc) },
+	{ "tx_packets", E1000_STAT(stats.gptc) },
+	{ "rx_bytes", E1000_STAT(stats.gorcl) },
+	{ "tx_bytes", E1000_STAT(stats.gotcl) },
+	{ "rx_broadcast", E1000_STAT(stats.bprc) },
+	{ "tx_broadcast", E1000_STAT(stats.bptc) },
+	{ "rx_multicast", E1000_STAT(stats.mprc) },
+	{ "tx_multicast", E1000_STAT(stats.mptc) },
+	{ "rx_errors", E1000_STAT(stats.rxerrc) },
+	{ "tx_errors", E1000_STAT(stats.txerrc) },
+	{ "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
+	{ "multicast", E1000_STAT(stats.mprc) },
+	{ "collisions", E1000_STAT(stats.colc) },
+	{ "rx_length_errors", E1000_STAT(stats.rlerrc) },
+	{ "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
+	{ "rx_crc_errors", E1000_STAT(stats.crcerrs) },
+	{ "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
+	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
+	{ "rx_missed_errors", E1000_STAT(stats.mpc) },
+	{ "tx_aborted_errors", E1000_STAT(stats.ecol) },
+	{ "tx_carrier_errors", E1000_STAT(stats.tncrs) },
+	{ "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
+	{ "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
+	{ "tx_window_errors", E1000_STAT(stats.latecol) },
+	{ "tx_abort_late_coll", E1000_STAT(stats.latecol) },
+	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
+	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
+	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+	{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
+	{ "tx_restart_queue", E1000_STAT(restart_queue) },
+	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
+	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
+	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
+	{ "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
+	{ "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
+	{ "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
+	{ "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
+	{ "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
+	{ "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
+	{ "rx_long_byte_count", E1000_STAT(stats.gorcl) },
+	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
+	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
+	{ "tx_smbus", E1000_STAT(stats.mgptc) },
+	{ "rx_smbus", E1000_STAT(stats.mgprc) },
+	{ "dropped_smbus", E1000_STAT(stats.mgpdc) },
+};
+
+#define E1000_QUEUE_STATS_LEN 0
+#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test (offline)", "Eeprom test (offline)",
+	"Interrupt test (offline)", "Loopback test (offline)",
+	"Link test (on/offline)"
+};
+
+#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
+
+static int e1000_get_link_ksettings(struct net_device *netdev,
+				    struct ethtool_link_ksettings *cmd)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw
*hw = &adapter->hw; + u32 supported, advertising; + + if (hw->media_type == e1000_media_type_copper) { + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + advertising = ADVERTISED_TP; + + if (hw->autoneg == 1) { + advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + advertising |= hw->autoneg_advertised; + } + + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy_addr; + } else { + supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + cmd->base.port = PORT_FIBRE; + } + + if (er32(STATUS) & E1000_STATUS_LU) { + e1000_get_speed_and_duplex(hw, &adapter->link_speed, + &adapter->link_duplex); + cmd->base.speed = adapter->link_speed; + + /* unfortunately FULL_DUPLEX != DUPLEX_FULL + * and HALF_DUPLEX != DUPLEX_HALF + */ + if (adapter->link_duplex == FULL_DUPLEX) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + cmd->base.autoneg = ((hw->media_type == e1000_media_type_fiber) || + hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 1; MDI => 0 */ + if ((hw->media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + cmd->base.eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? + ETH_TP_MDI_X : ETH_TP_MDI); + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->mdix; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + + return 0; +} + +static int e1000_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
+ */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (cmd->base.autoneg != AUTONEG_ENABLE)) { + e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->autoneg = 1; + if (hw->media_type == e1000_media_type_fiber) + hw->autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->autoneg_advertised = advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + } else { + u32 speed = cmd->base.speed; + /* calling this overrides forced MDI setting */ + if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) { + clear_bit(__E1000_RESETTING, &adapter->flags); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->mdix = AUTO_ALL_MODES; + else + hw->mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + clear_bit(__E1000_RESETTING, &adapter->flags); + return 0; +} + +static u32 e1000_get_link(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + adapter->hw.get_link_status = 1; + + return e1000_has_link(adapter); +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc == E1000_FC_RX_PAUSE) { + pause->rx_pause = 1; + } else if (hw->fc == E1000_FC_TX_PAUSE) { + pause->tx_pause = 1; + } else if (hw->fc == E1000_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_FULL; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_RX_PAUSE; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_TX_PAUSE; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_NONE; + + hw->original_fc = hw->fc; + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + } else + retval = ((hw->media_type == e1000_media_type_fiber) ? 
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw)); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return retval; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN); + regs_buff[9] = er32(TDH); + regs_buff[10] = er32(TDT); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */ + if (hw->phy_type == e1000_phy_igp) { + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_A); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_B); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[14] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_C); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[15] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_D); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[16] = (u32)phy_data; /* cable length */ + regs_buff[17] = 0; /* extended 10bt distance (not needed) */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[18] = (u32)phy_data; /* cable polarity */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_PCS_INIT_REG); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[19] = (u32)phy_data; /* cable polarity */ + regs_buff[20] = 0; /* polarity correction enabled (always) */ + regs_buff[22] = 0; /* phy receive errors (unavailable) */ + regs_buff[23] = regs_buff[18]; /* mdix mode */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + } else { + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy 
reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + if (hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + regs_buff[26] = er32(MANC); + } +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + return hw->eeprom.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->eeprom.type == e1000_eeprom_spi) + ret_val = e1000_read_eeprom(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_eeprom(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = e1000_write_eeprom(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum over the first part of the 
EEPROM if needed */ + if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG)) + e1000_update_eeprom_checksum(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + strscpy(drvinfo->driver, e1000_driver_name, + sizeof(drvinfo->driver)); + + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr = adapter->tx_ring; + struct e1000_rx_ring *rxdr = adapter->rx_ring; + + ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD : + E1000_MAX_82544_RXD; + ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD : + E1000_MAX_82544_TXD; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr, *tx_old; + struct e1000_rx_ring *rxdr, *rx_old; + int i, err; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (netif_running(adapter->netdev)) + e1000_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), + GFP_KERNEL); + if (!txdr) + goto err_alloc_tx; + + rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), + GFP_KERNEL); + if (!rxdr) + goto err_alloc_rx; + + adapter->tx_ring = txdr; + adapter->rx_ring = rxdr; + + rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); + rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ? + E1000_MAX_RXD : E1000_MAX_82544_RXD)); + rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); + txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); + txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ? 
+ E1000_MAX_TXD : E1000_MAX_82544_TXD)); + txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); + + for (i = 0; i < adapter->num_tx_queues; i++) + txdr[i].count = txdr->count; + for (i = 0; i < adapter->num_rx_queues; i++) + rxdr[i].count = rxdr->count; + + err = 0; + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* save the new, restore the old in order to free it, + * then restore the new back again + */ + + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + e1000_free_all_rx_resources(adapter); + e1000_free_all_tx_resources(adapter); + adapter->rx_ring = rxdr; + adapter->tx_ring = txdr; + err = e1000_up(adapter); + } + kfree(tx_old); + kfree(rx_old); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; + +err_setup_tx: + e1000_free_all_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rxdr); +err_alloc_rx: + kfree(txdr); +err_alloc_tx: + if (netif_running(adapter->netdev)) + e1000_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + int i; + + for (i = 0; i < ARRAY_SIZE(test); i++) { + writel(write & test[i], address); + read = readl(address); + if (read != (write & test[i] & mask)) { + e_err(drv, "pattern test reg %04X failed: " + "got 0x%08X expected 0x%08X\n", + reg, read, (write & test[i] & mask)); + *data = reg; + return true; + } + } + return false; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + + writel(write & mask, address); + read = readl(address); + if ((read & mask) != (write & mask)) { + e_err(drv, "set/check reg %04X test failed: " + "got 0x%08X expected 0x%08X\n", + reg, (read & mask), (write & mask)); + *data = reg; + return true; + } + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + u32 value, before, after; + u32 i, toggle; + struct e1000_hw *hw = &adapter->hw; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. 
+ */ + + /* there are several bits on newer hardware that are r/w */ + toggle = 0xFFFFF833; + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err(drv, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); + + REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); + + before = 0x06DFB3FE; + REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); + + if (hw->mac_type >= e1000_82543) { + REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); + value = E1000_RAR_ENTRIES; + for (i = 0; i < value; i++) { + REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), + 0x8003FFFF, 0xFFFFFFFF); + } + } else { + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); + } + + value = E1000_MC_TBL_SIZE; + for (i = 0; i < value; i++) + REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) { + *data = 1; + break; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16)EEPROM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0; + bool shared_int = true; + u32 irq = adapter->pdev->irq; + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* NOTE: we don't test MSI interrupts here, yet + * Hook up test interrupt handler just for this test + */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) + shared_int = false; + else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", (shared_int ? 
+ "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Test each interrupt */ + for (; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (txdr->desc && txdr->buffer_info) { + for (i = 0; i < txdr->count; i++) { + if (txdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + txdr->buffer_info[i].dma, + txdr->buffer_info[i].length, + DMA_TO_DEVICE); + dev_kfree_skb(txdr->buffer_info[i].skb); + } + } + + if (rxdr->desc && rxdr->buffer_info) { + for (i = 0; i < rxdr->count; i++) { + if (rxdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + rxdr->buffer_info[i].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + kfree(rxdr->buffer_info[i].rxbuf.data); + } + } + + if (txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + txdr->desc = NULL; + } + if (rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + rxdr->desc = NULL; + } + + kfree(txdr->buffer_info); + txdr->buffer_info = NULL; + kfree(rxdr->buffer_info); + rxdr->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + u32 rctl; + int i, ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!txdr->count) + txdr->count = E1000_DEFAULT_TXD; + + txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer), + GFP_KERNEL); + if (!txdr->buffer_info) { + ret_val = 1; + goto err_nomem; + } + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { + ret_val = 2; + goto err_nomem; + } + txdr->next_to_use = txdr->next_to_clean = 
0; + + ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH, ((u64)txdr->dma >> 32)); + ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc)); + ew32(TDH, 0); + ew32(TDT, 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < txdr->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); + struct sk_buff *skb; + unsigned int size = 1024; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, size); + txdr->buffer_info[i].skb = skb; + txdr->buffer_info[i].length = skb->len; + txdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RPS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rxdr->count) + rxdr->count = E1000_DEFAULT_RXD; + + rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer), + GFP_KERNEL); + if (!rxdr->buffer_info) { + ret_val = 5; + goto err_nomem; + } + + rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { + ret_val = 6; + goto err_nomem; + } + rxdr->next_to_use = rxdr->next_to_clean = 0; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF)); + ew32(RDBAH, ((u64)rxdr->dma >> 32)); + ew32(RDLEN, rxdr->size); + ew32(RDH, 0); + ew32(RDT, 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rxdr->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); + u8 *buf; + + buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN, + GFP_KERNEL); + if (!buf) { + ret_val = 7; + goto err_nomem; + } + rxdr->buffer_info[i].rxbuf.data = buf; + + rxdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, + buf + NET_SKB_PAD + NET_IP_ALIGN, + E1000_RXBUFFER_2048, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_write_phy_reg(hw, 29, 0x001F); + e1000_write_phy_reg(hw, 30, 0x8FFC); + e1000_write_phy_reg(hw, 29, 0x001A); + e1000_write_phy_reg(hw, 30, 0x8FF0); +} + +static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg; + + /* Because we reset the PHY above, we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock. This + * value defaults back to a 2.5MHz clock when the PHY is reset. 
+ */ + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_EPSCR_TX_CLK_25; + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); + + /* In addition, because of the s/w reset above, we need to enable + * CRS on TX. This must be set for both full and half duplex + * operation. + */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); +} + +static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg; + u16 phy_reg; + + /* Setup the Device Control Register for PHY loopback test. */ + + ctrl_reg = er32(CTRL); + ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ + E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + + /* Read the PHY Specific Control Register (0x10) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + + /* Clear Auto-Crossover bits in PHY Specific Control Register + * (bits 6:5). + */ + phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); + + /* Perform software reset on the PHY */ + e1000_phy_reset(hw); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + e1000_write_phy_reg(hw, PHY_CTRL, 0x8100); + + /* Wait for reset to complete. */ + udelay(500); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_phy_disable_receiver(adapter); + + /* Set the loopback bit in the PHY control register. */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + + /* Setup TX_CLK and TX_CRS one more time. */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Check Phy Configuration */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg != 0x4100) + return 9; + + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + if (phy_reg != 0x0070) + return 10; + + e1000_read_phy_reg(hw, 29, &phy_reg); + if (phy_reg != 0x001A) + return 11; + + return 0; +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u32 stat_reg = 0; + + hw->autoneg = false; + + if (hw->phy_type == e1000_phy_m88) { + /* Auto-MDI/MDIX Off */ + e1000_write_phy_reg(hw, + M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); + /* autoneg off */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); + } + + ctrl_reg = er32(CTRL); + + /* force 1000, set loopback */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (hw->media_type == e1000_media_type_copper && + hw->phy_type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + else { + /* Set the ILOS bit on the fiber Nic is half + * duplex link is detected. + */ + stat_reg = er32(STATUS); + if ((stat_reg & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy_type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg = 0; + u16 count = 0; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->media_type == e1000_media_type_copper) { + /* Attempt to setup Loopback mode on Non-integrated PHY. + * Some PHY registers get corrupted at random, so + * attempt this 10 times. + */ + while (e1000_nonintegrated_phy_loopback(adapter) && + count++ < 10); + if (count < 11) + return 0; + } + break; + + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + return e1000_integrated_phy_loopback(adapter); + default: + /* Default PHY loopback work is to read the MII + * control register and assert bit 14 (loopback mode). 
+ */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + return 0; + } + + return 8; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + return e1000_set_phy_loopback(adapter); + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->media_type == e1000_media_type_copper) { + return e1000_set_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + default: + hw->autoneg = true; + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + e1000_phy_reset(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + skb->data[frame_size / 2 + 10] = 0xBE; + skb->data[frame_size / 2 + 12] = 0xAF; +} + +static int e1000_check_lbtest_frame(const unsigned char *data, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(data + 3) == 0xFF) { + if ((*(data + frame_size / 2 + 10) == 0xBE) && + (*(data + frame_size / 2 + 12) == 0xAF)) { + return 0; + } + } + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i, j, k, l, lc, good_cnt, ret_val = 0; + unsigned long time; + + ew32(RDT, rxdr->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rxdr->count <= txdr->count) + lc = ((txdr->count / 64) * 2) + 1; + else + lc = ((rxdr->count / 64) * 2) + 1; + + k = l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame(txdr->buffer_info[i].skb, + 1024); + dma_sync_single_for_device(&pdev->dev, + txdr->buffer_info[k].dma, + txdr->buffer_info[k].length, + DMA_TO_DEVICE); + if (unlikely(++k == txdr->count)) + k = 0; + } + ew32(TDT, k); + E1000_WRITE_FLUSH(); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + dma_sync_single_for_cpu(&pdev->dev, + rxdr->buffer_info[l].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame( + rxdr->buffer_info[l].rxbuf.data + + NET_SKB_PAD + NET_IP_ALIGN, + 1024); + if (!ret_val) + good_cnt++; + if (unlikely(++l == rxdr->count)) + l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } 
while (good_cnt < 64 && time_after(time + 20, jiffies)); + + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (time_after_eq(jiffies, time + 2)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + e1000_check_for_link(hw); + if (hw->serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + e1000_check_for_link(hw); + if (hw->autoneg) /* if auto_neg is set wait for it */ + msleep(4000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->flags); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + u16 autoneg_advertised = hw->autoneg_advertised; + u8 forced_speed_duplex = hw->forced_speed_duplex; + u8 autoneg = hw->autoneg; + + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + e1000_close(netdev); + else + e1000_reset(adapter); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + /* make sure the phy is powered up */ + e1000_power_up_phy(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + hw->autoneg_advertised = autoneg_advertised; + hw->forced_speed_duplex = forced_speed_duplex; + hw->autoneg = autoneg; + + e1000_reset(adapter); + clear_bit(__E1000_TESTING, &adapter->flags); + if (if_running) + e1000_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + /* Online tests */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__E1000_TESTING, &adapter->flags); + } + 
msleep_interruptible(4 * 1000); +} + +static int e1000_wol_exclusion(struct e1000_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct e1000_hw *hw = &adapter->hw; + int retval = 1; /* fail by default */ + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_PCIE: + /* these don't support WoL at all */ + wol->supported = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events not supported on port B */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* quad port adapters only support WoL on port A */ + if (!adapter->quad_port_a) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + default: + /* dual port cards only support WoL on port A from now on + * unless it was enabled in the eeprom for port B + * so exclude FUNC_1 ports from having WoL enabled + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1 && + !adapter->eeprom_wol) { + wol->supported = 0; + break; + } + + retval = 0; + } + + return retval; +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + /* this function will set ->supported = 0 and return 1 if wol is not + * supported by this hardware + */ + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + /* apply any specific unsupported masks here */ + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* KSP3 does not support UCAST wake-ups */ + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + break; + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + if (wol->wolopts & WAKE_UCAST) { + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + e1000_setup_led(hw); + return 2; + + case ETHTOOL_ID_ON: + e1000_led_on(hw); + break; + + case ETHTOOL_ID_OFF: + e1000_led_off(hw); + break; + + case ETHTOOL_ID_INACTIVE: + e1000_cleanup_led(hw); + } + + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->hw.mac_type < e1000_82545) + return -EOPNOTSUPP; + + if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac_type < e1000_82545) + return -EOPNOTSUPP; + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr = adapter->itr_setting = 4; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + else + ew32(ITR, 0); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int i; + const struct e1000_stats *stat = e1000_gstrings_stats; + + e1000_update_stats(adapter); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) { + char *p; + + switch (stat->type) { + case NETDEV_STATS: + p = (char *)netdev + stat->stat_offset; + break; + case E1000_STATS: + p = (char *)adapter + stat->stat_offset; + break; + default: + netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n", + stat->type, i); + continue; + } + + if (stat->sizeof_stat == sizeof(u64)) + data[i] = *(u64 *)p; + else + data[i] = *(u32 *)p; + } +/* BUG_ON(i != E1000_STATS_LEN); */ +} + +static void 
e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = e1000_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e1000_get_link_ksettings, + .set_link_ksettings = e1000_set_link_ksettings, +}; + +void e1000_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &e1000_ethtool_ops; +} diff --git a/devices/e1000/e1000_ethtool-6.1-orig.c b/devices/e1000/e1000_ethtool-6.1-orig.c new file mode 100644 index 00000000..d06d29c6 --- /dev/null +++ b/devices/e1000/e1000_ethtool-6.1-orig.c @@ -0,0 +1,1893 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ + +/* ethtool support for e1000 */ + +#include "e1000.h" +#include <linux/jiffies.h> +#include <linux/uaccess.h> + +enum {NETDEV_STATS, E1000_STATS}; + +struct e1000_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#define E1000_STAT(m) E1000_STATS, \ + sizeof(((struct e1000_adapter *)0)->m), \ + offsetof(struct e1000_adapter, m) +#define E1000_NETDEV_STAT(m) NETDEV_STATS, \ + sizeof(((struct net_device *)0)->m), \ + offsetof(struct net_device, m) + +static const struct e1000_stats e1000_gstrings_stats[] = { + { "rx_packets", E1000_STAT(stats.gprc) }, + { "tx_packets", E1000_STAT(stats.gptc) }, + { "rx_bytes", E1000_STAT(stats.gorcl) }, + { "tx_bytes", E1000_STAT(stats.gotcl) }, + { "rx_broadcast", E1000_STAT(stats.bprc) }, + { "tx_broadcast", E1000_STAT(stats.bptc) }, + { "rx_multicast", E1000_STAT(stats.mprc) }, + { "tx_multicast", E1000_STAT(stats.mptc) }, + { "rx_errors", E1000_STAT(stats.rxerrc) }, + { "tx_errors", E1000_STAT(stats.txerrc) }, + { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) }, + { "multicast", E1000_STAT(stats.mprc) }, + { "collisions", E1000_STAT(stats.colc) }, + { "rx_length_errors", E1000_STAT(stats.rlerrc) }, + { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) }, + { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, + { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) }, + { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, + { "rx_missed_errors", E1000_STAT(stats.mpc) }, + { "tx_aborted_errors", E1000_STAT(stats.ecol) }, + { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, + { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) }, + { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) }, + { "tx_window_errors", E1000_STAT(stats.latecol) }, + { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, + { "tx_deferred_ok", E1000_STAT(stats.dc) }, + { "tx_single_coll_ok", E1000_STAT(stats.scc) }, + { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, + { "tx_timeout_count", E1000_STAT(tx_timeout_count) }, + { "tx_restart_queue", E1000_STAT(restart_queue) }, + { "rx_long_length_errors", E1000_STAT(stats.roc) }, + { "rx_short_length_errors", E1000_STAT(stats.ruc) }, + { "rx_align_errors", E1000_STAT(stats.algnerrc) }, + { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) }, + { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) }, + { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) }, + { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) }, + { "tx_flow_control_xon", E1000_STAT(stats.xontxc) }, + { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) }, + { "rx_long_byte_count", E1000_STAT(stats.gorcl) }, + { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, + { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, + { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, + { "tx_smbus", E1000_STAT(stats.mgptc) }, + { "rx_smbus", E1000_STAT(stats.mgprc) }, + { "dropped_smbus", E1000_STAT(stats.mgpdc) }, +}; + +#define E1000_QUEUE_STATS_LEN 0 +#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) +#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN) +static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" }; + +#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) + +static int e1000_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw =
&adapter->hw; + u32 supported, advertising; + + if (hw->media_type == e1000_media_type_copper) { + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + advertising = ADVERTISED_TP; + + if (hw->autoneg == 1) { + advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + advertising |= hw->autoneg_advertised; + } + + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy_addr; + } else { + supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + cmd->base.port = PORT_FIBRE; + } + + if (er32(STATUS) & E1000_STATUS_LU) { + e1000_get_speed_and_duplex(hw, &adapter->link_speed, + &adapter->link_duplex); + cmd->base.speed = adapter->link_speed; + + /* unfortunately FULL_DUPLEX != DUPLEX_FULL + * and HALF_DUPLEX != DUPLEX_HALF + */ + if (adapter->link_duplex == FULL_DUPLEX) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + cmd->base.autoneg = ((hw->media_type == e1000_media_type_fiber) || + hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 1; MDI => 0 */ + if ((hw->media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + cmd->base.eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? + ETH_TP_MDI_X : ETH_TP_MDI); + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->mdix; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + + return 0; +} + +static int e1000_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
+ */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (cmd->base.autoneg != AUTONEG_ENABLE)) { + e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->autoneg = 1; + if (hw->media_type == e1000_media_type_fiber) + hw->autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->autoneg_advertised = advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + } else { + u32 speed = cmd->base.speed; + /* calling this overrides forced MDI setting */ + if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) { + clear_bit(__E1000_RESETTING, &adapter->flags); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->mdix = AUTO_ALL_MODES; + else + hw->mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + clear_bit(__E1000_RESETTING, &adapter->flags); + return 0; +} + +static u32 e1000_get_link(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + adapter->hw.get_link_status = 1; + + return e1000_has_link(adapter); +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc == E1000_FC_RX_PAUSE) { + pause->rx_pause = 1; + } else if (hw->fc == E1000_FC_TX_PAUSE) { + pause->tx_pause = 1; + } else if (hw->fc == E1000_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_FULL; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_RX_PAUSE; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_TX_PAUSE; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_NONE; + + hw->original_fc = hw->fc; + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + } else + retval = ((hw->media_type == e1000_media_type_fiber) ? 
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw)); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return retval; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN); + regs_buff[9] = er32(TDH); + regs_buff[10] = er32(TDT); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */ + if (hw->phy_type == e1000_phy_igp) { + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_A); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_B); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[14] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_C); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[15] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_D); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[16] = (u32)phy_data; /* cable length */ + regs_buff[17] = 0; /* extended 10bt distance (not needed) */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[18] = (u32)phy_data; /* cable polarity */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_PCS_INIT_REG); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[19] = (u32)phy_data; /* cable polarity */ + regs_buff[20] = 0; /* polarity correction enabled (always) */ + regs_buff[22] = 0; /* phy receive errors (unavailable) */ + regs_buff[23] = regs_buff[18]; /* mdix mode */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + } else { + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy 
reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + if (hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + regs_buff[26] = er32(MANC); + } +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + return hw->eeprom.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->eeprom.type == e1000_eeprom_spi) + ret_val = e1000_read_eeprom(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_eeprom(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = e1000_write_eeprom(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum over the first part of the 
EEPROM if needed */ + if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG)) + e1000_update_eeprom_checksum(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + strscpy(drvinfo->driver, e1000_driver_name, + sizeof(drvinfo->driver)); + + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr = adapter->tx_ring; + struct e1000_rx_ring *rxdr = adapter->rx_ring; + + ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD : + E1000_MAX_82544_RXD; + ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD : + E1000_MAX_82544_TXD; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr, *tx_old; + struct e1000_rx_ring *rxdr, *rx_old; + int i, err; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (netif_running(adapter->netdev)) + e1000_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), + GFP_KERNEL); + if (!txdr) + goto err_alloc_tx; + + rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), + GFP_KERNEL); + if (!rxdr) + goto err_alloc_rx; + + adapter->tx_ring = txdr; + adapter->rx_ring = rxdr; + + rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); + rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ? + E1000_MAX_RXD : E1000_MAX_82544_RXD)); + rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); + txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); + txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ? 
+ E1000_MAX_TXD : E1000_MAX_82544_TXD)); + txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); + + for (i = 0; i < adapter->num_tx_queues; i++) + txdr[i].count = txdr->count; + for (i = 0; i < adapter->num_rx_queues; i++) + rxdr[i].count = rxdr->count; + + err = 0; + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* save the new, restore the old in order to free it, + * then restore the new back again + */ + + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + e1000_free_all_rx_resources(adapter); + e1000_free_all_tx_resources(adapter); + adapter->rx_ring = rxdr; + adapter->tx_ring = txdr; + err = e1000_up(adapter); + } + kfree(tx_old); + kfree(rx_old); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; + +err_setup_tx: + e1000_free_all_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rxdr); +err_alloc_rx: + kfree(txdr); +err_alloc_tx: + if (netif_running(adapter->netdev)) + e1000_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + int i; + + for (i = 0; i < ARRAY_SIZE(test); i++) { + writel(write & test[i], address); + read = readl(address); + if (read != (write & test[i] & mask)) { + e_err(drv, "pattern test reg %04X failed: " + "got 0x%08X expected 0x%08X\n", + reg, read, (write & test[i] & mask)); + *data = reg; + return true; + } + } + return false; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + + writel(write & mask, address); + read = readl(address); + if ((read & mask) != (write & mask)) { + e_err(drv, "set/check reg %04X test failed: " + "got 0x%08X expected 0x%08X\n", + reg, (read & mask), (write & mask)); + *data = reg; + return true; + } + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + u32 value, before, after; + u32 i, toggle; + struct e1000_hw *hw = &adapter->hw; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. 
+ */ + + /* there are several bits on newer hardware that are r/w */ + toggle = 0xFFFFF833; + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err(drv, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); + + REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); + + before = 0x06DFB3FE; + REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); + + if (hw->mac_type >= e1000_82543) { + REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); + value = E1000_RAR_ENTRIES; + for (i = 0; i < value; i++) { + REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), + 0x8003FFFF, 0xFFFFFFFF); + } + } else { + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); + } + + value = E1000_MC_TBL_SIZE; + for (i = 0; i < value; i++) + REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) { + *data = 1; + break; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16)EEPROM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0; + bool shared_int = true; + u32 irq = adapter->pdev->irq; + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* NOTE: we don't test MSI interrupts here, yet + * Hook up test interrupt handler just for this test + */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) + shared_int = false; + else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", (shared_int ? 
+ "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Test each interrupt */ + for (; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (txdr->desc && txdr->buffer_info) { + for (i = 0; i < txdr->count; i++) { + if (txdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + txdr->buffer_info[i].dma, + txdr->buffer_info[i].length, + DMA_TO_DEVICE); + dev_kfree_skb(txdr->buffer_info[i].skb); + } + } + + if (rxdr->desc && rxdr->buffer_info) { + for (i = 0; i < rxdr->count; i++) { + if (rxdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + rxdr->buffer_info[i].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + kfree(rxdr->buffer_info[i].rxbuf.data); + } + } + + if (txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + txdr->desc = NULL; + } + if (rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + rxdr->desc = NULL; + } + + kfree(txdr->buffer_info); + txdr->buffer_info = NULL; + kfree(rxdr->buffer_info); + rxdr->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + u32 rctl; + int i, ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!txdr->count) + txdr->count = E1000_DEFAULT_TXD; + + txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer), + GFP_KERNEL); + if (!txdr->buffer_info) { + ret_val = 1; + goto err_nomem; + } + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { + ret_val = 2; + goto err_nomem; + } + txdr->next_to_use = txdr->next_to_clean = 
0; + + ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH, ((u64)txdr->dma >> 32)); + ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc)); + ew32(TDH, 0); + ew32(TDT, 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < txdr->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); + struct sk_buff *skb; + unsigned int size = 1024; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, size); + txdr->buffer_info[i].skb = skb; + txdr->buffer_info[i].length = skb->len; + txdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RPS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rxdr->count) + rxdr->count = E1000_DEFAULT_RXD; + + rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer), + GFP_KERNEL); + if (!rxdr->buffer_info) { + ret_val = 5; + goto err_nomem; + } + + rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { + ret_val = 6; + goto err_nomem; + } + rxdr->next_to_use = rxdr->next_to_clean = 0; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF)); + ew32(RDBAH, ((u64)rxdr->dma >> 32)); + ew32(RDLEN, rxdr->size); + ew32(RDH, 0); + ew32(RDT, 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rxdr->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); + u8 *buf; + + buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN, + GFP_KERNEL); + if (!buf) { + ret_val = 7; + goto err_nomem; + } + rxdr->buffer_info[i].rxbuf.data = buf; + + rxdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, + buf + NET_SKB_PAD + NET_IP_ALIGN, + E1000_RXBUFFER_2048, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_write_phy_reg(hw, 29, 0x001F); + e1000_write_phy_reg(hw, 30, 0x8FFC); + e1000_write_phy_reg(hw, 29, 0x001A); + e1000_write_phy_reg(hw, 30, 0x8FF0); +} + +static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg; + + /* Because we reset the PHY above, we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock. This + * value defaults back to a 2.5MHz clock when the PHY is reset. 
+ */ + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_EPSCR_TX_CLK_25; + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); + + /* In addition, because of the s/w reset above, we need to enable + * CRS on TX. This must be set for both full and half duplex + * operation. + */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); +} + +static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg; + u16 phy_reg; + + /* Setup the Device Control Register for PHY loopback test. */ + + ctrl_reg = er32(CTRL); + ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ + E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + + /* Read the PHY Specific Control Register (0x10) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + + /* Clear Auto-Crossover bits in PHY Specific Control Register + * (bits 6:5). + */ + phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); + + /* Perform software reset on the PHY */ + e1000_phy_reset(hw); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + e1000_write_phy_reg(hw, PHY_CTRL, 0x8100); + + /* Wait for reset to complete. */ + udelay(500); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_phy_disable_receiver(adapter); + + /* Set the loopback bit in the PHY control register. */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + + /* Setup TX_CLK and TX_CRS one more time. */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Check Phy Configuration */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg != 0x4100) + return 9; + + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + if (phy_reg != 0x0070) + return 10; + + e1000_read_phy_reg(hw, 29, &phy_reg); + if (phy_reg != 0x001A) + return 11; + + return 0; +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u32 stat_reg = 0; + + hw->autoneg = false; + + if (hw->phy_type == e1000_phy_m88) { + /* Auto-MDI/MDIX Off */ + e1000_write_phy_reg(hw, + M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); + /* autoneg off */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); + } + + ctrl_reg = er32(CTRL); + + /* force 1000, set loopback */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (hw->media_type == e1000_media_type_copper && + hw->phy_type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + else { + /* Set the ILOS bit on the fiber Nic is half + * duplex link is detected. + */ + stat_reg = er32(STATUS); + if ((stat_reg & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy_type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg = 0; + u16 count = 0; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->media_type == e1000_media_type_copper) { + /* Attempt to setup Loopback mode on Non-integrated PHY. + * Some PHY registers get corrupted at random, so + * attempt this 10 times. + */ + while (e1000_nonintegrated_phy_loopback(adapter) && + count++ < 10); + if (count < 11) + return 0; + } + break; + + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + return e1000_integrated_phy_loopback(adapter); + default: + /* Default PHY loopback work is to read the MII + * control register and assert bit 14 (loopback mode). 
+ */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + return 0; + } + + return 8; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + return e1000_set_phy_loopback(adapter); + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->media_type == e1000_media_type_copper) { + return e1000_set_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + default: + hw->autoneg = true; + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + e1000_phy_reset(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + skb->data[frame_size / 2 + 10] = 0xBE; + skb->data[frame_size / 2 + 12] = 0xAF; +} + +static int e1000_check_lbtest_frame(const unsigned char *data, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(data + 3) == 0xFF) { + if ((*(data + frame_size / 2 + 10) == 0xBE) && + (*(data + frame_size / 2 + 12) == 0xAF)) { + return 0; + } + } + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i, j, k, l, lc, good_cnt, ret_val = 0; + unsigned long time; + + ew32(RDT, rxdr->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rxdr->count <= txdr->count) + lc = ((txdr->count / 64) * 2) + 1; + else + lc = ((rxdr->count / 64) * 2) + 1; + + k = l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame(txdr->buffer_info[i].skb, + 1024); + dma_sync_single_for_device(&pdev->dev, + txdr->buffer_info[k].dma, + txdr->buffer_info[k].length, + DMA_TO_DEVICE); + if (unlikely(++k == txdr->count)) + k = 0; + } + ew32(TDT, k); + E1000_WRITE_FLUSH(); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + dma_sync_single_for_cpu(&pdev->dev, + rxdr->buffer_info[l].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame( + rxdr->buffer_info[l].rxbuf.data + + NET_SKB_PAD + NET_IP_ALIGN, + 1024); + if (!ret_val) + good_cnt++; + if (unlikely(++l == rxdr->count)) + l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } 
while (good_cnt < 64 && time_after(time + 20, jiffies)); + + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (time_after_eq(jiffies, time + 2)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + e1000_check_for_link(hw); + if (hw->serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + e1000_check_for_link(hw); + if (hw->autoneg) /* if auto_neg is set wait for it */ + msleep(4000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->flags); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + u16 autoneg_advertised = hw->autoneg_advertised; + u8 forced_speed_duplex = hw->forced_speed_duplex; + u8 autoneg = hw->autoneg; + + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + e1000_close(netdev); + else + e1000_reset(adapter); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + /* make sure the phy is powered up */ + e1000_power_up_phy(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + hw->autoneg_advertised = autoneg_advertised; + hw->forced_speed_duplex = forced_speed_duplex; + hw->autoneg = autoneg; + + e1000_reset(adapter); + clear_bit(__E1000_TESTING, &adapter->flags); + if (if_running) + e1000_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + /* Online tests */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__E1000_TESTING, &adapter->flags); + } + 
msleep_interruptible(4 * 1000); +} + +static int e1000_wol_exclusion(struct e1000_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct e1000_hw *hw = &adapter->hw; + int retval = 1; /* fail by default */ + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_PCIE: + /* these don't support WoL at all */ + wol->supported = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events not supported on port B */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* quad port adapters only support WoL on port A */ + if (!adapter->quad_port_a) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + default: + /* dual port cards only support WoL on port A from now on + * unless it was enabled in the eeprom for port B + * so exclude FUNC_1 ports from having WoL enabled + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1 && + !adapter->eeprom_wol) { + wol->supported = 0; + break; + } + + retval = 0; + } + + return retval; +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + /* this function will set ->supported = 0 and return 1 if wol is not + * supported by this hardware + */ + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + /* apply any specific unsupported masks here */ + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* KSP3 does not support UCAST wake-ups */ + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + break; + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + if (wol->wolopts & WAKE_UCAST) { + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + e1000_setup_led(hw); + return 2; + + case ETHTOOL_ID_ON: + e1000_led_on(hw); + break; + + case ETHTOOL_ID_OFF: + e1000_led_off(hw); + break; + + case ETHTOOL_ID_INACTIVE: + e1000_cleanup_led(hw); + } + + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->hw.mac_type < e1000_82545) + return -EOPNOTSUPP; + + if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac_type < e1000_82545) + return -EOPNOTSUPP; + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr = adapter->itr_setting = 4; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + else + ew32(ITR, 0); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int i; + const struct e1000_stats *stat = e1000_gstrings_stats; + + e1000_update_stats(adapter); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) { + char *p; + + switch (stat->type) { + case NETDEV_STATS: + p = (char *)netdev + stat->stat_offset; + break; + case E1000_STATS: + p = (char *)adapter + stat->stat_offset; + break; + default: + netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n", + stat->type, i); + continue; + } + + if (stat->sizeof_stat == sizeof(u64)) + data[i] = *(u64 *)p; + else + data[i] = *(u32 *)p; + } +/* BUG_ON(i != E1000_STATS_LEN); */ +} + +static void 
e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = e1000_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = e1000_get_link_ksettings, + .set_link_ksettings = e1000_set_link_ksettings, +}; + +void e1000_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &e1000_ethtool_ops; +} diff --git a/devices/e1000/e1000_hw-5.10-ethercat.c b/devices/e1000/e1000_hw-5.10-ethercat.c new file mode 100644 index 00000000..71a259bf --- /dev/null +++ b/devices/e1000/e1000_hw-5.10-ethercat.c @@ -0,0 +1,5635 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ + +/* e1000_hw.c + * Shared functions for accessing and configuring the MAC + */ + +#include "e1000-5.10-ethercat.h" + +static s32 e1000_check_downshift(struct e1000_hw *hw); +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity); +static void e1000_clear_hw_cntrs(struct e1000_hw *hw); +static void e1000_clear_vfta(struct e1000_hw *hw); +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, + bool link_up); +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); +static s32 e1000_detect_gig_phy(struct e1000_hw *hw); +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length); +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static s32 e1000_id_led_init(struct e1000_hw *hw); +static void e1000_init_rx_addrs(struct e1000_hw *hw); +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value); +static s32 e1000_set_phy_type(struct e1000_hw *hw); +static void e1000_phy_init_script(struct e1000_hw *hw); +static s32 e1000_setup_copper_link(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw); +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw); +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count); +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count); +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data); +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data); +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); +static s32 e1000_acquire_eeprom(struct e1000_hw *hw); +static void e1000_release_eeprom(struct e1000_hw *hw); +static void e1000_standby_eeprom(struct e1000_hw *hw); +static s32 e1000_set_vco_speed(struct e1000_hw *hw); +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw); +static s32 e1000_set_phy_mode(struct e1000_hw *hw); +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); + +/* IGP cable length table */ +static const +u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = { + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, + 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, + 40, 50, 
50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, + 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, + 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, + 110, 110, + 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, + 120, 120 +}; + +static DEFINE_MUTEX(e1000_eeprom_lock); +static DEFINE_SPINLOCK(e1000_phy_lock); + +/** + * e1000_set_phy_type - Set the phy type member in the hw struct. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_set_phy_type(struct e1000_hw *hw) +{ + if (hw->mac_type == e1000_undefined) + return -E1000_ERR_PHY_TYPE; + + switch (hw->phy_id) { + case M88E1000_E_PHY_ID: + case M88E1000_I_PHY_ID: + case M88E1011_I_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1118_E_PHY_ID: + hw->phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: + if (hw->mac_type == e1000_82541 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_type = e1000_phy_igp; + break; + case RTL8211B_PHY_ID: + hw->phy_type = e1000_phy_8211; + break; + case RTL8201N_PHY_ID: + hw->phy_type = e1000_phy_8201; + break; + default: + /* Should never have loaded on this device */ + hw->phy_type = e1000_phy_undefined; + return -E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_phy_init_script(struct e1000_hw *hw) +{ + u16 phy_saved_data; + + if (hw->phy_init_script) { + msleep(20); + + /* Save off the current value of register 0x2F5B to be restored + * at the end of this routine. 
+ */ + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disabled the PHY transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + msleep(20); + + e1000_write_phy_reg(hw, 0x0000, 0x0140); + msleep(5); + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + e1000_write_phy_reg(hw, 0x1F95, 0x0001); + e1000_write_phy_reg(hw, 0x1F71, 0xBD21); + e1000_write_phy_reg(hw, 0x1F79, 0x0018); + e1000_write_phy_reg(hw, 0x1F30, 0x1600); + e1000_write_phy_reg(hw, 0x1F31, 0x0014); + e1000_write_phy_reg(hw, 0x1F32, 0x161C); + e1000_write_phy_reg(hw, 0x1F94, 0x0003); + e1000_write_phy_reg(hw, 0x1F96, 0x003F); + e1000_write_phy_reg(hw, 0x2010, 0x0008); + break; + + case e1000_82541_rev_2: + case e1000_82547_rev_2: + e1000_write_phy_reg(hw, 0x1F73, 0x0099); + break; + default: + break; + } + + e1000_write_phy_reg(hw, 0x0000, 0x3300); + msleep(20); + + /* Now enable the transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (hw->mac_type == e1000_82547) { + u16 fused, fine, coarse; + + /* Move to analog registers page */ + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_SPARE_FUSE_STATUS, + &fused); + + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_STATUS, + &fused); + + fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; + coarse = + fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; + + if (coarse > + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + coarse -= + IGP01E1000_ANALOG_FUSE_COARSE_10; + fine -= IGP01E1000_ANALOG_FUSE_FINE_1; + } else if (coarse == + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + fine -= IGP01E1000_ANALOG_FUSE_FINE_10; + + fused = + (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | + (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | + (coarse & + IGP01E1000_ANALOG_FUSE_COARSE_MASK); + + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_CONTROL, + fused); + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_BYPASS, + IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); + } + } + } +} + +/** + * e1000_set_mac_type - Set the mac type member in the hw struct. 
+ * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + switch (hw->device_id) { + case E1000_DEV_ID_82542: + switch (hw->revision_id) { + case E1000_82542_2_0_REV_ID: + hw->mac_type = e1000_82542_rev2_0; + break; + case E1000_82542_2_1_REV_ID: + hw->mac_type = e1000_82542_rev2_1; + break; + default: + /* Invalid 82542 revision ID */ + return -E1000_ERR_MAC_TYPE; + } + break; + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + hw->mac_type = e1000_82543; + break; + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + hw->mac_type = e1000_82544; + break; + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + hw->mac_type = e1000_82540; + break; + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + hw->mac_type = e1000_82545; + break; + case E1000_DEV_ID_82545GM_COPPER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82545GM_SERDES: + hw->mac_type = e1000_82545_rev_3; + break; + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + hw->mac_type = e1000_82546; + break; + case E1000_DEV_ID_82546GB_COPPER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + hw->mac_type = e1000_82546_rev_3; + break; + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER_LOM: + hw->mac_type = e1000_82541; + break; + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + hw->mac_type = e1000_82541_rev_2; + break; + case E1000_DEV_ID_82547EI: + case E1000_DEV_ID_82547EI_MOBILE: + hw->mac_type = e1000_82547; + break; + case E1000_DEV_ID_82547GI: + hw->mac_type = e1000_82547_rev_2; + break; + case E1000_DEV_ID_INTEL_CE4100_GBE: + hw->mac_type = e1000_ce4100; + break; + default: + /* Should never have loaded on this device */ + return -E1000_ERR_MAC_TYPE; + } + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->asf_firmware_present = true; + break; + default: + break; + } + + /* The 82543 chip does not count tx_carrier_errors properly in + * FD mode + */ + if (hw->mac_type == e1000_82543) + hw->bad_tx_carr_stats_fd = true; + + if (hw->mac_type > e1000_82544) + hw->has_smbus = true; + + return E1000_SUCCESS; +} + +/** + * e1000_set_media_type - Set media type and TBI compatibility. 
+ * @hw: Struct containing variables accessed by shared code + */ +void e1000_set_media_type(struct e1000_hw *hw) +{ + u32 status; + + if (hw->mac_type != e1000_82543) { + /* tbi_compatibility is only valid on 82543 */ + hw->tbi_compatibility_en = false; + } + + switch (hw->device_id) { + case E1000_DEV_ID_82545GM_SERDES: + case E1000_DEV_ID_82546GB_SERDES: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->media_type = e1000_media_type_fiber; + break; + case e1000_ce4100: + hw->media_type = e1000_media_type_copper; + break; + default: + status = er32(STATUS); + if (status & E1000_STATUS_TBIMODE) { + hw->media_type = e1000_media_type_fiber; + /* tbi_compatibility not valid on fiber */ + hw->tbi_compatibility_en = false; + } else { + hw->media_type = e1000_media_type_copper; + } + break; + } + } +} + +/** + * e1000_reset_hw - reset the hardware completely + * @hw: Struct containing variables accessed by shared code + * + * Reset the transmit and receive units; mask and clear all interrupts. + */ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 ctrl_ext; + u32 manc; + u32 led_ctrl; + s32 ret_val; + + /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC with + * the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(); + + /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ + hw->tbi_compatibility_on = false; + + /* Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msleep(10); + + ctrl = er32(CTRL); + + /* Must reset the PHY before resetting the MAC */ + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Issue a global reset to the MAC. This will reset the chip's + * transmit, receive, DMA, and link units. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + e_dbg("Issuing a global reset to MAC\n"); + + switch (hw->mac_type) { + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82546: + case e1000_82541: + case e1000_82541_rev_2: + /* These controllers can't ack the 64-bit write when issuing the + * reset, so use IO-mapping as a workaround to issue the reset + */ + E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); + break; + case e1000_82545_rev_3: + case e1000_82546_rev_3: + /* Reset is performed on a shadow of the control register */ + ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); + break; + case e1000_ce4100: + default: + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + break; + } + + /* After MAC reset, force reload of EEPROM to restore power-on settings + * to device. Later controllers reload the EEPROM automatically, so + * just wait for reload to complete. 
+ */ + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* Wait for reset to complete */ + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + /* Wait for EEPROM reload */ + msleep(2); + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + /* Wait for EEPROM reload */ + msleep(20); + break; + default: + /* Auto read done will delay 5ms or poll based on mac type */ + ret_val = e1000_get_auto_rd_done(hw); + if (ret_val) + return ret_val; + break; + } + + /* Disable HW ARPs on ASF enabled adapters */ + if (hw->mac_type >= e1000_82540) { + manc = er32(MANC); + manc &= ~(E1000_MANC_ARP_EN); + ew32(MANC, manc); + } + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + e1000_phy_init_script(hw); + + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Clear any pending interrupt events. */ + er32(ICR); + + /* If MWI was previously enabled, reenable it. */ + if (hw->mac_type == e1000_82542_rev2_0) { + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw - Performs basic configuration of the adapter. + * @hw: Struct containing variables accessed by shared code + * + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes the receive address registers, + * multicast table, and VLAN filter table. Calls routines to setup link + * configuration and flow control settings. Clears all on-chip counters. Leaves + * the transmit and receive units disabled and uninitialized. + */ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + s32 ret_val; + u32 mta_size; + u32 ctrl_ext; + + /* Initialize Identification LED */ + ret_val = e1000_id_led_init(hw); + if (ret_val) { + e_dbg("Error Initializing Identification LED\n"); + return ret_val; + } + + /* Set the media type and TBI compatibility */ + e1000_set_media_type(hw); + + /* Disabling VLAN filtering. */ + e_dbg("Initializing the IEEE VLAN\n"); + if (hw->mac_type < e1000_82545_rev_3) + ew32(VET, 0); + e1000_clear_vfta(hw); + + /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + ew32(RCTL, E1000_RCTL_RST); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Setup the receive address. This involves initializing all of the + * Receive Address Registers (RARs 0 - 15). 
+ */ + e1000_init_rx_addrs(hw); + + /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ + if (hw->mac_type == e1000_82542_rev2_0) { + ew32(RCTL, 0); + E1000_WRITE_FLUSH(); + msleep(1); + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + mta_size = E1000_MC_TBL_SIZE; + for (i = 0; i < mta_size; i++) { + E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); + /* use write flush to prevent Memory Write Block (MWB) from + * occurring when accessing our register space + */ + E1000_WRITE_FLUSH(); + } + + /* Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. Valid only on + * 82542 and 82543 silicon. + */ + if (hw->dma_fairness && hw->mac_type <= e1000_82543) { + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PRIOR); + } + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + /* Workaround for PCI-X problem when BIOS sets MMRBC + * incorrectly. + */ + if (hw->bus_type == e1000_bus_type_pcix && + e1000_pcix_get_mmrbc(hw) > 2048) + e1000_pcix_set_mmrbc(hw, 2048); + break; + } + + /* Call a subroutine to configure the link and setup flow control. */ + ret_val = e1000_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + if (hw->mac_type > e1000_82544) { + ctrl = er32(TXDCTL); + ctrl = + (ctrl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + ew32(TXDCTL, ctrl); + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs(hw); + + if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || + hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { + ctrl_ext = er32(CTRL_EXT); + /* Relaxed ordering must be disabled to avoid a parity + * error crash in a PCI slot. + */ + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + } + + return ret_val; +} + +/** + * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting. + * @hw: Struct containing variables accessed by shared code. + */ +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) +{ + u16 eeprom_data; + s32 ret_val; + + if (hw->media_type != e1000_media_type_internal_serdes) + return E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, + &eeprom_data); + if (ret_val) + return ret_val; + + if (eeprom_data != EEPROM_RESERVED_WORD) { + /* Adjust SERDES output amplitude only. */ + eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link - Configures flow control and link settings. + * @hw: Struct containing variables accessed by shared code + * + * Determines which flow control settings to use. Calls the appropriate media- + * specific link configuration function. Configures the flow control settings. + * Assuming the adapter has a valid link partner, a valid link should be + * established. Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. 
+ */
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+	u32 ctrl_ext;
+	s32 ret_val;
+	u16 eeprom_data;
+
+	/* Read and store word 0x0F of the EEPROM. This word contains bits
+	 * that determine the hardware's default PAUSE (flow control) mode,
+	 * a bit that determines whether the HW defaults to enabling or
+	 * disabling auto-negotiation, and the direction of the
+	 * SW defined pins. If there is no SW over-ride of the flow
+	 * control setting, then the variable hw->fc will
+	 * be initialized based on a value in the EEPROM.
+	 */
+	if (hw->fc == E1000_FC_DEFAULT) {
+		ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+					    1, &eeprom_data);
+		if (ret_val) {
+			e_dbg("EEPROM Read Error\n");
+			return -E1000_ERR_EEPROM;
+		}
+		if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
+			hw->fc = E1000_FC_NONE;
+		else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
+			 EEPROM_WORD0F_ASM_DIR)
+			hw->fc = E1000_FC_TX_PAUSE;
+		else
+			hw->fc = E1000_FC_FULL;
+	}
+
+	/* We want to save off the original Flow Control configuration just
+	 * in case we get disconnected and then reconnected into a different
+	 * hub or switch with different Flow Control capabilities.
+	 */
+	if (hw->mac_type == e1000_82542_rev2_0)
+		hw->fc &= (~E1000_FC_TX_PAUSE);
+
+	if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+		hw->fc &= (~E1000_FC_RX_PAUSE);
+
+	hw->original_fc = hw->fc;
+
+	e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc);
+
+	/* Take the 4 bits from EEPROM word 0x0F that determine the initial
+	 * polarity value for the SW controlled pins, and setup the
+	 * Extended Device Control reg with that info.
+	 * This is needed because one of the SW controlled pins is used for
+	 * signal detection. So this should be done before e1000_setup_pcs_link()
+	 * or e1000_phy_setup() is called.
+	 */
+	if (hw->mac_type == e1000_82543) {
+		ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+					    1, &eeprom_data);
+		if (ret_val) {
+			e_dbg("EEPROM Read Error\n");
+			return -E1000_ERR_EEPROM;
+		}
+		ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
+			    SWDPIO__EXT_SHIFT);
+		ew32(CTRL_EXT, ctrl_ext);
+	}
+
+	/* Call the necessary subroutine to configure the link. */
+	ret_val = (hw->media_type == e1000_media_type_copper) ?
+	    e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw);
+
+	/* Initialize the flow control address, type, and PAUSE timer
+	 * registers to their default values. This is done even if flow
+	 * control is disabled, because it does not hurt anything to
+	 * initialize these registers.
+	 */
+	e_dbg("Initializing the Flow Control address, type and timer regs\n");
+
+	ew32(FCT, FLOW_CONTROL_TYPE);
+	ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+	ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+	ew32(FCTTV, hw->fc_pause_time);
+
+	/* Set the flow control receive threshold registers. Normally,
+	 * these registers will be set to a default threshold that may be
+	 * adjusted later by the driver's runtime code. However, if the
+	 * ability to transmit pause frames is not enabled, then these
+	 * registers will be set to 0.
+	 */
+	if (!(hw->fc & E1000_FC_TX_PAUSE)) {
+		ew32(FCRTL, 0);
+		ew32(FCRTH, 0);
+	} else {
+		/* We need to set up the Receive Threshold high and low water
+		 * marks as well as (optionally) enabling the transmission of
+		 * XON frames.
+ */ + if (hw->fc_send_xon) { + ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); + ew32(FCRTH, hw->fc_high_water); + } else { + ew32(FCRTL, hw->fc_low_water); + ew32(FCRTH, hw->fc_high_water); + } + } + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link - prepare fiber or serdes link + * @hw: Struct containing variables accessed by shared code + * + * Manipulates Physical Coding Sublayer functions in order to configure + * link. Assumes the hardware has been previously reset and the transmitter + * and receiver are not enabled. + */ +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) +{ + u32 ctrl; + u32 status; + u32 txcw = 0; + u32 i; + u32 signal = 0; + s32 ret_val; + + /* On adapters with a MAC newer than 82544, SWDP 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + * If we're on serdes media, adjust the output amplitude to value + * set in the EEPROM. + */ + ctrl = er32(CTRL); + if (hw->media_type == e1000_media_type_fiber) + signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; + + ret_val = e1000_adjust_serdes_amplitude(hw); + if (ret_val) + return ret_val; + + /* Take the link out of reset */ + ctrl &= ~(E1000_CTRL_LRST); + + /* Adjust VCO speed to improve BER performance */ + ret_val = e1000_set_vco_speed(hw); + if (ret_val) + return ret_val; + + e1000_config_collision_dist(hw); + + /* Check for a software override of the flow control settings, and setup + * the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Tranmsit Config Word Register (TXCW) and re-start + * auto-negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, but + * not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we do + * not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + */ + switch (hw->fc) { + case E1000_FC_NONE: + /* Flow ctrl is completely disabled by a software over-ride */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case E1000_FC_RX_PAUSE: + /* Rx Flow control is enabled and Tx Flow control is disabled by + * a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case E1000_FC_TX_PAUSE: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case E1000_FC_FULL: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. 
If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + e_dbg("Auto-negotiation enabled\n"); + + ew32(TXCW, txcw); + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + hw->txcw = txcw; + msleep(1); + + /* If we have a signal (the cable is plugged in) then poll for a + * "Link-Up" indication in the Device Status Register. Time-out if a + * link isn't seen in 500 milliseconds seconds (Auto-negotiation should + * complete in less than 500 milliseconds even if the other end is doing + * it in SW). For internal serdes, we just assume a signal is present, + * then poll. + */ + if (hw->media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { + e_dbg("Looking for Link\n"); + for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { + msleep(10); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == (LINK_UP_TIMEOUT / 10)) { + e_dbg("Never got a valid link from auto-neg!!!\n"); + hw->autoneg_failed = 1; + /* AutoNeg failed to achieve a link, so we'll call + * e1000_check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = e1000_check_for_link(hw); + if (ret_val) { + e_dbg("Error while checking for link\n"); + return ret_val; + } + hw->autoneg_failed = 0; + } else { + hw->autoneg_failed = 0; + e_dbg("Valid Link Found\n"); + } + } else { + e_dbg("No Signal Detected\n"); + } + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series. + * @hw: Struct containing variables accessed by shared code + * + * Commits changes to PHY configuration by calling e1000_phy_reset(). + */ +static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw) +{ + s32 ret_val; + + /* SW reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +static s32 gbe_dhg_phy_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u32 ctrl_aux; + + switch (hw->phy_type) { + case e1000_phy_8211: + ret_val = e1000_copper_link_rtl_setup(hw); + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + case e1000_phy_8201: + /* Set RMII mode */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= E1000_CTL_AUX_RMII; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + + /* Disable the J/K bits required for receive */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= 0x4; + ctrl_aux &= ~0x2; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + ret_val = e1000_copper_link_rtl_setup(hw); + + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + default: + e_dbg("Error Resetting the PHY\n"); + return E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_preconfig - early configuration for copper + * @hw: Struct containing variables accessed by shared code + * + * Make sure we have a valid PHY and change PHY mode before link setup. + */ +static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + ctrl = er32(CTRL); + /* With 82543, we need to force speed and duplex on the MAC equal to + * what the PHY speed and duplex configuration is. In addition, we need + * to perform a hardware reset on the PHY to take it out of reset. 
+ */ + if (hw->mac_type > e1000_82543) { + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + } else { + ctrl |= + (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); + ew32(CTRL, ctrl); + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + } + + /* Make sure we have a valid PHY */ + ret_val = e1000_detect_gig_phy(hw); + if (ret_val) { + e_dbg("Error, did not detect valid phy.\n"); + return ret_val; + } + e_dbg("Phy ID = %x\n", hw->phy_id); + + /* Set PHY to class A mode (if necessary) */ + ret_val = e1000_set_phy_mode(hw); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82545_rev_3) || + (hw->mac_type == e1000_82546_rev_3)) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + phy_data |= 0x00000008; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + } + + if (hw->mac_type <= e1000_82543 || + hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_reset_disable = false; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) +{ + u32 led_ctrl; + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Wait 15ms for MAC to configure PHY from eeprom settings */ + msleep(15); + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + + /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ + if (hw->phy_type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = e1000_set_d3_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D3\n"); + return ret_val; + } + } + + /* Configure mdi-mdix settings */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + hw->dsp_config_state = e1000_dsp_config_disabled; + /* Force MDI for earlier revs of the IGP PHY */ + phy_data &= + ~(IGP01E1000_PSCR_AUTO_MDIX | + IGP01E1000_PSCR_FORCE_MDI_MDIX); + hw->mdix = 1; + + } else { + hw->dsp_config_state = e1000_dsp_config_enabled; + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (hw->mdix) { + case 1: + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + phy_data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + } + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->autoneg) { + e1000_ms_type phy_ms_setting = hw->master_slave; + + if (hw->ffe_config_state == e1000_ffe_config_active) + hw->ffe_config_state = e1000_ffe_config_enabled; + + if (hw->dsp_config_state == e1000_dsp_config_activated) + hw->dsp_config_state = e1000_dsp_config_enabled; + + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
+ */ + if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + /* Set auto Master/Slave resolution process */ + ret_val = + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~CR_1000T_MS_ENABLE; + ret_val = + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (phy_ms_setting) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + default: + break; + } + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (hw->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (hw->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (hw->phy_revision < M88E1011_I_REV_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. 
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((hw->phy_revision == E1000_REVISION_2) && + (hw->phy_id == M88E1111_I_PHY_ID)) { + /* Vidalia Phy, set the downshift counter to 5x */ + phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + } + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_autoneg - setup auto-neg + * @hw: Struct containing variables accessed by shared code + * + * Setup auto-negotiation and flow control advertisements, + * and then perform auto-negotiation. + */ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Perform some bounds checking on the hw->autoneg_advertised + * parameter. If this variable is zero, then set it to the default. + */ + hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (hw->autoneg_advertised == 0) + hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* IFE/RTL8201N PHY only supports 10/100 */ + if (hw->phy_type == e1000_phy_8201) + hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; + + e_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + e_dbg("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + e_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (hw->wait_autoneg_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + e_dbg + ("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->get_link_status = true; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_postconfig - post link setup + * @hw: Struct containing variables accessed by shared code + * + * Config the MAC and the PHY after link is up. + * 1) Set up the MAC to the current PHY speed/duplex + * if we are on 82543. If we + * are on newer silicon, we only need to configure + * collision distance in the Transmit Control Register. + * 2) Set up flow control on the MAC to that established with + * the link partner. + * 3) Config DSP to improve Gigabit link quality for some PHY revisions. 
+ */ +static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) +{ + s32 ret_val; + + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { + e1000_config_collision_dist(hw); + } else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error Configuring Flow Control\n"); + return ret_val; + } + + /* Config DSP to improve Giga link quality */ + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_config_dsp_after_link_change(hw, true); + if (ret_val) { + e_dbg("Error Configuring DSP after link up\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_copper_link - phy/speed/duplex setting + * @hw: Struct containing variables accessed by shared code + * + * Detects which PHY is present and sets up the speed and duplex + */ +static s32 e1000_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + /* Check if it is a valid PHY and set PHY mode if necessary. */ + ret_val = e1000_copper_link_preconfig(hw); + if (ret_val) + return ret_val; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_copper_link_igp_setup(hw); + if (ret_val) + return ret_val; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_copper_link_mgp_setup(hw); + if (ret_val) + return ret_val; + } else { + ret_val = gbe_dhg_phy_setup(hw); + if (ret_val) { + e_dbg("gbe_dhg_phy_setup failed!\n"); + return ret_val; + } + } + + if (hw->autoneg) { + /* Setup autoneg and flow control advertisement + * and perform autonegotiation + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H,or 100F + * depending on value from forced_speed_duplex. + */ + e_dbg("Forcing speed and duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + for (i = 0; i < 10; i++) { + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + /* Config the MAC and PHY after link is up */ + ret_val = e1000_copper_link_postconfig(hw); + if (ret_val) + return ret_val; + + e_dbg("Valid link established!!!\n"); + return E1000_SUCCESS; + } + udelay(10); + } + + e_dbg("Unable to establish link!!!\n"); + return E1000_SUCCESS; +} + +/** + * e1000_phy_setup_autoneg - phy settings + * @hw: Struct containing variables accessed by shared code + * + * Configures PHY autoneg and flow control advertisement settings + */ +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + else if (hw->phy_type == e1000_phy_8201) + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. 
First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { + e_dbg + ("Advertise 1000mb Half duplex requested, request denied!\n"); + } + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start + * auto-negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc) { + case E1000_FC_NONE: /* 0 */ + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_RX_PAUSE: /* 1 */ + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + */ + /* Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_TX_PAUSE: /* 2 */ + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case E1000_FC_FULL: /* 3 */ + /* Flow control (both RX and TX) is enabled by a software + * over-ride. 
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (hw->phy_type == e1000_phy_8201) { + mii_1000t_ctrl_reg = 0; + } else { + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex - force link settings + * @hw: Struct containing variables accessed by shared code + * + * Force PHY speed and duplex settings to hw->forced_speed_duplex + */ +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 mii_ctrl_reg; + u16 mii_status_reg; + u16 phy_data; + u16 i; + + /* Turn off Flow control if we are forcing speed and duplex. */ + hw->fc = E1000_FC_NONE; + + e_dbg("hw->fc = %d\n", hw->fc); + + /* Read the Device Control Register. */ + ctrl = er32(CTRL); + + /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(DEVICE_SPEED_MASK); + + /* Clear the Auto Speed Detect Enable bit. */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Read the MII Control Register. */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg); + if (ret_val) + return ret_val; + + /* We need to disable autoneg in order to force link and duplex. */ + + mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN; + + /* Are we forcing Full or Half Duplex? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_10_full) { + /* We want to force full duplex so we SET the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl |= E1000_CTRL_FD; + mii_ctrl_reg |= MII_CR_FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + /* We want to force half duplex so we CLEAR the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl &= ~E1000_CTRL_FD; + mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; + e_dbg("Half Duplex\n"); + } + + /* Are we forcing 100Mbps??? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_100_half) { + /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */ + ctrl |= E1000_CTRL_SPD_100; + mii_ctrl_reg |= MII_CR_SPEED_100; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + e_dbg("Forcing 100mb "); + } else { + /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + mii_ctrl_reg |= MII_CR_SPEED_10; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + e_dbg("Forcing 10mb "); + } + + e1000_config_collision_dist(hw); + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + + if (hw->phy_type == e1000_phy_m88) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires + * MDI forced whenever speed are duplex are forced. 
+ */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %x\n", phy_data); + + /* Need to reset the PHY or these changes will be ignored */ + mii_ctrl_reg |= MII_CR_RESET; + + /* Disable MDI-X support for 10/100 */ + } else { + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed or duplex are forced. + */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + /* Write back the modified PHY MII control register. */ + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg); + if (ret_val) + return ret_val; + + udelay(1); + + /* The wait_autoneg_complete flag may be a little misleading here. + * Since we are forcing speed and duplex, Auto-Neg is not enabled. + * But we do want to delay for a period while forcing only so we + * don't generate false No Link messages. So we will wait here + * only if the user has set wait_autoneg_complete to 1, which is + * the default. + */ + if (hw->wait_autoneg_complete) { + /* We will wait for autoneg to complete. */ + e_dbg("Waiting for forced speed/duplex link.\n"); + mii_status_reg = 0; + + /* Wait for autoneg to complete or 4.5 seconds to expire */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { + /* We didn't get link. Reset the DSP and wait again + * for link. + */ + ret_val = e1000_phy_reset_dsp(hw); + if (ret_val) { + e_dbg("Error Resetting PHY DSP\n"); + return ret_val; + } + } + /* This loop will early-out if the link condition has been + * met + */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + } + } + + if (hw->phy_type == e1000_phy_m88) { + /* Because we reset the PHY above, we need to re-force TX_CLK in + * the Extended PHY Specific Control Register to 25MHz clock. + * This value defaults back to a 2.5MHz clock when the PHY is + * reset. + */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + + /* In addition, because of the s/w reset above, we need to + * enable CRS on Tx. This must be set for both full and half + * duplex operation. 
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ret_val = e1000_polarity_reversal_workaround(hw); + if (ret_val) + return ret_val; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_config_collision_dist - set collision distance register + * @hw: Struct containing variables accessed by shared code + * + * Sets the collision distance in the Transmit Control register. + * Link should have been established previously. Reads the speed and duplex + * information from the Device Status register. + */ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl, coll_dist; + + if (hw->mac_type < e1000_82543) + coll_dist = E1000_COLLISION_DISTANCE_82542; + else + coll_dist = E1000_COLLISION_DISTANCE; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= coll_dist << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_config_mac_to_phy - sync phy and mac settings + * @hw: Struct containing variables accessed by shared code + * + * Sets MAC speed and duplex settings to reflect the those in the PHY + * The contents of the PHY register containing the needed information need to + * be passed in. + */ +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + /* 82544 or newer MAC, Auto Speed Detection takes care of + * MAC speed/duplex configuration. + */ + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) + return E1000_SUCCESS; + + /* Read the Device Control Register and set the bits to Force Speed + * and Duplex. + */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); + + switch (hw->phy_type) { + case e1000_phy_8201: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & RTL_PHY_CTRL_FD) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + if (phy_data & RTL_PHY_CTRL_SPD_100) + ctrl |= E1000_CTRL_SPD_100; + else + ctrl |= E1000_CTRL_SPD_10; + + e1000_config_collision_dist(hw); + break; + default: + /* Set up duplex in the Device Control and Transmit Control + * registers depending on negotiated values. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & M88E1000_PSSR_DPLX) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + e1000_config_collision_dist(hw); + + /* Set up speed in the Device Control register depending on + * negotiated values. + */ + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + ctrl |= E1000_CTRL_SPD_1000; + else if ((phy_data & M88E1000_PSSR_SPEED) == + M88E1000_PSSR_100MBS) + ctrl |= E1000_CTRL_SPD_100; + } + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_force_mac_fc - force flow control settings + * @hw: Struct containing variables accessed by shared code + * + * Forces the MAC's flow control settings. + * Sets the TFCE and RFCE bits in the device control register to reflect + * the adapter settings. 
TFCE and RFCE need to be explicitly set by
+ * software when a Copper PHY is used because autonegotiation is managed
+ * by the PHY rather than the MAC. Software must also configure these
+ * bits when link is forced on a fiber connection.
+ */
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	/* Get the current configuration of the Device Control Register */
+	ctrl = er32(CTRL);
+
+	/* Because we didn't get link via the internal auto-negotiation
+	 * mechanism (we either forced link or we got link via PHY
+	 * auto-neg), we have to manually enable/disable transmit and
+	 * receive flow control.
+	 *
+	 * The "Case" statement below enables/disables flow control
+	 * according to the "hw->fc" parameter.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause
+	 *          frames but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not receive pause frames).
+	 *      3:  Both Rx and TX flow control (symmetric) are enabled.
+	 *  other:  No other values should be possible at this point.
+	 */
+
+	switch (hw->fc) {
+	case E1000_FC_NONE:
+		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+		break;
+	case E1000_FC_RX_PAUSE:
+		ctrl &= (~E1000_CTRL_TFCE);
+		ctrl |= E1000_CTRL_RFCE;
+		break;
+	case E1000_FC_TX_PAUSE:
+		ctrl &= (~E1000_CTRL_RFCE);
+		ctrl |= E1000_CTRL_TFCE;
+		break;
+	case E1000_FC_FULL:
+		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+		break;
+	default:
+		e_dbg("Flow control param set incorrectly\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	/* Disable TX Flow Control for 82542 (rev 2.0) */
+	if (hw->mac_type == e1000_82542_rev2_0)
+		ctrl &= (~E1000_CTRL_TFCE);
+
+	ew32(CTRL, ctrl);
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_config_fc_after_link_up - configure flow control after autoneg
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Configures flow control settings after link is established.
+ * Should be called immediately after a valid link has been established.
+ * Forces MAC flow control settings if link was forced. When in MII/GMII mode
+ * and autonegotiation is enabled, the MAC flow control settings will be set
+ * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
+ * and RFCE bits will be automatically set to the negotiated flow control mode.
+ */
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 mii_status_reg;
+	u16 mii_nway_adv_reg;
+	u16 mii_nway_lp_ability_reg;
+	u16 speed;
+	u16 duplex;
+
+	/* Check for the case where we have fiber media and auto-neg failed
+	 * so we had to force link. In this case, we need to force the
+	 * configuration of the MAC to match the "fc" parameter.
+	 */
+	if (((hw->media_type == e1000_media_type_fiber) &&
+	     (hw->autoneg_failed)) ||
+	    ((hw->media_type == e1000_media_type_internal_serdes) &&
+	     (hw->autoneg_failed)) ||
+	    ((hw->media_type == e1000_media_type_copper) &&
+	     (!hw->autoneg))) {
+		ret_val = e1000_force_mac_fc(hw);
+		if (ret_val) {
+			e_dbg("Error forcing flow control settings\n");
+			return ret_val;
+		}
+	}
+
+	/* Check for the case where we have copper media and auto-neg is
+	 * enabled. In this case, we need to check and see if Auto-Neg
+	 * has completed, and if so, how the PHY and link partner have
+	 * flow control configured.
+	 */
+	if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+		/* Read the MII Status Register and check to see if AutoNeg
+		 * has completed.
We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement Register + * (Address 4) and the Auto_Negotiation Base Page + * Ability Register (Address 5) to determine how flow + * control was negotiated. + */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement + * Register (Address 4) and two bits in the Auto + * Negotiation Base Page Ability Register (Address 5) + * determine flow control for both the PHY and the link + * partner. The following table, taken out of the IEEE + * 802.3ab/D6.0 dated March 25, 1999, describes these + * PAUSE resolution bits and how flow control is + * determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|------------------ + * 0 | 0 | DC | DC | E1000_FC_NONE + * 0 | 1 | 0 | DC | E1000_FC_NONE + * 0 | 1 | 1 | 0 | E1000_FC_NONE + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * 1 | 0 | 0 | DC | E1000_FC_NONE + * 1 | DC | 1 | DC | E1000_FC_FULL + * 1 | 1 | 0 | 0 | E1000_FC_NONE + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + /* Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | DC | 1 | DC | E1000_FC_FULL + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected Rx + * ONLY of pause frames. In this case, we had + * to advertise FULL flow control because we + * could not advertise Rx ONLY. Hence, we must + * now check to see if we need to turn OFF the + * TRANSMISSION of PAUSE frames. + */ + if (hw->original_fc == E1000_FC_FULL) { + hw->fc = E1000_FC_FULL; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = E1000_FC_TX_PAUSE; + e_dbg + ("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should + * be disabled. However, we want to consider that we + * could be connected to a legacy switch that doesn't + * advertise desired flow control, but can be forced on + * the link partner. So if we advertised no flow + * control, that is what we will resolve to. If we + * advertised some kind of receive capability (Rx Pause + * Only or Full Flow Control) and the link partner + * advertised none, we will configure ourselves to + * enable Rx Flow Control only. We can do this safely + * for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, + * no harm done since we won't be receiving any PAUSE + * frames anyway. If the intent on the link partner was + * to have flow control enabled, then by us enabling Rx + * only, we can at least receive pause frames and + * process them. This is a good idea because in most + * cases, since we are predominantly a server NIC, more + * times than not we will be asked to delay transmission + * of packets than asking our link partner to pause + * transmission of frames. + */ + else if ((hw->original_fc == E1000_FC_NONE || + hw->original_fc == E1000_FC_TX_PAUSE) || + hw->fc_strict_ieee) { + hw->fc = E1000_FC_NONE; + e_dbg("Flow Control = NONE.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc = E1000_FC_NONE; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg + ("Error forcing flow control settings\n"); + return ret_val; + } + } else { + e_dbg + ("Copper PHY and Auto Neg has not completed.\n"); + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + */ +static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. 
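 + * (On the first pass this routine only records the failure in hw->autoneg_failed; link is actually forced on the next invocation, as the code below shows.)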
+ */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (hw->autoneg_failed == 0) { + hw->autoneg_failed = 1; + goto out; + } + e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, hw->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + hw->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - forced.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - autoneg " + "completed successfully.\n"); + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - invalid" + "codewords detected in autoneg.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - no sync.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - autoneg failed\n"); + } + } + + out: + return ret_val; +} + +/** + * e1000_check_for_link + * @hw: Struct containing variables accessed by shared code + * + * Checks to see if the link status of the hardware has changed. + * Called by any function that needs to check the link status of the adapter. + */ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + u32 status; + u32 rctl; + u32 icr; + s32 ret_val; + u16 phy_data; + + er32(CTRL); + status = er32(STATUS); + + /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + */ + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) { + er32(RXCW); + + if (hw->media_type == e1000_media_type_fiber) { + if (status & E1000_STATUS_LU) + hw->get_link_status = false; + } + } + + /* If we have a copper PHY then we only want to go out to the PHY + * registers to see if Auto-Neg has completed and/or if our link + * status has changed. The get_link_status flag will be set if we + * receive a Link Status Change interrupt or we have Rx Sequence + * Errors. 
+ */ + if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + * Read the register twice since the link bit is sticky. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + hw->get_link_status = false; + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift(hw); + + /* If we are on 82544 or 82543 silicon and speed/duplex + * are forced to 10H or 10F, then we will implement the + * polarity reversal workaround. We disable interrupts + * first, and upon returning, place the devices + * interrupt state to its previous value except for the + * link status change interrupt which will + * happen due to the execution of this workaround. + */ + + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ew32(IMC, 0xffffffff); + ret_val = + e1000_polarity_reversal_workaround(hw); + icr = er32(ICR); + ew32(ICS, (icr & ~E1000_ICS_LSC)); + ew32(IMS, IMS_ENABLE_MASK); + } + + } else { + /* No link detected */ + e1000_config_dsp_after_link_change(hw, false); + return 0; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!hw->autoneg) + return -E1000_ERR_CONFIG; + + /* optimize the dsp settings for the igp phy */ + e1000_config_dsp_after_link_change(hw, true); + + /* We have a M88E1000 PHY and Auto-Neg is enabled. If we + * have Si on board that is 82544 or newer, Auto + * Speed Detection takes care of MAC speed/duplex + * configuration. So we only need to configure Collision + * Distance in the MAC. Otherwise, we need to force + * speed/duplex on the MAC to the current PHY speed/duplex + * settings. + */ + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_ce4100)) + e1000_config_collision_dist(hw); + else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg + ("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control settings + * because we may have had to re-autoneg with a different link + * partner. + */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + + /* At this point we know that we are on copper and we have + * auto-negotiated link. These are conditions for checking the + * link partner capability register. We use the link speed to + * determine if TBI compatibility needs to be turned on or off. + * If the link is not at gigabit speed, then TBI compatibility + * is not needed. If we are at gigabit speed, we turn on TBI + * compatibility. + */ + if (hw->tbi_compatibility_en) { + u16 speed, duplex; + + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + if (speed != SPEED_1000) { + /* If link speed is not set to gigabit speed, we + * do not need to enable TBI compatibility. 
 + */ + if (hw->tbi_compatibility_on) { + /* If we were previously in the mode, + * turn it off. + */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_SBP; + ew32(RCTL, rctl); + hw->tbi_compatibility_on = false; + } + } else { + /* If TBI compatibility was previously off, + * turn it on. For compatibility with a TBI link + * partner, we will store bad packets. Some + * frames have an additional byte on the end and + * will look like CRC errors to the hardware. + */ + if (!hw->tbi_compatibility_on) { + hw->tbi_compatibility_on = true; + rctl = er32(RCTL); + rctl |= E1000_RCTL_SBP; + ew32(RCTL, rctl); + } + } + } + } + + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) + e1000_check_for_serdes_link_generic(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex + * @hw: Struct containing variables accessed by shared code + * @speed: Speed of the connection + * @duplex: Duplex setting of the connection + * + * Detects the current speed and duplex settings of the hardware. + */ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + u32 status; + s32 ret_val; + u16 phy_data; + + if (hw->mac_type >= e1000_82543) { + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + e_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + e_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + e_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + e_dbg(" Half Duplex\n"); + } + } else { + e_dbg("1000 Mbs, Full Duplex\n"); + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + } + + /* IGP01 PHY may advertise full duplex operation after speed downgrade + * even if it is operating at half duplex. Here we set the duplex + * settings to match the duplex in the link partner's capabilities. + */ + if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & NWAY_ER_LP_NWAY_CAPS)) + *duplex = HALF_DUPLEX; + else { + ret_val = + e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); + if (ret_val) + return ret_val; + if ((*speed == SPEED_100 && + !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) || + (*speed == SPEED_10 && + !(phy_data & NWAY_LPAR_10T_FD_CAPS))) + *duplex = HALF_DUPLEX; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_wait_autoneg + * @hw: Struct containing variables accessed by shared code + * + * Blocks until autoneg completes or times out (~4.5 seconds) + */ +static s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + e_dbg("Waiting for Auto-Neg to complete.\n"); + + /* We will wait for autoneg to complete or 4.5 seconds to expire. */ + for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set.
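 + * (The loop below polls PHY_AUTO_NEG_TIME times, sleeping 100 ms per iteration, which gives the ~4.5 second timeout noted above.)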
+ */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + if (phy_data & MII_SR_AUTONEG_COMPLETE) + return E1000_SUCCESS; + + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_raise_mdi_clk - Raises the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Raise the clock input to the Management Data Clock (by setting the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_lower_mdi_clk - Lowers the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Lower the clock input to the Management Data Clock (by clearing the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY + * @hw: Struct containing variables accessed by shared code + * @data: Data to send out to the PHY + * @count: Number of bits to shift out + * + * Bits are shifted out in MSB to LSB order. + */ +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) +{ + u32 ctrl; + u32 mask; + + /* We need to shift "count" number of bits out to the PHY. So, the value + * in the "data" parameter will be shifted out to the PHY one bit at a + * time. In order to do this, "data" must be broken down into bits. + */ + mask = 0x01; + mask <<= (count - 1); + + ctrl = er32(CTRL); + + /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ + ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); + + while (mask) { + /* A "1" is shifted out to the PHY by setting the MDIO bit to + * "1" and then raising and lowering the Management Data Clock. + * A "0" is shifted out to the PHY by setting the MDIO bit to + * "0" and then raising and lowering the clock. + */ + if (data & mask) + ctrl |= E1000_CTRL_MDIO; + else + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + udelay(10); + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + mask = mask >> 1; + } +} + +/** + * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY + * @hw: Struct containing variables accessed by shared code + * + * Bits are shifted in in MSB to LSB order. + */ +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) +{ + u32 ctrl; + u16 data = 0; + u8 i; + + /* In order to read a register from the PHY, we need to shift in a total + * of 18 bits from the PHY. The first two bit (turnaround) times are + * used to avoid contention on the MDIO pin when a read operation is + * performed. These two bits are ignored by us and thrown away. Bits are + * "shifted in" by raising the input to the Management Data Clock + * (setting the MDC bit), and then reading the value of the MDIO bit. + */ + ctrl = er32(CTRL); + + /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as + * input. + */ + ctrl &= ~E1000_CTRL_MDIO_DIR; + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + /* Raise and Lower the clock before reading in the data. This accounts + * for the turnaround bits. 
The first clock occurred when we clocked out + * the last bit of the Register Address. + */ + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + for (data = 0, i = 0; i < 16; i++) { + data = data << 1; + e1000_raise_mdi_clk(hw, &ctrl); + ctrl = er32(CTRL); + /* Check to see if we shifted in a "1". */ + if (ctrl & E1000_CTRL_MDIO) + data |= 1; + e1000_lower_mdi_clk(hw, &ctrl); + } + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + return data; +} + +/** + * e1000_read_phy_reg - read a phy register + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to read + * @phy_data: pointer to the value on the PHY register + * + * Reads the value from a PHY register, if the value is on a specific non zero + * page, sets the page first. + */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16) reg_addr); + if (ret_val) + goto out; + } + + ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); +out: + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, and register address in the MDI + * Control register. The MAC will take care of interfacing with + * the PHY to retrieve the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_READ) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + + mdic = readl(E1000_MDIO_STS); + if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) { + e_dbg("MDI Read Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16)mdic; + } else { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16)mdic; + } + } else { + /* We must first send a preamble through the MDIO pin to signal + * the beginning of an MII instruction. This is done by sending + * 32 consecutive "1" bits. + */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the next few fields that are required for a read + * operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine five different times. 
The + * format of a MII read instruction consists of a shift out of + * 14 bits and is defined as follows: + * + * followed by a shift in of 18 bits. This first two bits + * shifted in are TurnAround bits used to avoid contention on + * the MDIO pin when a READ operation is performed. These two + * bits are thrown away followed by a shift in of 16 bits which + * contains the desired data. + */ + mdic = ((reg_addr) | (phy_addr << 5) | + (PHY_OP_READ << 10) | (PHY_SOF << 12)); + + e1000_shift_out_mdi_bits(hw, mdic, 14); + + /* Now that we've shifted out the read command to the MII, we + * need to "shift in" the 16-bit value (18 total bits) of the + * requested PHY register address. + */ + *phy_data = e1000_shift_in_mdi_bits(hw); + } + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg - write a phy register + * + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to write + * @phy_data: data to write to the PHY + * + * Writes a value to a PHY register + */ +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16)reg_addr); + if (ret_val) { + spin_unlock_irqrestore(&e1000_phy_lock, flags); + return ret_val; + } + } + + ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, register address, and data + * intended for the PHY register in the MDI Control register. + * The MAC will take care of interfacing with the PHY to send + * the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = (((u32)phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_WRITE) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 640; i++) { + udelay(5); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } else { + mdic = (((u32)phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 641; i++) { + udelay(5); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } + } else { + /* We'll need to use the SW defined pins to shift the write + * command out to the PHY. We first send a preamble to the PHY + * to signal the beginning of the MII instruction. This is done + * by sending 32 consecutive "1" bits. 
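 + * (PHY_PREAMBLE is the all-ones pattern and PHY_PREAMBLE_SIZE is 32, so the e1000_shift_out_mdi_bits() call below clocks out exactly those 32 bits.)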
+ */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the remaining required fields that will indicate + * a write operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine for each field in the + * command. The format of a MII write instruction is as follows: + * . + */ + mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | + (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); + mdic <<= 16; + mdic |= (u32)phy_data; + + e1000_shift_out_mdi_bits(hw, mdic, 32); + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - reset the phy, hardware style + * @hw: Struct containing variables accessed by shared code + * + * Returns the PHY to the power-on reset state + */ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext; + u32 led_ctrl; + + e_dbg("Resetting Phy...\n"); + + if (hw->mac_type > e1000_82543) { + /* Read the device control register and assert the + * E1000_CTRL_PHY_RST bit. Then, take it out of reset. + * For e1000 hardware, we delay for 10ms between the assert + * and de-assert. + */ + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); + E1000_WRITE_FLUSH(); + + msleep(10); + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + } else { + /* Read the Extended Device Control Register, assert the + * PHY_RESET_DIR bit to put the PHY into reset. Then, take it + * out of reset. + */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; + ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + msleep(10); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + } + udelay(150); + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Wait for FW to finish PHY configuration. */ + return e1000_get_phy_cfg_done(hw); +} + +/** + * e1000_phy_reset - reset the phy to commit settings + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY + * Sets bit 15 of the MII Control register + */ +s32 e1000_phy_reset(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + switch (hw->phy_type) { + case e1000_phy_igp: + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + break; + default: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= MII_CR_RESET; + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + break; + } + + if (hw->phy_type == e1000_phy_igp) + e1000_phy_init_script(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_detect_gig_phy - check the phy type + * @hw: Struct containing variables accessed by shared code + * + * Probes the expected PHY address for known PHY IDs + */ +static s32 e1000_detect_gig_phy(struct e1000_hw *hw) +{ + s32 phy_init_status, ret_val; + u16 phy_id_high, phy_id_low; + bool match = false; + + if (hw->phy_id != 0) + return E1000_SUCCESS; + + /* Read the PHY ID Registers to identify which PHY is onboard. 
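 + * (PHY_ID1 supplies the upper 16 bits of the identifier; PHY_ID2 supplies the lower bits plus the revision, which is split off via PHY_REVISION_MASK below.)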
*/ + ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); + if (ret_val) + return ret_val; + + hw->phy_id = (u32)(phy_id_high << 16); + udelay(20); + ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); + if (ret_val) + return ret_val; + + hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK); + hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->phy_id == M88E1000_E_PHY_ID) + match = true; + break; + case e1000_82544: + if (hw->phy_id == M88E1000_I_PHY_ID) + match = true; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + if (hw->phy_id == M88E1011_I_PHY_ID) + match = true; + break; + case e1000_ce4100: + if ((hw->phy_id == RTL8211B_PHY_ID) || + (hw->phy_id == RTL8201N_PHY_ID) || + (hw->phy_id == M88E1118_E_PHY_ID)) + match = true; + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (hw->phy_id == IGP01E1000_I_PHY_ID) + match = true; + break; + default: + e_dbg("Invalid MAC type %d\n", hw->mac_type); + return -E1000_ERR_CONFIG; + } + phy_init_status = e1000_set_phy_type(hw); + + if ((match) && (phy_init_status == E1000_SUCCESS)) { + e_dbg("PHY ID 0x%X detected\n", hw->phy_id); + return E1000_SUCCESS; + } + e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id); + return -E1000_ERR_PHY; +} + +/** + * e1000_phy_reset_dsp - reset DSP + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY's DSP + */ +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + do { + ret_val = e1000_write_phy_reg(hw, 29, 0x001d); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x0000); + if (ret_val) + break; + ret_val = E1000_SUCCESS; + } while (0); + + return ret_val; +} + +/** + * e1000_phy_igp_get_info - get igp specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for igp PHY only. + */ +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data, min_length, max_length, average; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + /* IGP01E1000 does not need to support it. */ + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; + + /* IGP01E1000 always correct polarity reversal */ + phy_info->polarity_correction = e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >> + IGP01E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Local/Remote Receiver Information are only valid @ 1000 + * Mbps + */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + /* Get cable length */ + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + /* Translate to old method */ + average = (max_length + min_length) / 2; + + if (average <= e1000_igp_cable_length_50) + phy_info->cable_length = e1000_cable_length_50; + else if (average <= e1000_igp_cable_length_80) + phy_info->cable_length = e1000_cable_length_50_80; + else if (average <= e1000_igp_cable_length_110) + phy_info->cable_length = e1000_cable_length_80_110; + else if (average <= e1000_igp_cable_length_140) + phy_info->cable_length = e1000_cable_length_110_140; + else + phy_info->cable_length = e1000_cable_length_140; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_m88_get_info - get m88 specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for m88 PHY only. + */ +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_info->extended_10bt_distance = + ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> + M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ? + e1000_10bt_ext_dist_enable_lower : + e1000_10bt_ext_dist_enable_normal; + + phy_info->polarity_correction = + ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> + M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ? + e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >> + M88E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + /* Cable Length Estimation and Local/Remote Receiver Information + * are only valid at 1000 Mbps. + */ + phy_info->cable_length = + (e1000_cable_length) ((phy_data & + M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_get_info - request phy info + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers + */ +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + + phy_info->cable_length = e1000_cable_length_undefined; + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; + phy_info->cable_polarity = e1000_rev_polarity_undefined; + phy_info->downshift = e1000_downshift_undefined; + phy_info->polarity_correction = e1000_polarity_reversal_undefined; + phy_info->mdix_mode = e1000_auto_x_mode_undefined; + phy_info->local_rx = e1000_1000t_rx_status_undefined; + phy_info->remote_rx = e1000_1000t_rx_status_undefined; + + if (hw->media_type != e1000_media_type_copper) { + e_dbg("PHY info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { + e_dbg("PHY info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + if (hw->phy_type == e1000_phy_igp) + return e1000_phy_igp_get_info(hw, phy_info); + else if ((hw->phy_type == e1000_phy_8211) || + (hw->phy_type == e1000_phy_8201)) + return E1000_SUCCESS; + else + return e1000_phy_m88_get_info(hw, phy_info); +} + +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { + e_dbg("Invalid MDI setting detected\n"); + hw->mdix = 1; + return -E1000_ERR_CONFIG; + } + return E1000_SUCCESS; +} + +/** + * e1000_init_eeprom_params - initialize sw eeprom vars + * @hw: Struct containing variables accessed by shared code + * + * Sets up eeprom variables in the hw struct. Must be called after mac_type + * is configured. 
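 + * The EEPROM type, word size and access timings below are selected from the MAC type; for SPI parts the word size is then refined from the size field read back from the EEPROM itself.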
+ */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd = er32(EECD); + s32 ret_val = E1000_SUCCESS; + u16 eeprom_size; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + eeprom->type = e1000_eeprom_microwire; + eeprom->word_size = 64; + eeprom->opcode_bits = 3; + eeprom->address_bits = 6; + eeprom->delay_usec = 50; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_SIZE) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (eecd & E1000_EECD_TYPE) { + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + } else { + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + } + break; + default: + break; + } + + if (eeprom->type == e1000_eeprom_spi) { + /* eeprom_size will be an enum [0..8] that maps to eeprom sizes + * 128B to 32KB (incremented by powers of 2). + */ + /* Set to default value for initial eeprom read. */ + eeprom->word_size = 64; + ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); + if (ret_val) + return ret_val; + eeprom_size = + (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; + /* 256B eeprom size was not supported in earlier hardware, so we + * bump eeprom_size up one to ensure that "1" (which maps to + * 256B) is never the result used in the shifting logic below. + */ + if (eeprom_size) + eeprom_size++; + + eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT); + } + return ret_val; +} + +/** + * e1000_raise_ee_clk - Raises the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Raise the clock input to the EEPROM (by setting the SK bit), and then + * wait microseconds. + */ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_lower_ee_clk - Lowers the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Lower the clock input to the EEPROM (by clearing the SK bit), and + * then wait 50 microseconds. + */ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM. 
+ * @hw: Struct containing variables accessed by shared code + * @data: data to send to the EEPROM + * @count: number of bits to shift out + */ +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u32 mask; + + /* We need to shift "count" bits out to the EEPROM. So, value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + */ + mask = 0x01 << (count - 1); + eecd = er32(EECD); + if (eeprom->type == e1000_eeprom_microwire) + eecd &= ~E1000_EECD_DO; + else if (eeprom->type == e1000_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + /* A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK bit + * controls the clock input to the EEPROM). A "0" is shifted + * out to the EEPROM by setting "DI" to "0" and then raising and + * then lowering the clock. + */ + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(eeprom->delay_usec); + + e1000_raise_ee_clk(hw, &eecd); + e1000_lower_ee_clk(hw, &eecd); + + mask = mask >> 1; + + } while (mask); + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM + * @hw: Struct containing variables accessed by shared code + * @count: number of bits to shift in + */ +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + /* In order to read a register from the EEPROM, we need to shift 'count' + * bits in from the EEPROM. Bits are "shifted in" by raising the clock + * input to the EEPROM (setting the SK bit), and then reading the value + * of the "DO" bit. During this "shifting in" process the "DI" bit + * should always be clear. + */ + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data = data << 1; + e1000_raise_ee_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DI); + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_ee_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_acquire_eeprom - Prepares EEPROM for access + * @hw: Struct containing variables accessed by shared code + * + * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This + * function should be called before issuing a command to the EEPROM. 
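 + * On MACs newer than the 82544 it also requests, and waits for, the hardware EEPROM access grant (EECD REQ/GNT) before driving the interface.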
+ */ +static s32 e1000_acquire_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd, i = 0; + + eecd = er32(EECD); + + /* Request EEPROM Access */ + if (hw->mac_type > e1000_82544) { + eecd |= E1000_EECD_REQ; + ew32(EECD, eecd); + eecd = er32(EECD); + while ((!(eecd & E1000_EECD_GNT)) && + (i < E1000_EEPROM_GRANT_ATTEMPTS)) { + i++; + udelay(5); + eecd = er32(EECD); + } + if (!(eecd & E1000_EECD_GNT)) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire EEPROM grant\n"); + return -E1000_ERR_EEPROM; + } + } + + /* Setup EEPROM for Read/Write */ + + if (eeprom->type == e1000_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); + ew32(EECD, eecd); + + /* Set CS */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(1); + } + + return E1000_SUCCESS; +} + +/** + * e1000_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_standby_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + + eecd = er32(EECD); + + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock high */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock low */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } +} + +/** + * e1000_release_eeprom - drop chip select + * @hw: Struct containing variables accessed by shared code + * + * Terminates a command by inverting the EEPROM's chip select pin + */ +static void e1000_release_eeprom(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + + if (hw->eeprom.type == e1000_eeprom_spi) { + eecd |= E1000_EECD_CS; /* Pull CS high */ + eecd &= ~E1000_EECD_SK; /* Lower SCK */ + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(hw->eeprom.delay_usec); + } else if (hw->eeprom.type == e1000_eeprom_microwire) { + /* cleanup eeprom */ + + /* CS on Microwire is active-high */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); + + ew32(EECD, eecd); + + /* Rising edge of clock */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + + /* Falling edge of clock */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + } + + /* Stop requesting EEPROM access */ + if (hw->mac_type > e1000_82544) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + } +} + +/** + * e1000_spi_eeprom_ready - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u8 spi_stat_reg; + + /* Read "Status Register" repeatedly until the LSB is cleared. 
The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + retry_count = 0; + do { + e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, + hw->eeprom.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8); + if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) + break; + + udelay(5); + retry_count += 5; + + e1000_standby_eeprom(hw); + } while (retry_count < EEPROM_MAX_RETRY_SPI); + + /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and + * only 0-5mSec on 5V devices) + */ + if (retry_count >= EEPROM_MAX_RETRY_SPI) { + e_dbg("SPI EEPROM Status error\n"); + return -E1000_ERR_EEPROM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_eeprom - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * @words: number of words to read + */ +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + + mutex_lock(&e1000_eeprom_lock); + ret = e1000_do_read_eeprom(hw, offset, words, data); + mutex_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 i = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. + */ + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { + e_dbg("\"words\" parameter out of bounds. Words = %d," + "size = %d\n", offset, eeprom->word_size); + return -E1000_ERR_EEPROM; + } + + /* EEPROM's that don't use EERD to read require us to bit-bang the SPI + * directly. In this case, we need to acquire the EEPROM so that + * FW or other port software does not interrupt. + */ + /* Prepare the EEPROM for bit-bang reading */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have + * acquired the EEPROM at this point, so any returns should release it + */ + if (eeprom->type == e1000_eeprom_spi) { + u16 word_in; + u8 read_opcode = EEPROM_READ_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) { + e1000_release_eeprom(hw); + return -E1000_ERR_EEPROM; + } + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + read_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16)(offset * 2), + eeprom->address_bits); + + /* Read the data. The address of the eeprom internally + * increments with each byte (spi) being read, saving on the + * overhead of eeprom setup and tear-down. The address counter + * will roll over if reading beyond the size of the eeprom, thus + * allowing the entire memory to be read starting from any + * offset. 
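 + * Each 16-bit word read back is byte-swapped below before being stored in the caller's buffer.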
+ */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_ee_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + } else if (eeprom->type == e1000_eeprom_microwire) { + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, + EEPROM_READ_OPCODE_MICROWIRE, + eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16)(offset + i), + eeprom->address_bits); + + /* Read the data. For microwire, each word requires the + * overhead of eeprom setup and tear-down. + */ + data[i] = e1000_shift_in_ee_bits(hw, 16); + e1000_standby_eeprom(hw); + cond_resched(); + } + } + + /* End this read operation */ + e1000_release_eeprom(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum + * @hw: Struct containing variables accessed by shared code + * + * Reads the first 64 16 bit words of the EEPROM and sums the values read. + * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is + * valid. + */ +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + +#ifdef CONFIG_PARISC + /* This is a signature and not a checksum on HP c8000 */ + if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) + return E1000_SUCCESS; + +#endif + if (checksum == (u16)EEPROM_SUM) + return E1000_SUCCESS; + else { + e_dbg("EEPROM Checksum Invalid\n"); + return -E1000_ERR_EEPROM; + } +} + +/** + * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum + * @hw: Struct containing variables accessed by shared code + * + * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. + * Writes the difference to word offset 63 of the EEPROM. + */ +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + checksum = (u16)EEPROM_SUM - checksum; + if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { + e_dbg("EEPROM Write Error\n"); + return -E1000_ERR_EEPROM; + } + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom - write words to the different EEPROM types. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word to be written to the EEPROM + * + * If e1000_update_eeprom_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + */ +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + + mutex_lock(&e1000_eeprom_lock); + ret = e1000_do_write_eeprom(hw, offset, words, data); + mutex_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + s32 status = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. 
+ */ + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { + e_dbg("\"words\" parameter out of bounds\n"); + return -E1000_ERR_EEPROM; + } + + /* Prepare the EEPROM for writing */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + if (eeprom->type == e1000_eeprom_microwire) { + status = e1000_write_eeprom_microwire(hw, offset, words, data); + } else { + status = e1000_write_eeprom_spi(hw, offset, words, data); + msleep(10); + } + + /* Done with writing */ + e1000_release_eeprom(hw); + + return status; +} + +/** + * e1000_write_eeprom_spi - Writes a 16 bit word to a given offset in an SPI EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u16 widx = 0; + + while (widx < words) { + u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) + return -E1000_ERR_EEPROM; + + e1000_standby_eeprom(hw); + cond_resched(); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, + eeprom->opcode_bits); + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + write_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16)((offset + widx) * 2), + eeprom->address_bits); + + /* Send the data */ + + /* Loop to allow for up to whole page write (32 bytes) of + * eeprom + */ + while (widx < words) { + u16 word_out = data[widx]; + + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_ee_bits(hw, word_out, 16); + widx++; + + /* Some larger eeprom sizes are capable of a 32-byte + * PAGE WRITE operation, while the smaller eeproms are + * capable of an 8-byte PAGE WRITE operation. Break the + * inner loop to pass new address + */ + if ((((offset + widx) * 2) % eeprom->page_size) == 0) { + e1000_standby_eeprom(hw); + break; + } + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom_microwire - Writes a 16 bit word to a given offset in a Microwire EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u16 words_written = 0; + u16 i = 0; + + /* Send the write enable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 11). It's less work to include + * the 11 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This puts the + * EEPROM into write/erase mode. 
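 + * A matching EWDS (write disable) command at the end of this function takes the part back out of write/erase mode.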
+ */ + e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, + (u16)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); + + /* Prepare the EEPROM */ + e1000_standby_eeprom(hw); + + while (words_written < words) { + /* Send the Write command (3-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, + eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16)(offset + words_written), + eeprom->address_bits); + + /* Send the data */ + e1000_shift_out_ee_bits(hw, data[words_written], 16); + + /* Toggle the CS line. This in effect tells the EEPROM to + * execute the previous command. + */ + e1000_standby_eeprom(hw); + + /* Read DO repeatedly until it is high (equal to '1'). The + * EEPROM will signal that the command has been completed by + * raising the DO signal. If DO does not go high in 10 + * milliseconds, then error out. + */ + for (i = 0; i < 200; i++) { + eecd = er32(EECD); + if (eecd & E1000_EECD_DO) + break; + udelay(50); + } + if (i == 200) { + e_dbg("EEPROM Write did not complete\n"); + return -E1000_ERR_EEPROM; + } + + /* Recover from write */ + e1000_standby_eeprom(hw); + cond_resched(); + + words_written++; + } + + /* Send the write disable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 10). It's less work to include + * the 10 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This takes the + * EEPROM out of write/erase mode. + */ + e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, + (u16)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - read the adapters MAC from eeprom + * @hw: Struct containing variables accessed by shared code + * + * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the + * second function of dual function devices + */ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + u16 offset; + u16 eeprom_data, i; + + for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { + offset = i >> 1; + if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF); + hw->perm_mac_addr[i + 1] = (u8)(eeprom_data >> 8); + } + + switch (hw->mac_type) { + default: + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) + hw->perm_mac_addr[5] ^= 0x01; + break; + } + + for (i = 0; i < NODE_ADDRESS_SIZE; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return E1000_SUCCESS; +} + +/** + * e1000_init_rx_addrs - Initializes receive address filters. + * @hw: Struct containing variables accessed by shared code + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + */ +static void e1000_init_rx_addrs(struct e1000_hw *hw) +{ + u32 i; + u32 rar_num; + + /* Setup the receive address. */ + e_dbg("Programming MAC Address into RAR[0]\n"); + + e1000_rar_set(hw, hw->mac_addr, 0); + + rar_num = E1000_RAR_ENTRIES; + + /* Zero out the following 14 receive addresses. 
RAR[15] is for + * manageability + */ + e_dbg("Clearing RAR[1-14]\n"); + for (i = 1; i < rar_num; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table + * @hw: Struct containing variables accessed by shared code + * @mc_addr: the multicast address to hash + */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value = 0; + + /* The portion of the address that is used for the hash table is + * determined by the mc_filter_type setting. + */ + switch (hw->mc_filter_type) { + /* [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + */ + case 0: + /* [47:36] i.e. 0x563 for above example address */ + hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: + /* [46:35] i.e. 0xAC6 for above example address */ + hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: + /* [45:34] i.e. 0x5D8 for above example address */ + hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: + /* [43:32] i.e. 0x634 for above example address */ + hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + } + + hash_value &= 0xFFF; + return hash_value; +} + +/** + * e1000_rar_set - Puts an ethernet address into a receive address register. + * @hw: Struct containing variables accessed by shared code + * @addr: Address to put into receive address register + * @index: Receive address register to write + */ +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx + * unit hang. + * + * Description: + * If there are any Rx frames queued up or otherwise present in the HW + * before RSS is enabled, and then we enable RSS, the HW Rx unit will + * hang. To work around this issue, we have to disable receives and + * flush out all Rx frames before we enable RSS. To do so, we modify we + * redirect all Rx traffic to manageability and then reset the HW. + * This flushes away Rx frames, and (since the redirections to + * manageability persists across resets) keeps new ones from coming in + * while we work. Then, we clear the Address Valid AV bit for all MAC + * addresses and undo the re-direction to manageability. + * Now, frames are coming in again, but the MAC won't accept them, so + * far so good. We now proceed to initialize RSS (if necessary) and + * configure the Rx unit. Last, we re-enable the AV bits and continue + * on our merry way. + */ + switch (hw->mac_type) { + default: + /* Indicate to hardware the Address is Valid. */ + rar_high |= E1000_RAH_AV; + break; + } + + E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table. 
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: Offset in VLAN filter table to write
+ * @value: Value to write into VLAN filter table
+ */
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	u32 temp;
+
+	if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
+		temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
+		E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+		E1000_WRITE_FLUSH();
+		E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
+		E1000_WRITE_FLUSH();
+	} else {
+		E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+		E1000_WRITE_FLUSH();
+	}
+}
+
+/**
+ * e1000_clear_vfta - Clears the VLAN filter table
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_clear_vfta(struct e1000_hw *hw)
+{
+	u32 offset;
+
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
+		E1000_WRITE_FLUSH();
+	}
+}
+
+static s32 e1000_id_led_init(struct e1000_hw *hw)
+{
+	u32 ledctl;
+	const u32 ledctl_mask = 0x000000FF;
+	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+	u16 eeprom_data, i, temp;
+	const u16 led_mask = 0x0F;
+
+	if (hw->mac_type < e1000_82540) {
+		/* Nothing to do */
+		return E1000_SUCCESS;
+	}
+
+	ledctl = er32(LEDCTL);
+	hw->ledctl_default = ledctl;
+	hw->ledctl_mode1 = hw->ledctl_default;
+	hw->ledctl_mode2 = hw->ledctl_default;
+
+	if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
+		e_dbg("EEPROM Read Error\n");
+		return -E1000_ERR_EEPROM;
+	}
+
+	if ((eeprom_data == ID_LED_RESERVED_0000) ||
+	    (eeprom_data == ID_LED_RESERVED_FFFF)) {
+		eeprom_data = ID_LED_DEFAULT;
+	}
+
+	for (i = 0; i < 4; i++) {
+		temp = (eeprom_data >> (i << 2)) & led_mask;
+		switch (temp) {
+		case ID_LED_ON1_DEF2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_ON1_OFF2:
+			hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+			hw->ledctl_mode1 |= ledctl_on << (i << 3);
+			break;
+		case ID_LED_OFF1_DEF2:
+		case ID_LED_OFF1_ON2:
+		case ID_LED_OFF1_OFF2:
+			hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+			hw->ledctl_mode1 |= ledctl_off << (i << 3);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		switch (temp) {
+		case ID_LED_DEF1_ON2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_OFF1_ON2:
+			hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+			hw->ledctl_mode2 |= ledctl_on << (i << 3);
+			break;
+		case ID_LED_DEF1_OFF2:
+		case ID_LED_ON1_OFF2:
+		case ID_LED_OFF1_OFF2:
+			hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+			hw->ledctl_mode2 |= ledctl_off << (i << 3);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+	}
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_led
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Prepares SW controllable LED for use and saves the current state of the LED.
+ */ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + u32 ledctl; + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No setup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn off PHY Smart Power Down (if enabled) */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, + &hw->phy_spd_default); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + (u16)(hw->phy_spd_default & + ~IGP01E1000_GMII_SPD)); + if (ret_val) + return ret_val; + fallthrough; + default: + if (hw->media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + /* Save current LEDCTL settings */ + hw->ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->media_type == e1000_media_type_copper) + ew32(LEDCTL, hw->ledctl_mode1); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores the saved state of the SW controlable LED. + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No cleanup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn on PHY Smart Power Down (if previously enabled) */ + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + hw->phy_spd_default); + if (ret_val) + return ret_val; + fallthrough; + default: + /* Restore LEDCTL settings */ + ew32(LEDCTL, hw->ledctl_default); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turns on the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_on(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode2); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turns off the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_off(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == 
e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode1); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_hw_cntrs - Clears all hardware statistics counters. + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_hw_cntrs(struct e1000_hw *hw) +{ + er32(CRCERRS); + er32(SYMERRS); + er32(MPC); + er32(SCC); + er32(ECOL); + er32(MCC); + er32(LATECOL); + er32(COLC); + er32(DC); + er32(SEC); + er32(RLEC); + er32(XONRXC); + er32(XONTXC); + er32(XOFFRXC); + er32(XOFFTXC); + er32(FCRUC); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + + er32(GPRC); + er32(BPRC); + er32(MPRC); + er32(GPTC); + er32(GORCL); + er32(GORCH); + er32(GOTCL); + er32(GOTCH); + er32(RNBC); + er32(RUC); + er32(RFC); + er32(ROC); + er32(RJC); + er32(TORL); + er32(TORH); + er32(TOTL); + er32(TOTH); + er32(TPR); + er32(TPT); + + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(MPTC); + er32(BPTC); + + if (hw->mac_type < e1000_82543) + return; + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + if (hw->mac_type <= e1000_82544) + return; + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); +} + +/** + * e1000_reset_adaptive - Resets Adaptive IFS to its default state. + * @hw: Struct containing variables accessed by shared code + * + * Call this after e1000_init_hw. You may override the IFS defaults by setting + * hw->ifs_params_forced to true. However, you must initialize hw-> + * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio + * before calling this function. + */ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if (!hw->ifs_params_forced) { + hw->current_ifs_val = 0; + hw->ifs_min_val = IFS_MIN; + hw->ifs_max_val = IFS_MAX; + hw->ifs_step_size = IFS_STEP; + hw->ifs_ratio = IFS_RATIO; + } + hw->in_ifs_mode = false; + ew32(AIT, 0); + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_update_adaptive - update adaptive IFS + * @hw: Struct containing variables accessed by shared code + * + * Called during the callback/watchdog routine to update IFS value based on + * the ratio of transmits to collisions. 
+ */ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { + if (hw->tx_packet_delta > MIN_NUM_XMITS) { + hw->in_ifs_mode = true; + if (hw->current_ifs_val < hw->ifs_max_val) { + if (hw->current_ifs_val == 0) + hw->current_ifs_val = + hw->ifs_min_val; + else + hw->current_ifs_val += + hw->ifs_step_size; + ew32(AIT, hw->current_ifs_val); + } + } + } else { + if (hw->in_ifs_mode && + (hw->tx_packet_delta <= MIN_NUM_XMITS)) { + hw->current_ifs_val = 0; + hw->in_ifs_mode = false; + ew32(AIT, 0); + } + } + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_get_bus_info + * @hw: Struct containing variables accessed by shared code + * + * Gets the current PCI bus type, speed, and width of the hardware + */ +void e1000_get_bus_info(struct e1000_hw *hw) +{ + u32 status; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->bus_type = e1000_bus_type_pci; + hw->bus_speed = e1000_bus_speed_unknown; + hw->bus_width = e1000_bus_width_unknown; + break; + default: + status = er32(STATUS); + hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? + e1000_bus_type_pcix : e1000_bus_type_pci; + + if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { + hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? + e1000_bus_speed_66 : e1000_bus_speed_120; + } else if (hw->bus_type == e1000_bus_type_pci) { + hw->bus_speed = (status & E1000_STATUS_PCI66) ? + e1000_bus_speed_66 : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + hw->bus_speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + hw->bus_speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + hw->bus_speed = e1000_bus_speed_133; + break; + default: + hw->bus_speed = e1000_bus_speed_reserved; + break; + } + } + hw->bus_width = (status & E1000_STATUS_BUS64) ? + e1000_bus_width_64 : e1000_bus_width_32; + break; + } +} + +/** + * e1000_write_reg_io + * @hw: Struct containing variables accessed by shared code + * @offset: offset to write to + * @value: value to write + * + * Writes a value to one of the devices registers using port I/O (as opposed to + * memory mapped I/O). Only 82544 and newer devices support port I/O. + */ +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value) +{ + unsigned long io_addr = hw->io_base; + unsigned long io_data = hw->io_base + 4; + + e1000_io_write(hw, io_addr, offset); + e1000_io_write(hw, io_data, value); +} + +/** + * e1000_get_cable_length - Estimates the cable length. + * @hw: Struct containing variables accessed by shared code + * @min_length: The estimated minimum length + * @max_length: The estimated maximum length + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * This function always returns a ranged length (minimum & maximum). + * So for M88 phy's, this function interprets the one value returned from the + * register to the minimum and maximum range. + * For IGP phy's, the function calculates the range by the AGC registers. 
+ */ +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length) +{ + s32 ret_val; + u16 agc_value = 0; + u16 i, phy_data; + u16 cable_length; + + *min_length = *max_length = 0; + + /* Use old method for Phy older than IGP */ + if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + /* Convert the enum value to ranged values */ + switch (cable_length) { + case e1000_cable_length_50: + *min_length = 0; + *max_length = e1000_igp_cable_length_50; + break; + case e1000_cable_length_50_80: + *min_length = e1000_igp_cable_length_50; + *max_length = e1000_igp_cable_length_80; + break; + case e1000_cable_length_80_110: + *min_length = e1000_igp_cable_length_80; + *max_length = e1000_igp_cable_length_110; + break; + case e1000_cable_length_110_140: + *min_length = e1000_igp_cable_length_110; + *max_length = e1000_igp_cable_length_140; + break; + case e1000_cable_length_140: + *min_length = e1000_igp_cable_length_140; + *max_length = e1000_igp_cable_length_170; + break; + default: + return -E1000_ERR_PHY; + } + } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ + u16 cur_agc_value; + u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_A, + IGP01E1000_PHY_AGC_B, + IGP01E1000_PHY_AGC_C, + IGP01E1000_PHY_AGC_D + }; + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; + + /* Value bound check. */ + if ((cur_agc_value >= + IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || + (cur_agc_value == 0)) + return -E1000_ERR_PHY; + + agc_value += cur_agc_value; + + /* Update minimal AGC value. */ + if (min_agc_value > cur_agc_value) + min_agc_value = cur_agc_value; + } + + /* Remove the minimal AGC result for length < 50m */ + if (agc_value < + IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { + agc_value -= min_agc_value; + + /* Get the average length of the remaining 3 channels */ + agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); + } else { + /* Get the average length of all the 4 channels. */ + agc_value /= IGP01E1000_PHY_CHANNEL_NUM; + } + + /* Set the range of the calculated length. */ + *min_length = ((e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) > 0) ? + (e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) : 0; + *max_length = e1000_igp_cable_length_table[agc_value] + + IGP01E1000_AGC_RANGE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_polarity - Check the cable polarity + * @hw: Struct containing variables accessed by shared code + * @polarity: output parameter : 0 - Polarity is not reversed + * 1 - Polarity is reversed. + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function simply reads the polarity bit in the + * Phy Status register. For IGP phy's, this bit is valid only if link speed is + * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will + * return 0. If the link speed is 1000 Mbps the polarity status is in the + * IGP01E1000_PHY_PCS_INIT_REG. 
+ */ +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_m88) { + /* return the Polarity bit in the Status register. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >> + M88E1000_PSSR_REV_POLARITY_SHIFT) ? + e1000_rev_polarity_reversed : e1000_rev_polarity_normal; + + } else if (hw->phy_type == e1000_phy_igp) { + /* Read the Status register to check the speed */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + /* If speed is 1000 Mbps, must read the + * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status + */ + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Read the GIG initialization PCS register (0x00B4) */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, + &phy_data); + if (ret_val) + return ret_val; + + /* Check the polarity bits */ + *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } else { + /* For 10 Mbps, read the polarity bit in the status + * register. (for 100 Mbps this bit is always 0) + */ + *polarity = + (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_downshift - Check if Downshift occurred + * @hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function reads the Downshift bit in the Phy + * Specific Status register. For IGP phy's, it reads the Downgrade bit in the + * Link Health register. In IGP this bit is latched high, so the driver must + * read it immediately after link is established. + */ +static s32 e1000_check_downshift(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = + (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 
1 : 0; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> + M88E1000_PSSR_DOWNSHIFT_SHIFT; + } + + return E1000_SUCCESS; +} + +static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D +}; + +static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw) +{ + u16 min_length, max_length; + u16 phy_data, i; + s32 ret_val; + + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + if (hw->dsp_config_state != e1000_dsp_config_enabled) + return 0; + + if (min_length >= e1000_igp_cable_length_50) { + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + hw->dsp_config_state = e1000_dsp_config_activated; + } else { + u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; + u32 idle_errs = 0; + + /* clear previous idle error counts */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + udelay(1000); + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + hw->ffe_config_state = e1000_ffe_config_active; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if (ret_val) + return ret_val; + break; + } + + if (idle_errs) + ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } + + return 0; +} + +/** + * e1000_config_dsp_after_link_change + * @hw: Struct containing variables accessed by shared code + * @link_up: was link up at the time this was called + * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + * + * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a + * gigabit link is achieved to improve link quality. + */ + +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) +{ + s32 ret_val; + u16 phy_data, phy_saved_data, speed, duplex, i; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + if (link_up) { + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (speed == SPEED_1000) { + ret_val = e1000_1000Mb_check_cable_length(hw); + if (ret_val) + return ret_val; + } + } else { + if (hw->dsp_config_state == e1000_dsp_config_activated) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. 
+ */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; + + ret_val = + e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->dsp_config_state = e1000_dsp_config_enabled; + } + + if (hw->ffe_config_state == e1000_ffe_config_active) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. + */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_DEFAULT); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->ffe_config_state = e1000_ffe_config_enabled; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_set_phy_mode - Set PHY to class A mode + * @hw: Struct containing variables accessed by shared code + * + * Assumes the following operations will follow to enable the new class mode. + * 1. Do a PHY soft reset + * 2. Restart auto-negotiation or force link. + */ +static s32 e1000_set_phy_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 eeprom_data; + + if ((hw->mac_type == e1000_82545_rev_3) && + (hw->media_type == e1000_media_type_copper)) { + ret_val = + e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, + &eeprom_data); + if (ret_val) + return ret_val; + + if ((eeprom_data != EEPROM_RESERVED_WORD) && + (eeprom_data & EEPROM_PHY_CLASS_A)) { + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, + 0x000B); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, + 0x8104); + if (ret_val) + return ret_val; + + hw->phy_reset_disable = false; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - set d3 link power state + * @hw: Struct containing variables accessed by shared code + * @active: true to enable lplu false to disable lplu. + * + * This function sets the lplu state according to the active flag. When + * activating lplu this function also disables smart speed and vise versa. + * lplu will not be activated unless the device autonegotiation advertisement + * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes. 
+ * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + */ +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + /* During driver activity LPLU should not be used or it will attain link + * from the lowest speeds starting from 10Mbps. The capability is used + * for Dx transitions and states + */ + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); + if (ret_val) + return ret_val; + } + + if (!active) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (hw->smart_speed == e1000_smart_speed_on) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } else if (hw->smart_speed == e1000_smart_speed_off) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* When LPLU is enabled we should disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + return E1000_SUCCESS; +} + +/** + * e1000_set_vco_speed + * @hw: Struct containing variables accessed by shared code + * + * Change VCO speed register to improve Bit Error Rate performance of SERDES. 
+ */ +static s32 e1000_set_vco_speed(struct e1000_hw *hw) +{ + s32 ret_val; + u16 default_page = 0; + u16 phy_data; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + /* Set PHY register 30, page 5, bit 8 to 0 */ + + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Set PHY register 30, page 4, bit 11 to 1 */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PHY_VCO_REG_BIT11; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + +/** + * e1000_enable_mng_pass_thru - check for bmc pass through + * @hw: Struct containing variables accessed by shared code + * + * Verifies the hardware needs to allow ARPs to be processed by the host + * returns: - true/false + */ +u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + + if (hw->asf_firmware_present) { + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN) || + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + return false; + if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) + return true; + } + return false; +} + +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 i; + + /* Polarity reversal workaround for forced 10F/10H links. */ + + /* Disable the transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the NO link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be clear. 
+		 */
+
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			return ret_val;
+
+		if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0)
+			break;
+		msleep(100);
+	}
+
+	/* Recommended delay time after link has been lost */
+	msleep(1000);
+
+	/* Now we will re-enable the transmitter on the PHY */
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+	if (ret_val)
+		return ret_val;
+	msleep(50);
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+	if (ret_val)
+		return ret_val;
+	msleep(50);
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+	if (ret_val)
+		return ret_val;
+	msleep(50);
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+	if (ret_val)
+		return ret_val;
+
+	/* This loop will early-out if the link condition has been met. */
+	for (i = PHY_FORCE_TIME; i > 0; i--) {
+		/* Read the MII Status Register and wait for Link Status bit
+		 * to be set.
+		 */
+
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			return ret_val;
+
+		if (mii_status_reg & MII_SR_LINK_STATUS)
+			break;
+		msleep(100);
+	}
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_auto_rd_done
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Check for EEPROM Auto Read bit done.
+ * returns: - E1000_ERR_RESET if fail to reset MAC
+ *            E1000_SUCCESS at any other case.
+ */
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
+{
+	msleep(5);
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_cfg_done
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Checks if the PHY configuration is done
+ * returns: - E1000_ERR_RESET if fail to reset MAC
+ *            E1000_SUCCESS at any other case.
+ */
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+	msleep(10);
+	return E1000_SUCCESS;
+}
diff --git a/devices/e1000/e1000_hw-5.10-ethercat.h b/devices/e1000/e1000_hw-5.10-ethercat.h
new file mode 100644
index 00000000..900ec37a
--- /dev/null
+++ b/devices/e1000/e1000_hw-5.10-ethercat.h
@@ -0,0 +1,3085 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 1999 - 2006 Intel Corporation.
*/ + +/* e1000_hw.h + * Structures, enums, and macros for the MAC + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep-5.10-ethercat.h" + + +/* Forward declarations of structures used by the shared code */ +struct e1000_hw; +struct e1000_hw_stats; + +/* Enumerated types specific to the e1000 hardware */ +/* Media Access Controllers */ +typedef enum { + e1000_undefined = 0, + e1000_82542_rev2_0, + e1000_82542_rev2_1, + e1000_82543, + e1000_82544, + e1000_82540, + e1000_82545, + e1000_82545_rev_3, + e1000_82546, + e1000_ce4100, + e1000_82546_rev_3, + e1000_82541, + e1000_82541_rev_2, + e1000_82547, + e1000_82547_rev_2, + e1000_num_macs +} e1000_mac_type; + +typedef enum { + e1000_eeprom_uninitialized = 0, + e1000_eeprom_spi, + e1000_eeprom_microwire, + e1000_eeprom_flash, + e1000_eeprom_none, /* No NVM support */ + e1000_num_eeprom_types +} e1000_eeprom_type; + +/* Media Types */ +typedef enum { + e1000_media_type_copper = 0, + e1000_media_type_fiber = 1, + e1000_media_type_internal_serdes = 2, + e1000_num_media_types +} e1000_media_type; + +typedef enum { + e1000_10_half = 0, + e1000_10_full = 1, + e1000_100_half = 2, + e1000_100_full = 3 +} e1000_speed_duplex_type; + +/* Flow Control Settings */ +typedef enum { + E1000_FC_NONE = 0, + E1000_FC_RX_PAUSE = 1, + E1000_FC_TX_PAUSE = 2, + E1000_FC_FULL = 3, + E1000_FC_DEFAULT = 0xFF +} e1000_fc_type; + +struct e1000_shadow_ram { + u16 eeprom_word; + bool modified; +}; + +/* PCI bus types */ +typedef enum { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_reserved +} e1000_bus_type; + +/* PCI bus speeds */ +typedef enum { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_reserved +} e1000_bus_speed; + +/* PCI bus widths */ +typedef enum { + e1000_bus_width_unknown = 0, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +} e1000_bus_width; + +/* PHY status info structure and supporting enums */ +typedef enum { + e1000_cable_length_50 = 0, + e1000_cable_length_50_80, + e1000_cable_length_80_110, + e1000_cable_length_110_140, + e1000_cable_length_140, + e1000_cable_length_undefined = 0xFF +} e1000_cable_length; + +typedef enum { + e1000_gg_cable_length_60 = 0, + e1000_gg_cable_length_60_115 = 1, + e1000_gg_cable_length_115_150 = 2, + e1000_gg_cable_length_150 = 4 +} e1000_gg_cable_length; + +typedef enum { + e1000_igp_cable_length_10 = 10, + e1000_igp_cable_length_20 = 20, + e1000_igp_cable_length_30 = 30, + e1000_igp_cable_length_40 = 40, + e1000_igp_cable_length_50 = 50, + e1000_igp_cable_length_60 = 60, + e1000_igp_cable_length_70 = 70, + e1000_igp_cable_length_80 = 80, + e1000_igp_cable_length_90 = 90, + e1000_igp_cable_length_100 = 100, + e1000_igp_cable_length_110 = 110, + e1000_igp_cable_length_115 = 115, + e1000_igp_cable_length_120 = 120, + e1000_igp_cable_length_130 = 130, + e1000_igp_cable_length_140 = 140, + e1000_igp_cable_length_150 = 150, + e1000_igp_cable_length_160 = 160, + e1000_igp_cable_length_170 = 170, + e1000_igp_cable_length_180 = 180 +} e1000_igp_cable_length; + +typedef enum { + e1000_10bt_ext_dist_enable_normal = 0, + e1000_10bt_ext_dist_enable_lower, + e1000_10bt_ext_dist_enable_undefined = 0xFF +} e1000_10bt_ext_dist_enable; + +typedef enum { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +} e1000_rev_polarity; + +typedef enum { + e1000_downshift_normal = 0, + 
e1000_downshift_activated, + e1000_downshift_undefined = 0xFF +} e1000_downshift; + +typedef enum { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +} e1000_smart_speed; + +typedef enum { + e1000_polarity_reversal_enabled = 0, + e1000_polarity_reversal_disabled, + e1000_polarity_reversal_undefined = 0xFF +} e1000_polarity_reversal; + +typedef enum { + e1000_auto_x_mode_manual_mdi = 0, + e1000_auto_x_mode_manual_mdix, + e1000_auto_x_mode_auto1, + e1000_auto_x_mode_auto2, + e1000_auto_x_mode_undefined = 0xFF +} e1000_auto_x_mode; + +typedef enum { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +} e1000_1000t_rx_status; + +typedef enum { + e1000_phy_m88 = 0, + e1000_phy_igp, + e1000_phy_8211, + e1000_phy_8201, + e1000_phy_undefined = 0xFF +} e1000_phy_type; + +typedef enum { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +} e1000_ms_type; + +typedef enum { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active, + e1000_ffe_config_blocked +} e1000_ffe_config; + +typedef enum { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled, + e1000_dsp_config_activated, + e1000_dsp_config_undefined = 0xFF +} e1000_dsp_config; + +struct e1000_phy_info { + e1000_cable_length cable_length; + e1000_10bt_ext_dist_enable extended_10bt_distance; + e1000_rev_polarity cable_polarity; + e1000_downshift downshift; + e1000_polarity_reversal polarity_correction; + e1000_auto_x_mode mdix_mode; + e1000_1000t_rx_status local_rx; + e1000_1000t_rx_status remote_rx; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_eeprom_info { + e1000_eeprom_type type; + u16 word_size; + u16 opcode_bits; + u16 address_bits; + u16 delay_usec; + u16 page_size; +}; + +/* Flex ASF Information */ +#define E1000_HOST_IF_MAX_SIZE 2048 + +typedef enum { + e1000_byte_align = 0, + e1000_word_align = 1, + e1000_dword_align = 2 +} e1000_align_type; + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_EEPROM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_TYPE 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 + +#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \ + (((_value) & 0xff00) >> 8)) + +/* Function prototypes */ +/* Initialization */ +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_set_mac_type(struct e1000_hw *hw); +void e1000_set_media_type(struct e1000_hw *hw); + +/* Link Configuration */ +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex); +s32 e1000_force_mac_fc(struct e1000_hw *hw); + +/* PHY */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_reset(struct e1000_hw *hw); +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); + +/* EEPROM Functions */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw); + +/* MNG HOST IF functions */ +u32 
e1000_enable_mng_pass_thru(struct e1000_hw *hw); + +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ + +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_ICH_IAMT_MODE 0x2 +#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ + +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */ +#define E1000_VFTA_ENTRY_SHIFT 0x5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658 */ +}; +#ifdef __BIG_ENDIAN +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u16 vlan_id; + u8 reserved0; + u8 status; + u32 reserved1; + u8 checksum; + u8 reserved3; + u16 reserved2; +}; +#else +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; +#endif + +bool e1000_check_mng_mode(struct e1000_hw *hw); +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_read_mac_addr(struct e1000_hw *hw); + +/* Filters (multicast, vlan, receive) */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr); +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value); +void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); + +/* LED functions */ +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +s32 e1000_blink_led_start(struct e1000_hw *hw); + +/* Adaptive IFS Functions */ + +/* Everything else */ +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +void e1000_get_bus_info(struct e1000_hw *hw); +void e1000_pci_set_mwi(struct e1000_hw *hw); +void e1000_pci_clear_mwi(struct e1000_hw *hw); +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); +int e1000_pcix_get_mmrbc(struct e1000_hw *hw); +/* Port I/O is only supported on 82544 and newer */ +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value); + +#define E1000_READ_REG_IO(a, reg) \ + e1000_read_reg_io((a), E1000_##reg) +#define E1000_WRITE_REG_IO(a, reg, val) \ + e1000_write_reg_io((a), E1000_##reg, val) + +/* PCI Device IDs */ +#define E1000_DEV_ID_82542 0x1000 +#define E1000_DEV_ID_82543GC_FIBER 0x1001 +#define E1000_DEV_ID_82543GC_COPPER 0x1004 +#define E1000_DEV_ID_82544EI_COPPER 0x1008 +#define E1000_DEV_ID_82544EI_FIBER 0x1009 +#define E1000_DEV_ID_82544GC_COPPER 0x100C +#define E1000_DEV_ID_82544GC_LOM 0x100D +#define E1000_DEV_ID_82540EM 0x100E +#define 
E1000_DEV_ID_82540EM_LOM 0x1015 +#define E1000_DEV_ID_82540EP_LOM 0x1016 +#define E1000_DEV_ID_82540EP 0x1017 +#define E1000_DEV_ID_82540EP_LP 0x101E +#define E1000_DEV_ID_82545EM_COPPER 0x100F +#define E1000_DEV_ID_82545EM_FIBER 0x1011 +#define E1000_DEV_ID_82545GM_COPPER 0x1026 +#define E1000_DEV_ID_82545GM_FIBER 0x1027 +#define E1000_DEV_ID_82545GM_SERDES 0x1028 +#define E1000_DEV_ID_82546EB_COPPER 0x1010 +#define E1000_DEV_ID_82546EB_FIBER 0x1012 +#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define E1000_DEV_ID_82541EI 0x1013 +#define E1000_DEV_ID_82541EI_MOBILE 0x1018 +#define E1000_DEV_ID_82541ER_LOM 0x1014 +#define E1000_DEV_ID_82541ER 0x1078 +#define E1000_DEV_ID_82547GI 0x1075 +#define E1000_DEV_ID_82541GI 0x1076 +#define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82541GI_LF 0x107C +#define E1000_DEV_ID_82546GB_COPPER 0x1079 +#define E1000_DEV_ID_82546GB_FIBER 0x107A +#define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82547EI_MOBILE 0x101A +#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E + +#define NODE_ADDRESS_SIZE 6 + +/* MAC decode size is 128K - This is the size of BAR0 */ +#define MAC_DECODE_SIZE (128 * 1024) + +#define E1000_82542_2_0_REV_ID 2 +#define E1000_82542_2_1_REV_ID 3 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* The sizes (in bytes) of a ethernet packet */ +#define ENET_HEADER_SIZE 14 +#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ +#define ETHERNET_FCS_SIZE 4 +#define MINIMUM_ETHERNET_PACKET_SIZE \ + (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE) +#define CRC_LENGTH ETHERNET_FCS_SIZE +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* 802.1q VLAN Packet Sizes */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */ + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ +#define ETHERNET_IP_TYPE 0x0800 /* IP packets */ +#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */ + +/* Packet Header defines */ +#define IP_PROTOCOL_TCP 6 +#define IP_PROTOCOL_UDP 0x11 + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + */ +#define POLL_IMS_ENABLE_MASK ( \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ) + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. We + * reserve one of these spots for our directed address, allowing us room for + * E1000_RAR_ENTRIES - 1 multicast addresses. 
+ */ +#define E1000_RAR_ENTRIES 15 + +#define MIN_NUMBER_OF_DESCRIPTORS 8 +#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 + +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of buffers 1-3 */ + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ 
+ E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; /* */ + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; /* */ + u8 cmd; /* */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; /* */ + } fields; + } upper; +}; + +/* Filters */ +#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ +#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address Register */ +struct e1000_rar { + volatile __le32 low; /* receive address low */ + volatile __le32 high; /* receive address high */ +}; + +/* Number of entries in the 
Multicast Table Array (MTA). */ +#define E1000_NUM_MTA_REGISTERS 128 + +/* IPv4 Address Table Entry */ +struct e1000_ipv4_at_entry { + volatile u32 ipv4_addr; /* IP Address (RW) */ + volatile u32 reserved; +}; + +/* Four wakeup IP addresses are supported */ +#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 +#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX +#define E1000_IP6AT_SIZE 1 + +/* IPv6 Address Table Entry */ +struct e1000_ipv6_at_entry { + volatile u8 ipv6_addr[16]; +}; + +/* Flexible Filter Length Table Entry */ +struct e1000_fflt_entry { + volatile u32 length; /* Flexible Filter Length (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Mask Table Entry */ +struct e1000_ffmt_entry { + volatile u32 mask; /* Flexible Filter Mask (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Value Table Entry */ +struct e1000_ffvt_entry { + volatile u32 value; /* Flexible Filter Value (RW) */ + volatile u32 reserved; +}; + +/* Four Flexible Filters are supported */ +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX + +#define E1000_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Register Set. (82543, 82544) + * + * Registers are defined to be 32 bits and should be accessed as 32 bit values. + * These registers are physically located on the NIC, but are mapped into the + * host memory address space. + * + * RW - register is both readable and writable + * RO - register is read only + * WO - register is write only + * R/clr - register is read only and is cleared when read + * A - register array + */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ + +#define INTEL_CE_GBE_MDIO_RCOMP_BASE (hw->ce4100_gbe_mdio_base_virt) +#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) +#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) +#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) +#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC) +#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20) +#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24) + +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ + +/* Auxiliary Control Register. 
This register is CE4100 specific, + * RMII/RGMII function is switched by this register - RW + * Following are bits definitions of the Auxiliary Control Register + */ +#define E1000_CTL_AUX 0x000E0 +#define E1000_CTL_AUX_END_SEL_SHIFT 10 +#define E1000_CTL_AUX_ENDIANESS_SHIFT 8 +#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0 + +/* descriptor and packet transfer use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use CTL_AUX.ENDIANESS, packet use default */ +#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use default, packet use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT) +/* all use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT) + +#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT) +#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT) + +/* LW little endian, Byte big endian */ +#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT) + +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define FEXTNVM_SW_CONFIG 0x0001 +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_FLASH_UPDATES 1000 +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* RX Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* RX Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* RX 
Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* RX Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* RX Data FIFO Packet Count - RW */ +#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ +#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ +#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ +#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ +#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ +#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ +#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */ +#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ +#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ +#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ +#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ +#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ +#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ +#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ +#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ +#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */ +#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */ +#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */ +#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */ +#define E1000_TDT 0x03818 /* TX Descriptor Tail - RW */ +#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */ +#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ +#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ +#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ +#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ +#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ +#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ +#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ +#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ +#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define 
E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 
0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ + +#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ + +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* Register Set (82542) + * + * Some of the 82542 registers are located at different offsets than they are + * in more current versions of the 8254x. Despite the difference in location, + * the registers function in the same manner. 
+ */ +#define E1000_82542_CTL_AUX E1000_CTL_AUX +#define E1000_82542_CTRL E1000_CTRL +#define E1000_82542_CTRL_DUP E1000_CTRL_DUP +#define E1000_82542_STATUS E1000_STATUS +#define E1000_82542_EECD E1000_EECD +#define E1000_82542_EERD E1000_EERD +#define E1000_82542_CTRL_EXT E1000_CTRL_EXT +#define E1000_82542_FLA E1000_FLA +#define E1000_82542_MDIC E1000_MDIC +#define E1000_82542_SCTL E1000_SCTL +#define E1000_82542_FEXTNVM E1000_FEXTNVM +#define E1000_82542_FCAL E1000_FCAL +#define E1000_82542_FCAH E1000_FCAH +#define E1000_82542_FCT E1000_FCT +#define E1000_82542_VET E1000_VET +#define E1000_82542_RA 0x00040 +#define E1000_82542_ICR E1000_ICR +#define E1000_82542_ITR E1000_ITR +#define E1000_82542_ICS E1000_ICS +#define E1000_82542_IMS E1000_IMS +#define E1000_82542_IMC E1000_IMC +#define E1000_82542_RCTL E1000_RCTL +#define E1000_82542_RDTR 0x00108 +#define E1000_82542_RDFH E1000_RDFH +#define E1000_82542_RDFT E1000_RDFT +#define E1000_82542_RDFHS E1000_RDFHS +#define E1000_82542_RDFTS E1000_RDFTS +#define E1000_82542_RDFPC E1000_RDFPC +#define E1000_82542_RDBAL 0x00110 +#define E1000_82542_RDBAH 0x00114 +#define E1000_82542_RDLEN 0x00118 +#define E1000_82542_RDH 0x00120 +#define E1000_82542_RDT 0x00128 +#define E1000_82542_RDTR0 E1000_82542_RDTR +#define E1000_82542_RDBAL0 E1000_82542_RDBAL +#define E1000_82542_RDBAH0 E1000_82542_RDBAH +#define E1000_82542_RDLEN0 E1000_82542_RDLEN +#define E1000_82542_RDH0 E1000_82542_RDH +#define E1000_82542_RDT0 E1000_82542_RDT +#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication + * RX Control - RW */ +#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) +#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ +#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ +#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ +#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ +#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ +#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ +#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ +#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ +#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ +#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ +#define E1000_82542_RDTR1 0x00130 +#define E1000_82542_RDBAL1 0x00138 +#define E1000_82542_RDBAH1 0x0013C +#define E1000_82542_RDLEN1 0x00140 +#define E1000_82542_RDH1 0x00148 +#define E1000_82542_RDT1 0x00150 +#define E1000_82542_FCRTH 0x00160 +#define E1000_82542_FCRTL 0x00168 +#define E1000_82542_FCTTV E1000_FCTTV +#define E1000_82542_TXCW E1000_TXCW +#define E1000_82542_RXCW E1000_RXCW +#define E1000_82542_MTA 0x00200 +#define E1000_82542_TCTL E1000_TCTL +#define E1000_82542_TCTL_EXT E1000_TCTL_EXT +#define E1000_82542_TIPG E1000_TIPG +#define E1000_82542_TDBAL 0x00420 +#define E1000_82542_TDBAH 0x00424 +#define E1000_82542_TDLEN 0x00428 +#define E1000_82542_TDH 0x00430 +#define E1000_82542_TDT 0x00438 +#define E1000_82542_TIDV 0x00440 +#define E1000_82542_TBT E1000_TBT +#define E1000_82542_AIT E1000_AIT +#define E1000_82542_VFTA 0x00600 +#define E1000_82542_LEDCTL E1000_LEDCTL +#define E1000_82542_PBA E1000_PBA +#define E1000_82542_PBS E1000_PBS +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_EEARBC E1000_EEARBC +#define E1000_82542_FLASHT E1000_FLASHT +#define E1000_82542_EEWR E1000_EEWR +#define E1000_82542_FLSWCTL E1000_FLSWCTL +#define E1000_82542_FLSWDATA 
E1000_FLSWDATA +#define E1000_82542_FLSWCNT E1000_FLSWCNT +#define E1000_82542_FLOP E1000_FLOP +#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL +#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE +#define E1000_82542_PHY_CTRL E1000_PHY_CTRL +#define E1000_82542_ERT E1000_ERT +#define E1000_82542_RXDCTL E1000_RXDCTL +#define E1000_82542_RXDCTL1 E1000_RXDCTL1 +#define E1000_82542_RADV E1000_RADV +#define E1000_82542_RSRPD E1000_RSRPD +#define E1000_82542_TXDMAC E1000_TXDMAC +#define E1000_82542_KABGTXD E1000_KABGTXD +#define E1000_82542_TDFHS E1000_TDFHS +#define E1000_82542_TDFTS E1000_TDFTS +#define E1000_82542_TDFPC E1000_TDFPC +#define E1000_82542_TXDCTL E1000_TXDCTL +#define E1000_82542_TADV E1000_TADV +#define E1000_82542_TSPMT E1000_TSPMT +#define E1000_82542_CRCERRS E1000_CRCERRS +#define E1000_82542_ALGNERRC E1000_ALGNERRC +#define E1000_82542_SYMERRS E1000_SYMERRS +#define E1000_82542_RXERRC E1000_RXERRC +#define E1000_82542_MPC E1000_MPC +#define E1000_82542_SCC E1000_SCC +#define E1000_82542_ECOL E1000_ECOL +#define E1000_82542_MCC E1000_MCC +#define E1000_82542_LATECOL E1000_LATECOL +#define E1000_82542_COLC E1000_COLC +#define E1000_82542_DC E1000_DC +#define E1000_82542_TNCRS E1000_TNCRS +#define E1000_82542_SEC E1000_SEC +#define E1000_82542_CEXTERR E1000_CEXTERR +#define E1000_82542_RLEC E1000_RLEC +#define E1000_82542_XONRXC E1000_XONRXC +#define E1000_82542_XONTXC E1000_XONTXC +#define E1000_82542_XOFFRXC E1000_XOFFRXC +#define E1000_82542_XOFFTXC E1000_XOFFTXC +#define E1000_82542_FCRUC E1000_FCRUC +#define E1000_82542_PRC64 E1000_PRC64 +#define E1000_82542_PRC127 E1000_PRC127 +#define E1000_82542_PRC255 E1000_PRC255 +#define E1000_82542_PRC511 E1000_PRC511 +#define E1000_82542_PRC1023 E1000_PRC1023 +#define E1000_82542_PRC1522 E1000_PRC1522 +#define E1000_82542_GPRC E1000_GPRC +#define E1000_82542_BPRC E1000_BPRC +#define E1000_82542_MPRC E1000_MPRC +#define E1000_82542_GPTC E1000_GPTC +#define E1000_82542_GORCL E1000_GORCL +#define E1000_82542_GORCH E1000_GORCH +#define E1000_82542_GOTCL E1000_GOTCL +#define E1000_82542_GOTCH E1000_GOTCH +#define E1000_82542_RNBC E1000_RNBC +#define E1000_82542_RUC E1000_RUC +#define E1000_82542_RFC E1000_RFC +#define E1000_82542_ROC E1000_ROC +#define E1000_82542_RJC E1000_RJC +#define E1000_82542_MGTPRC E1000_MGTPRC +#define E1000_82542_MGTPDC E1000_MGTPDC +#define E1000_82542_MGTPTC E1000_MGTPTC +#define E1000_82542_TORL E1000_TORL +#define E1000_82542_TORH E1000_TORH +#define E1000_82542_TOTL E1000_TOTL +#define E1000_82542_TOTH E1000_TOTH +#define E1000_82542_TPR E1000_TPR +#define E1000_82542_TPT E1000_TPT +#define E1000_82542_PTC64 E1000_PTC64 +#define E1000_82542_PTC127 E1000_PTC127 +#define E1000_82542_PTC255 E1000_PTC255 +#define E1000_82542_PTC511 E1000_PTC511 +#define E1000_82542_PTC1023 E1000_PTC1023 +#define E1000_82542_PTC1522 E1000_PTC1522 +#define E1000_82542_MPTC E1000_MPTC +#define E1000_82542_BPTC E1000_BPTC +#define E1000_82542_TSCTC E1000_TSCTC +#define E1000_82542_TSCTFC E1000_TSCTFC +#define E1000_82542_RXCSUM E1000_RXCSUM +#define E1000_82542_WUC E1000_WUC +#define E1000_82542_WUFC E1000_WUFC +#define E1000_82542_WUS E1000_WUS +#define E1000_82542_MANC E1000_MANC +#define E1000_82542_IPAV E1000_IPAV +#define E1000_82542_IP4AT E1000_IP4AT +#define E1000_82542_IP6AT E1000_IP6AT +#define E1000_82542_WUPL E1000_WUPL +#define E1000_82542_WUPM E1000_WUPM +#define E1000_82542_FFLT E1000_FFLT +#define E1000_82542_TDFH 0x08010 +#define E1000_82542_TDFT 0x08018 +#define E1000_82542_FFMT E1000_FFMT +#define 
E1000_82542_FFVT E1000_FFVT +#define E1000_82542_HOST_IF E1000_HOST_IF +#define E1000_82542_IAM E1000_IAM +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_PSRCTL E1000_PSRCTL +#define E1000_82542_RAID E1000_RAID +#define E1000_82542_TARC0 E1000_TARC0 +#define E1000_82542_TDBAL1 E1000_TDBAL1 +#define E1000_82542_TDBAH1 E1000_TDBAH1 +#define E1000_82542_TDLEN1 E1000_TDLEN1 +#define E1000_82542_TDH1 E1000_TDH1 +#define E1000_82542_TDT1 E1000_TDT1 +#define E1000_82542_TXDCTL1 E1000_TXDCTL1 +#define E1000_82542_TARC1 E1000_TARC1 +#define E1000_82542_RFCTL E1000_RFCTL +#define E1000_82542_GCR E1000_GCR +#define E1000_82542_GSCL_1 E1000_GSCL_1 +#define E1000_82542_GSCL_2 E1000_GSCL_2 +#define E1000_82542_GSCL_3 E1000_GSCL_3 +#define E1000_82542_GSCL_4 E1000_GSCL_4 +#define E1000_82542_FACTPS E1000_FACTPS +#define E1000_82542_SWSM E1000_SWSM +#define E1000_82542_FWSM E1000_FWSM +#define E1000_82542_FFLT_DBG E1000_FFLT_DBG +#define E1000_82542_IAC E1000_IAC +#define E1000_82542_ICRXPTC E1000_ICRXPTC +#define E1000_82542_ICRXATC E1000_ICRXATC +#define E1000_82542_ICTXPTC E1000_ICTXPTC +#define E1000_82542_ICTXATC E1000_ICTXATC +#define E1000_82542_ICTXQEC E1000_ICTXQEC +#define E1000_82542_ICTXQMTC E1000_ICTXQMTC +#define E1000_82542_ICRXDMTC E1000_ICRXDMTC +#define E1000_82542_ICRXOC E1000_ICRXOC +#define E1000_82542_HICR E1000_HICR + +#define E1000_82542_CPUVEC E1000_CPUVEC +#define E1000_82542_MRQC E1000_MRQC +#define E1000_82542_RETA E1000_RETA +#define E1000_82542_RSSRK E1000_RSSRK +#define E1000_82542_RSSIM E1000_RSSIM +#define E1000_82542_RSSIR E1000_RSSIR +#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA +#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 txerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorcl; + u64 gorch; + u64 gotcl; + u64 gotch; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rlerrc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 torl; + u64 torh; + u64 totl; + u64 toth; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +/* Structure containing variables used by the shared code (e1000_hw.c) */ +struct e1000_hw { + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + void __iomem *ce4100_gbe_mdio_base_virt; + e1000_mac_type mac_type; + e1000_phy_type phy_type; + u32 phy_init_script; + e1000_media_type media_type; + void *back; + struct e1000_shadow_ram *eeprom_shadow_ram; + u32 flash_bank_size; + u32 flash_base_addr; + e1000_fc_type fc; + e1000_bus_speed bus_speed; + e1000_bus_width bus_width; + e1000_bus_type bus_type; + struct e1000_eeprom_info eeprom; + e1000_ms_type master_slave; + e1000_ms_type original_master_slave; + e1000_ffe_config ffe_config_state; + u32 asf_firmware_present; + u32 eeprom_semaphore_present; + unsigned long io_base; + u32 phy_id; + u32 phy_revision; + u32 phy_addr; + u32 original_fc; + u32 txcw; + u32 autoneg_failed; + u32 max_frame_size; + u32 
min_frame_size; + u32 mc_filter_type; + u32 num_mc_addrs; + u32 collision_delta; + u32 tx_packet_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + bool tx_pkt_filtering; + struct e1000_host_mng_dhcp_cookie mng_cookie; + u16 phy_spd_default; + u16 autoneg_advertised; + u16 pci_cmd_word; + u16 fc_high_water; + u16 fc_low_water; + u16 fc_pause_time; + u16 current_ifs_val; + u16 ifs_min_val; + u16 ifs_max_val; + u16 ifs_step_size; + u16 ifs_ratio; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 autoneg; + u8 mdix; + u8 forced_speed_duplex; + u8 wait_autoneg_complete; + u8 dma_fairness; + u8 mac_addr[NODE_ADDRESS_SIZE]; + u8 perm_mac_addr[NODE_ADDRESS_SIZE]; + bool disable_polarity_correction; + bool speed_downgraded; + e1000_smart_speed smart_speed; + e1000_dsp_config dsp_config_state; + bool get_link_status; + bool serdes_has_link; + bool tbi_compatibility_en; + bool tbi_compatibility_on; + bool laa_is_present; + bool phy_reset_disable; + bool initialize_hw_bits_disable; + bool fc_send_xon; + bool fc_strict_ieee; + bool report_tx_early; + bool adaptive_ifs; + bool ifs_params_forced; + bool in_ifs_mode; + bool mng_reg_access_disabled; + bool leave_av_bit_off; + bool bad_tx_carr_stats_fd; + bool has_smbus; +}; + +#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ +#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ +#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ +#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ +/* Register Bit Masks */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion + by EEPROM/Flash */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ + +/* EEPROM/Flash Control */ +#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ +#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ +#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ +#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_EEPROM_GRANT_ATTEMPTS +#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_STM_OPCODE 0xDB00 +#define E1000_HICR_FW_RESET 0xC0 + +#define E1000_SHADOW_RAM_WORDS 2048 +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC0 + +/* EEPROM Read */ +#define E1000_EERD_START 0x00000001 /* Start Read */ +#define E1000_EERD_DONE 0x00000010 /* Read Done */ +#define E1000_EERD_ADDR_SHIFT 8 +#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */ +#define E1000_EERD_DATA_SHIFT 16 +#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */ + +/* SPI EEPROM Status Register */ +#define EEPROM_STATUS_RDY_SPI 0x01 +#define EEPROM_STATUS_WEN_SPI 
0x02 +#define EEPROM_STATUS_BP0_SPI 0x04 +#define EEPROM_STATUS_BP1_SPI 0x08 +#define EEPROM_STATUS_WPEN_SPI 0x80 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 + +#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000 +#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000 +#define INTEL_CE_GBE_MDIC_GO 0x80000000 +#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000 + +#define E1000_KUMCTRLSTA_MASK 0x0000FFFF +#define E1000_KUMCTRLSTA_OFFSET 0x001F0000 +#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KUMCTRLSTA_REN 0x00200000 + +#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 +#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001 +#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 +#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003 +#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 
0x00000004 +#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 +#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 +#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E +#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F + +/* FIFO Control */ +#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008 +#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 + +/* In-Band Control */ +#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500 +#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 + +/* Half-Duplex Control */ +#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 +#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 + +#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E + +#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000 +#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000 + +#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000 +#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000 +#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 +#define E1000_PHY_CTRL_B2B_EN 0x00000080 + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Receive Address */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Clear */ +#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMC_SRPD E1000_ICR_SRPD +#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMC_DSW E1000_ICR_DSW +#define E1000_IMC_PHYINT E1000_ICR_PHYINT +#define E1000_IMC_EPRST E1000_ICR_EPRST + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define 
E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) & + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SW_W_SYNC definitions */ +#define E1000_SWFW_EEP_SM 0x0001 +#define E1000_SWFW_PHY0_SM 0x0002 +#define E1000_SWFW_PHY1_SM 0x0004 +#define E1000_SWFW_MAC_CSR_SM 0x0008 + +/* Receive Descriptor */ +#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ +#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ +#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */ +#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */ +#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */ + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Receive Descriptor Control */ +#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ +#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ +#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */ +#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 
0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. + still to be processed. */ +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ +/* Extended Transmit Control */ +#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ + +/* Receive Checksum Control */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Multiple Receive Queue Control */ +#define E1000_MRQC_ENABLE_MASK 0x00000003 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 
/* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */ +#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */ +#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */ +#define E1000_WUS_BC 0x00000010 /* Broadcast Received */ +#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */ +#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */ +#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */ +#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */ +#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */ +#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery + * Filtering */ +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host + * memory */ +#define 
E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address + * filtering */ +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* FW Semaphore Register */ +#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ + +#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ +#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ +#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ +#define E1000_FWSM_SKUEL_SHIFT 29 +#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ +#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ +#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ +#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */ + +/* FFLT Debug Register */ +#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ + +typedef enum { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_interface_only +} e1000_mng_mode; + +/* Host Interface Control Register */ +#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ +#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done + * to put command in RAM */ +#define E1000_HICR_SV 0x00000004 /* Status Validity */ +#define E1000_HICR_FWR 0x00000080 /* FW reset. 
Set by the Host */ + +/* Host Interface Command Interface - Address range 0x8800-0x8EFF */ +#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ + +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; /* I/F bits for command, status for return */ + u8 checksum; +}; +struct e1000_host_command_info { + struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ +}; + +/* Host SMB register #0 */ +#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ +#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ +#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ +#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ + +/* Host SMB register #1 */ +#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN +#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN +#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT +#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT + +/* FW Status Register */ +#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ + +/* Wake Up Packet Length */ +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ + +#define E1000_MDALIGN 4096 + +/* PCI-Ex registers*/ + +/* PCI-Ex Control Register */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +/* Function Active and Power State to MNG */ +#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 +#define E1000_FACTPS_LAN0_VALID 0x00000004 +#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008 +#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0 +#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6 +#define E1000_FACTPS_LAN1_VALID 0x00000100 +#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200 +#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000 +#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12 +#define E1000_FACTPS_IDE_ENABLE 0x00004000 +#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000 +#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000 +#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18 +#define E1000_FACTPS_SP_ENABLE 0x00100000 +#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000 +#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000 +#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24 +#define E1000_FACTPS_IPMI_ENABLE 0x04000000 +#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000 +#define E1000_FACTPS_MNGCG 0x20000000 +#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000 +#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000 + +/* PCI-Ex Config Space */ +#define PCI_EX_LINK_STATUS 0x12 +#define PCI_EX_LINK_WIDTH_MASK 0x3F0 +#define PCI_EX_LINK_WIDTH_SHIFT 4 + +/* EEPROM Commands - Microwire */ +#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode 
*/ +#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM Commands - SPI */ +#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ +#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ +#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ +#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ +#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Size definitions */ +#define EEPROM_WORD_SIZE_SHIFT 6 +#define EEPROM_SIZE_SHIFT 10 +#define EEPROM_SIZE_MASK 0x1C00 + +/* EEPROM Word Offsets */ +#define EEPROM_COMPAT 0x0003 +#define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 +#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ +#define EEPROM_PHY_CLASS_WORD 0x0007 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 +#define EEPROM_INIT_3GIO_3 0x001A +#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 +#define EEPROM_CFG 0x0012 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ +#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */ +#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F + +/* Mask bit for PHY class in Word 7 of the EEPROM */ +#define EEPROM_PHY_CLASS_A 0x8000 + +/* Mask bits for fields in Word 0x0a of the EEPROM */ +#define EEPROM_WORD0A_ILOS 0x0010 +#define EEPROM_WORD0A_SWDPIO 0x01E0 +#define EEPROM_WORD0A_LRST 0x0200 +#define EEPROM_WORD0A_FD 0x0400 +#define EEPROM_WORD0A_66MHZ 0x0800 + +/* Mask bits for fields in Word 0x0f of the EEPROM */ +#define EEPROM_WORD0F_PAUSE_MASK 0x3000 +#define EEPROM_WORD0F_PAUSE 0x1000 +#define EEPROM_WORD0F_ASM_DIR 0x2000 +#define EEPROM_WORD0F_ANE 0x0800 +#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 +#define EEPROM_WORD0F_LPLU 0x0001 + +/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */ +#define EEPROM_WORD1020_GIGA_DISABLE 0x0010 +#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008 + +/* Mask bits for fields in Word 0x1a of the EEPROM */ +#define EEPROM_WORD1A_ASPM_MASK 0x000C + 
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* EEPROM Map defines (WORD OFFSETS)*/ +#define EEPROM_NODE_ADDRESS_BYTE_0 0 +#define EEPROM_PBA_BYTE_1 8 + +#define EEPROM_RESERVED_WORD 0xFFFF + +/* EEPROM Map Sizes (Byte Counts) */ +#define PBA_SIZE 4 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +/* Collision distance is a 0-based value that applies to + * half-duplex-capable hardware only. */ +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLLISION_DISTANCE_82542 64 +#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_COLD_SHIFT 12 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define E1000_TXDMAC_DPP 0x00000001 + +/* Adaptive IFS defines */ +#define TX_THRESHOLD_START 8 +#define TX_THRESHOLD_INCREMENT 10 +#define TX_THRESHOLD_DECREMENT 1 +#define TX_THRESHOLD_STOP 190 +#define TX_THRESHOLD_DISABLE 0 +#define TX_THRESHOLD_TIMER_MS 10000 +#define MIN_NUM_XMITS 1000 +#define IFS_MAX 80 +#define IFS_STEP 10 +#define IFS_MIN 40 +#define IFS_RATIO 4 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 +#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 +#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 +#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000 + +#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF +#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ +#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ +#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ + +#define E1000_PBS_16K E1000_PBA_16K + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* The historical defaults for the flow control values are given below. 
*/ +#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ +#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ +#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ + +/* PCIX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 + +/* Number of bits required to shift right the "pause" bits from the + * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register. + */ +#define PAUSE_SHIFT 5 + +/* Number of bits required to shift left the "SWDPIO" bits from the + * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register. + */ +#define SWDPIO_SHIFT 17 + +/* Number of bits required to shift left the "SWDPIO_EXT" bits from the + * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register. + */ +#define SWDPIO__EXT_SHIFT 4 + +/* Number of bits required to shift left the "ILOS" bit from the EEPROM + * (bit 4) to the "ILOS" (bit 7) field in the CTRL register. + */ +#define ILOS_SHIFT 3 + +#define RECEIVE_BUFFER_ALIGN_SIZE (256) + +/* Number of milliseconds we wait for auto-negotiation to complete */ +#define LINK_UP_TIMEOUT 500 + +/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ +#define AUTO_READ_DONE_TIMEOUT 10 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 + +#define E1000_TX_BUFFER_SIZE ((u32)1514) + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +/* TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the RX descriptor with EOP set + * error = the 8 bit error field of the RX descriptor with EOP set + * length = the sum of all the length fields of the RX descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * max_frame_length = the maximum frame length we want to accept. + * min_frame_length = the minimum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \ + ((adapter)->tbi_compatibility_on && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= ((adapter)->max_frame_size + 1))) : \ + (((length) > (adapter)->min_frame_size) && \ + ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1))))) + +/* Structures, enums, and macros for the PHY */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CTRL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +#define IGP01E1000_IEEE_REGS_PAGE 0x0000 +#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 +#define IGP01E1000_IEEE_FORCE_GIGA 0x0140 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ + +/* IGP01E1000 AGC Registers - stores the cable length values*/ +#define IGP01E1000_PHY_AGC_A 0x1172 +#define IGP01E1000_PHY_AGC_B 0x1272 +#define IGP01E1000_PHY_AGC_C 0x1472 +#define IGP01E1000_PHY_AGC_D 0x1872 + +/* IGP02E1000 AGC Registers for cable length values */ +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +/* IGP01E1000 DSP Reset Register */ +#define IGP01E1000_PHY_DSP_RESET 0x1F33 +#define IGP01E1000_PHY_DSP_SET 0x1F71 +#define IGP01E1000_PHY_DSP_FFE 0x1F35 + +#define IGP01E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_CHANNEL_NUM 4 + +#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271 +#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 +#define IGP01E1000_PHY_AGC_PARAM_D 0x1871 + +#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000 +#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000 + +#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890 +#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000 +#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004 +#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069 + +#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A +/* IGP01E1000 PCS Initialization register - stores the polarity status when + * speed = 1000 Mbps. */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5 + +#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */ + +/* Next Page TX Register */ +#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* Link Partner Next Page Register */ +#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */ +#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 
/* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +/* Extended Status Register */ +#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */ +#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */ + +#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */ + /* (0=enable, 1=disable) */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. 
+ */ +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080 + /* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 + /* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5 +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; + * 3=110-140M;4=>140M */ +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_REV_POLARITY_SHIFT 1 +#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5 +#define M88E1000_PSSR_MDIX_SHIFT 6 +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* IGP01E1000 Specific Port Config Register - R/W */ +#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 +#define IGP01E1000_PSCFR_PRE_EN 0x0020 +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 +#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100 +#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400 +#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000 + +/* IGP01E1000 Specific Port Status Register - R/O */ +#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */ +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C +#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200 +#define IGP01E1000_PSSR_LINK_UP 0x0400 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */ +#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000 +#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */ +#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */ + +/* IGP01E1000 Specific Port Control Register - R/W */ +#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010 +#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200 +#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400 +#define IGP01E1000_PSCR_FLIP_CHIP 0x0800 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */ + +/* IGP01E1000 Specific Port Link Health Register */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000 +#define IGP01E1000_PLHR_MASTER_FAULT 0x2000 +#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000 +#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */ +#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_0 0x0100 +#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040 +#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010 +#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008 +#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004 +#define 
IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002 +#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001 + +/* IGP01E1000 Channel Quality Register */ +#define IGP01E1000_MSE_CHANNEL_D 0x000F +#define IGP01E1000_MSE_CHANNEL_C 0x00F0 +#define IGP01E1000_MSE_CHANNEL_B 0x0F00 +#define IGP01E1000_MSE_CHANNEL_A 0xF000 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ + +/* IGP01E1000 DSP reset macros */ +#define DSP_RESET_ENABLE 0x0 +#define DSP_RESET_DISABLE 0x2 +#define E1000_MAX_DSP_RESETS 10 + +/* IGP01E1000 & IGP02E1000 AGC Registers */ + +#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ + +/* IGP02E1000 AGC Register Length 9-bit mask */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F + +/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ +#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 +#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113 + +/* The precision error of the cable length is +/- 10 meters */ +#define IGP01E1000_AGC_RANGE 10 +#define IGP02E1000_AGC_RANGE 15 + +/* IGP01E1000 PCS Initialization register */ +/* bits 3:6 in the PCS registers stores the channels polarity */ +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +/* IGP01E1000 GMII FIFO Register */ +#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed + * on Link-Up */ +#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */ + +/* IGP01E1000 Analog Register */ +#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 +#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 +#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC +#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE + +#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 +#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 +#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 +#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 +#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 + +#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 +#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 +#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 +#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 + +/* Bit definitions for valid PHY IDs. */ +/* I = Integrated + * E = External + */ +#define M88_VENDOR 0x0141 +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID +#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1118_E_PHY_ID 0x01410E40 +#define L1LXT971A_PHY_ID 0x001378E0 + +#define RTL8211B_PHY_ID 0x001CC910 +#define RTL8201N_PHY_ID 0x8200 +#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */ +#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */ + +/* Bits... 
+ * 15-5: page + * 4-0: register offset + */ +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) \ + (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) + +#define IGP3_PHY_PORT_CTRL \ + PHY_REG(769, 17) /* Port General Configuration */ +#define IGP3_PHY_RATE_ADAPT_CTRL \ + PHY_REG(769, 25) /* Rate Adapter Control Register */ + +#define IGP3_KMRN_FIFO_CTRL_STATS \ + PHY_REG(770, 16) /* KMRN FIFO's control/status register */ +#define IGP3_KMRN_POWER_MNG_CTRL \ + PHY_REG(770, 17) /* KMRN Power Management Control Register */ +#define IGP3_KMRN_INBAND_CTRL \ + PHY_REG(770, 18) /* KMRN Inband Control Register */ +#define IGP3_KMRN_DIAG \ + PHY_REG(770, 19) /* KMRN Diagnostic register */ +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ +#define IGP3_KMRN_ACK_TIMEOUT \ + PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ + +#define IGP3_VR_CTRL \ + PHY_REG(776, 18) /* Voltage regulator control register */ +#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ +#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */ + +#define IGP3_CAPABILITY \ + PHY_REG(776, 19) /* IGP3 Capability Register */ + +/* Capabilities for SKU Control */ +#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ +#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ +#define IGP3_CAP_ASF 0x0004 /* Support ASF */ +#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ +#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ +#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ +#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ +#define IGP3_CAP_RSS 0x0080 /* Support RSS */ +#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ +#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ + +#define IGP3_PPC_JORDAN_EN 0x0001 +#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 + +#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001 +#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E +#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 +#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 + +#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. 
Ctrl register */ +#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ + +#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) +#define IGP3_KMRN_EC_DIS_INBAND 0x0080 + +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ +#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ +#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */ +#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ +#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ +#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ +#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ +#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ +#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ +#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ + +#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */ +#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ +#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ +#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ +#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ +#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ +#define IFE_PESC_POLARITY_REVERSED_SHIFT 8 + +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */ +#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ +#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ +#define IFE_PSC_FORCE_POLARITY_SHIFT 5 +#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 + +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ +#define IFE_PMC_MDIX_MODE_SHIFT 6 +#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ + +#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ +#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ +#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ +#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ +#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ +#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ +#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ +#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ +#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ +#define IFE_PSCL_PROBE_LEDS_OFF 
0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ +#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define ICH_CYCLE_READ 0x0 +#define ICH_CYCLE_RESERVED 0x1 +#define ICH_CYCLE_WRITE 0x2 +#define ICH_CYCLE_ERASE 0x3 + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_FRACC 0x0050 +#define ICH_FLASH_FREG0 0x0054 +#define ICH_FLASH_FREG1 0x0058 +#define ICH_FLASH_FREG2 0x005C +#define ICH_FLASH_FREG3 0x0060 +#define ICH_FLASH_FPR0 0x0074 +#define ICH_FLASH_FPR1 0x0078 +#define ICH_FLASH_SSFSTS 0x0090 +#define ICH_FLASH_SSFCTL 0x0092 +#define ICH_FLASH_PREOP 0x0094 +#define ICH_FLASH_OPTYPE 0x0096 +#define ICH_FLASH_OPMENU 0x0098 + +#define ICH_FLASH_REG_MAPSIZE 0x00A0 +#define ICH_FLASH_SECTOR_SIZE 4096 +#define ICH_GFPREG_BASE_MASK 0x1FFF +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF + +/* Miscellaneous PHY bit definitions. */ +#define PHY_PREAMBLE 0xFFFFFFFF +#define PHY_SOF 0x01 +#define PHY_OP_READ 0x02 +#define PHY_OP_WRITE 0x01 +#define PHY_TURNAROUND 0x02 +#define PHY_PREAMBLE_SIZE 32 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 +#define E1000_PHY_ADDRESS 0x01 +#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */ +#define REG4_SPEED_MASK 0x01E0 +#define REG9_SPEED_MASK 0x0300 +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 +#define ADVERTISE_1000_FULL 0x0020 +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ +#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ +#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ + +#endif /* _E1000_HW_H_ */ diff --git a/devices/e1000/e1000_hw-5.10-orig.c b/devices/e1000/e1000_hw-5.10-orig.c new file mode 100644 index 00000000..4c0c9433 --- /dev/null +++ b/devices/e1000/e1000_hw-5.10-orig.c @@ -0,0 +1,5635 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ + +/* e1000_hw.c + * Shared functions for accessing and configuring the MAC + */ + +#include "e1000.h" + +static s32 e1000_check_downshift(struct e1000_hw *hw); +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity); +static void e1000_clear_hw_cntrs(struct e1000_hw *hw); +static void e1000_clear_vfta(struct e1000_hw *hw); +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, + bool link_up); +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); +static s32 e1000_detect_gig_phy(struct e1000_hw *hw); +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length); +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static s32 e1000_id_led_init(struct e1000_hw *hw); +static void e1000_init_rx_addrs(struct e1000_hw *hw); +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value); +static s32 e1000_set_phy_type(struct e1000_hw *hw); +static void e1000_phy_init_script(struct e1000_hw *hw); +static s32 e1000_setup_copper_link(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw); +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw); +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count); +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count); +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data); +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data); +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); +static s32 e1000_acquire_eeprom(struct e1000_hw *hw); +static void e1000_release_eeprom(struct e1000_hw *hw); +static void e1000_standby_eeprom(struct e1000_hw *hw); +static s32 e1000_set_vco_speed(struct e1000_hw *hw); +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw); +static s32 e1000_set_phy_mode(struct e1000_hw *hw); +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); + +/* IGP cable length table */ +static const +u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = { + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, + 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, + 40, 50, 50, 50, 50, 
50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, + 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, + 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, + 110, 110, + 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, + 120, 120 +}; + +static DEFINE_MUTEX(e1000_eeprom_lock); +static DEFINE_SPINLOCK(e1000_phy_lock); + +/** + * e1000_set_phy_type - Set the phy type member in the hw struct. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_set_phy_type(struct e1000_hw *hw) +{ + if (hw->mac_type == e1000_undefined) + return -E1000_ERR_PHY_TYPE; + + switch (hw->phy_id) { + case M88E1000_E_PHY_ID: + case M88E1000_I_PHY_ID: + case M88E1011_I_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1118_E_PHY_ID: + hw->phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: + if (hw->mac_type == e1000_82541 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_type = e1000_phy_igp; + break; + case RTL8211B_PHY_ID: + hw->phy_type = e1000_phy_8211; + break; + case RTL8201N_PHY_ID: + hw->phy_type = e1000_phy_8201; + break; + default: + /* Should never have loaded on this device */ + hw->phy_type = e1000_phy_undefined; + return -E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_phy_init_script(struct e1000_hw *hw) +{ + u16 phy_saved_data; + + if (hw->phy_init_script) { + msleep(20); + + /* Save off the current value of register 0x2F5B to be restored + * at the end of this routine. 
+ */ + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disabled the PHY transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + msleep(20); + + e1000_write_phy_reg(hw, 0x0000, 0x0140); + msleep(5); + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + e1000_write_phy_reg(hw, 0x1F95, 0x0001); + e1000_write_phy_reg(hw, 0x1F71, 0xBD21); + e1000_write_phy_reg(hw, 0x1F79, 0x0018); + e1000_write_phy_reg(hw, 0x1F30, 0x1600); + e1000_write_phy_reg(hw, 0x1F31, 0x0014); + e1000_write_phy_reg(hw, 0x1F32, 0x161C); + e1000_write_phy_reg(hw, 0x1F94, 0x0003); + e1000_write_phy_reg(hw, 0x1F96, 0x003F); + e1000_write_phy_reg(hw, 0x2010, 0x0008); + break; + + case e1000_82541_rev_2: + case e1000_82547_rev_2: + e1000_write_phy_reg(hw, 0x1F73, 0x0099); + break; + default: + break; + } + + e1000_write_phy_reg(hw, 0x0000, 0x3300); + msleep(20); + + /* Now enable the transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (hw->mac_type == e1000_82547) { + u16 fused, fine, coarse; + + /* Move to analog registers page */ + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_SPARE_FUSE_STATUS, + &fused); + + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_STATUS, + &fused); + + fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; + coarse = + fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; + + if (coarse > + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + coarse -= + IGP01E1000_ANALOG_FUSE_COARSE_10; + fine -= IGP01E1000_ANALOG_FUSE_FINE_1; + } else if (coarse == + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + fine -= IGP01E1000_ANALOG_FUSE_FINE_10; + + fused = + (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | + (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | + (coarse & + IGP01E1000_ANALOG_FUSE_COARSE_MASK); + + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_CONTROL, + fused); + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_BYPASS, + IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); + } + } + } +} + +/** + * e1000_set_mac_type - Set the mac type member in the hw struct. 
+ * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + switch (hw->device_id) { + case E1000_DEV_ID_82542: + switch (hw->revision_id) { + case E1000_82542_2_0_REV_ID: + hw->mac_type = e1000_82542_rev2_0; + break; + case E1000_82542_2_1_REV_ID: + hw->mac_type = e1000_82542_rev2_1; + break; + default: + /* Invalid 82542 revision ID */ + return -E1000_ERR_MAC_TYPE; + } + break; + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + hw->mac_type = e1000_82543; + break; + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + hw->mac_type = e1000_82544; + break; + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + hw->mac_type = e1000_82540; + break; + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + hw->mac_type = e1000_82545; + break; + case E1000_DEV_ID_82545GM_COPPER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82545GM_SERDES: + hw->mac_type = e1000_82545_rev_3; + break; + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + hw->mac_type = e1000_82546; + break; + case E1000_DEV_ID_82546GB_COPPER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + hw->mac_type = e1000_82546_rev_3; + break; + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER_LOM: + hw->mac_type = e1000_82541; + break; + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + hw->mac_type = e1000_82541_rev_2; + break; + case E1000_DEV_ID_82547EI: + case E1000_DEV_ID_82547EI_MOBILE: + hw->mac_type = e1000_82547; + break; + case E1000_DEV_ID_82547GI: + hw->mac_type = e1000_82547_rev_2; + break; + case E1000_DEV_ID_INTEL_CE4100_GBE: + hw->mac_type = e1000_ce4100; + break; + default: + /* Should never have loaded on this device */ + return -E1000_ERR_MAC_TYPE; + } + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->asf_firmware_present = true; + break; + default: + break; + } + + /* The 82543 chip does not count tx_carrier_errors properly in + * FD mode + */ + if (hw->mac_type == e1000_82543) + hw->bad_tx_carr_stats_fd = true; + + if (hw->mac_type > e1000_82544) + hw->has_smbus = true; + + return E1000_SUCCESS; +} + +/** + * e1000_set_media_type - Set media type and TBI compatibility. 
+ * @hw: Struct containing variables accessed by shared code + */ +void e1000_set_media_type(struct e1000_hw *hw) +{ + u32 status; + + if (hw->mac_type != e1000_82543) { + /* tbi_compatibility is only valid on 82543 */ + hw->tbi_compatibility_en = false; + } + + switch (hw->device_id) { + case E1000_DEV_ID_82545GM_SERDES: + case E1000_DEV_ID_82546GB_SERDES: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->media_type = e1000_media_type_fiber; + break; + case e1000_ce4100: + hw->media_type = e1000_media_type_copper; + break; + default: + status = er32(STATUS); + if (status & E1000_STATUS_TBIMODE) { + hw->media_type = e1000_media_type_fiber; + /* tbi_compatibility not valid on fiber */ + hw->tbi_compatibility_en = false; + } else { + hw->media_type = e1000_media_type_copper; + } + break; + } + } +} + +/** + * e1000_reset_hw - reset the hardware completely + * @hw: Struct containing variables accessed by shared code + * + * Reset the transmit and receive units; mask and clear all interrupts. + */ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 ctrl_ext; + u32 manc; + u32 led_ctrl; + s32 ret_val; + + /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC with + * the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(); + + /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ + hw->tbi_compatibility_on = false; + + /* Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msleep(10); + + ctrl = er32(CTRL); + + /* Must reset the PHY before resetting the MAC */ + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Issue a global reset to the MAC. This will reset the chip's + * transmit, receive, DMA, and link units. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + e_dbg("Issuing a global reset to MAC\n"); + + switch (hw->mac_type) { + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82546: + case e1000_82541: + case e1000_82541_rev_2: + /* These controllers can't ack the 64-bit write when issuing the + * reset, so use IO-mapping as a workaround to issue the reset + */ + E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); + break; + case e1000_82545_rev_3: + case e1000_82546_rev_3: + /* Reset is performed on a shadow of the control register */ + ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); + break; + case e1000_ce4100: + default: + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + break; + } + + /* After MAC reset, force reload of EEPROM to restore power-on settings + * to device. Later controllers reload the EEPROM automatically, so + * just wait for reload to complete. 
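+	 * The switch below implements this: the oldest parts trigger the
+	 * reload explicitly via the EE_RST bit in CTRL_EXT, the 82541/82547
+	 * families only need a fixed delay, and later controllers are
+	 * polled with e1000_get_auto_rd_done().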
+ */ + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* Wait for reset to complete */ + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + /* Wait for EEPROM reload */ + msleep(2); + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + /* Wait for EEPROM reload */ + msleep(20); + break; + default: + /* Auto read done will delay 5ms or poll based on mac type */ + ret_val = e1000_get_auto_rd_done(hw); + if (ret_val) + return ret_val; + break; + } + + /* Disable HW ARPs on ASF enabled adapters */ + if (hw->mac_type >= e1000_82540) { + manc = er32(MANC); + manc &= ~(E1000_MANC_ARP_EN); + ew32(MANC, manc); + } + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + e1000_phy_init_script(hw); + + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Clear any pending interrupt events. */ + er32(ICR); + + /* If MWI was previously enabled, reenable it. */ + if (hw->mac_type == e1000_82542_rev2_0) { + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw - Performs basic configuration of the adapter. + * @hw: Struct containing variables accessed by shared code + * + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes the receive address registers, + * multicast table, and VLAN filter table. Calls routines to setup link + * configuration and flow control settings. Clears all on-chip counters. Leaves + * the transmit and receive units disabled and uninitialized. + */ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + s32 ret_val; + u32 mta_size; + u32 ctrl_ext; + + /* Initialize Identification LED */ + ret_val = e1000_id_led_init(hw); + if (ret_val) { + e_dbg("Error Initializing Identification LED\n"); + return ret_val; + } + + /* Set the media type and TBI compatibility */ + e1000_set_media_type(hw); + + /* Disabling VLAN filtering. */ + e_dbg("Initializing the IEEE VLAN\n"); + if (hw->mac_type < e1000_82545_rev_3) + ew32(VET, 0); + e1000_clear_vfta(hw); + + /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + ew32(RCTL, E1000_RCTL_RST); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Setup the receive address. This involves initializing all of the + * Receive Address Registers (RARs 0 - 15). 
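+	 * e1000_init_rx_addrs() loads RAR 0 with the station MAC address and
+	 * clears the remaining receive address registers.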
+ */ + e1000_init_rx_addrs(hw); + + /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ + if (hw->mac_type == e1000_82542_rev2_0) { + ew32(RCTL, 0); + E1000_WRITE_FLUSH(); + msleep(1); + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + mta_size = E1000_MC_TBL_SIZE; + for (i = 0; i < mta_size; i++) { + E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); + /* use write flush to prevent Memory Write Block (MWB) from + * occurring when accessing our register space + */ + E1000_WRITE_FLUSH(); + } + + /* Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. Valid only on + * 82542 and 82543 silicon. + */ + if (hw->dma_fairness && hw->mac_type <= e1000_82543) { + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PRIOR); + } + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + /* Workaround for PCI-X problem when BIOS sets MMRBC + * incorrectly. + */ + if (hw->bus_type == e1000_bus_type_pcix && + e1000_pcix_get_mmrbc(hw) > 2048) + e1000_pcix_set_mmrbc(hw, 2048); + break; + } + + /* Call a subroutine to configure the link and setup flow control. */ + ret_val = e1000_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + if (hw->mac_type > e1000_82544) { + ctrl = er32(TXDCTL); + ctrl = + (ctrl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + ew32(TXDCTL, ctrl); + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs(hw); + + if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || + hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { + ctrl_ext = er32(CTRL_EXT); + /* Relaxed ordering must be disabled to avoid a parity + * error crash in a PCI slot. + */ + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + } + + return ret_val; +} + +/** + * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting. + * @hw: Struct containing variables accessed by shared code. + */ +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) +{ + u16 eeprom_data; + s32 ret_val; + + if (hw->media_type != e1000_media_type_internal_serdes) + return E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, + &eeprom_data); + if (ret_val) + return ret_val; + + if (eeprom_data != EEPROM_RESERVED_WORD) { + /* Adjust SERDES output amplitude only. */ + eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link - Configures flow control and link settings. + * @hw: Struct containing variables accessed by shared code + * + * Determines which flow control settings to use. Calls the appropriate media- + * specific link configuration function. Configures the flow control settings. + * Assuming the adapter has a valid link partner, a valid link should be + * established. Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. 
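+ * The hw->fc field uses the E1000_FC_* encoding (NONE, RX_PAUSE, TX_PAUSE,
+ * FULL); E1000_FC_DEFAULT means the initial mode is resolved from EEPROM
+ * word 0x0F below.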
+ */ +s32 e1000_setup_link(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 ret_val; + u16 eeprom_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->fc == E1000_FC_DEFAULT) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0) + hw->fc = E1000_FC_NONE; + else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == + EEPROM_WORD0F_ASM_DIR) + hw->fc = E1000_FC_TX_PAUSE; + else + hw->fc = E1000_FC_FULL; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + if (hw->mac_type == e1000_82542_rev2_0) + hw->fc &= (~E1000_FC_TX_PAUSE); + + if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1)) + hw->fc &= (~E1000_FC_RX_PAUSE); + + hw->original_fc = hw->fc; + + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc); + + /* Take the 4 bits from EEPROM word 0x0F that determine the initial + * polarity value for the SW controlled pins, and setup the + * Extended Device Control reg with that info. + * This is needed because one of the SW controlled pins is used for + * signal detection. So this should be done before e1000_setup_pcs_link() + * or e1000_phy_setup() is called. + */ + if (hw->mac_type == e1000_82543) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << + SWDPIO__EXT_SHIFT); + ew32(CTRL_EXT, ctrl_ext); + } + + /* Call the necessary subroutine to configure the link. */ + ret_val = (hw->media_type == e1000_media_type_copper) ? + e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw); + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + e_dbg("Initializing the Flow Control address, type and timer regs\n"); + + ew32(FCT, FLOW_CONTROL_TYPE); + ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); + ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); + + ew32(FCTTV, hw->fc_pause_time); + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames in not enabled, then these + * registers will be set to 0. + */ + if (!(hw->fc & E1000_FC_TX_PAUSE)) { + ew32(FCRTL, 0); + ew32(FCRTH, 0); + } else { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. 
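+		 * Setting the XONE bit in FCRTL additionally lets the hardware
+		 * send an XON frame once the receive buffer drains back below
+		 * the low-water mark.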
+ */ + if (hw->fc_send_xon) { + ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); + ew32(FCRTH, hw->fc_high_water); + } else { + ew32(FCRTL, hw->fc_low_water); + ew32(FCRTH, hw->fc_high_water); + } + } + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link - prepare fiber or serdes link + * @hw: Struct containing variables accessed by shared code + * + * Manipulates Physical Coding Sublayer functions in order to configure + * link. Assumes the hardware has been previously reset and the transmitter + * and receiver are not enabled. + */ +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) +{ + u32 ctrl; + u32 status; + u32 txcw = 0; + u32 i; + u32 signal = 0; + s32 ret_val; + + /* On adapters with a MAC newer than 82544, SWDP 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + * If we're on serdes media, adjust the output amplitude to value + * set in the EEPROM. + */ + ctrl = er32(CTRL); + if (hw->media_type == e1000_media_type_fiber) + signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; + + ret_val = e1000_adjust_serdes_amplitude(hw); + if (ret_val) + return ret_val; + + /* Take the link out of reset */ + ctrl &= ~(E1000_CTRL_LRST); + + /* Adjust VCO speed to improve BER performance */ + ret_val = e1000_set_vco_speed(hw); + if (ret_val) + return ret_val; + + e1000_config_collision_dist(hw); + + /* Check for a software override of the flow control settings, and setup + * the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Tranmsit Config Word Register (TXCW) and re-start + * auto-negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, but + * not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we do + * not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + */ + switch (hw->fc) { + case E1000_FC_NONE: + /* Flow ctrl is completely disabled by a software over-ride */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case E1000_FC_RX_PAUSE: + /* Rx Flow control is enabled and Tx Flow control is disabled by + * a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case E1000_FC_TX_PAUSE: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case E1000_FC_FULL: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. 
If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + e_dbg("Auto-negotiation enabled\n"); + + ew32(TXCW, txcw); + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + hw->txcw = txcw; + msleep(1); + + /* If we have a signal (the cable is plugged in) then poll for a + * "Link-Up" indication in the Device Status Register. Time-out if a + * link isn't seen in 500 milliseconds seconds (Auto-negotiation should + * complete in less than 500 milliseconds even if the other end is doing + * it in SW). For internal serdes, we just assume a signal is present, + * then poll. + */ + if (hw->media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { + e_dbg("Looking for Link\n"); + for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { + msleep(10); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == (LINK_UP_TIMEOUT / 10)) { + e_dbg("Never got a valid link from auto-neg!!!\n"); + hw->autoneg_failed = 1; + /* AutoNeg failed to achieve a link, so we'll call + * e1000_check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = e1000_check_for_link(hw); + if (ret_val) { + e_dbg("Error while checking for link\n"); + return ret_val; + } + hw->autoneg_failed = 0; + } else { + hw->autoneg_failed = 0; + e_dbg("Valid Link Found\n"); + } + } else { + e_dbg("No Signal Detected\n"); + } + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series. + * @hw: Struct containing variables accessed by shared code + * + * Commits changes to PHY configuration by calling e1000_phy_reset(). + */ +static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw) +{ + s32 ret_val; + + /* SW reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +static s32 gbe_dhg_phy_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u32 ctrl_aux; + + switch (hw->phy_type) { + case e1000_phy_8211: + ret_val = e1000_copper_link_rtl_setup(hw); + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + case e1000_phy_8201: + /* Set RMII mode */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= E1000_CTL_AUX_RMII; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + + /* Disable the J/K bits required for receive */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= 0x4; + ctrl_aux &= ~0x2; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + ret_val = e1000_copper_link_rtl_setup(hw); + + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + default: + e_dbg("Error Resetting the PHY\n"); + return E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_preconfig - early configuration for copper + * @hw: Struct containing variables accessed by shared code + * + * Make sure we have a valid PHY and change PHY mode before link setup. + */ +static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + ctrl = er32(CTRL); + /* With 82543, we need to force speed and duplex on the MAC equal to + * what the PHY speed and duplex configuration is. In addition, we need + * to perform a hardware reset on the PHY to take it out of reset. 
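+	 * On newer MACs the code below instead sets SLU (set link up) and
+	 * clears the force-speed/force-duplex bits so the MAC follows the
+	 * PHY configuration automatically.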
+ */ + if (hw->mac_type > e1000_82543) { + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + } else { + ctrl |= + (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); + ew32(CTRL, ctrl); + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + } + + /* Make sure we have a valid PHY */ + ret_val = e1000_detect_gig_phy(hw); + if (ret_val) { + e_dbg("Error, did not detect valid phy.\n"); + return ret_val; + } + e_dbg("Phy ID = %x\n", hw->phy_id); + + /* Set PHY to class A mode (if necessary) */ + ret_val = e1000_set_phy_mode(hw); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82545_rev_3) || + (hw->mac_type == e1000_82546_rev_3)) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + phy_data |= 0x00000008; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + } + + if (hw->mac_type <= e1000_82543 || + hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_reset_disable = false; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) +{ + u32 led_ctrl; + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Wait 15ms for MAC to configure PHY from eeprom settings */ + msleep(15); + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + + /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ + if (hw->phy_type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = e1000_set_d3_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D3\n"); + return ret_val; + } + } + + /* Configure mdi-mdix settings */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + hw->dsp_config_state = e1000_dsp_config_disabled; + /* Force MDI for earlier revs of the IGP PHY */ + phy_data &= + ~(IGP01E1000_PSCR_AUTO_MDIX | + IGP01E1000_PSCR_FORCE_MDI_MDIX); + hw->mdix = 1; + + } else { + hw->dsp_config_state = e1000_dsp_config_enabled; + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (hw->mdix) { + case 1: + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + phy_data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + } + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->autoneg) { + e1000_ms_type phy_ms_setting = hw->master_slave; + + if (hw->ffe_config_state == e1000_ffe_config_active) + hw->ffe_config_state = e1000_ffe_config_enabled; + + if (hw->dsp_config_state == e1000_dsp_config_activated) + hw->dsp_config_state = e1000_dsp_config_enabled; + + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
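+		 * SmartSpeed would otherwise let the PHY drop to a lower speed
+		 * after repeated failed gigabit link attempts, which is not
+		 * wanted when only 1000 Mb/s full duplex is advertised.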
+ */ + if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + /* Set auto Master/Slave resolution process */ + ret_val = + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~CR_1000T_MS_ENABLE; + ret_val = + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (phy_ms_setting) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + default: + break; + } + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (hw->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (hw->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (hw->phy_revision < M88E1011_I_REV_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. 
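+		 * The PHY otherwise defaults to a 2.5MHz TX_CLK; the same
+		 * fix-up is repeated in e1000_phy_force_speed_duplex() after
+		 * the PHY is reset there.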
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((hw->phy_revision == E1000_REVISION_2) && + (hw->phy_id == M88E1111_I_PHY_ID)) { + /* Vidalia Phy, set the downshift counter to 5x */ + phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + } + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_autoneg - setup auto-neg + * @hw: Struct containing variables accessed by shared code + * + * Setup auto-negotiation and flow control advertisements, + * and then perform auto-negotiation. + */ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Perform some bounds checking on the hw->autoneg_advertised + * parameter. If this variable is zero, then set it to the default. + */ + hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (hw->autoneg_advertised == 0) + hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* IFE/RTL8201N PHY only supports 10/100 */ + if (hw->phy_type == e1000_phy_8201) + hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; + + e_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + e_dbg("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + e_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (hw->wait_autoneg_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + e_dbg + ("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->get_link_status = true; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_postconfig - post link setup + * @hw: Struct containing variables accessed by shared code + * + * Config the MAC and the PHY after link is up. + * 1) Set up the MAC to the current PHY speed/duplex + * if we are on 82543. If we + * are on newer silicon, we only need to configure + * collision distance in the Transmit Control Register. + * 2) Set up flow control on the MAC to that established with + * the link partner. + * 3) Config DSP to improve Gigabit link quality for some PHY revisions. 
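+ * These steps correspond to e1000_config_mac_to_phy() (or
+ * e1000_config_collision_dist() on newer MACs), e1000_config_fc_after_link_up()
+ * and e1000_config_dsp_after_link_change() below.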
+ */ +static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) +{ + s32 ret_val; + + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { + e1000_config_collision_dist(hw); + } else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error Configuring Flow Control\n"); + return ret_val; + } + + /* Config DSP to improve Giga link quality */ + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_config_dsp_after_link_change(hw, true); + if (ret_val) { + e_dbg("Error Configuring DSP after link up\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_copper_link - phy/speed/duplex setting + * @hw: Struct containing variables accessed by shared code + * + * Detects which PHY is present and sets up the speed and duplex + */ +static s32 e1000_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + /* Check if it is a valid PHY and set PHY mode if necessary. */ + ret_val = e1000_copper_link_preconfig(hw); + if (ret_val) + return ret_val; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_copper_link_igp_setup(hw); + if (ret_val) + return ret_val; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_copper_link_mgp_setup(hw); + if (ret_val) + return ret_val; + } else { + ret_val = gbe_dhg_phy_setup(hw); + if (ret_val) { + e_dbg("gbe_dhg_phy_setup failed!\n"); + return ret_val; + } + } + + if (hw->autoneg) { + /* Setup autoneg and flow control advertisement + * and perform autonegotiation + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H,or 100F + * depending on value from forced_speed_duplex. + */ + e_dbg("Forcing speed and duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + for (i = 0; i < 10; i++) { + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + /* Config the MAC and PHY after link is up */ + ret_val = e1000_copper_link_postconfig(hw); + if (ret_val) + return ret_val; + + e_dbg("Valid link established!!!\n"); + return E1000_SUCCESS; + } + udelay(10); + } + + e_dbg("Unable to establish link!!!\n"); + return E1000_SUCCESS; +} + +/** + * e1000_phy_setup_autoneg - phy settings + * @hw: Struct containing variables accessed by shared code + * + * Configures PHY autoneg and flow control advertisement settings + */ +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + else if (hw->phy_type == e1000_phy_8201) + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. 
First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { + e_dbg + ("Advertise 1000mb Half duplex requested, request denied!\n"); + } + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start + * auto-negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc) { + case E1000_FC_NONE: /* 0 */ + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_RX_PAUSE: /* 1 */ + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + */ + /* Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_TX_PAUSE: /* 2 */ + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case E1000_FC_FULL: /* 3 */ + /* Flow control (both RX and TX) is enabled by a software + * over-ride. 
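+		 * Advertising both PAUSE and ASM_DIR lets the link partner
+		 * resolve to either symmetric or asymmetric flow control (see
+		 * the resolution table in e1000_config_fc_after_link_up()).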
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (hw->phy_type == e1000_phy_8201) { + mii_1000t_ctrl_reg = 0; + } else { + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex - force link settings + * @hw: Struct containing variables accessed by shared code + * + * Force PHY speed and duplex settings to hw->forced_speed_duplex + */ +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 mii_ctrl_reg; + u16 mii_status_reg; + u16 phy_data; + u16 i; + + /* Turn off Flow control if we are forcing speed and duplex. */ + hw->fc = E1000_FC_NONE; + + e_dbg("hw->fc = %d\n", hw->fc); + + /* Read the Device Control Register. */ + ctrl = er32(CTRL); + + /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(DEVICE_SPEED_MASK); + + /* Clear the Auto Speed Detect Enable bit. */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Read the MII Control Register. */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg); + if (ret_val) + return ret_val; + + /* We need to disable autoneg in order to force link and duplex. */ + + mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN; + + /* Are we forcing Full or Half Duplex? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_10_full) { + /* We want to force full duplex so we SET the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl |= E1000_CTRL_FD; + mii_ctrl_reg |= MII_CR_FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + /* We want to force half duplex so we CLEAR the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl &= ~E1000_CTRL_FD; + mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; + e_dbg("Half Duplex\n"); + } + + /* Are we forcing 100Mbps??? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_100_half) { + /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */ + ctrl |= E1000_CTRL_SPD_100; + mii_ctrl_reg |= MII_CR_SPEED_100; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + e_dbg("Forcing 100mb "); + } else { + /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + mii_ctrl_reg |= MII_CR_SPEED_10; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + e_dbg("Forcing 10mb "); + } + + e1000_config_collision_dist(hw); + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + + if (hw->phy_type == e1000_phy_m88) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires + * MDI forced whenever speed are duplex are forced. 
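+		 * Clearing AUTO_X_MODE leaves the PHY in manual MDI mode, so
+		 * auto-crossover stays disabled while speed/duplex are forced.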
+ */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %x\n", phy_data); + + /* Need to reset the PHY or these changes will be ignored */ + mii_ctrl_reg |= MII_CR_RESET; + + /* Disable MDI-X support for 10/100 */ + } else { + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed or duplex are forced. + */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + /* Write back the modified PHY MII control register. */ + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg); + if (ret_val) + return ret_val; + + udelay(1); + + /* The wait_autoneg_complete flag may be a little misleading here. + * Since we are forcing speed and duplex, Auto-Neg is not enabled. + * But we do want to delay for a period while forcing only so we + * don't generate false No Link messages. So we will wait here + * only if the user has set wait_autoneg_complete to 1, which is + * the default. + */ + if (hw->wait_autoneg_complete) { + /* We will wait for autoneg to complete. */ + e_dbg("Waiting for forced speed/duplex link.\n"); + mii_status_reg = 0; + + /* Wait for autoneg to complete or 4.5 seconds to expire */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { + /* We didn't get link. Reset the DSP and wait again + * for link. + */ + ret_val = e1000_phy_reset_dsp(hw); + if (ret_val) { + e_dbg("Error Resetting PHY DSP\n"); + return ret_val; + } + } + /* This loop will early-out if the link condition has been + * met + */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + } + } + + if (hw->phy_type == e1000_phy_m88) { + /* Because we reset the PHY above, we need to re-force TX_CLK in + * the Extended PHY Specific Control Register to 25MHz clock. + * This value defaults back to a 2.5MHz clock when the PHY is + * reset. + */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + + /* In addition, because of the s/w reset above, we need to + * enable CRS on Tx. This must be set for both full and half + * duplex operation. 
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ret_val = e1000_polarity_reversal_workaround(hw); + if (ret_val) + return ret_val; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_config_collision_dist - set collision distance register + * @hw: Struct containing variables accessed by shared code + * + * Sets the collision distance in the Transmit Control register. + * Link should have been established previously. Reads the speed and duplex + * information from the Device Status register. + */ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl, coll_dist; + + if (hw->mac_type < e1000_82543) + coll_dist = E1000_COLLISION_DISTANCE_82542; + else + coll_dist = E1000_COLLISION_DISTANCE; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= coll_dist << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_config_mac_to_phy - sync phy and mac settings + * @hw: Struct containing variables accessed by shared code + * + * Sets MAC speed and duplex settings to reflect the those in the PHY + * The contents of the PHY register containing the needed information need to + * be passed in. + */ +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + /* 82544 or newer MAC, Auto Speed Detection takes care of + * MAC speed/duplex configuration. + */ + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) + return E1000_SUCCESS; + + /* Read the Device Control Register and set the bits to Force Speed + * and Duplex. + */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); + + switch (hw->phy_type) { + case e1000_phy_8201: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & RTL_PHY_CTRL_FD) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + if (phy_data & RTL_PHY_CTRL_SPD_100) + ctrl |= E1000_CTRL_SPD_100; + else + ctrl |= E1000_CTRL_SPD_10; + + e1000_config_collision_dist(hw); + break; + default: + /* Set up duplex in the Device Control and Transmit Control + * registers depending on negotiated values. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & M88E1000_PSSR_DPLX) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + e1000_config_collision_dist(hw); + + /* Set up speed in the Device Control register depending on + * negotiated values. + */ + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + ctrl |= E1000_CTRL_SPD_1000; + else if ((phy_data & M88E1000_PSSR_SPEED) == + M88E1000_PSSR_100MBS) + ctrl |= E1000_CTRL_SPD_100; + } + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_force_mac_fc - force flow control settings + * @hw: Struct containing variables accessed by shared code + * + * Forces the MAC's flow control settings. + * Sets the TFCE and RFCE bits in the device control register to reflect + * the adapter settings. 
TFCE and RFCE need to be explicitly set by + * software when a Copper PHY is used because autonegotiation is managed + * by the PHY rather than the MAC. Software must also configure these + * bits when link is forced on a fiber connection. + */ +s32 e1000_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + + /* Get the current configuration of the Device Control Register */ + ctrl = er32(CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * frames but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + + switch (hw->fc) { + case E1000_FC_NONE: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case E1000_FC_RX_PAUSE: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case E1000_FC_TX_PAUSE: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case E1000_FC_FULL: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + /* Disable TX Flow Control for 82542 (rev 2.0) */ + if (hw->mac_type == e1000_82542_rev2_0) + ctrl &= (~E1000_CTRL_TFCE); + + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_config_fc_after_link_up - configure flow control after autoneg + * @hw: Struct containing variables accessed by shared code + * + * Configures flow control settings after link is established + * Should be called immediately after a valid link has been established. + * Forces MAC flow control settings if link was forced. When in MII/GMII mode + * and autonegotiation is enabled, the MAC flow control settings will be set + * based on the flow control negotiated by the PHY. In TBI mode, the TFCE + * and RFCE bits will be automatically set to the negotiated flow control mode. + */ +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 mii_nway_adv_reg; + u16 mii_nway_lp_ability_reg; + u16 speed; + u16 duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (((hw->media_type == e1000_media_type_fiber) && + (hw->autoneg_failed)) || + ((hw->media_type == e1000_media_type_internal_serdes) && + (hw->autoneg_failed)) || + ((hw->media_type == e1000_media_type_copper) && + (!hw->autoneg))) { + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. 
We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement Register + * (Address 4) and the Auto_Negotiation Base Page + * Ability Register (Address 5) to determine how flow + * control was negotiated. + */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement + * Register (Address 4) and two bits in the Auto + * Negotiation Base Page Ability Register (Address 5) + * determine flow control for both the PHY and the link + * partner. The following table, taken out of the IEEE + * 802.3ab/D6.0 dated March 25, 1999, describes these + * PAUSE resolution bits and how flow control is + * determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|------------------ + * 0 | 0 | DC | DC | E1000_FC_NONE + * 0 | 1 | 0 | DC | E1000_FC_NONE + * 0 | 1 | 1 | 0 | E1000_FC_NONE + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * 1 | 0 | 0 | DC | E1000_FC_NONE + * 1 | DC | 1 | DC | E1000_FC_FULL + * 1 | 1 | 0 | 0 | E1000_FC_NONE + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + /* Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | DC | 1 | DC | E1000_FC_FULL + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected Rx + * ONLY of pause frames. In this case, we had + * to advertise FULL flow control because we + * could not advertise Rx ONLY. Hence, we must + * now check to see if we need to turn OFF the + * TRANSMISSION of PAUSE frames. + */ + if (hw->original_fc == E1000_FC_FULL) { + hw->fc = E1000_FC_FULL; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = E1000_FC_TX_PAUSE; + e_dbg + ("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should + * be disabled. However, we want to consider that we + * could be connected to a legacy switch that doesn't + * advertise desired flow control, but can be forced on + * the link partner. So if we advertised no flow + * control, that is what we will resolve to. If we + * advertised some kind of receive capability (Rx Pause + * Only or Full Flow Control) and the link partner + * advertised none, we will configure ourselves to + * enable Rx Flow Control only. We can do this safely + * for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, + * no harm done since we won't be receiving any PAUSE + * frames anyway. If the intent on the link partner was + * to have flow control enabled, then by us enabling Rx + * only, we can at least receive pause frames and + * process them. This is a good idea because in most + * cases, since we are predominantly a server NIC, more + * times than not we will be asked to delay transmission + * of packets than asking our link partner to pause + * transmission of frames. + */ + else if ((hw->original_fc == E1000_FC_NONE || + hw->original_fc == E1000_FC_TX_PAUSE) || + hw->fc_strict_ieee) { + hw->fc = E1000_FC_NONE; + e_dbg("Flow Control = NONE.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc = E1000_FC_NONE; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg + ("Error forcing flow control settings\n"); + return ret_val; + } + } else { + e_dbg + ("Copper PHY and Auto Neg has not completed.\n"); + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + */ +static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. 
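+	 * Receiving /C/ ordered sets (the RXCW_C bit checked below) means
+	 * the partner is still sending auto-negotiation configuration words.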
+ */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (hw->autoneg_failed == 0) { + hw->autoneg_failed = 1; + goto out; + } + e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, hw->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + hw->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - forced.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - autoneg " + "completed successfully.\n"); + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - invalid" + "codewords detected in autoneg.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - no sync.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - autoneg failed\n"); + } + } + + out: + return ret_val; +} + +/** + * e1000_check_for_link + * @hw: Struct containing variables accessed by shared code + * + * Checks to see if the link status of the hardware has changed. + * Called by any function that needs to check the link status of the adapter. + */ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + u32 status; + u32 rctl; + u32 icr; + s32 ret_val; + u16 phy_data; + + er32(CTRL); + status = er32(STATUS); + + /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + */ + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) { + er32(RXCW); + + if (hw->media_type == e1000_media_type_fiber) { + if (status & E1000_STATUS_LU) + hw->get_link_status = false; + } + } + + /* If we have a copper PHY then we only want to go out to the PHY + * registers to see if Auto-Neg has completed and/or if our link + * status has changed. The get_link_status flag will be set if we + * receive a Link Status Change interrupt or we have Rx Sequence + * Errors. 
+ */ + if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + * Read the register twice since the link bit is sticky. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + hw->get_link_status = false; + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift(hw); + + /* If we are on 82544 or 82543 silicon and speed/duplex + * are forced to 10H or 10F, then we will implement the + * polarity reversal workaround. We disable interrupts + * first, and upon returning, place the devices + * interrupt state to its previous value except for the + * link status change interrupt which will + * happen due to the execution of this workaround. + */ + + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ew32(IMC, 0xffffffff); + ret_val = + e1000_polarity_reversal_workaround(hw); + icr = er32(ICR); + ew32(ICS, (icr & ~E1000_ICS_LSC)); + ew32(IMS, IMS_ENABLE_MASK); + } + + } else { + /* No link detected */ + e1000_config_dsp_after_link_change(hw, false); + return 0; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!hw->autoneg) + return -E1000_ERR_CONFIG; + + /* optimize the dsp settings for the igp phy */ + e1000_config_dsp_after_link_change(hw, true); + + /* We have a M88E1000 PHY and Auto-Neg is enabled. If we + * have Si on board that is 82544 or newer, Auto + * Speed Detection takes care of MAC speed/duplex + * configuration. So we only need to configure Collision + * Distance in the MAC. Otherwise, we need to force + * speed/duplex on the MAC to the current PHY speed/duplex + * settings. + */ + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_ce4100)) + e1000_config_collision_dist(hw); + else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg + ("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control settings + * because we may have had to re-autoneg with a different link + * partner. + */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + + /* At this point we know that we are on copper and we have + * auto-negotiated link. These are conditions for checking the + * link partner capability register. We use the link speed to + * determine if TBI compatibility needs to be turned on or off. + * If the link is not at gigabit speed, then TBI compatibility + * is not needed. If we are at gigabit speed, we turn on TBI + * compatibility. + */ + if (hw->tbi_compatibility_en) { + u16 speed, duplex; + + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + if (speed != SPEED_1000) { + /* If link speed is not set to gigabit speed, we + * do not need to enable TBI compatibility. 
+			 */
+				if (hw->tbi_compatibility_on) {
+					/* If we previously were in the mode,
+					 * turn it off.
+					 */
+					rctl = er32(RCTL);
+					rctl &= ~E1000_RCTL_SBP;
+					ew32(RCTL, rctl);
+					hw->tbi_compatibility_on = false;
+				}
+			} else {
+				/* If TBI compatibility was previously off,
+				 * turn it on. For compatibility with a TBI link
+				 * partner, we will store bad packets. Some
+				 * frames have an additional byte on the end and
+				 * will look like CRC errors to the hardware.
+				 */
+				if (!hw->tbi_compatibility_on) {
+					hw->tbi_compatibility_on = true;
+					rctl = er32(RCTL);
+					rctl |= E1000_RCTL_SBP;
+					ew32(RCTL, rctl);
+				}
+			}
+		}
+	}
+
+	if ((hw->media_type == e1000_media_type_fiber) ||
+	    (hw->media_type == e1000_media_type_internal_serdes))
+		e1000_check_for_serdes_link_generic(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_speed_and_duplex
+ * @hw: Struct containing variables accessed by shared code
+ * @speed: Speed of the connection
+ * @duplex: Duplex setting of the connection
+ *
+ * Detects the current speed and duplex settings of the hardware.
+ */
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+	u32 status;
+	s32 ret_val;
+	u16 phy_data;
+
+	if (hw->mac_type >= e1000_82543) {
+		status = er32(STATUS);
+		if (status & E1000_STATUS_SPEED_1000) {
+			*speed = SPEED_1000;
+			e_dbg("1000 Mbs, ");
+		} else if (status & E1000_STATUS_SPEED_100) {
+			*speed = SPEED_100;
+			e_dbg("100 Mbs, ");
+		} else {
+			*speed = SPEED_10;
+			e_dbg("10 Mbs, ");
+		}
+
+		if (status & E1000_STATUS_FD) {
+			*duplex = FULL_DUPLEX;
+			e_dbg("Full Duplex\n");
+		} else {
+			*duplex = HALF_DUPLEX;
+			e_dbg(" Half Duplex\n");
+		}
+	} else {
+		e_dbg("1000 Mbs, Full Duplex\n");
+		*speed = SPEED_1000;
+		*duplex = FULL_DUPLEX;
+	}
+
+	/* IGP01 PHY may advertise full duplex operation after speed downgrade
+	 * even if it is operating at half duplex. Here we set the duplex
+	 * settings to match the duplex in the link partner's capabilities.
+	 */
+	if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
+		ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
+			*duplex = HALF_DUPLEX;
+		else {
+			ret_val =
+			    e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
+			if (ret_val)
+				return ret_val;
+			if ((*speed == SPEED_100 &&
+			     !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
+			    (*speed == SPEED_10 &&
+			     !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
+				*duplex = HALF_DUPLEX;
+		}
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_wait_autoneg
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Blocks until autoneg completes or times out (~4.5 seconds)
+ */
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 i;
+	u16 phy_data;
+
+	e_dbg("Waiting for Auto-Neg to complete.\n");
+
+	/* We will wait for autoneg to complete or 4.5 seconds to expire. */
+	for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
+		/* Read the MII Status Register and wait for Auto-Neg
+		 * Complete bit to be set.
+ */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + if (phy_data & MII_SR_AUTONEG_COMPLETE) + return E1000_SUCCESS; + + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_raise_mdi_clk - Raises the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Raise the clock input to the Management Data Clock (by setting the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_lower_mdi_clk - Lowers the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Lower the clock input to the Management Data Clock (by clearing the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY + * @hw: Struct containing variables accessed by shared code + * @data: Data to send out to the PHY + * @count: Number of bits to shift out + * + * Bits are shifted out in MSB to LSB order. + */ +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) +{ + u32 ctrl; + u32 mask; + + /* We need to shift "count" number of bits out to the PHY. So, the value + * in the "data" parameter will be shifted out to the PHY one bit at a + * time. In order to do this, "data" must be broken down into bits. + */ + mask = 0x01; + mask <<= (count - 1); + + ctrl = er32(CTRL); + + /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ + ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); + + while (mask) { + /* A "1" is shifted out to the PHY by setting the MDIO bit to + * "1" and then raising and lowering the Management Data Clock. + * A "0" is shifted out to the PHY by setting the MDIO bit to + * "0" and then raising and lowering the clock. + */ + if (data & mask) + ctrl |= E1000_CTRL_MDIO; + else + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + udelay(10); + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + mask = mask >> 1; + } +} + +/** + * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY + * @hw: Struct containing variables accessed by shared code + * + * Bits are shifted in in MSB to LSB order. + */ +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) +{ + u32 ctrl; + u16 data = 0; + u8 i; + + /* In order to read a register from the PHY, we need to shift in a total + * of 18 bits from the PHY. The first two bit (turnaround) times are + * used to avoid contention on the MDIO pin when a read operation is + * performed. These two bits are ignored by us and thrown away. Bits are + * "shifted in" by raising the input to the Management Data Clock + * (setting the MDC bit), and then reading the value of the MDIO bit. + */ + ctrl = er32(CTRL); + + /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as + * input. + */ + ctrl &= ~E1000_CTRL_MDIO_DIR; + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + /* Raise and Lower the clock before reading in the data. This accounts + * for the turnaround bits. 
The first clock occurred when we clocked out + * the last bit of the Register Address. + */ + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + for (data = 0, i = 0; i < 16; i++) { + data = data << 1; + e1000_raise_mdi_clk(hw, &ctrl); + ctrl = er32(CTRL); + /* Check to see if we shifted in a "1". */ + if (ctrl & E1000_CTRL_MDIO) + data |= 1; + e1000_lower_mdi_clk(hw, &ctrl); + } + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + return data; +} + +/** + * e1000_read_phy_reg - read a phy register + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to read + * @phy_data: pointer to the value on the PHY register + * + * Reads the value from a PHY register, if the value is on a specific non zero + * page, sets the page first. + */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16) reg_addr); + if (ret_val) + goto out; + } + + ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); +out: + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, and register address in the MDI + * Control register. The MAC will take care of interfacing with + * the PHY to retrieve the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_READ) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + + mdic = readl(E1000_MDIO_STS); + if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) { + e_dbg("MDI Read Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16)mdic; + } else { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16)mdic; + } + } else { + /* We must first send a preamble through the MDIO pin to signal + * the beginning of an MII instruction. This is done by sending + * 32 consecutive "1" bits. + */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the next few fields that are required for a read + * operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine five different times. 
The + * format of a MII read instruction consists of a shift out of + * 14 bits and is defined as follows: + * + * followed by a shift in of 18 bits. This first two bits + * shifted in are TurnAround bits used to avoid contention on + * the MDIO pin when a READ operation is performed. These two + * bits are thrown away followed by a shift in of 16 bits which + * contains the desired data. + */ + mdic = ((reg_addr) | (phy_addr << 5) | + (PHY_OP_READ << 10) | (PHY_SOF << 12)); + + e1000_shift_out_mdi_bits(hw, mdic, 14); + + /* Now that we've shifted out the read command to the MII, we + * need to "shift in" the 16-bit value (18 total bits) of the + * requested PHY register address. + */ + *phy_data = e1000_shift_in_mdi_bits(hw); + } + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg - write a phy register + * + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to write + * @phy_data: data to write to the PHY + * + * Writes a value to a PHY register + */ +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16)reg_addr); + if (ret_val) { + spin_unlock_irqrestore(&e1000_phy_lock, flags); + return ret_val; + } + } + + ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, register address, and data + * intended for the PHY register in the MDI Control register. + * The MAC will take care of interfacing with the PHY to send + * the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = (((u32)phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_WRITE) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 640; i++) { + udelay(5); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } else { + mdic = (((u32)phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 641; i++) { + udelay(5); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } + } else { + /* We'll need to use the SW defined pins to shift the write + * command out to the PHY. We first send a preamble to the PHY + * to signal the beginning of the MII instruction. This is done + * by sending 32 consecutive "1" bits. 
+ */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the remaining required fields that will indicate + * a write operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine for each field in the + * command. The format of a MII write instruction is as follows: + * . + */ + mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | + (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); + mdic <<= 16; + mdic |= (u32)phy_data; + + e1000_shift_out_mdi_bits(hw, mdic, 32); + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - reset the phy, hardware style + * @hw: Struct containing variables accessed by shared code + * + * Returns the PHY to the power-on reset state + */ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext; + u32 led_ctrl; + + e_dbg("Resetting Phy...\n"); + + if (hw->mac_type > e1000_82543) { + /* Read the device control register and assert the + * E1000_CTRL_PHY_RST bit. Then, take it out of reset. + * For e1000 hardware, we delay for 10ms between the assert + * and de-assert. + */ + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); + E1000_WRITE_FLUSH(); + + msleep(10); + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + } else { + /* Read the Extended Device Control Register, assert the + * PHY_RESET_DIR bit to put the PHY into reset. Then, take it + * out of reset. + */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; + ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + msleep(10); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + } + udelay(150); + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Wait for FW to finish PHY configuration. */ + return e1000_get_phy_cfg_done(hw); +} + +/** + * e1000_phy_reset - reset the phy to commit settings + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY + * Sets bit 15 of the MII Control register + */ +s32 e1000_phy_reset(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + switch (hw->phy_type) { + case e1000_phy_igp: + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + break; + default: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= MII_CR_RESET; + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + break; + } + + if (hw->phy_type == e1000_phy_igp) + e1000_phy_init_script(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_detect_gig_phy - check the phy type + * @hw: Struct containing variables accessed by shared code + * + * Probes the expected PHY address for known PHY IDs + */ +static s32 e1000_detect_gig_phy(struct e1000_hw *hw) +{ + s32 phy_init_status, ret_val; + u16 phy_id_high, phy_id_low; + bool match = false; + + if (hw->phy_id != 0) + return E1000_SUCCESS; + + /* Read the PHY ID Registers to identify which PHY is onboard. 
*/ + ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); + if (ret_val) + return ret_val; + + hw->phy_id = (u32)(phy_id_high << 16); + udelay(20); + ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); + if (ret_val) + return ret_val; + + hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK); + hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->phy_id == M88E1000_E_PHY_ID) + match = true; + break; + case e1000_82544: + if (hw->phy_id == M88E1000_I_PHY_ID) + match = true; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + if (hw->phy_id == M88E1011_I_PHY_ID) + match = true; + break; + case e1000_ce4100: + if ((hw->phy_id == RTL8211B_PHY_ID) || + (hw->phy_id == RTL8201N_PHY_ID) || + (hw->phy_id == M88E1118_E_PHY_ID)) + match = true; + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (hw->phy_id == IGP01E1000_I_PHY_ID) + match = true; + break; + default: + e_dbg("Invalid MAC type %d\n", hw->mac_type); + return -E1000_ERR_CONFIG; + } + phy_init_status = e1000_set_phy_type(hw); + + if ((match) && (phy_init_status == E1000_SUCCESS)) { + e_dbg("PHY ID 0x%X detected\n", hw->phy_id); + return E1000_SUCCESS; + } + e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id); + return -E1000_ERR_PHY; +} + +/** + * e1000_phy_reset_dsp - reset DSP + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY's DSP + */ +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + do { + ret_val = e1000_write_phy_reg(hw, 29, 0x001d); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x0000); + if (ret_val) + break; + ret_val = E1000_SUCCESS; + } while (0); + + return ret_val; +} + +/** + * e1000_phy_igp_get_info - get igp specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for igp PHY only. + */ +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data, min_length, max_length, average; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + /* IGP01E1000 does not need to support it. */ + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; + + /* IGP01E1000 always correct polarity reversal */ + phy_info->polarity_correction = e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >> + IGP01E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Local/Remote Receiver Information are only valid @ 1000 + * Mbps + */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + /* Get cable length */ + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + /* Translate to old method */ + average = (max_length + min_length) / 2; + + if (average <= e1000_igp_cable_length_50) + phy_info->cable_length = e1000_cable_length_50; + else if (average <= e1000_igp_cable_length_80) + phy_info->cable_length = e1000_cable_length_50_80; + else if (average <= e1000_igp_cable_length_110) + phy_info->cable_length = e1000_cable_length_80_110; + else if (average <= e1000_igp_cable_length_140) + phy_info->cable_length = e1000_cable_length_110_140; + else + phy_info->cable_length = e1000_cable_length_140; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_m88_get_info - get m88 specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for m88 PHY only. + */ +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_info->extended_10bt_distance = + ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> + M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ? + e1000_10bt_ext_dist_enable_lower : + e1000_10bt_ext_dist_enable_normal; + + phy_info->polarity_correction = + ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> + M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ? + e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >> + M88E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + /* Cable Length Estimation and Local/Remote Receiver Information + * are only valid at 1000 Mbps. + */ + phy_info->cable_length = + (e1000_cable_length) ((phy_data & + M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_get_info - request phy info + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers + */ +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + + phy_info->cable_length = e1000_cable_length_undefined; + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; + phy_info->cable_polarity = e1000_rev_polarity_undefined; + phy_info->downshift = e1000_downshift_undefined; + phy_info->polarity_correction = e1000_polarity_reversal_undefined; + phy_info->mdix_mode = e1000_auto_x_mode_undefined; + phy_info->local_rx = e1000_1000t_rx_status_undefined; + phy_info->remote_rx = e1000_1000t_rx_status_undefined; + + if (hw->media_type != e1000_media_type_copper) { + e_dbg("PHY info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { + e_dbg("PHY info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + if (hw->phy_type == e1000_phy_igp) + return e1000_phy_igp_get_info(hw, phy_info); + else if ((hw->phy_type == e1000_phy_8211) || + (hw->phy_type == e1000_phy_8201)) + return E1000_SUCCESS; + else + return e1000_phy_m88_get_info(hw, phy_info); +} + +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { + e_dbg("Invalid MDI setting detected\n"); + hw->mdix = 1; + return -E1000_ERR_CONFIG; + } + return E1000_SUCCESS; +} + +/** + * e1000_init_eeprom_params - initialize sw eeprom vars + * @hw: Struct containing variables accessed by shared code + * + * Sets up eeprom variables in the hw struct. Must be called after mac_type + * is configured. 
+ */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd = er32(EECD); + s32 ret_val = E1000_SUCCESS; + u16 eeprom_size; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + eeprom->type = e1000_eeprom_microwire; + eeprom->word_size = 64; + eeprom->opcode_bits = 3; + eeprom->address_bits = 6; + eeprom->delay_usec = 50; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_SIZE) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (eecd & E1000_EECD_TYPE) { + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + } else { + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + } + break; + default: + break; + } + + if (eeprom->type == e1000_eeprom_spi) { + /* eeprom_size will be an enum [0..8] that maps to eeprom sizes + * 128B to 32KB (incremented by powers of 2). + */ + /* Set to default value for initial eeprom read. */ + eeprom->word_size = 64; + ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); + if (ret_val) + return ret_val; + eeprom_size = + (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; + /* 256B eeprom size was not supported in earlier hardware, so we + * bump eeprom_size up one to ensure that "1" (which maps to + * 256B) is never the result used in the shifting logic below. + */ + if (eeprom_size) + eeprom_size++; + + eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT); + } + return ret_val; +} + +/** + * e1000_raise_ee_clk - Raises the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Raise the clock input to the EEPROM (by setting the SK bit), and then + * wait microseconds. + */ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_lower_ee_clk - Lowers the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Lower the clock input to the EEPROM (by clearing the SK bit), and + * then wait 50 microseconds. + */ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM. 
+ * @hw: Struct containing variables accessed by shared code + * @data: data to send to the EEPROM + * @count: number of bits to shift out + */ +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u32 mask; + + /* We need to shift "count" bits out to the EEPROM. So, value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + */ + mask = 0x01 << (count - 1); + eecd = er32(EECD); + if (eeprom->type == e1000_eeprom_microwire) + eecd &= ~E1000_EECD_DO; + else if (eeprom->type == e1000_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + /* A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK bit + * controls the clock input to the EEPROM). A "0" is shifted + * out to the EEPROM by setting "DI" to "0" and then raising and + * then lowering the clock. + */ + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(eeprom->delay_usec); + + e1000_raise_ee_clk(hw, &eecd); + e1000_lower_ee_clk(hw, &eecd); + + mask = mask >> 1; + + } while (mask); + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM + * @hw: Struct containing variables accessed by shared code + * @count: number of bits to shift in + */ +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + /* In order to read a register from the EEPROM, we need to shift 'count' + * bits in from the EEPROM. Bits are "shifted in" by raising the clock + * input to the EEPROM (setting the SK bit), and then reading the value + * of the "DO" bit. During this "shifting in" process the "DI" bit + * should always be clear. + */ + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data = data << 1; + e1000_raise_ee_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DI); + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_ee_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_acquire_eeprom - Prepares EEPROM for access + * @hw: Struct containing variables accessed by shared code + * + * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This + * function should be called before issuing a command to the EEPROM. 
+ */ +static s32 e1000_acquire_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd, i = 0; + + eecd = er32(EECD); + + /* Request EEPROM Access */ + if (hw->mac_type > e1000_82544) { + eecd |= E1000_EECD_REQ; + ew32(EECD, eecd); + eecd = er32(EECD); + while ((!(eecd & E1000_EECD_GNT)) && + (i < E1000_EEPROM_GRANT_ATTEMPTS)) { + i++; + udelay(5); + eecd = er32(EECD); + } + if (!(eecd & E1000_EECD_GNT)) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire EEPROM grant\n"); + return -E1000_ERR_EEPROM; + } + } + + /* Setup EEPROM for Read/Write */ + + if (eeprom->type == e1000_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); + ew32(EECD, eecd); + + /* Set CS */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(1); + } + + return E1000_SUCCESS; +} + +/** + * e1000_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_standby_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + + eecd = er32(EECD); + + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock high */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock low */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } +} + +/** + * e1000_release_eeprom - drop chip select + * @hw: Struct containing variables accessed by shared code + * + * Terminates a command by inverting the EEPROM's chip select pin + */ +static void e1000_release_eeprom(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + + if (hw->eeprom.type == e1000_eeprom_spi) { + eecd |= E1000_EECD_CS; /* Pull CS high */ + eecd &= ~E1000_EECD_SK; /* Lower SCK */ + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(hw->eeprom.delay_usec); + } else if (hw->eeprom.type == e1000_eeprom_microwire) { + /* cleanup eeprom */ + + /* CS on Microwire is active-high */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); + + ew32(EECD, eecd); + + /* Rising edge of clock */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + + /* Falling edge of clock */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + } + + /* Stop requesting EEPROM access */ + if (hw->mac_type > e1000_82544) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + } +} + +/** + * e1000_spi_eeprom_ready - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u8 spi_stat_reg; + + /* Read "Status Register" repeatedly until the LSB is cleared. 
The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + retry_count = 0; + do { + e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, + hw->eeprom.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8); + if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) + break; + + udelay(5); + retry_count += 5; + + e1000_standby_eeprom(hw); + } while (retry_count < EEPROM_MAX_RETRY_SPI); + + /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and + * only 0-5mSec on 5V devices) + */ + if (retry_count >= EEPROM_MAX_RETRY_SPI) { + e_dbg("SPI EEPROM Status error\n"); + return -E1000_ERR_EEPROM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_eeprom - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * @words: number of words to read + */ +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + + mutex_lock(&e1000_eeprom_lock); + ret = e1000_do_read_eeprom(hw, offset, words, data); + mutex_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 i = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. + */ + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { + e_dbg("\"words\" parameter out of bounds. Words = %d," + "size = %d\n", offset, eeprom->word_size); + return -E1000_ERR_EEPROM; + } + + /* EEPROM's that don't use EERD to read require us to bit-bang the SPI + * directly. In this case, we need to acquire the EEPROM so that + * FW or other port software does not interrupt. + */ + /* Prepare the EEPROM for bit-bang reading */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have + * acquired the EEPROM at this point, so any returns should release it + */ + if (eeprom->type == e1000_eeprom_spi) { + u16 word_in; + u8 read_opcode = EEPROM_READ_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) { + e1000_release_eeprom(hw); + return -E1000_ERR_EEPROM; + } + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + read_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16)(offset * 2), + eeprom->address_bits); + + /* Read the data. The address of the eeprom internally + * increments with each byte (spi) being read, saving on the + * overhead of eeprom setup and tear-down. The address counter + * will roll over if reading beyond the size of the eeprom, thus + * allowing the entire memory to be read starting from any + * offset. 
+ */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_ee_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + } else if (eeprom->type == e1000_eeprom_microwire) { + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, + EEPROM_READ_OPCODE_MICROWIRE, + eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16)(offset + i), + eeprom->address_bits); + + /* Read the data. For microwire, each word requires the + * overhead of eeprom setup and tear-down. + */ + data[i] = e1000_shift_in_ee_bits(hw, 16); + e1000_standby_eeprom(hw); + cond_resched(); + } + } + + /* End this read operation */ + e1000_release_eeprom(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum + * @hw: Struct containing variables accessed by shared code + * + * Reads the first 64 16 bit words of the EEPROM and sums the values read. + * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is + * valid. + */ +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + +#ifdef CONFIG_PARISC + /* This is a signature and not a checksum on HP c8000 */ + if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) + return E1000_SUCCESS; + +#endif + if (checksum == (u16)EEPROM_SUM) + return E1000_SUCCESS; + else { + e_dbg("EEPROM Checksum Invalid\n"); + return -E1000_ERR_EEPROM; + } +} + +/** + * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum + * @hw: Struct containing variables accessed by shared code + * + * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. + * Writes the difference to word offset 63 of the EEPROM. + */ +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + checksum = (u16)EEPROM_SUM - checksum; + if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { + e_dbg("EEPROM Write Error\n"); + return -E1000_ERR_EEPROM; + } + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom - write words to the different EEPROM types. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word to be written to the EEPROM + * + * If e1000_update_eeprom_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + */ +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + + mutex_lock(&e1000_eeprom_lock); + ret = e1000_do_write_eeprom(hw, offset, words, data); + mutex_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + s32 status = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. 
+ */ + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { + e_dbg("\"words\" parameter out of bounds\n"); + return -E1000_ERR_EEPROM; + } + + /* Prepare the EEPROM for writing */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + if (eeprom->type == e1000_eeprom_microwire) { + status = e1000_write_eeprom_microwire(hw, offset, words, data); + } else { + status = e1000_write_eeprom_spi(hw, offset, words, data); + msleep(10); + } + + /* Done with writing */ + e1000_release_eeprom(hw); + + return status; +} + +/** + * e1000_write_eeprom_spi - Writes a 16 bit word to a given offset in an SPI EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u16 widx = 0; + + while (widx < words) { + u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) + return -E1000_ERR_EEPROM; + + e1000_standby_eeprom(hw); + cond_resched(); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, + eeprom->opcode_bits); + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + write_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16)((offset + widx) * 2), + eeprom->address_bits); + + /* Send the data */ + + /* Loop to allow for up to whole page write (32 bytes) of + * eeprom + */ + while (widx < words) { + u16 word_out = data[widx]; + + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_ee_bits(hw, word_out, 16); + widx++; + + /* Some larger eeprom sizes are capable of a 32-byte + * PAGE WRITE operation, while the smaller eeproms are + * capable of an 8-byte PAGE WRITE operation. Break the + * inner loop to pass new address + */ + if ((((offset + widx) * 2) % eeprom->page_size) == 0) { + e1000_standby_eeprom(hw); + break; + } + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom_microwire - Writes a 16 bit word to a given offset in a Microwire EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u16 words_written = 0; + u16 i = 0; + + /* Send the write enable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 11). It's less work to include + * the 11 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This puts the + * EEPROM into write/erase mode. 
+ */ + e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, + (u16)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); + + /* Prepare the EEPROM */ + e1000_standby_eeprom(hw); + + while (words_written < words) { + /* Send the Write command (3-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, + eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16)(offset + words_written), + eeprom->address_bits); + + /* Send the data */ + e1000_shift_out_ee_bits(hw, data[words_written], 16); + + /* Toggle the CS line. This in effect tells the EEPROM to + * execute the previous command. + */ + e1000_standby_eeprom(hw); + + /* Read DO repeatedly until it is high (equal to '1'). The + * EEPROM will signal that the command has been completed by + * raising the DO signal. If DO does not go high in 10 + * milliseconds, then error out. + */ + for (i = 0; i < 200; i++) { + eecd = er32(EECD); + if (eecd & E1000_EECD_DO) + break; + udelay(50); + } + if (i == 200) { + e_dbg("EEPROM Write did not complete\n"); + return -E1000_ERR_EEPROM; + } + + /* Recover from write */ + e1000_standby_eeprom(hw); + cond_resched(); + + words_written++; + } + + /* Send the write disable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 10). It's less work to include + * the 10 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This takes the + * EEPROM out of write/erase mode. + */ + e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, + (u16)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - read the adapters MAC from eeprom + * @hw: Struct containing variables accessed by shared code + * + * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the + * second function of dual function devices + */ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + u16 offset; + u16 eeprom_data, i; + + for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { + offset = i >> 1; + if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF); + hw->perm_mac_addr[i + 1] = (u8)(eeprom_data >> 8); + } + + switch (hw->mac_type) { + default: + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) + hw->perm_mac_addr[5] ^= 0x01; + break; + } + + for (i = 0; i < NODE_ADDRESS_SIZE; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return E1000_SUCCESS; +} + +/** + * e1000_init_rx_addrs - Initializes receive address filters. + * @hw: Struct containing variables accessed by shared code + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + */ +static void e1000_init_rx_addrs(struct e1000_hw *hw) +{ + u32 i; + u32 rar_num; + + /* Setup the receive address. */ + e_dbg("Programming MAC Address into RAR[0]\n"); + + e1000_rar_set(hw, hw->mac_addr, 0); + + rar_num = E1000_RAR_ENTRIES; + + /* Zero out the following 14 receive addresses. 
RAR[15] is for + * manageability + */ + e_dbg("Clearing RAR[1-14]\n"); + for (i = 1; i < rar_num; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table + * @hw: Struct containing variables accessed by shared code + * @mc_addr: the multicast address to hash + */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value = 0; + + /* The portion of the address that is used for the hash table is + * determined by the mc_filter_type setting. + */ + switch (hw->mc_filter_type) { + /* [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + */ + case 0: + /* [47:36] i.e. 0x563 for above example address */ + hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: + /* [46:35] i.e. 0xAC6 for above example address */ + hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: + /* [45:34] i.e. 0x5D8 for above example address */ + hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: + /* [43:32] i.e. 0x634 for above example address */ + hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + } + + hash_value &= 0xFFF; + return hash_value; +} + +/** + * e1000_rar_set - Puts an ethernet address into a receive address register. + * @hw: Struct containing variables accessed by shared code + * @addr: Address to put into receive address register + * @index: Receive address register to write + */ +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx + * unit hang. + * + * Description: + * If there are any Rx frames queued up or otherwise present in the HW + * before RSS is enabled, and then we enable RSS, the HW Rx unit will + * hang. To work around this issue, we have to disable receives and + * flush out all Rx frames before we enable RSS. To do so, we modify we + * redirect all Rx traffic to manageability and then reset the HW. + * This flushes away Rx frames, and (since the redirections to + * manageability persists across resets) keeps new ones from coming in + * while we work. Then, we clear the Address Valid AV bit for all MAC + * addresses and undo the re-direction to manageability. + * Now, frames are coming in again, but the MAC won't accept them, so + * far so good. We now proceed to initialize RSS (if necessary) and + * configure the Rx unit. Last, we re-enable the AV bits and continue + * on our merry way. + */ + switch (hw->mac_type) { + default: + /* Indicate to hardware the Address is Valid. */ + rar_high |= E1000_RAH_AV; + break; + } + + E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table. 
+ * @hw: Struct containing variables accessed by shared code + * @offset: Offset in VLAN filer table to write + * @value: Value to write into VLAN filter table + */ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + u32 temp; + + if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { + temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); + E1000_WRITE_FLUSH(); + } else { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_clear_vfta - Clears the VLAN filer table + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0); + E1000_WRITE_FLUSH(); + } +} + +static s32 e1000_id_led_init(struct e1000_hw *hw) +{ + u32 ledctl; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 eeprom_data, i, temp; + const u16 led_mask = 0x0F; + + if (hw->mac_type < e1000_82540) { + /* Nothing to do */ + return E1000_SUCCESS; + } + + ledctl = er32(LEDCTL); + hw->ledctl_default = ledctl; + hw->ledctl_mode1 = hw->ledctl_default; + hw->ledctl_mode2 = hw->ledctl_default; + + if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + + if ((eeprom_data == ID_LED_RESERVED_0000) || + (eeprom_data == ID_LED_RESERVED_FFFF)) { + eeprom_data = ID_LED_DEFAULT; + } + + for (i = 0; i < 4; i++) { + temp = (eeprom_data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_setup_led + * @hw: Struct containing variables accessed by shared code + * + * Prepares SW controlable LED for use and saves the current state of the LED. 
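A rough sketch, not driver code: e1000_id_led_init() above walks the ID LED word one nibble at a time (shift by i << 2) and rewrites the matching byte of LEDCTL (shift by i << 3). The mode value and the rule applied to each field below are invented purely to show the indexing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t eeprom_data = 0x8421;   /* made-up ID LED settings word */
    uint32_t ledctl = 0;
    const uint32_t mode_on = 0x0E;   /* placeholder, not E1000_LEDCTL_MODE_LED_ON */
    int i;

    for (i = 0; i < 4; i++) {
        uint16_t field = (eeprom_data >> (i << 2)) & 0x0F; /* 4-bit field for LED i */

        /* invented rule for the demo: any non-zero field forces the LED on */
        if (field) {
            ledctl &= ~(0xFFu << (i << 3));  /* clear the byte owned by LED i */
            ledctl |= mode_on << (i << 3);   /* install the new mode */
        }
        printf("LED%d field=0x%X  ledctl=0x%08X\n",
               i, (unsigned)field, (unsigned)ledctl);
    }
    return 0;
}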
+ */ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + u32 ledctl; + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No setup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn off PHY Smart Power Down (if enabled) */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, + &hw->phy_spd_default); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + (u16)(hw->phy_spd_default & + ~IGP01E1000_GMII_SPD)); + if (ret_val) + return ret_val; + fallthrough; + default: + if (hw->media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + /* Save current LEDCTL settings */ + hw->ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->media_type == e1000_media_type_copper) + ew32(LEDCTL, hw->ledctl_mode1); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores the saved state of the SW controlable LED. + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No cleanup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn on PHY Smart Power Down (if previously enabled) */ + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + hw->phy_spd_default); + if (ret_val) + return ret_val; + fallthrough; + default: + /* Restore LEDCTL settings */ + ew32(LEDCTL, hw->ledctl_default); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turns on the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_on(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode2); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turns off the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_off(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == 
e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode1); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_hw_cntrs - Clears all hardware statistics counters. + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_hw_cntrs(struct e1000_hw *hw) +{ + er32(CRCERRS); + er32(SYMERRS); + er32(MPC); + er32(SCC); + er32(ECOL); + er32(MCC); + er32(LATECOL); + er32(COLC); + er32(DC); + er32(SEC); + er32(RLEC); + er32(XONRXC); + er32(XONTXC); + er32(XOFFRXC); + er32(XOFFTXC); + er32(FCRUC); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + + er32(GPRC); + er32(BPRC); + er32(MPRC); + er32(GPTC); + er32(GORCL); + er32(GORCH); + er32(GOTCL); + er32(GOTCH); + er32(RNBC); + er32(RUC); + er32(RFC); + er32(ROC); + er32(RJC); + er32(TORL); + er32(TORH); + er32(TOTL); + er32(TOTH); + er32(TPR); + er32(TPT); + + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(MPTC); + er32(BPTC); + + if (hw->mac_type < e1000_82543) + return; + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + if (hw->mac_type <= e1000_82544) + return; + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); +} + +/** + * e1000_reset_adaptive - Resets Adaptive IFS to its default state. + * @hw: Struct containing variables accessed by shared code + * + * Call this after e1000_init_hw. You may override the IFS defaults by setting + * hw->ifs_params_forced to true. However, you must initialize hw-> + * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio + * before calling this function. + */ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if (!hw->ifs_params_forced) { + hw->current_ifs_val = 0; + hw->ifs_min_val = IFS_MIN; + hw->ifs_max_val = IFS_MAX; + hw->ifs_step_size = IFS_STEP; + hw->ifs_ratio = IFS_RATIO; + } + hw->in_ifs_mode = false; + ew32(AIT, 0); + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_update_adaptive - update adaptive IFS + * @hw: Struct containing variables accessed by shared code + * + * Called during the callback/watchdog routine to update IFS value based on + * the ratio of transmits to collisions. 
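As a reading aid (not part of the patch), here is a condensed user-space model of the policy described above and implemented by e1000_update_adaptive() below: back off transmit spacing while the collision-to-transmit ratio stays high, and release it once traffic calms down. Field names mirror the driver, but MIN_NUM_XMITS and the IFS limits are replaced by stand-in numbers:

#include <stdbool.h>
#include <stdio.h>

struct ifs_state {
    unsigned cur, min, max, step, ratio;
    bool in_ifs_mode;
};

static void ifs_update(struct ifs_state *s, unsigned collisions, unsigned tx_pkts)
{
    const unsigned min_xmits = 10;   /* stand-in for MIN_NUM_XMITS */

    if (collisions * s->ratio > tx_pkts && tx_pkts > min_xmits) {
        s->in_ifs_mode = true;
        if (s->cur < s->max)
            s->cur = s->cur ? s->cur + s->step : s->min;   /* back off further */
    } else if (s->in_ifs_mode && tx_pkts <= min_xmits) {
        s->cur = 0;                  /* congestion is gone, open the spacing up */
        s->in_ifs_mode = false;
    }
}

int main(void)
{
    struct ifs_state s = { .min = 40, .max = 80, .step = 10, .ratio = 4 };

    ifs_update(&s, 50, 100);   /* collision-heavy interval: IFS engages at the minimum */
    ifs_update(&s, 50, 100);   /* still heavy: IFS steps up */
    ifs_update(&s, 0, 5);      /* quiet interval: IFS released */
    printf("final IFS value: %u, in_ifs_mode=%d\n", s.cur, s.in_ifs_mode);
    return 0;
}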
+ */ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { + if (hw->tx_packet_delta > MIN_NUM_XMITS) { + hw->in_ifs_mode = true; + if (hw->current_ifs_val < hw->ifs_max_val) { + if (hw->current_ifs_val == 0) + hw->current_ifs_val = + hw->ifs_min_val; + else + hw->current_ifs_val += + hw->ifs_step_size; + ew32(AIT, hw->current_ifs_val); + } + } + } else { + if (hw->in_ifs_mode && + (hw->tx_packet_delta <= MIN_NUM_XMITS)) { + hw->current_ifs_val = 0; + hw->in_ifs_mode = false; + ew32(AIT, 0); + } + } + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_get_bus_info + * @hw: Struct containing variables accessed by shared code + * + * Gets the current PCI bus type, speed, and width of the hardware + */ +void e1000_get_bus_info(struct e1000_hw *hw) +{ + u32 status; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->bus_type = e1000_bus_type_pci; + hw->bus_speed = e1000_bus_speed_unknown; + hw->bus_width = e1000_bus_width_unknown; + break; + default: + status = er32(STATUS); + hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? + e1000_bus_type_pcix : e1000_bus_type_pci; + + if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { + hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? + e1000_bus_speed_66 : e1000_bus_speed_120; + } else if (hw->bus_type == e1000_bus_type_pci) { + hw->bus_speed = (status & E1000_STATUS_PCI66) ? + e1000_bus_speed_66 : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + hw->bus_speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + hw->bus_speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + hw->bus_speed = e1000_bus_speed_133; + break; + default: + hw->bus_speed = e1000_bus_speed_reserved; + break; + } + } + hw->bus_width = (status & E1000_STATUS_BUS64) ? + e1000_bus_width_64 : e1000_bus_width_32; + break; + } +} + +/** + * e1000_write_reg_io + * @hw: Struct containing variables accessed by shared code + * @offset: offset to write to + * @value: value to write + * + * Writes a value to one of the devices registers using port I/O (as opposed to + * memory mapped I/O). Only 82544 and newer devices support port I/O. + */ +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value) +{ + unsigned long io_addr = hw->io_base; + unsigned long io_data = hw->io_base + 4; + + e1000_io_write(hw, io_addr, offset); + e1000_io_write(hw, io_data, value); +} + +/** + * e1000_get_cable_length - Estimates the cable length. + * @hw: Struct containing variables accessed by shared code + * @min_length: The estimated minimum length + * @max_length: The estimated maximum length + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * This function always returns a ranged length (minimum & maximum). + * So for M88 phy's, this function interprets the one value returned from the + * register to the minimum and maximum range. + * For IGP phy's, the function calculates the range by the AGC registers. 
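A rough standalone model, not driver code: the IGP branch of e1000_get_cable_length() below averages the four AGC channel readings, drops the weakest channel on short cables, and turns the averaged index into a min/max estimate. The AGC readings, the index-to-metres table, and the range constant here are invented; the real driver uses e1000_igp_cable_length_table and IGP01E1000_AGC_RANGE:

#include <stdint.h>
#include <stdio.h>

#define CHANNELS 4
#define RANGE    15   /* stand-in for IGP01E1000_AGC_RANGE */

int main(void)
{
    /* pretend these came out of the four PHY AGC registers */
    const uint16_t agc[CHANNELS] = { 31, 33, 8, 32 };
    /* tiny fake index-to-metres table for the demo */
    const unsigned table[64] = { [30] = 60, [31] = 65, [32] = 70, [33] = 75 };
    unsigned sum = 0, min = ~0u, idx, min_len, max_len;
    int i;

    for (i = 0; i < CHANNELS; i++) {
        sum += agc[i];
        if (agc[i] < min)
            min = agc[i];
    }

    if (sum < CHANNELS * 50)            /* short cable: drop the weakest channel */
        idx = (sum - min) / (CHANNELS - 1);
    else                                /* otherwise average all four channels */
        idx = sum / CHANNELS;

    max_len = table[idx] + RANGE;
    min_len = table[idx] > RANGE ? table[idx] - RANGE : 0;
    printf("estimated cable length: %u..%u m\n", min_len, max_len);
    return 0;
}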
+ */ +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length) +{ + s32 ret_val; + u16 agc_value = 0; + u16 i, phy_data; + u16 cable_length; + + *min_length = *max_length = 0; + + /* Use old method for Phy older than IGP */ + if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + /* Convert the enum value to ranged values */ + switch (cable_length) { + case e1000_cable_length_50: + *min_length = 0; + *max_length = e1000_igp_cable_length_50; + break; + case e1000_cable_length_50_80: + *min_length = e1000_igp_cable_length_50; + *max_length = e1000_igp_cable_length_80; + break; + case e1000_cable_length_80_110: + *min_length = e1000_igp_cable_length_80; + *max_length = e1000_igp_cable_length_110; + break; + case e1000_cable_length_110_140: + *min_length = e1000_igp_cable_length_110; + *max_length = e1000_igp_cable_length_140; + break; + case e1000_cable_length_140: + *min_length = e1000_igp_cable_length_140; + *max_length = e1000_igp_cable_length_170; + break; + default: + return -E1000_ERR_PHY; + } + } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ + u16 cur_agc_value; + u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_A, + IGP01E1000_PHY_AGC_B, + IGP01E1000_PHY_AGC_C, + IGP01E1000_PHY_AGC_D + }; + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; + + /* Value bound check. */ + if ((cur_agc_value >= + IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || + (cur_agc_value == 0)) + return -E1000_ERR_PHY; + + agc_value += cur_agc_value; + + /* Update minimal AGC value. */ + if (min_agc_value > cur_agc_value) + min_agc_value = cur_agc_value; + } + + /* Remove the minimal AGC result for length < 50m */ + if (agc_value < + IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { + agc_value -= min_agc_value; + + /* Get the average length of the remaining 3 channels */ + agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); + } else { + /* Get the average length of all the 4 channels. */ + agc_value /= IGP01E1000_PHY_CHANNEL_NUM; + } + + /* Set the range of the calculated length. */ + *min_length = ((e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) > 0) ? + (e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) : 0; + *max_length = e1000_igp_cable_length_table[agc_value] + + IGP01E1000_AGC_RANGE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_polarity - Check the cable polarity + * @hw: Struct containing variables accessed by shared code + * @polarity: output parameter : 0 - Polarity is not reversed + * 1 - Polarity is reversed. + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function simply reads the polarity bit in the + * Phy Status register. For IGP phy's, this bit is valid only if link speed is + * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will + * return 0. If the link speed is 1000 Mbps the polarity status is in the + * IGP01E1000_PHY_PCS_INIT_REG. 
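For orientation only (not part of the patch): the IGP branch of e1000_check_polarity() below keys off the link speed to decide which register carries the polarity bit. A compact restatement with placeholder bit masks rather than the real IGP01E1000_* values:

#include <stdbool.h>
#include <stdio.h>

/* placeholder masks, not the real IGP01E1000_* values */
#define SPEED_1000_MASK    0x8000
#define PCS_POLARITY_BIT   0x0008
#define PORT_POLARITY_BIT  0x0002

static bool igp_polarity_reversed(unsigned port_status, unsigned pcs_init)
{
    if (port_status & SPEED_1000_MASK)   /* gigabit: polarity lives in the PCS init reg */
        return (pcs_init & PCS_POLARITY_BIT) != 0;
    /* 10 Mb/s: polarity bit is in the port status word (always 0 at 100 Mb/s) */
    return (port_status & PORT_POLARITY_BIT) != 0;
}

int main(void)
{
    printf("%d\n", igp_polarity_reversed(SPEED_1000_MASK, PCS_POLARITY_BIT)); /* 1 */
    printf("%d\n", igp_polarity_reversed(0, PCS_POLARITY_BIT));               /* 0 */
    return 0;
}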
+ */ +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_m88) { + /* return the Polarity bit in the Status register. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >> + M88E1000_PSSR_REV_POLARITY_SHIFT) ? + e1000_rev_polarity_reversed : e1000_rev_polarity_normal; + + } else if (hw->phy_type == e1000_phy_igp) { + /* Read the Status register to check the speed */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + /* If speed is 1000 Mbps, must read the + * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status + */ + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Read the GIG initialization PCS register (0x00B4) */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, + &phy_data); + if (ret_val) + return ret_val; + + /* Check the polarity bits */ + *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } else { + /* For 10 Mbps, read the polarity bit in the status + * register. (for 100 Mbps this bit is always 0) + */ + *polarity = + (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_downshift - Check if Downshift occurred + * @hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function reads the Downshift bit in the Phy + * Specific Status register. For IGP phy's, it reads the Downgrade bit in the + * Link Health register. In IGP this bit is latched high, so the driver must + * read it immediately after link is established. + */ +static s32 e1000_check_downshift(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = + (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 
1 : 0; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> + M88E1000_PSSR_DOWNSHIFT_SHIFT; + } + + return E1000_SUCCESS; +} + +static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D +}; + +static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw) +{ + u16 min_length, max_length; + u16 phy_data, i; + s32 ret_val; + + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + if (hw->dsp_config_state != e1000_dsp_config_enabled) + return 0; + + if (min_length >= e1000_igp_cable_length_50) { + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + hw->dsp_config_state = e1000_dsp_config_activated; + } else { + u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; + u32 idle_errs = 0; + + /* clear previous idle error counts */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + udelay(1000); + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + hw->ffe_config_state = e1000_ffe_config_active; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if (ret_val) + return ret_val; + break; + } + + if (idle_errs) + ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } + + return 0; +} + +/** + * e1000_config_dsp_after_link_change + * @hw: Struct containing variables accessed by shared code + * @link_up: was link up at the time this was called + * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + * + * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a + * gigabit link is achieved to improve link quality. + */ + +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) +{ + s32 ret_val; + u16 phy_data, phy_saved_data, speed, duplex, i; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + if (link_up) { + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (speed == SPEED_1000) { + ret_val = e1000_1000Mb_check_cable_length(hw); + if (ret_val) + return ret_val; + } + } else { + if (hw->dsp_config_state == e1000_dsp_config_activated) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. 
+ */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; + + ret_val = + e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->dsp_config_state = e1000_dsp_config_enabled; + } + + if (hw->ffe_config_state == e1000_ffe_config_active) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. + */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_DEFAULT); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->ffe_config_state = e1000_ffe_config_enabled; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_set_phy_mode - Set PHY to class A mode + * @hw: Struct containing variables accessed by shared code + * + * Assumes the following operations will follow to enable the new class mode. + * 1. Do a PHY soft reset + * 2. Restart auto-negotiation or force link. + */ +static s32 e1000_set_phy_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 eeprom_data; + + if ((hw->mac_type == e1000_82545_rev_3) && + (hw->media_type == e1000_media_type_copper)) { + ret_val = + e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, + &eeprom_data); + if (ret_val) + return ret_val; + + if ((eeprom_data != EEPROM_RESERVED_WORD) && + (eeprom_data & EEPROM_PHY_CLASS_A)) { + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, + 0x000B); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, + 0x8104); + if (ret_val) + return ret_val; + + hw->phy_reset_disable = false; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - set d3 link power state + * @hw: Struct containing variables accessed by shared code + * @active: true to enable lplu false to disable lplu. + * + * This function sets the lplu state according to the active flag. When + * activating lplu this function also disables smart speed and vise versa. + * lplu will not be activated unless the device autonegotiation advertisement + * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes. 
+ * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + */ +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + /* During driver activity LPLU should not be used or it will attain link + * from the lowest speeds starting from 10Mbps. The capability is used + * for Dx transitions and states + */ + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); + if (ret_val) + return ret_val; + } + + if (!active) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (hw->smart_speed == e1000_smart_speed_on) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } else if (hw->smart_speed == e1000_smart_speed_off) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* When LPLU is enabled we should disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + return E1000_SUCCESS; +} + +/** + * e1000_set_vco_speed + * @hw: Struct containing variables accessed by shared code + * + * Change VCO speed register to improve Bit Error Rate performance of SERDES. 
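A self-contained sketch, not a driver entry point: e1000_set_vco_speed() below follows a save-page / select-page / read-modify-write / restore-page pattern against the Marvell PHY. The page-select register number and the phy_read/phy_write callbacks here are assumptions backed by a fake register file:

#include <stdint.h>
#include <stdio.h>

#define PHY_PAGE_SELECT 29   /* assumption: Marvell-style page select register */
#define PHY_GEN_CONTROL 30

/* a fake PHY: eight pages of 32 registers, plus the current page latch */
static uint16_t phy_regs[8][32];
static uint16_t cur_page;

static uint16_t phy_read(uint16_t reg)
{
    return reg == PHY_PAGE_SELECT ? cur_page : phy_regs[cur_page][reg];
}

static void phy_write(uint16_t reg, uint16_t val)
{
    if (reg == PHY_PAGE_SELECT)
        cur_page = val;
    else
        phy_regs[cur_page][reg] = val;
}

/* read-modify-write one register on a given page, preserving the caller's page */
static void phy_rmw_paged(uint16_t page, uint16_t reg, uint16_t clr, uint16_t set)
{
    uint16_t saved_page = phy_read(PHY_PAGE_SELECT);

    phy_write(PHY_PAGE_SELECT, page);
    phy_write(reg, (uint16_t)((phy_read(reg) & ~clr) | set));
    phy_write(PHY_PAGE_SELECT, saved_page);
}

int main(void)
{
    phy_rmw_paged(5, PHY_GEN_CONTROL, 1u << 8, 0);    /* clear bit 8 on page 5 */
    phy_rmw_paged(4, PHY_GEN_CONTROL, 0, 1u << 11);   /* set bit 11 on page 4 */
    printf("page4/reg30=0x%04X page5/reg30=0x%04X\n",
           phy_regs[4][PHY_GEN_CONTROL], phy_regs[5][PHY_GEN_CONTROL]);
    return 0;
}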
+ */ +static s32 e1000_set_vco_speed(struct e1000_hw *hw) +{ + s32 ret_val; + u16 default_page = 0; + u16 phy_data; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + /* Set PHY register 30, page 5, bit 8 to 0 */ + + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Set PHY register 30, page 4, bit 11 to 1 */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PHY_VCO_REG_BIT11; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + +/** + * e1000_enable_mng_pass_thru - check for bmc pass through + * @hw: Struct containing variables accessed by shared code + * + * Verifies the hardware needs to allow ARPs to be processed by the host + * returns: - true/false + */ +u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + + if (hw->asf_firmware_present) { + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN) || + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + return false; + if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) + return true; + } + return false; +} + +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 i; + + /* Polarity reversal workaround for forced 10F/10H links. */ + + /* Disable the transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the NO link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be clear. 
+ */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) + break; + msleep(100); + } + + /* Recommended delay time after link has been lost */ + msleep(1000); + + /* Now we will re-enable th transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be set. + */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_get_auto_rd_done + * @hw: Struct containing variables accessed by shared code + * + * Check for EEPROM Auto Read bit done. + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) +{ + msleep(5); + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_cfg_done + * @hw: Struct containing variables accessed by shared code + * + * Checks if the PHY configuration is done + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) +{ + msleep(10); + return E1000_SUCCESS; +} diff --git a/devices/e1000/e1000_hw-5.10-orig.h b/devices/e1000/e1000_hw-5.10-orig.h new file mode 100644 index 00000000..b57a0495 --- /dev/null +++ b/devices/e1000/e1000_hw-5.10-orig.h @@ -0,0 +1,3085 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ + +/* e1000_hw.h + * Structures, enums, and macros for the MAC + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep.h" + + +/* Forward declarations of structures used by the shared code */ +struct e1000_hw; +struct e1000_hw_stats; + +/* Enumerated types specific to the e1000 hardware */ +/* Media Access Controllers */ +typedef enum { + e1000_undefined = 0, + e1000_82542_rev2_0, + e1000_82542_rev2_1, + e1000_82543, + e1000_82544, + e1000_82540, + e1000_82545, + e1000_82545_rev_3, + e1000_82546, + e1000_ce4100, + e1000_82546_rev_3, + e1000_82541, + e1000_82541_rev_2, + e1000_82547, + e1000_82547_rev_2, + e1000_num_macs +} e1000_mac_type; + +typedef enum { + e1000_eeprom_uninitialized = 0, + e1000_eeprom_spi, + e1000_eeprom_microwire, + e1000_eeprom_flash, + e1000_eeprom_none, /* No NVM support */ + e1000_num_eeprom_types +} e1000_eeprom_type; + +/* Media Types */ +typedef enum { + e1000_media_type_copper = 0, + e1000_media_type_fiber = 1, + e1000_media_type_internal_serdes = 2, + e1000_num_media_types +} e1000_media_type; + +typedef enum { + e1000_10_half = 0, + e1000_10_full = 1, + e1000_100_half = 2, + e1000_100_full = 3 +} e1000_speed_duplex_type; + +/* Flow Control Settings */ +typedef enum { + E1000_FC_NONE = 0, + E1000_FC_RX_PAUSE = 1, + E1000_FC_TX_PAUSE = 2, + E1000_FC_FULL = 3, + E1000_FC_DEFAULT = 0xFF +} e1000_fc_type; + +struct e1000_shadow_ram { + u16 eeprom_word; + bool modified; +}; + +/* PCI bus types */ +typedef enum { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_reserved +} e1000_bus_type; + +/* PCI bus speeds */ +typedef enum { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_reserved +} e1000_bus_speed; + +/* PCI bus widths */ +typedef enum { + e1000_bus_width_unknown = 0, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +} e1000_bus_width; + +/* PHY status info structure and supporting enums */ +typedef enum { + e1000_cable_length_50 = 0, + e1000_cable_length_50_80, + e1000_cable_length_80_110, + e1000_cable_length_110_140, + e1000_cable_length_140, + e1000_cable_length_undefined = 0xFF +} e1000_cable_length; + +typedef enum { + e1000_gg_cable_length_60 = 0, + e1000_gg_cable_length_60_115 = 1, + e1000_gg_cable_length_115_150 = 2, + e1000_gg_cable_length_150 = 4 +} e1000_gg_cable_length; + +typedef enum { + e1000_igp_cable_length_10 = 10, + e1000_igp_cable_length_20 = 20, + e1000_igp_cable_length_30 = 30, + e1000_igp_cable_length_40 = 40, + e1000_igp_cable_length_50 = 50, + e1000_igp_cable_length_60 = 60, + e1000_igp_cable_length_70 = 70, + e1000_igp_cable_length_80 = 80, + e1000_igp_cable_length_90 = 90, + e1000_igp_cable_length_100 = 100, + e1000_igp_cable_length_110 = 110, + e1000_igp_cable_length_115 = 115, + e1000_igp_cable_length_120 = 120, + e1000_igp_cable_length_130 = 130, + e1000_igp_cable_length_140 = 140, + e1000_igp_cable_length_150 = 150, + e1000_igp_cable_length_160 = 160, + e1000_igp_cable_length_170 = 170, + e1000_igp_cable_length_180 = 180 +} e1000_igp_cable_length; + +typedef enum { + e1000_10bt_ext_dist_enable_normal = 0, + e1000_10bt_ext_dist_enable_lower, + e1000_10bt_ext_dist_enable_undefined = 0xFF +} e1000_10bt_ext_dist_enable; + +typedef enum { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +} e1000_rev_polarity; + +typedef enum { + e1000_downshift_normal = 0, + e1000_downshift_activated, 
+ e1000_downshift_undefined = 0xFF +} e1000_downshift; + +typedef enum { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +} e1000_smart_speed; + +typedef enum { + e1000_polarity_reversal_enabled = 0, + e1000_polarity_reversal_disabled, + e1000_polarity_reversal_undefined = 0xFF +} e1000_polarity_reversal; + +typedef enum { + e1000_auto_x_mode_manual_mdi = 0, + e1000_auto_x_mode_manual_mdix, + e1000_auto_x_mode_auto1, + e1000_auto_x_mode_auto2, + e1000_auto_x_mode_undefined = 0xFF +} e1000_auto_x_mode; + +typedef enum { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +} e1000_1000t_rx_status; + +typedef enum { + e1000_phy_m88 = 0, + e1000_phy_igp, + e1000_phy_8211, + e1000_phy_8201, + e1000_phy_undefined = 0xFF +} e1000_phy_type; + +typedef enum { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +} e1000_ms_type; + +typedef enum { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active, + e1000_ffe_config_blocked +} e1000_ffe_config; + +typedef enum { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled, + e1000_dsp_config_activated, + e1000_dsp_config_undefined = 0xFF +} e1000_dsp_config; + +struct e1000_phy_info { + e1000_cable_length cable_length; + e1000_10bt_ext_dist_enable extended_10bt_distance; + e1000_rev_polarity cable_polarity; + e1000_downshift downshift; + e1000_polarity_reversal polarity_correction; + e1000_auto_x_mode mdix_mode; + e1000_1000t_rx_status local_rx; + e1000_1000t_rx_status remote_rx; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_eeprom_info { + e1000_eeprom_type type; + u16 word_size; + u16 opcode_bits; + u16 address_bits; + u16 delay_usec; + u16 page_size; +}; + +/* Flex ASF Information */ +#define E1000_HOST_IF_MAX_SIZE 2048 + +typedef enum { + e1000_byte_align = 0, + e1000_word_align = 1, + e1000_dword_align = 2 +} e1000_align_type; + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_EEPROM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_TYPE 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 + +#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \ + (((_value) & 0xff00) >> 8)) + +/* Function prototypes */ +/* Initialization */ +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_set_mac_type(struct e1000_hw *hw); +void e1000_set_media_type(struct e1000_hw *hw); + +/* Link Configuration */ +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex); +s32 e1000_force_mac_fc(struct e1000_hw *hw); + +/* PHY */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_reset(struct e1000_hw *hw); +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); + +/* EEPROM Functions */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw); + +/* MNG HOST IF functions */ +u32 
e1000_enable_mng_pass_thru(struct e1000_hw *hw); + +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ + +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_ICH_IAMT_MODE 0x2 +#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ + +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */ +#define E1000_VFTA_ENTRY_SHIFT 0x5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658 */ +}; +#ifdef __BIG_ENDIAN +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u16 vlan_id; + u8 reserved0; + u8 status; + u32 reserved1; + u8 checksum; + u8 reserved3; + u16 reserved2; +}; +#else +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; +#endif + +bool e1000_check_mng_mode(struct e1000_hw *hw); +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_read_mac_addr(struct e1000_hw *hw); + +/* Filters (multicast, vlan, receive) */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr); +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value); +void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); + +/* LED functions */ +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +s32 e1000_blink_led_start(struct e1000_hw *hw); + +/* Adaptive IFS Functions */ + +/* Everything else */ +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +void e1000_get_bus_info(struct e1000_hw *hw); +void e1000_pci_set_mwi(struct e1000_hw *hw); +void e1000_pci_clear_mwi(struct e1000_hw *hw); +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); +int e1000_pcix_get_mmrbc(struct e1000_hw *hw); +/* Port I/O is only supported on 82544 and newer */ +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value); + +#define E1000_READ_REG_IO(a, reg) \ + e1000_read_reg_io((a), E1000_##reg) +#define E1000_WRITE_REG_IO(a, reg, val) \ + e1000_write_reg_io((a), E1000_##reg, val) + +/* PCI Device IDs */ +#define E1000_DEV_ID_82542 0x1000 +#define E1000_DEV_ID_82543GC_FIBER 0x1001 +#define E1000_DEV_ID_82543GC_COPPER 0x1004 +#define E1000_DEV_ID_82544EI_COPPER 0x1008 +#define E1000_DEV_ID_82544EI_FIBER 0x1009 +#define E1000_DEV_ID_82544GC_COPPER 0x100C +#define E1000_DEV_ID_82544GC_LOM 0x100D +#define E1000_DEV_ID_82540EM 0x100E +#define 
E1000_DEV_ID_82540EM_LOM 0x1015 +#define E1000_DEV_ID_82540EP_LOM 0x1016 +#define E1000_DEV_ID_82540EP 0x1017 +#define E1000_DEV_ID_82540EP_LP 0x101E +#define E1000_DEV_ID_82545EM_COPPER 0x100F +#define E1000_DEV_ID_82545EM_FIBER 0x1011 +#define E1000_DEV_ID_82545GM_COPPER 0x1026 +#define E1000_DEV_ID_82545GM_FIBER 0x1027 +#define E1000_DEV_ID_82545GM_SERDES 0x1028 +#define E1000_DEV_ID_82546EB_COPPER 0x1010 +#define E1000_DEV_ID_82546EB_FIBER 0x1012 +#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define E1000_DEV_ID_82541EI 0x1013 +#define E1000_DEV_ID_82541EI_MOBILE 0x1018 +#define E1000_DEV_ID_82541ER_LOM 0x1014 +#define E1000_DEV_ID_82541ER 0x1078 +#define E1000_DEV_ID_82547GI 0x1075 +#define E1000_DEV_ID_82541GI 0x1076 +#define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82541GI_LF 0x107C +#define E1000_DEV_ID_82546GB_COPPER 0x1079 +#define E1000_DEV_ID_82546GB_FIBER 0x107A +#define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82547EI_MOBILE 0x101A +#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E + +#define NODE_ADDRESS_SIZE 6 + +/* MAC decode size is 128K - This is the size of BAR0 */ +#define MAC_DECODE_SIZE (128 * 1024) + +#define E1000_82542_2_0_REV_ID 2 +#define E1000_82542_2_1_REV_ID 3 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* The sizes (in bytes) of a ethernet packet */ +#define ENET_HEADER_SIZE 14 +#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ +#define ETHERNET_FCS_SIZE 4 +#define MINIMUM_ETHERNET_PACKET_SIZE \ + (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE) +#define CRC_LENGTH ETHERNET_FCS_SIZE +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* 802.1q VLAN Packet Sizes */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */ + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ +#define ETHERNET_IP_TYPE 0x0800 /* IP packets */ +#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */ + +/* Packet Header defines */ +#define IP_PROTOCOL_TCP 6 +#define IP_PROTOCOL_UDP 0x11 + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + */ +#define POLL_IMS_ENABLE_MASK ( \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ) + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. We + * reserve one of these spots for our directed address, allowing us room for + * E1000_RAR_ENTRIES - 1 multicast addresses. 
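A worked example, not part of the patch: each RAR pair holds one station address packed the way e1000_rar_set() earlier in this patch does it, first four MAC bytes in the low register and the last two in the high register, least significant byte first, with the Address Valid flag assumed here to sit in bit 31 of RAH:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint8_t addr[6] = { 0x00, 0x1B, 0x21, 0x3C, 0x4D, 0x5E }; /* example MAC */
    const uint32_t rah_av = 1u << 31;   /* assumed Address Valid bit */

    uint32_t rar_low  = (uint32_t)addr[0]         |
                        ((uint32_t)addr[1] << 8)  |
                        ((uint32_t)addr[2] << 16) |
                        ((uint32_t)addr[3] << 24);
    uint32_t rar_high = (uint32_t)addr[4]         |
                        ((uint32_t)addr[5] << 8)  |
                        rah_av;

    printf("RAL=0x%08X RAH=0x%08X\n", rar_low, rar_high); /* 0x3C211B00, 0x80005E4D */
    return 0;
}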
+ */ +#define E1000_RAR_ENTRIES 15 + +#define MIN_NUMBER_OF_DESCRIPTORS 8 +#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 + +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of buffers 1-3 */ + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ 
+ E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; /* */ + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; /* */ + u8 cmd; /* */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; /* */ + } fields; + } upper; +}; + +/* Filters */ +#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ +#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address Register */ +struct e1000_rar { + volatile __le32 low; /* receive address low */ + volatile __le32 high; /* receive address high */ +}; + +/* Number of entries in the 
Multicast Table Array (MTA). */ +#define E1000_NUM_MTA_REGISTERS 128 + +/* IPv4 Address Table Entry */ +struct e1000_ipv4_at_entry { + volatile u32 ipv4_addr; /* IP Address (RW) */ + volatile u32 reserved; +}; + +/* Four wakeup IP addresses are supported */ +#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 +#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX +#define E1000_IP6AT_SIZE 1 + +/* IPv6 Address Table Entry */ +struct e1000_ipv6_at_entry { + volatile u8 ipv6_addr[16]; +}; + +/* Flexible Filter Length Table Entry */ +struct e1000_fflt_entry { + volatile u32 length; /* Flexible Filter Length (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Mask Table Entry */ +struct e1000_ffmt_entry { + volatile u32 mask; /* Flexible Filter Mask (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Value Table Entry */ +struct e1000_ffvt_entry { + volatile u32 value; /* Flexible Filter Value (RW) */ + volatile u32 reserved; +}; + +/* Four Flexible Filters are supported */ +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX + +#define E1000_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Register Set. (82543, 82544) + * + * Registers are defined to be 32 bits and should be accessed as 32 bit values. + * These registers are physically located on the NIC, but are mapped into the + * host memory address space. + * + * RW - register is both readable and writable + * RO - register is read only + * WO - register is write only + * R/clr - register is read only and is cleared when read + * A - register array + */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ + +#define INTEL_CE_GBE_MDIO_RCOMP_BASE (hw->ce4100_gbe_mdio_base_virt) +#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) +#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) +#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) +#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC) +#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20) +#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24) + +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ + +/* Auxiliary Control Register. 
This register is CE4100 specific, + * RMII/RGMII function is switched by this register - RW + * Following are bits definitions of the Auxiliary Control Register + */ +#define E1000_CTL_AUX 0x000E0 +#define E1000_CTL_AUX_END_SEL_SHIFT 10 +#define E1000_CTL_AUX_ENDIANESS_SHIFT 8 +#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0 + +/* descriptor and packet transfer use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use CTL_AUX.ENDIANESS, packet use default */ +#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use default, packet use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT) +/* all use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT) + +#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT) +#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT) + +/* LW little endian, Byte big endian */ +#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT) + +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define FEXTNVM_SW_CONFIG 0x0001 +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_FLASH_UPDATES 1000 +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* RX Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* RX Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* RX 
Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* RX Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* RX Data FIFO Packet Count - RW */ +#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ +#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ +#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ +#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ +#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ +#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ +#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */ +#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ +#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ +#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ +#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ +#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ +#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ +#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ +#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ +#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */ +#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */ +#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */ +#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */ +#define E1000_TDT 0x03818 /* TX Descripotr Tail - RW */ +#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */ +#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ +#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ +#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ +#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ +#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ +#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ +#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ +#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ +#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define 
E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 
0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ + +#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ + +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* Register Set (82542) + * + * Some of the 82542 registers are located at different offsets than they are + * in more current versions of the 8254x. Despite the difference in location, + * the registers function in the same manner. 
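+ * For example, the receive descriptor base address and delay timer
+ * registers sit at 0x00110/0x00114 and 0x00108 on the 82542
+ * (E1000_82542_RDBAL, E1000_82542_RDBAH, E1000_82542_RDTR below),
+ * whereas later devices use 0x02800/0x02804 and 0x02820
+ * (E1000_RDBAL, E1000_RDBAH, E1000_RDTR above).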
+ */ +#define E1000_82542_CTL_AUX E1000_CTL_AUX +#define E1000_82542_CTRL E1000_CTRL +#define E1000_82542_CTRL_DUP E1000_CTRL_DUP +#define E1000_82542_STATUS E1000_STATUS +#define E1000_82542_EECD E1000_EECD +#define E1000_82542_EERD E1000_EERD +#define E1000_82542_CTRL_EXT E1000_CTRL_EXT +#define E1000_82542_FLA E1000_FLA +#define E1000_82542_MDIC E1000_MDIC +#define E1000_82542_SCTL E1000_SCTL +#define E1000_82542_FEXTNVM E1000_FEXTNVM +#define E1000_82542_FCAL E1000_FCAL +#define E1000_82542_FCAH E1000_FCAH +#define E1000_82542_FCT E1000_FCT +#define E1000_82542_VET E1000_VET +#define E1000_82542_RA 0x00040 +#define E1000_82542_ICR E1000_ICR +#define E1000_82542_ITR E1000_ITR +#define E1000_82542_ICS E1000_ICS +#define E1000_82542_IMS E1000_IMS +#define E1000_82542_IMC E1000_IMC +#define E1000_82542_RCTL E1000_RCTL +#define E1000_82542_RDTR 0x00108 +#define E1000_82542_RDFH E1000_RDFH +#define E1000_82542_RDFT E1000_RDFT +#define E1000_82542_RDFHS E1000_RDFHS +#define E1000_82542_RDFTS E1000_RDFTS +#define E1000_82542_RDFPC E1000_RDFPC +#define E1000_82542_RDBAL 0x00110 +#define E1000_82542_RDBAH 0x00114 +#define E1000_82542_RDLEN 0x00118 +#define E1000_82542_RDH 0x00120 +#define E1000_82542_RDT 0x00128 +#define E1000_82542_RDTR0 E1000_82542_RDTR +#define E1000_82542_RDBAL0 E1000_82542_RDBAL +#define E1000_82542_RDBAH0 E1000_82542_RDBAH +#define E1000_82542_RDLEN0 E1000_82542_RDLEN +#define E1000_82542_RDH0 E1000_82542_RDH +#define E1000_82542_RDT0 E1000_82542_RDT +#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication + * RX Control - RW */ +#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) +#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ +#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ +#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ +#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ +#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ +#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ +#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ +#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ +#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ +#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ +#define E1000_82542_RDTR1 0x00130 +#define E1000_82542_RDBAL1 0x00138 +#define E1000_82542_RDBAH1 0x0013C +#define E1000_82542_RDLEN1 0x00140 +#define E1000_82542_RDH1 0x00148 +#define E1000_82542_RDT1 0x00150 +#define E1000_82542_FCRTH 0x00160 +#define E1000_82542_FCRTL 0x00168 +#define E1000_82542_FCTTV E1000_FCTTV +#define E1000_82542_TXCW E1000_TXCW +#define E1000_82542_RXCW E1000_RXCW +#define E1000_82542_MTA 0x00200 +#define E1000_82542_TCTL E1000_TCTL +#define E1000_82542_TCTL_EXT E1000_TCTL_EXT +#define E1000_82542_TIPG E1000_TIPG +#define E1000_82542_TDBAL 0x00420 +#define E1000_82542_TDBAH 0x00424 +#define E1000_82542_TDLEN 0x00428 +#define E1000_82542_TDH 0x00430 +#define E1000_82542_TDT 0x00438 +#define E1000_82542_TIDV 0x00440 +#define E1000_82542_TBT E1000_TBT +#define E1000_82542_AIT E1000_AIT +#define E1000_82542_VFTA 0x00600 +#define E1000_82542_LEDCTL E1000_LEDCTL +#define E1000_82542_PBA E1000_PBA +#define E1000_82542_PBS E1000_PBS +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_EEARBC E1000_EEARBC +#define E1000_82542_FLASHT E1000_FLASHT +#define E1000_82542_EEWR E1000_EEWR +#define E1000_82542_FLSWCTL E1000_FLSWCTL +#define E1000_82542_FLSWDATA 
E1000_FLSWDATA +#define E1000_82542_FLSWCNT E1000_FLSWCNT +#define E1000_82542_FLOP E1000_FLOP +#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL +#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE +#define E1000_82542_PHY_CTRL E1000_PHY_CTRL +#define E1000_82542_ERT E1000_ERT +#define E1000_82542_RXDCTL E1000_RXDCTL +#define E1000_82542_RXDCTL1 E1000_RXDCTL1 +#define E1000_82542_RADV E1000_RADV +#define E1000_82542_RSRPD E1000_RSRPD +#define E1000_82542_TXDMAC E1000_TXDMAC +#define E1000_82542_KABGTXD E1000_KABGTXD +#define E1000_82542_TDFHS E1000_TDFHS +#define E1000_82542_TDFTS E1000_TDFTS +#define E1000_82542_TDFPC E1000_TDFPC +#define E1000_82542_TXDCTL E1000_TXDCTL +#define E1000_82542_TADV E1000_TADV +#define E1000_82542_TSPMT E1000_TSPMT +#define E1000_82542_CRCERRS E1000_CRCERRS +#define E1000_82542_ALGNERRC E1000_ALGNERRC +#define E1000_82542_SYMERRS E1000_SYMERRS +#define E1000_82542_RXERRC E1000_RXERRC +#define E1000_82542_MPC E1000_MPC +#define E1000_82542_SCC E1000_SCC +#define E1000_82542_ECOL E1000_ECOL +#define E1000_82542_MCC E1000_MCC +#define E1000_82542_LATECOL E1000_LATECOL +#define E1000_82542_COLC E1000_COLC +#define E1000_82542_DC E1000_DC +#define E1000_82542_TNCRS E1000_TNCRS +#define E1000_82542_SEC E1000_SEC +#define E1000_82542_CEXTERR E1000_CEXTERR +#define E1000_82542_RLEC E1000_RLEC +#define E1000_82542_XONRXC E1000_XONRXC +#define E1000_82542_XONTXC E1000_XONTXC +#define E1000_82542_XOFFRXC E1000_XOFFRXC +#define E1000_82542_XOFFTXC E1000_XOFFTXC +#define E1000_82542_FCRUC E1000_FCRUC +#define E1000_82542_PRC64 E1000_PRC64 +#define E1000_82542_PRC127 E1000_PRC127 +#define E1000_82542_PRC255 E1000_PRC255 +#define E1000_82542_PRC511 E1000_PRC511 +#define E1000_82542_PRC1023 E1000_PRC1023 +#define E1000_82542_PRC1522 E1000_PRC1522 +#define E1000_82542_GPRC E1000_GPRC +#define E1000_82542_BPRC E1000_BPRC +#define E1000_82542_MPRC E1000_MPRC +#define E1000_82542_GPTC E1000_GPTC +#define E1000_82542_GORCL E1000_GORCL +#define E1000_82542_GORCH E1000_GORCH +#define E1000_82542_GOTCL E1000_GOTCL +#define E1000_82542_GOTCH E1000_GOTCH +#define E1000_82542_RNBC E1000_RNBC +#define E1000_82542_RUC E1000_RUC +#define E1000_82542_RFC E1000_RFC +#define E1000_82542_ROC E1000_ROC +#define E1000_82542_RJC E1000_RJC +#define E1000_82542_MGTPRC E1000_MGTPRC +#define E1000_82542_MGTPDC E1000_MGTPDC +#define E1000_82542_MGTPTC E1000_MGTPTC +#define E1000_82542_TORL E1000_TORL +#define E1000_82542_TORH E1000_TORH +#define E1000_82542_TOTL E1000_TOTL +#define E1000_82542_TOTH E1000_TOTH +#define E1000_82542_TPR E1000_TPR +#define E1000_82542_TPT E1000_TPT +#define E1000_82542_PTC64 E1000_PTC64 +#define E1000_82542_PTC127 E1000_PTC127 +#define E1000_82542_PTC255 E1000_PTC255 +#define E1000_82542_PTC511 E1000_PTC511 +#define E1000_82542_PTC1023 E1000_PTC1023 +#define E1000_82542_PTC1522 E1000_PTC1522 +#define E1000_82542_MPTC E1000_MPTC +#define E1000_82542_BPTC E1000_BPTC +#define E1000_82542_TSCTC E1000_TSCTC +#define E1000_82542_TSCTFC E1000_TSCTFC +#define E1000_82542_RXCSUM E1000_RXCSUM +#define E1000_82542_WUC E1000_WUC +#define E1000_82542_WUFC E1000_WUFC +#define E1000_82542_WUS E1000_WUS +#define E1000_82542_MANC E1000_MANC +#define E1000_82542_IPAV E1000_IPAV +#define E1000_82542_IP4AT E1000_IP4AT +#define E1000_82542_IP6AT E1000_IP6AT +#define E1000_82542_WUPL E1000_WUPL +#define E1000_82542_WUPM E1000_WUPM +#define E1000_82542_FFLT E1000_FFLT +#define E1000_82542_TDFH 0x08010 +#define E1000_82542_TDFT 0x08018 +#define E1000_82542_FFMT E1000_FFMT +#define 
E1000_82542_FFVT E1000_FFVT +#define E1000_82542_HOST_IF E1000_HOST_IF +#define E1000_82542_IAM E1000_IAM +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_PSRCTL E1000_PSRCTL +#define E1000_82542_RAID E1000_RAID +#define E1000_82542_TARC0 E1000_TARC0 +#define E1000_82542_TDBAL1 E1000_TDBAL1 +#define E1000_82542_TDBAH1 E1000_TDBAH1 +#define E1000_82542_TDLEN1 E1000_TDLEN1 +#define E1000_82542_TDH1 E1000_TDH1 +#define E1000_82542_TDT1 E1000_TDT1 +#define E1000_82542_TXDCTL1 E1000_TXDCTL1 +#define E1000_82542_TARC1 E1000_TARC1 +#define E1000_82542_RFCTL E1000_RFCTL +#define E1000_82542_GCR E1000_GCR +#define E1000_82542_GSCL_1 E1000_GSCL_1 +#define E1000_82542_GSCL_2 E1000_GSCL_2 +#define E1000_82542_GSCL_3 E1000_GSCL_3 +#define E1000_82542_GSCL_4 E1000_GSCL_4 +#define E1000_82542_FACTPS E1000_FACTPS +#define E1000_82542_SWSM E1000_SWSM +#define E1000_82542_FWSM E1000_FWSM +#define E1000_82542_FFLT_DBG E1000_FFLT_DBG +#define E1000_82542_IAC E1000_IAC +#define E1000_82542_ICRXPTC E1000_ICRXPTC +#define E1000_82542_ICRXATC E1000_ICRXATC +#define E1000_82542_ICTXPTC E1000_ICTXPTC +#define E1000_82542_ICTXATC E1000_ICTXATC +#define E1000_82542_ICTXQEC E1000_ICTXQEC +#define E1000_82542_ICTXQMTC E1000_ICTXQMTC +#define E1000_82542_ICRXDMTC E1000_ICRXDMTC +#define E1000_82542_ICRXOC E1000_ICRXOC +#define E1000_82542_HICR E1000_HICR + +#define E1000_82542_CPUVEC E1000_CPUVEC +#define E1000_82542_MRQC E1000_MRQC +#define E1000_82542_RETA E1000_RETA +#define E1000_82542_RSSRK E1000_RSSRK +#define E1000_82542_RSSIM E1000_RSSIM +#define E1000_82542_RSSIR E1000_RSSIR +#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA +#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 txerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorcl; + u64 gorch; + u64 gotcl; + u64 gotch; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rlerrc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 torl; + u64 torh; + u64 totl; + u64 toth; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +/* Structure containing variables used by the shared code (e1000_hw.c) */ +struct e1000_hw { + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + void __iomem *ce4100_gbe_mdio_base_virt; + e1000_mac_type mac_type; + e1000_phy_type phy_type; + u32 phy_init_script; + e1000_media_type media_type; + void *back; + struct e1000_shadow_ram *eeprom_shadow_ram; + u32 flash_bank_size; + u32 flash_base_addr; + e1000_fc_type fc; + e1000_bus_speed bus_speed; + e1000_bus_width bus_width; + e1000_bus_type bus_type; + struct e1000_eeprom_info eeprom; + e1000_ms_type master_slave; + e1000_ms_type original_master_slave; + e1000_ffe_config ffe_config_state; + u32 asf_firmware_present; + u32 eeprom_semaphore_present; + unsigned long io_base; + u32 phy_id; + u32 phy_revision; + u32 phy_addr; + u32 original_fc; + u32 txcw; + u32 autoneg_failed; + u32 max_frame_size; + u32 
min_frame_size; + u32 mc_filter_type; + u32 num_mc_addrs; + u32 collision_delta; + u32 tx_packet_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + bool tx_pkt_filtering; + struct e1000_host_mng_dhcp_cookie mng_cookie; + u16 phy_spd_default; + u16 autoneg_advertised; + u16 pci_cmd_word; + u16 fc_high_water; + u16 fc_low_water; + u16 fc_pause_time; + u16 current_ifs_val; + u16 ifs_min_val; + u16 ifs_max_val; + u16 ifs_step_size; + u16 ifs_ratio; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 autoneg; + u8 mdix; + u8 forced_speed_duplex; + u8 wait_autoneg_complete; + u8 dma_fairness; + u8 mac_addr[NODE_ADDRESS_SIZE]; + u8 perm_mac_addr[NODE_ADDRESS_SIZE]; + bool disable_polarity_correction; + bool speed_downgraded; + e1000_smart_speed smart_speed; + e1000_dsp_config dsp_config_state; + bool get_link_status; + bool serdes_has_link; + bool tbi_compatibility_en; + bool tbi_compatibility_on; + bool laa_is_present; + bool phy_reset_disable; + bool initialize_hw_bits_disable; + bool fc_send_xon; + bool fc_strict_ieee; + bool report_tx_early; + bool adaptive_ifs; + bool ifs_params_forced; + bool in_ifs_mode; + bool mng_reg_access_disabled; + bool leave_av_bit_off; + bool bad_tx_carr_stats_fd; + bool has_smbus; +}; + +#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ +#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ +#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ +#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ +/* Register Bit Masks */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion + by EEPROM/Flash */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ + +/* EEPROM/Flash Control */ +#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ +#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ +#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ +#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_EEPROM_GRANT_ATTEMPTS +#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_STM_OPCODE 0xDB00 +#define E1000_HICR_FW_RESET 0xC0 + +#define E1000_SHADOW_RAM_WORDS 2048 +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC0 + +/* EEPROM Read */ +#define E1000_EERD_START 0x00000001 /* Start Read */ +#define E1000_EERD_DONE 0x00000010 /* Read Done */ +#define E1000_EERD_ADDR_SHIFT 8 +#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */ +#define E1000_EERD_DATA_SHIFT 16 +#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */ + +/* SPI EEPROM Status Register */ +#define EEPROM_STATUS_RDY_SPI 0x01 +#define EEPROM_STATUS_WEN_SPI 
0x02 +#define EEPROM_STATUS_BP0_SPI 0x04 +#define EEPROM_STATUS_BP1_SPI 0x08 +#define EEPROM_STATUS_WPEN_SPI 0x80 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 + +#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000 +#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000 +#define INTEL_CE_GBE_MDIC_GO 0x80000000 +#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000 + +#define E1000_KUMCTRLSTA_MASK 0x0000FFFF +#define E1000_KUMCTRLSTA_OFFSET 0x001F0000 +#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KUMCTRLSTA_REN 0x00200000 + +#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 +#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001 +#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 +#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003 +#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 
0x00000004 +#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 +#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 +#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E +#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F + +/* FIFO Control */ +#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008 +#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 + +/* In-Band Control */ +#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500 +#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 + +/* Half-Duplex Control */ +#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 +#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 + +#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E + +#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000 +#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000 + +#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000 +#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000 +#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 +#define E1000_PHY_CTRL_B2B_EN 0x00000080 + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Receive Address */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Clear */ +#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMC_SRPD E1000_ICR_SRPD +#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMC_DSW E1000_ICR_DSW +#define E1000_IMC_PHYINT E1000_ICR_PHYINT +#define E1000_IMC_EPRST E1000_ICR_EPRST + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define 
E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SW_W_SYNC definitions */ +#define E1000_SWFW_EEP_SM 0x0001 +#define E1000_SWFW_PHY0_SM 0x0002 +#define E1000_SWFW_PHY1_SM 0x0004 +#define E1000_SWFW_MAC_CSR_SM 0x0008 + +/* Receive Descriptor */ +#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ +#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ +#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */ +#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */ +#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */ + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Receive Descriptor Control */ +#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ +#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ +#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */ +#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 
0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. + still to be processed. */ +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ +/* Extended Transmit Control */ +#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ + +/* Receive Checksum Control */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Multiple Receive Queue Control */ +#define E1000_MRQC_ENABLE_MASK 0x00000003 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 
/* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */ +#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */ +#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */ +#define E1000_WUS_BC 0x00000010 /* Broadcast Received */ +#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */ +#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */ +#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */ +#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */ +#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */ +#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery + * Filtering */ +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host + * memory */ +#define 
E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address + * filtering */ +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* FW Semaphore Register */ +#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ + +#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ +#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ +#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ +#define E1000_FWSM_SKUEL_SHIFT 29 +#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ +#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ +#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ +#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */ + +/* FFLT Debug Register */ +#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ + +typedef enum { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_interface_only +} e1000_mng_mode; + +/* Host Interface Control Register */ +#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ +#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done + * to put command in RAM */ +#define E1000_HICR_SV 0x00000004 /* Status Validity */ +#define E1000_HICR_FWR 0x00000080 /* FW reset. 
Set by the Host */ + +/* Host Interface Command Interface - Address range 0x8800-0x8EFF */ +#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ + +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; /* I/F bits for command, status for return */ + u8 checksum; +}; +struct e1000_host_command_info { + struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ +}; + +/* Host SMB register #0 */ +#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ +#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ +#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ +#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ + +/* Host SMB register #1 */ +#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN +#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN +#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT +#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT + +/* FW Status Register */ +#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ + +/* Wake Up Packet Length */ +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ + +#define E1000_MDALIGN 4096 + +/* PCI-Ex registers*/ + +/* PCI-Ex Control Register */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +/* Function Active and Power State to MNG */ +#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 +#define E1000_FACTPS_LAN0_VALID 0x00000004 +#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008 +#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0 +#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6 +#define E1000_FACTPS_LAN1_VALID 0x00000100 +#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200 +#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000 +#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12 +#define E1000_FACTPS_IDE_ENABLE 0x00004000 +#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000 +#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000 +#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18 +#define E1000_FACTPS_SP_ENABLE 0x00100000 +#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000 +#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000 +#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24 +#define E1000_FACTPS_IPMI_ENABLE 0x04000000 +#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000 +#define E1000_FACTPS_MNGCG 0x20000000 +#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000 +#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000 + +/* PCI-Ex Config Space */ +#define PCI_EX_LINK_STATUS 0x12 +#define PCI_EX_LINK_WIDTH_MASK 0x3F0 +#define PCI_EX_LINK_WIDTH_SHIFT 4 + +/* EEPROM Commands - Microwire */ +#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode 
*/ +#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM Commands - SPI */ +#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ +#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ +#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ +#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ +#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Size definitions */ +#define EEPROM_WORD_SIZE_SHIFT 6 +#define EEPROM_SIZE_SHIFT 10 +#define EEPROM_SIZE_MASK 0x1C00 + +/* EEPROM Word Offsets */ +#define EEPROM_COMPAT 0x0003 +#define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 +#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ +#define EEPROM_PHY_CLASS_WORD 0x0007 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 +#define EEPROM_INIT_3GIO_3 0x001A +#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 +#define EEPROM_CFG 0x0012 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ +#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */ +#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F + +/* Mask bit for PHY class in Word 7 of the EEPROM */ +#define EEPROM_PHY_CLASS_A 0x8000 + +/* Mask bits for fields in Word 0x0a of the EEPROM */ +#define EEPROM_WORD0A_ILOS 0x0010 +#define EEPROM_WORD0A_SWDPIO 0x01E0 +#define EEPROM_WORD0A_LRST 0x0200 +#define EEPROM_WORD0A_FD 0x0400 +#define EEPROM_WORD0A_66MHZ 0x0800 + +/* Mask bits for fields in Word 0x0f of the EEPROM */ +#define EEPROM_WORD0F_PAUSE_MASK 0x3000 +#define EEPROM_WORD0F_PAUSE 0x1000 +#define EEPROM_WORD0F_ASM_DIR 0x2000 +#define EEPROM_WORD0F_ANE 0x0800 +#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 +#define EEPROM_WORD0F_LPLU 0x0001 + +/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */ +#define EEPROM_WORD1020_GIGA_DISABLE 0x0010 +#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008 + +/* Mask bits for fields in Word 0x1a of the EEPROM */ +#define EEPROM_WORD1A_ASPM_MASK 0x000C + 
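As a minimal sketch (outside the patch proper), the Word 0x0F pause bits defined above are commonly decoded into a flow-control mode along the following lines; only the EEPROM_WORD0F_* masks come from this header, while the enum and helper name are illustrative assumptions, not part of the driver.

/* Sketch: derive a flow-control mode from EEPROM word 0x0F
 * (EEPROM_INIT_CONTROL2_REG). EEPROM_WORD0F_PAUSE_MASK covers both the
 * PAUSE and ASM_DIR bits, so three outcomes are possible. */
enum fc_mode { FC_NONE, FC_TX_PAUSE, FC_FULL }; /* illustrative names */

static enum fc_mode fc_mode_from_word0f(unsigned short word0f)
{
	unsigned short pause = word0f & EEPROM_WORD0F_PAUSE_MASK;

	if (pause == 0)
		return FC_NONE;      /* neither PAUSE nor ASM_DIR requested */
	if (pause == EEPROM_WORD0F_ASM_DIR)
		return FC_TX_PAUSE;  /* asymmetric direction bit only */
	return FC_FULL;              /* PAUSE set, with or without ASM_DIR */
}

With the mask values above, a word of 0x0000 yields FC_NONE, 0x2000 (ASM_DIR only) yields FC_TX_PAUSE, and 0x1000 or 0x3000 yields FC_FULL.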
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* EEPROM Map defines (WORD OFFSETS)*/ +#define EEPROM_NODE_ADDRESS_BYTE_0 0 +#define EEPROM_PBA_BYTE_1 8 + +#define EEPROM_RESERVED_WORD 0xFFFF + +/* EEPROM Map Sizes (Byte Counts) */ +#define PBA_SIZE 4 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +/* Collision distance is a 0-based value that applies to + * half-duplex-capable hardware only. */ +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLLISION_DISTANCE_82542 64 +#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_COLD_SHIFT 12 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define E1000_TXDMAC_DPP 0x00000001 + +/* Adaptive IFS defines */ +#define TX_THRESHOLD_START 8 +#define TX_THRESHOLD_INCREMENT 10 +#define TX_THRESHOLD_DECREMENT 1 +#define TX_THRESHOLD_STOP 190 +#define TX_THRESHOLD_DISABLE 0 +#define TX_THRESHOLD_TIMER_MS 10000 +#define MIN_NUM_XMITS 1000 +#define IFS_MAX 80 +#define IFS_STEP 10 +#define IFS_MIN 40 +#define IFS_RATIO 4 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 +#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 +#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 +#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000 + +#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF +#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ +#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ +#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ + +#define E1000_PBS_16K E1000_PBA_16K + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* The historical defaults for the flow control values are given below. 
*/ +#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ +#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ +#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ + +/* PCIX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 + +/* Number of bits required to shift right the "pause" bits from the + * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register. + */ +#define PAUSE_SHIFT 5 + +/* Number of bits required to shift left the "SWDPIO" bits from the + * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register. + */ +#define SWDPIO_SHIFT 17 + +/* Number of bits required to shift left the "SWDPIO_EXT" bits from the + * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register. + */ +#define SWDPIO__EXT_SHIFT 4 + +/* Number of bits required to shift left the "ILOS" bit from the EEPROM + * (bit 4) to the "ILOS" (bit 7) field in the CTRL register. + */ +#define ILOS_SHIFT 3 + +#define RECEIVE_BUFFER_ALIGN_SIZE (256) + +/* Number of milliseconds we wait for auto-negotiation to complete */ +#define LINK_UP_TIMEOUT 500 + +/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ +#define AUTO_READ_DONE_TIMEOUT 10 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 + +#define E1000_TX_BUFFER_SIZE ((u32)1514) + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +/* TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the RX descriptor with EOP set + * error = the 8 bit error field of the RX descriptor with EOP set + * length = the sum of all the length fields of the RX descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * max_frame_length = the maximum frame length we want to accept. + * min_frame_length = the minimum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \ + ((adapter)->tbi_compatibility_on && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= ((adapter)->max_frame_size + 1))) : \ + (((length) > (adapter)->min_frame_size) && \ + ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1))))) + +/* Structures, enums, and macros for the PHY */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CTRL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +#define IGP01E1000_IEEE_REGS_PAGE 0x0000 +#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 +#define IGP01E1000_IEEE_FORCE_GIGA 0x0140 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ + +/* IGP01E1000 AGC Registers - stores the cable length values*/ +#define IGP01E1000_PHY_AGC_A 0x1172 +#define IGP01E1000_PHY_AGC_B 0x1272 +#define IGP01E1000_PHY_AGC_C 0x1472 +#define IGP01E1000_PHY_AGC_D 0x1872 + +/* IGP02E1000 AGC Registers for cable length values */ +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +/* IGP01E1000 DSP Reset Register */ +#define IGP01E1000_PHY_DSP_RESET 0x1F33 +#define IGP01E1000_PHY_DSP_SET 0x1F71 +#define IGP01E1000_PHY_DSP_FFE 0x1F35 + +#define IGP01E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_CHANNEL_NUM 4 + +#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271 +#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 +#define IGP01E1000_PHY_AGC_PARAM_D 0x1871 + +#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000 +#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000 + +#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890 +#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000 +#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004 +#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069 + +#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A +/* IGP01E1000 PCS Initialization register - stores the polarity status when + * speed = 1000 Mbps. */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5 + +#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */ + +/* Next Page TX Register */ +#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* Link Partner Next Page Register */ +#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */ +#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 
/* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +/* Extended Status Register */ +#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */ +#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */ + +#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */ + /* (0=enable, 1=disable) */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. 
+ */ +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080 + /* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 + /* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5 +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; + * 3=110-140M;4=>140M */ +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_REV_POLARITY_SHIFT 1 +#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5 +#define M88E1000_PSSR_MDIX_SHIFT 6 +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* IGP01E1000 Specific Port Config Register - R/W */ +#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 +#define IGP01E1000_PSCFR_PRE_EN 0x0020 +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 +#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100 +#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400 +#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000 + +/* IGP01E1000 Specific Port Status Register - R/O */ +#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */ +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C +#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200 +#define IGP01E1000_PSSR_LINK_UP 0x0400 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */ +#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000 +#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */ +#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */ + +/* IGP01E1000 Specific Port Control Register - R/W */ +#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010 +#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200 +#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400 +#define IGP01E1000_PSCR_FLIP_CHIP 0x0800 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */ + +/* IGP01E1000 Specific Port Link Health Register */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000 +#define IGP01E1000_PLHR_MASTER_FAULT 0x2000 +#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000 +#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */ +#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_0 0x0100 +#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040 +#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010 +#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008 +#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004 +#define 
IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002 +#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001 + +/* IGP01E1000 Channel Quality Register */ +#define IGP01E1000_MSE_CHANNEL_D 0x000F +#define IGP01E1000_MSE_CHANNEL_C 0x00F0 +#define IGP01E1000_MSE_CHANNEL_B 0x0F00 +#define IGP01E1000_MSE_CHANNEL_A 0xF000 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ + +/* IGP01E1000 DSP reset macros */ +#define DSP_RESET_ENABLE 0x0 +#define DSP_RESET_DISABLE 0x2 +#define E1000_MAX_DSP_RESETS 10 + +/* IGP01E1000 & IGP02E1000 AGC Registers */ + +#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ + +/* IGP02E1000 AGC Register Length 9-bit mask */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F + +/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ +#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 +#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113 + +/* The precision error of the cable length is +/- 10 meters */ +#define IGP01E1000_AGC_RANGE 10 +#define IGP02E1000_AGC_RANGE 15 + +/* IGP01E1000 PCS Initialization register */ +/* bits 3:6 in the PCS registers stores the channels polarity */ +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +/* IGP01E1000 GMII FIFO Register */ +#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed + * on Link-Up */ +#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */ + +/* IGP01E1000 Analog Register */ +#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 +#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 +#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC +#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE + +#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 +#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 +#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 +#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 +#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 + +#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 +#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 +#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 +#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 + +/* Bit definitions for valid PHY IDs. */ +/* I = Integrated + * E = External + */ +#define M88_VENDOR 0x0141 +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID +#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1118_E_PHY_ID 0x01410E40 +#define L1LXT971A_PHY_ID 0x001378E0 + +#define RTL8211B_PHY_ID 0x001CC910 +#define RTL8201N_PHY_ID 0x8200 +#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */ +#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */ + +/* Bits... 
+ * 15-5: page + * 4-0: register offset + */ +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) \ + (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) + +#define IGP3_PHY_PORT_CTRL \ + PHY_REG(769, 17) /* Port General Configuration */ +#define IGP3_PHY_RATE_ADAPT_CTRL \ + PHY_REG(769, 25) /* Rate Adapter Control Register */ + +#define IGP3_KMRN_FIFO_CTRL_STATS \ + PHY_REG(770, 16) /* KMRN FIFO's control/status register */ +#define IGP3_KMRN_POWER_MNG_CTRL \ + PHY_REG(770, 17) /* KMRN Power Management Control Register */ +#define IGP3_KMRN_INBAND_CTRL \ + PHY_REG(770, 18) /* KMRN Inband Control Register */ +#define IGP3_KMRN_DIAG \ + PHY_REG(770, 19) /* KMRN Diagnostic register */ +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ +#define IGP3_KMRN_ACK_TIMEOUT \ + PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ + +#define IGP3_VR_CTRL \ + PHY_REG(776, 18) /* Voltage regulator control register */ +#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ +#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */ + +#define IGP3_CAPABILITY \ + PHY_REG(776, 19) /* IGP3 Capability Register */ + +/* Capabilities for SKU Control */ +#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ +#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ +#define IGP3_CAP_ASF 0x0004 /* Support ASF */ +#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ +#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ +#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ +#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ +#define IGP3_CAP_RSS 0x0080 /* Support RSS */ +#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ +#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ + +#define IGP3_PPC_JORDAN_EN 0x0001 +#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 + +#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001 +#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E +#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 +#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 + +#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. 
Ctrl register */ +#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ + +#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) +#define IGP3_KMRN_EC_DIS_INBAND 0x0080 + +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ +#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ +#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */ +#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ +#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ +#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ +#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ +#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ +#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ +#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ + +#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */ +#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ +#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ +#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ +#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ +#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ +#define IFE_PESC_POLARITY_REVERSED_SHIFT 8 + +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */ +#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ +#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ +#define IFE_PSC_FORCE_POLARITY_SHIFT 5 +#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 + +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ +#define IFE_PMC_MDIX_MODE_SHIFT 6 +#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ + +#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ +#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ +#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ +#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ +#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ +#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ +#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ +#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ +#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ +#define IFE_PSCL_PROBE_LEDS_OFF 
0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ +#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define ICH_CYCLE_READ 0x0 +#define ICH_CYCLE_RESERVED 0x1 +#define ICH_CYCLE_WRITE 0x2 +#define ICH_CYCLE_ERASE 0x3 + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_FRACC 0x0050 +#define ICH_FLASH_FREG0 0x0054 +#define ICH_FLASH_FREG1 0x0058 +#define ICH_FLASH_FREG2 0x005C +#define ICH_FLASH_FREG3 0x0060 +#define ICH_FLASH_FPR0 0x0074 +#define ICH_FLASH_FPR1 0x0078 +#define ICH_FLASH_SSFSTS 0x0090 +#define ICH_FLASH_SSFCTL 0x0092 +#define ICH_FLASH_PREOP 0x0094 +#define ICH_FLASH_OPTYPE 0x0096 +#define ICH_FLASH_OPMENU 0x0098 + +#define ICH_FLASH_REG_MAPSIZE 0x00A0 +#define ICH_FLASH_SECTOR_SIZE 4096 +#define ICH_GFPREG_BASE_MASK 0x1FFF +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF + +/* Miscellaneous PHY bit definitions. */ +#define PHY_PREAMBLE 0xFFFFFFFF +#define PHY_SOF 0x01 +#define PHY_OP_READ 0x02 +#define PHY_OP_WRITE 0x01 +#define PHY_TURNAROUND 0x02 +#define PHY_PREAMBLE_SIZE 32 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 +#define E1000_PHY_ADDRESS 0x01 +#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */ +#define REG4_SPEED_MASK 0x01E0 +#define REG9_SPEED_MASK 0x0300 +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 +#define ADVERTISE_1000_FULL 0x0020 +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ +#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ +#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ + +#endif /* _E1000_HW_H_ */ diff --git a/devices/e1000/e1000_hw-5.14-ethercat.c b/devices/e1000/e1000_hw-5.14-ethercat.c new file mode 100644 index 00000000..4d6e91dd --- /dev/null +++ b/devices/e1000/e1000_hw-5.14-ethercat.c @@ -0,0 +1,5636 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ + +/* e1000_hw.c + * Shared functions for accessing and configuring the MAC + */ + +#include "e1000-5.14-ethercat.h" + +static s32 e1000_check_downshift(struct e1000_hw *hw); +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity); +static void e1000_clear_hw_cntrs(struct e1000_hw *hw); +static void e1000_clear_vfta(struct e1000_hw *hw); +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, + bool link_up); +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); +static s32 e1000_detect_gig_phy(struct e1000_hw *hw); +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length); +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static s32 e1000_id_led_init(struct e1000_hw *hw); +static void e1000_init_rx_addrs(struct e1000_hw *hw); +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value); +static s32 e1000_set_phy_type(struct e1000_hw *hw); +static void e1000_phy_init_script(struct e1000_hw *hw); +static s32 e1000_setup_copper_link(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw); +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw); +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count); +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count); +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data); +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data); +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); +static s32 e1000_acquire_eeprom(struct e1000_hw *hw); +static void e1000_release_eeprom(struct e1000_hw *hw); +static void e1000_standby_eeprom(struct e1000_hw *hw); +static s32 e1000_set_vco_speed(struct e1000_hw *hw); +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw); +static s32 e1000_set_phy_mode(struct e1000_hw *hw); +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); + +/* IGP cable length table */ +static const +u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = { + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, + 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, + 40, 50, 
50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, + 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, + 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, + 110, 110, + 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, + 120, 120 +}; + +static DEFINE_MUTEX(e1000_eeprom_lock); +static DEFINE_SPINLOCK(e1000_phy_lock); + +/** + * e1000_set_phy_type - Set the phy type member in the hw struct. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_set_phy_type(struct e1000_hw *hw) +{ + if (hw->mac_type == e1000_undefined) + return -E1000_ERR_PHY_TYPE; + + switch (hw->phy_id) { + case M88E1000_E_PHY_ID: + case M88E1000_I_PHY_ID: + case M88E1011_I_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1118_E_PHY_ID: + hw->phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: + if (hw->mac_type == e1000_82541 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_type = e1000_phy_igp; + break; + case RTL8211B_PHY_ID: + hw->phy_type = e1000_phy_8211; + break; + case RTL8201N_PHY_ID: + hw->phy_type = e1000_phy_8201; + break; + default: + /* Should never have loaded on this device */ + hw->phy_type = e1000_phy_undefined; + return -E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_phy_init_script(struct e1000_hw *hw) +{ + u16 phy_saved_data; + + if (hw->phy_init_script) { + msleep(20); + + /* Save off the current value of register 0x2F5B to be restored + * at the end of this routine. 
+ */ + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disabled the PHY transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + msleep(20); + + e1000_write_phy_reg(hw, 0x0000, 0x0140); + msleep(5); + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + e1000_write_phy_reg(hw, 0x1F95, 0x0001); + e1000_write_phy_reg(hw, 0x1F71, 0xBD21); + e1000_write_phy_reg(hw, 0x1F79, 0x0018); + e1000_write_phy_reg(hw, 0x1F30, 0x1600); + e1000_write_phy_reg(hw, 0x1F31, 0x0014); + e1000_write_phy_reg(hw, 0x1F32, 0x161C); + e1000_write_phy_reg(hw, 0x1F94, 0x0003); + e1000_write_phy_reg(hw, 0x1F96, 0x003F); + e1000_write_phy_reg(hw, 0x2010, 0x0008); + break; + + case e1000_82541_rev_2: + case e1000_82547_rev_2: + e1000_write_phy_reg(hw, 0x1F73, 0x0099); + break; + default: + break; + } + + e1000_write_phy_reg(hw, 0x0000, 0x3300); + msleep(20); + + /* Now enable the transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (hw->mac_type == e1000_82547) { + u16 fused, fine, coarse; + + /* Move to analog registers page */ + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_SPARE_FUSE_STATUS, + &fused); + + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_STATUS, + &fused); + + fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; + coarse = + fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; + + if (coarse > + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + coarse -= + IGP01E1000_ANALOG_FUSE_COARSE_10; + fine -= IGP01E1000_ANALOG_FUSE_FINE_1; + } else if (coarse == + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + fine -= IGP01E1000_ANALOG_FUSE_FINE_10; + + fused = + (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | + (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | + (coarse & + IGP01E1000_ANALOG_FUSE_COARSE_MASK); + + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_CONTROL, + fused); + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_BYPASS, + IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); + } + } + } +} + +/** + * e1000_set_mac_type - Set the mac type member in the hw struct. 
+ * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + switch (hw->device_id) { + case E1000_DEV_ID_82542: + switch (hw->revision_id) { + case E1000_82542_2_0_REV_ID: + hw->mac_type = e1000_82542_rev2_0; + break; + case E1000_82542_2_1_REV_ID: + hw->mac_type = e1000_82542_rev2_1; + break; + default: + /* Invalid 82542 revision ID */ + return -E1000_ERR_MAC_TYPE; + } + break; + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + hw->mac_type = e1000_82543; + break; + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + hw->mac_type = e1000_82544; + break; + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + hw->mac_type = e1000_82540; + break; + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + hw->mac_type = e1000_82545; + break; + case E1000_DEV_ID_82545GM_COPPER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82545GM_SERDES: + hw->mac_type = e1000_82545_rev_3; + break; + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + hw->mac_type = e1000_82546; + break; + case E1000_DEV_ID_82546GB_COPPER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + hw->mac_type = e1000_82546_rev_3; + break; + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER_LOM: + hw->mac_type = e1000_82541; + break; + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + hw->mac_type = e1000_82541_rev_2; + break; + case E1000_DEV_ID_82547EI: + case E1000_DEV_ID_82547EI_MOBILE: + hw->mac_type = e1000_82547; + break; + case E1000_DEV_ID_82547GI: + hw->mac_type = e1000_82547_rev_2; + break; + case E1000_DEV_ID_INTEL_CE4100_GBE: + hw->mac_type = e1000_ce4100; + break; + default: + /* Should never have loaded on this device */ + return -E1000_ERR_MAC_TYPE; + } + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->asf_firmware_present = true; + break; + default: + break; + } + + /* The 82543 chip does not count tx_carrier_errors properly in + * FD mode + */ + if (hw->mac_type == e1000_82543) + hw->bad_tx_carr_stats_fd = true; + + if (hw->mac_type > e1000_82544) + hw->has_smbus = true; + + return E1000_SUCCESS; +} + +/** + * e1000_set_media_type - Set media type and TBI compatibility. 
+ * @hw: Struct containing variables accessed by shared code + */ +void e1000_set_media_type(struct e1000_hw *hw) +{ + u32 status; + + if (hw->mac_type != e1000_82543) { + /* tbi_compatibility is only valid on 82543 */ + hw->tbi_compatibility_en = false; + } + + switch (hw->device_id) { + case E1000_DEV_ID_82545GM_SERDES: + case E1000_DEV_ID_82546GB_SERDES: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->media_type = e1000_media_type_fiber; + break; + case e1000_ce4100: + hw->media_type = e1000_media_type_copper; + break; + default: + status = er32(STATUS); + if (status & E1000_STATUS_TBIMODE) { + hw->media_type = e1000_media_type_fiber; + /* tbi_compatibility not valid on fiber */ + hw->tbi_compatibility_en = false; + } else { + hw->media_type = e1000_media_type_copper; + } + break; + } + } +} + +/** + * e1000_reset_hw - reset the hardware completely + * @hw: Struct containing variables accessed by shared code + * + * Reset the transmit and receive units; mask and clear all interrupts. + */ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 ctrl_ext; + u32 manc; + u32 led_ctrl; + s32 ret_val; + + /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC with + * the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(); + + /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ + hw->tbi_compatibility_on = false; + + /* Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msleep(10); + + ctrl = er32(CTRL); + + /* Must reset the PHY before resetting the MAC */ + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Issue a global reset to the MAC. This will reset the chip's + * transmit, receive, DMA, and link units. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + e_dbg("Issuing a global reset to MAC\n"); + + switch (hw->mac_type) { + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82546: + case e1000_82541: + case e1000_82541_rev_2: + /* These controllers can't ack the 64-bit write when issuing the + * reset, so use IO-mapping as a workaround to issue the reset + */ + E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); + break; + case e1000_82545_rev_3: + case e1000_82546_rev_3: + /* Reset is performed on a shadow of the control register */ + ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); + break; + case e1000_ce4100: + default: + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + break; + } + + /* After MAC reset, force reload of EEPROM to restore power-on settings + * to device. Later controllers reload the EEPROM automatically, so + * just wait for reload to complete. 
+ */ + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* Wait for reset to complete */ + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + /* Wait for EEPROM reload */ + msleep(2); + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + /* Wait for EEPROM reload */ + msleep(20); + break; + default: + /* Auto read done will delay 5ms or poll based on mac type */ + ret_val = e1000_get_auto_rd_done(hw); + if (ret_val) + return ret_val; + break; + } + + /* Disable HW ARPs on ASF enabled adapters */ + if (hw->mac_type >= e1000_82540) { + manc = er32(MANC); + manc &= ~(E1000_MANC_ARP_EN); + ew32(MANC, manc); + } + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + e1000_phy_init_script(hw); + + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Clear any pending interrupt events. */ + er32(ICR); + + /* If MWI was previously enabled, reenable it. */ + if (hw->mac_type == e1000_82542_rev2_0) { + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw - Performs basic configuration of the adapter. + * @hw: Struct containing variables accessed by shared code + * + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes the receive address registers, + * multicast table, and VLAN filter table. Calls routines to setup link + * configuration and flow control settings. Clears all on-chip counters. Leaves + * the transmit and receive units disabled and uninitialized. + */ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + s32 ret_val; + u32 mta_size; + u32 ctrl_ext; + + /* Initialize Identification LED */ + ret_val = e1000_id_led_init(hw); + if (ret_val) { + e_dbg("Error Initializing Identification LED\n"); + return ret_val; + } + + /* Set the media type and TBI compatibility */ + e1000_set_media_type(hw); + + /* Disabling VLAN filtering. */ + e_dbg("Initializing the IEEE VLAN\n"); + if (hw->mac_type < e1000_82545_rev_3) + ew32(VET, 0); + e1000_clear_vfta(hw); + + /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + ew32(RCTL, E1000_RCTL_RST); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Setup the receive address. This involves initializing all of the + * Receive Address Registers (RARs 0 - 15). 
+ */ + e1000_init_rx_addrs(hw); + + /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ + if (hw->mac_type == e1000_82542_rev2_0) { + ew32(RCTL, 0); + E1000_WRITE_FLUSH(); + msleep(1); + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + mta_size = E1000_MC_TBL_SIZE; + for (i = 0; i < mta_size; i++) { + E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); + /* use write flush to prevent Memory Write Block (MWB) from + * occurring when accessing our register space + */ + E1000_WRITE_FLUSH(); + } + + /* Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. Valid only on + * 82542 and 82543 silicon. + */ + if (hw->dma_fairness && hw->mac_type <= e1000_82543) { + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PRIOR); + } + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + /* Workaround for PCI-X problem when BIOS sets MMRBC + * incorrectly. + */ + if (hw->bus_type == e1000_bus_type_pcix && + e1000_pcix_get_mmrbc(hw) > 2048) + e1000_pcix_set_mmrbc(hw, 2048); + break; + } + + /* Call a subroutine to configure the link and setup flow control. */ + ret_val = e1000_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + if (hw->mac_type > e1000_82544) { + ctrl = er32(TXDCTL); + ctrl = + (ctrl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + ew32(TXDCTL, ctrl); + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs(hw); + + if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || + hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { + ctrl_ext = er32(CTRL_EXT); + /* Relaxed ordering must be disabled to avoid a parity + * error crash in a PCI slot. + */ + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + } + + return ret_val; +} + +/** + * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting. + * @hw: Struct containing variables accessed by shared code. + */ +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) +{ + u16 eeprom_data; + s32 ret_val; + + if (hw->media_type != e1000_media_type_internal_serdes) + return E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, + &eeprom_data); + if (ret_val) + return ret_val; + + if (eeprom_data != EEPROM_RESERVED_WORD) { + /* Adjust SERDES output amplitude only. */ + eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link - Configures flow control and link settings. + * @hw: Struct containing variables accessed by shared code + * + * Determines which flow control settings to use. Calls the appropriate media- + * specific link configuration function. Configures the flow control settings. + * Assuming the adapter has a valid link partner, a valid link should be + * established. Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. 
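+ *
+ * Returns E1000_SUCCESS on success, -E1000_ERR_EEPROM if the EEPROM cannot
+ * be read, or the status returned by the media-specific link setup routine.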
+ */
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+	u32 ctrl_ext;
+	s32 ret_val;
+	u16 eeprom_data;
+
+	/* Read and store word 0x0F of the EEPROM. This word contains bits
+	 * that determine the hardware's default PAUSE (flow control) mode,
+	 * a bit that determines whether the HW defaults to enabling or
+	 * disabling auto-negotiation, and the direction of the
+	 * SW defined pins. If there is no SW over-ride of the flow
+	 * control setting, then the variable hw->fc will
+	 * be initialized based on a value in the EEPROM.
+	 */
+	if (hw->fc == E1000_FC_DEFAULT) {
+		ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+					    1, &eeprom_data);
+		if (ret_val) {
+			e_dbg("EEPROM Read Error\n");
+			return -E1000_ERR_EEPROM;
+		}
+		if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
+			hw->fc = E1000_FC_NONE;
+		else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
+			 EEPROM_WORD0F_ASM_DIR)
+			hw->fc = E1000_FC_TX_PAUSE;
+		else
+			hw->fc = E1000_FC_FULL;
+	}
+
+	/* We want to save off the original Flow Control configuration just
+	 * in case we get disconnected and then reconnected into a different
+	 * hub or switch with different Flow Control capabilities.
+	 */
+	if (hw->mac_type == e1000_82542_rev2_0)
+		hw->fc &= (~E1000_FC_TX_PAUSE);
+
+	if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+		hw->fc &= (~E1000_FC_RX_PAUSE);
+
+	hw->original_fc = hw->fc;
+
+	e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc);
+
+	/* Take the 4 bits from EEPROM word 0x0F that determine the initial
+	 * polarity value for the SW controlled pins, and set up the
+	 * Extended Device Control reg with that info.
+	 * This is needed because one of the SW controlled pins is used for
+	 * signal detection. So this should be done before e1000_setup_pcs_link()
+	 * or e1000_phy_setup() is called.
+	 */
+	if (hw->mac_type == e1000_82543) {
+		ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+					    1, &eeprom_data);
+		if (ret_val) {
+			e_dbg("EEPROM Read Error\n");
+			return -E1000_ERR_EEPROM;
+		}
+		ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
+			    SWDPIO__EXT_SHIFT);
+		ew32(CTRL_EXT, ctrl_ext);
+	}
+
+	/* Call the necessary subroutine to configure the link. */
+	ret_val = (hw->media_type == e1000_media_type_copper) ?
+	    e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw);
+
+	/* Initialize the flow control address, type, and PAUSE timer
+	 * registers to their default values. This is done even if flow
+	 * control is disabled, because it does not hurt anything to
+	 * initialize these registers.
+	 */
+	e_dbg("Initializing the Flow Control address, type and timer regs\n");
+
+	ew32(FCT, FLOW_CONTROL_TYPE);
+	ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+	ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+	ew32(FCTTV, hw->fc_pause_time);
+
+	/* Set the flow control receive threshold registers. Normally,
+	 * these registers will be set to a default threshold that may be
+	 * adjusted later by the driver's runtime code. However, if the
+	 * ability to transmit pause frames is not enabled, then these
+	 * registers will be set to 0.
+	 */
+	if (!(hw->fc & E1000_FC_TX_PAUSE)) {
+		ew32(FCRTL, 0);
+		ew32(FCRTH, 0);
+	} else {
+		/* We need to set up the Receive Threshold high and low water
+		 * marks as well as (optionally) enabling the transmission of
+		 * XON frames.
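+		 * XON transmission itself is enabled via the E1000_FCRTL_XONE
+		 * bit in FCRTL below, when hw->fc_send_xon is set.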
+ */ + if (hw->fc_send_xon) { + ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); + ew32(FCRTH, hw->fc_high_water); + } else { + ew32(FCRTL, hw->fc_low_water); + ew32(FCRTH, hw->fc_high_water); + } + } + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link - prepare fiber or serdes link + * @hw: Struct containing variables accessed by shared code + * + * Manipulates Physical Coding Sublayer functions in order to configure + * link. Assumes the hardware has been previously reset and the transmitter + * and receiver are not enabled. + */ +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) +{ + u32 ctrl; + u32 status; + u32 txcw = 0; + u32 i; + u32 signal = 0; + s32 ret_val; + + /* On adapters with a MAC newer than 82544, SWDP 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + * If we're on serdes media, adjust the output amplitude to value + * set in the EEPROM. + */ + ctrl = er32(CTRL); + if (hw->media_type == e1000_media_type_fiber) + signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; + + ret_val = e1000_adjust_serdes_amplitude(hw); + if (ret_val) + return ret_val; + + /* Take the link out of reset */ + ctrl &= ~(E1000_CTRL_LRST); + + /* Adjust VCO speed to improve BER performance */ + ret_val = e1000_set_vco_speed(hw); + if (ret_val) + return ret_val; + + e1000_config_collision_dist(hw); + + /* Check for a software override of the flow control settings, and setup + * the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Tranmsit Config Word Register (TXCW) and re-start + * auto-negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, but + * not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we do + * not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + */ + switch (hw->fc) { + case E1000_FC_NONE: + /* Flow ctrl is completely disabled by a software over-ride */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case E1000_FC_RX_PAUSE: + /* Rx Flow control is enabled and Tx Flow control is disabled by + * a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case E1000_FC_TX_PAUSE: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case E1000_FC_FULL: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. 
If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + e_dbg("Auto-negotiation enabled\n"); + + ew32(TXCW, txcw); + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + hw->txcw = txcw; + msleep(1); + + /* If we have a signal (the cable is plugged in) then poll for a + * "Link-Up" indication in the Device Status Register. Time-out if a + * link isn't seen in 500 milliseconds seconds (Auto-negotiation should + * complete in less than 500 milliseconds even if the other end is doing + * it in SW). For internal serdes, we just assume a signal is present, + * then poll. + */ + if (hw->media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { + e_dbg("Looking for Link\n"); + for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { + msleep(10); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == (LINK_UP_TIMEOUT / 10)) { + e_dbg("Never got a valid link from auto-neg!!!\n"); + hw->autoneg_failed = 1; + /* AutoNeg failed to achieve a link, so we'll call + * e1000_check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = e1000_check_for_link(hw); + if (ret_val) { + e_dbg("Error while checking for link\n"); + return ret_val; + } + hw->autoneg_failed = 0; + } else { + hw->autoneg_failed = 0; + e_dbg("Valid Link Found\n"); + } + } else { + e_dbg("No Signal Detected\n"); + } + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series. + * @hw: Struct containing variables accessed by shared code + * + * Commits changes to PHY configuration by calling e1000_phy_reset(). + */ +static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw) +{ + s32 ret_val; + + /* SW reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +static s32 gbe_dhg_phy_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u32 ctrl_aux; + + switch (hw->phy_type) { + case e1000_phy_8211: + ret_val = e1000_copper_link_rtl_setup(hw); + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + case e1000_phy_8201: + /* Set RMII mode */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= E1000_CTL_AUX_RMII; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + + /* Disable the J/K bits required for receive */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= 0x4; + ctrl_aux &= ~0x2; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + ret_val = e1000_copper_link_rtl_setup(hw); + + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + default: + e_dbg("Error Resetting the PHY\n"); + return E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_preconfig - early configuration for copper + * @hw: Struct containing variables accessed by shared code + * + * Make sure we have a valid PHY and change PHY mode before link setup. + */ +static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + ctrl = er32(CTRL); + /* With 82543, we need to force speed and duplex on the MAC equal to + * what the PHY speed and duplex configuration is. In addition, we need + * to perform a hardware reset on the PHY to take it out of reset. 
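+	 * On MACs newer than 82543 the code below instead sets the set-link-up
+	 * bit (SLU) and clears the force-speed/duplex bits, so the MAC simply
+	 * follows what the PHY negotiates.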
+ */ + if (hw->mac_type > e1000_82543) { + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + } else { + ctrl |= + (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); + ew32(CTRL, ctrl); + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + } + + /* Make sure we have a valid PHY */ + ret_val = e1000_detect_gig_phy(hw); + if (ret_val) { + e_dbg("Error, did not detect valid phy.\n"); + return ret_val; + } + e_dbg("Phy ID = %x\n", hw->phy_id); + + /* Set PHY to class A mode (if necessary) */ + ret_val = e1000_set_phy_mode(hw); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82545_rev_3) || + (hw->mac_type == e1000_82546_rev_3)) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + phy_data |= 0x00000008; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + } + + if (hw->mac_type <= e1000_82543 || + hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_reset_disable = false; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) +{ + u32 led_ctrl; + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Wait 15ms for MAC to configure PHY from eeprom settings */ + msleep(15); + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + + /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ + if (hw->phy_type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = e1000_set_d3_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D3\n"); + return ret_val; + } + } + + /* Configure mdi-mdix settings */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + hw->dsp_config_state = e1000_dsp_config_disabled; + /* Force MDI for earlier revs of the IGP PHY */ + phy_data &= + ~(IGP01E1000_PSCR_AUTO_MDIX | + IGP01E1000_PSCR_FORCE_MDI_MDIX); + hw->mdix = 1; + + } else { + hw->dsp_config_state = e1000_dsp_config_enabled; + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (hw->mdix) { + case 1: + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + phy_data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + } + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->autoneg) { + e1000_ms_type phy_ms_setting = hw->master_slave; + + if (hw->ffe_config_state == e1000_ffe_config_active) + hw->ffe_config_state = e1000_ffe_config_enabled; + + if (hw->dsp_config_state == e1000_dsp_config_activated) + hw->dsp_config_state = e1000_dsp_config_enabled; + + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
+ */ + if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + /* Set auto Master/Slave resolution process */ + ret_val = + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~CR_1000T_MS_ENABLE; + ret_val = + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (phy_ms_setting) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + break; + default: + break; + } + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (hw->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (hw->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (hw->phy_revision < M88E1011_I_REV_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. 
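+		 * (The field falls back to a 2.5MHz clock whenever the PHY is
+		 * reset; e1000_phy_force_speed_duplex() re-forces it after its
+		 * own PHY reset for the same reason.)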
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((hw->phy_revision == E1000_REVISION_2) && + (hw->phy_id == M88E1111_I_PHY_ID)) { + /* Vidalia Phy, set the downshift counter to 5x */ + phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + } + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_autoneg - setup auto-neg + * @hw: Struct containing variables accessed by shared code + * + * Setup auto-negotiation and flow control advertisements, + * and then perform auto-negotiation. + */ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Perform some bounds checking on the hw->autoneg_advertised + * parameter. If this variable is zero, then set it to the default. + */ + hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (hw->autoneg_advertised == 0) + hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* IFE/RTL8201N PHY only supports 10/100 */ + if (hw->phy_type == e1000_phy_8201) + hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; + + e_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + e_dbg("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + e_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (hw->wait_autoneg_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + e_dbg + ("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->get_link_status = true; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_postconfig - post link setup + * @hw: Struct containing variables accessed by shared code + * + * Config the MAC and the PHY after link is up. + * 1) Set up the MAC to the current PHY speed/duplex + * if we are on 82543. If we + * are on newer silicon, we only need to configure + * collision distance in the Transmit Control Register. + * 2) Set up flow control on the MAC to that established with + * the link partner. + * 3) Config DSP to improve Gigabit link quality for some PHY revisions. 
+ */ +static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) +{ + s32 ret_val; + + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { + e1000_config_collision_dist(hw); + } else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error Configuring Flow Control\n"); + return ret_val; + } + + /* Config DSP to improve Giga link quality */ + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_config_dsp_after_link_change(hw, true); + if (ret_val) { + e_dbg("Error Configuring DSP after link up\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_copper_link - phy/speed/duplex setting + * @hw: Struct containing variables accessed by shared code + * + * Detects which PHY is present and sets up the speed and duplex + */ +static s32 e1000_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + /* Check if it is a valid PHY and set PHY mode if necessary. */ + ret_val = e1000_copper_link_preconfig(hw); + if (ret_val) + return ret_val; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_copper_link_igp_setup(hw); + if (ret_val) + return ret_val; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_copper_link_mgp_setup(hw); + if (ret_val) + return ret_val; + } else { + ret_val = gbe_dhg_phy_setup(hw); + if (ret_val) { + e_dbg("gbe_dhg_phy_setup failed!\n"); + return ret_val; + } + } + + if (hw->autoneg) { + /* Setup autoneg and flow control advertisement + * and perform autonegotiation + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H,or 100F + * depending on value from forced_speed_duplex. + */ + e_dbg("Forcing speed and duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + for (i = 0; i < 10; i++) { + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + /* Config the MAC and PHY after link is up */ + ret_val = e1000_copper_link_postconfig(hw); + if (ret_val) + return ret_val; + + e_dbg("Valid link established!!!\n"); + return E1000_SUCCESS; + } + udelay(10); + } + + e_dbg("Unable to establish link!!!\n"); + return E1000_SUCCESS; +} + +/** + * e1000_phy_setup_autoneg - phy settings + * @hw: Struct containing variables accessed by shared code + * + * Configures PHY autoneg and flow control advertisement settings + */ +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + else if (hw->phy_type == e1000_phy_8201) + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. 
First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { + e_dbg + ("Advertise 1000mb Half duplex requested, request denied!\n"); + } + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start + * auto-negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc) { + case E1000_FC_NONE: /* 0 */ + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_RX_PAUSE: /* 1 */ + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + */ + /* Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_TX_PAUSE: /* 2 */ + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case E1000_FC_FULL: /* 3 */ + /* Flow control (both RX and TX) is enabled by a software + * over-ride. 
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (hw->phy_type == e1000_phy_8201) { + mii_1000t_ctrl_reg = 0; + } else { + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex - force link settings + * @hw: Struct containing variables accessed by shared code + * + * Force PHY speed and duplex settings to hw->forced_speed_duplex + */ +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 mii_ctrl_reg; + u16 mii_status_reg; + u16 phy_data; + u16 i; + + /* Turn off Flow control if we are forcing speed and duplex. */ + hw->fc = E1000_FC_NONE; + + e_dbg("hw->fc = %d\n", hw->fc); + + /* Read the Device Control Register. */ + ctrl = er32(CTRL); + + /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(DEVICE_SPEED_MASK); + + /* Clear the Auto Speed Detect Enable bit. */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Read the MII Control Register. */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg); + if (ret_val) + return ret_val; + + /* We need to disable autoneg in order to force link and duplex. */ + + mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN; + + /* Are we forcing Full or Half Duplex? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_10_full) { + /* We want to force full duplex so we SET the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl |= E1000_CTRL_FD; + mii_ctrl_reg |= MII_CR_FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + /* We want to force half duplex so we CLEAR the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl &= ~E1000_CTRL_FD; + mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; + e_dbg("Half Duplex\n"); + } + + /* Are we forcing 100Mbps??? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_100_half) { + /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */ + ctrl |= E1000_CTRL_SPD_100; + mii_ctrl_reg |= MII_CR_SPEED_100; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + e_dbg("Forcing 100mb "); + } else { + /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + mii_ctrl_reg |= MII_CR_SPEED_10; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + e_dbg("Forcing 10mb "); + } + + e1000_config_collision_dist(hw); + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + + if (hw->phy_type == e1000_phy_m88) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires + * MDI forced whenever speed are duplex are forced. 
+ */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %x\n", phy_data); + + /* Need to reset the PHY or these changes will be ignored */ + mii_ctrl_reg |= MII_CR_RESET; + + /* Disable MDI-X support for 10/100 */ + } else { + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed or duplex are forced. + */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + /* Write back the modified PHY MII control register. */ + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg); + if (ret_val) + return ret_val; + + udelay(1); + + /* The wait_autoneg_complete flag may be a little misleading here. + * Since we are forcing speed and duplex, Auto-Neg is not enabled. + * But we do want to delay for a period while forcing only so we + * don't generate false No Link messages. So we will wait here + * only if the user has set wait_autoneg_complete to 1, which is + * the default. + */ + if (hw->wait_autoneg_complete) { + /* We will wait for autoneg to complete. */ + e_dbg("Waiting for forced speed/duplex link.\n"); + mii_status_reg = 0; + + /* Wait for autoneg to complete or 4.5 seconds to expire */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { + /* We didn't get link. Reset the DSP and wait again + * for link. + */ + ret_val = e1000_phy_reset_dsp(hw); + if (ret_val) { + e_dbg("Error Resetting PHY DSP\n"); + return ret_val; + } + } + /* This loop will early-out if the link condition has been + * met + */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + } + } + + if (hw->phy_type == e1000_phy_m88) { + /* Because we reset the PHY above, we need to re-force TX_CLK in + * the Extended PHY Specific Control Register to 25MHz clock. + * This value defaults back to a 2.5MHz clock when the PHY is + * reset. + */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + + /* In addition, because of the s/w reset above, we need to + * enable CRS on Tx. This must be set for both full and half + * duplex operation. 
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ret_val = e1000_polarity_reversal_workaround(hw); + if (ret_val) + return ret_val; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_config_collision_dist - set collision distance register + * @hw: Struct containing variables accessed by shared code + * + * Sets the collision distance in the Transmit Control register. + * Link should have been established previously. Reads the speed and duplex + * information from the Device Status register. + */ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl, coll_dist; + + if (hw->mac_type < e1000_82543) + coll_dist = E1000_COLLISION_DISTANCE_82542; + else + coll_dist = E1000_COLLISION_DISTANCE; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= coll_dist << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_config_mac_to_phy - sync phy and mac settings + * @hw: Struct containing variables accessed by shared code + * + * Sets MAC speed and duplex settings to reflect the those in the PHY + * The contents of the PHY register containing the needed information need to + * be passed in. + */ +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + /* 82544 or newer MAC, Auto Speed Detection takes care of + * MAC speed/duplex configuration. + */ + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) + return E1000_SUCCESS; + + /* Read the Device Control Register and set the bits to Force Speed + * and Duplex. + */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); + + switch (hw->phy_type) { + case e1000_phy_8201: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & RTL_PHY_CTRL_FD) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + if (phy_data & RTL_PHY_CTRL_SPD_100) + ctrl |= E1000_CTRL_SPD_100; + else + ctrl |= E1000_CTRL_SPD_10; + + e1000_config_collision_dist(hw); + break; + default: + /* Set up duplex in the Device Control and Transmit Control + * registers depending on negotiated values. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & M88E1000_PSSR_DPLX) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + e1000_config_collision_dist(hw); + + /* Set up speed in the Device Control register depending on + * negotiated values. + */ + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + ctrl |= E1000_CTRL_SPD_1000; + else if ((phy_data & M88E1000_PSSR_SPEED) == + M88E1000_PSSR_100MBS) + ctrl |= E1000_CTRL_SPD_100; + } + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_force_mac_fc - force flow control settings + * @hw: Struct containing variables accessed by shared code + * + * Forces the MAC's flow control settings. + * Sets the TFCE and RFCE bits in the device control register to reflect + * the adapter settings. 
TFCE and RFCE need to be explicitly set by
+ * software when a Copper PHY is used because autonegotiation is managed
+ * by the PHY rather than the MAC. Software must also configure these
+ * bits when link is forced on a fiber connection.
+ */
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	/* Get the current configuration of the Device Control Register */
+	ctrl = er32(CTRL);
+
+	/* Because we didn't get link via the internal auto-negotiation
+	 * mechanism (we either forced link or we got link via PHY
+	 * auto-neg), we have to manually enable/disable transmit and
+	 * receive flow control.
+	 *
+	 * The "Case" statement below enables/disables flow control
+	 * according to the "hw->fc" parameter.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause
+	 *          frames but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not receive pause frames).
+	 *      3:  Both Rx and TX flow control (symmetric) is enabled.
+	 *  other:  No other values should be possible at this point.
+	 */
+
+	switch (hw->fc) {
+	case E1000_FC_NONE:
+		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+		break;
+	case E1000_FC_RX_PAUSE:
+		ctrl &= (~E1000_CTRL_TFCE);
+		ctrl |= E1000_CTRL_RFCE;
+		break;
+	case E1000_FC_TX_PAUSE:
+		ctrl &= (~E1000_CTRL_RFCE);
+		ctrl |= E1000_CTRL_TFCE;
+		break;
+	case E1000_FC_FULL:
+		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+		break;
+	default:
+		e_dbg("Flow control param set incorrectly\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	/* Disable TX Flow Control for 82542 (rev 2.0) */
+	if (hw->mac_type == e1000_82542_rev2_0)
+		ctrl &= (~E1000_CTRL_TFCE);
+
+	ew32(CTRL, ctrl);
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_config_fc_after_link_up - configure flow control after autoneg
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Configures flow control settings after link is established.
+ * Should be called immediately after a valid link has been established.
+ * Forces MAC flow control settings if link was forced. When in MII/GMII mode
+ * and autonegotiation is enabled, the MAC flow control settings will be set
+ * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
+ * and RFCE bits will be automatically set to the negotiated flow control mode.
+ */
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 mii_status_reg;
+	u16 mii_nway_adv_reg;
+	u16 mii_nway_lp_ability_reg;
+	u16 speed;
+	u16 duplex;
+
+	/* Check for the case where we have fiber media and auto-neg failed
+	 * so we had to force link. In this case, we need to force the
+	 * configuration of the MAC to match the "fc" parameter.
+	 */
+	if (((hw->media_type == e1000_media_type_fiber) &&
+	     (hw->autoneg_failed)) ||
+	    ((hw->media_type == e1000_media_type_internal_serdes) &&
+	     (hw->autoneg_failed)) ||
+	    ((hw->media_type == e1000_media_type_copper) &&
+	     (!hw->autoneg))) {
+		ret_val = e1000_force_mac_fc(hw);
+		if (ret_val) {
+			e_dbg("Error forcing flow control settings\n");
+			return ret_val;
+		}
+	}
+
+	/* Check for the case where we have copper media and auto-neg is
+	 * enabled. In this case, we need to check and see if Auto-Neg
+	 * has completed, and if so, how the PHY and link partner has
+	 * flow control configured.
+	 */
+	if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+		/* Read the MII Status Register and check to see if AutoNeg
+		 * has completed.
We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement Register + * (Address 4) and the Auto_Negotiation Base Page + * Ability Register (Address 5) to determine how flow + * control was negotiated. + */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement + * Register (Address 4) and two bits in the Auto + * Negotiation Base Page Ability Register (Address 5) + * determine flow control for both the PHY and the link + * partner. The following table, taken out of the IEEE + * 802.3ab/D6.0 dated March 25, 1999, describes these + * PAUSE resolution bits and how flow control is + * determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|------------------ + * 0 | 0 | DC | DC | E1000_FC_NONE + * 0 | 1 | 0 | DC | E1000_FC_NONE + * 0 | 1 | 1 | 0 | E1000_FC_NONE + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * 1 | 0 | 0 | DC | E1000_FC_NONE + * 1 | DC | 1 | DC | E1000_FC_FULL + * 1 | 1 | 0 | 0 | E1000_FC_NONE + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + /* Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | DC | 1 | DC | E1000_FC_FULL + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected Rx + * ONLY of pause frames. In this case, we had + * to advertise FULL flow control because we + * could not advertise Rx ONLY. Hence, we must + * now check to see if we need to turn OFF the + * TRANSMISSION of PAUSE frames. + */ + if (hw->original_fc == E1000_FC_FULL) { + hw->fc = E1000_FC_FULL; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = E1000_FC_TX_PAUSE; + e_dbg + ("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should + * be disabled. However, we want to consider that we + * could be connected to a legacy switch that doesn't + * advertise desired flow control, but can be forced on + * the link partner. So if we advertised no flow + * control, that is what we will resolve to. If we + * advertised some kind of receive capability (Rx Pause + * Only or Full Flow Control) and the link partner + * advertised none, we will configure ourselves to + * enable Rx Flow Control only. We can do this safely + * for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, + * no harm done since we won't be receiving any PAUSE + * frames anyway. If the intent on the link partner was + * to have flow control enabled, then by us enabling Rx + * only, we can at least receive pause frames and + * process them. This is a good idea because in most + * cases, since we are predominantly a server NIC, more + * times than not we will be asked to delay transmission + * of packets than asking our link partner to pause + * transmission of frames. + */ + else if ((hw->original_fc == E1000_FC_NONE || + hw->original_fc == E1000_FC_TX_PAUSE) || + hw->fc_strict_ieee) { + hw->fc = E1000_FC_NONE; + e_dbg("Flow Control = NONE.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc = E1000_FC_NONE; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg + ("Error forcing flow control settings\n"); + return ret_val; + } + } else { + e_dbg + ("Copper PHY and Auto Neg has not completed.\n"); + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + */ +static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. 
+ */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (hw->autoneg_failed == 0) { + hw->autoneg_failed = 1; + goto out; + } + e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, hw->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + hw->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - forced.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - autoneg " + "completed successfully.\n"); + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - invalid" + "codewords detected in autoneg.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - no sync.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - autoneg failed\n"); + } + } + + out: + return ret_val; +} + +/** + * e1000_check_for_link + * @hw: Struct containing variables accessed by shared code + * + * Checks to see if the link status of the hardware has changed. + * Called by any function that needs to check the link status of the adapter. + */ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + u32 status; + u32 rctl; + u32 icr; + s32 ret_val; + u16 phy_data; + + er32(CTRL); + status = er32(STATUS); + + /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + */ + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) { + er32(RXCW); + + if (hw->media_type == e1000_media_type_fiber) { + if (status & E1000_STATUS_LU) + hw->get_link_status = false; + } + } + + /* If we have a copper PHY then we only want to go out to the PHY + * registers to see if Auto-Neg has completed and/or if our link + * status has changed. The get_link_status flag will be set if we + * receive a Link Status Change interrupt or we have Rx Sequence + * Errors. 
+ */ + if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + * Read the register twice since the link bit is sticky. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + hw->get_link_status = false; + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift(hw); + + /* If we are on 82544 or 82543 silicon and speed/duplex + * are forced to 10H or 10F, then we will implement the + * polarity reversal workaround. We disable interrupts + * first, and upon returning, place the devices + * interrupt state to its previous value except for the + * link status change interrupt which will + * happen due to the execution of this workaround. + */ + + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ew32(IMC, 0xffffffff); + ret_val = + e1000_polarity_reversal_workaround(hw); + icr = er32(ICR); + ew32(ICS, (icr & ~E1000_ICS_LSC)); + ew32(IMS, IMS_ENABLE_MASK); + } + + } else { + /* No link detected */ + e1000_config_dsp_after_link_change(hw, false); + return 0; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!hw->autoneg) + return -E1000_ERR_CONFIG; + + /* optimize the dsp settings for the igp phy */ + e1000_config_dsp_after_link_change(hw, true); + + /* We have a M88E1000 PHY and Auto-Neg is enabled. If we + * have Si on board that is 82544 or newer, Auto + * Speed Detection takes care of MAC speed/duplex + * configuration. So we only need to configure Collision + * Distance in the MAC. Otherwise, we need to force + * speed/duplex on the MAC to the current PHY speed/duplex + * settings. + */ + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_ce4100)) + e1000_config_collision_dist(hw); + else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg + ("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control settings + * because we may have had to re-autoneg with a different link + * partner. + */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + + /* At this point we know that we are on copper and we have + * auto-negotiated link. These are conditions for checking the + * link partner capability register. We use the link speed to + * determine if TBI compatibility needs to be turned on or off. + * If the link is not at gigabit speed, then TBI compatibility + * is not needed. If we are at gigabit speed, we turn on TBI + * compatibility. + */ + if (hw->tbi_compatibility_en) { + u16 speed, duplex; + + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + if (speed != SPEED_1000) { + /* If link speed is not set to gigabit speed, we + * do not need to enable TBI compatibility. 
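+ * If it is currently on, the Store Bad Packets bit (RCTL.SBP) is
+ * cleared again below and the mode is switched off.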
+ */ + if (hw->tbi_compatibility_on) { + /* If we previously were in the mode, + * turn it off. + */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_SBP; + ew32(RCTL, rctl); + hw->tbi_compatibility_on = false; + } + } else { + /* If TBI compatibility is was previously off, + * turn it on. For compatibility with a TBI link + * partner, we will store bad packets. Some + * frames have an additional byte on the end and + * will look like CRC errors to the hardware. + */ + if (!hw->tbi_compatibility_on) { + hw->tbi_compatibility_on = true; + rctl = er32(RCTL); + rctl |= E1000_RCTL_SBP; + ew32(RCTL, rctl); + } + } + } + } + + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) + e1000_check_for_serdes_link_generic(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex + * @hw: Struct containing variables accessed by shared code + * @speed: Speed of the connection + * @duplex: Duplex setting of the connection + * + * Detects the current speed and duplex settings of the hardware. + */ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + u32 status; + s32 ret_val; + u16 phy_data; + + if (hw->mac_type >= e1000_82543) { + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + e_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + e_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + e_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + e_dbg(" Half Duplex\n"); + } + } else { + e_dbg("1000 Mbs, Full Duplex\n"); + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + } + + /* IGP01 PHY may advertise full duplex operation after speed downgrade + * even if it is operating at half duplex. Here we set the duplex + * settings to match the duplex in the link partner's capabilities. + */ + if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & NWAY_ER_LP_NWAY_CAPS)) + *duplex = HALF_DUPLEX; + else { + ret_val = + e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); + if (ret_val) + return ret_val; + if ((*speed == SPEED_100 && + !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) || + (*speed == SPEED_10 && + !(phy_data & NWAY_LPAR_10T_FD_CAPS))) + *duplex = HALF_DUPLEX; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_wait_autoneg + * @hw: Struct containing variables accessed by shared code + * + * Blocks until autoneg completes or times out (~4.5 seconds) + */ +static s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + e_dbg("Waiting for Auto-Neg to complete.\n"); + + /* We will wait for autoneg to complete or 4.5 seconds to expire. */ + for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. 
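+ * The status register is read twice because its link bit is latched
+ * (the same point is noted in e1000_check_for_link); the second read
+ * returns the current state.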
+ */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + if (phy_data & MII_SR_AUTONEG_COMPLETE) + return E1000_SUCCESS; + + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_raise_mdi_clk - Raises the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Raise the clock input to the Management Data Clock (by setting the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_lower_mdi_clk - Lowers the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Lower the clock input to the Management Data Clock (by clearing the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY + * @hw: Struct containing variables accessed by shared code + * @data: Data to send out to the PHY + * @count: Number of bits to shift out + * + * Bits are shifted out in MSB to LSB order. + */ +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) +{ + u32 ctrl; + u32 mask; + + /* We need to shift "count" number of bits out to the PHY. So, the value + * in the "data" parameter will be shifted out to the PHY one bit at a + * time. In order to do this, "data" must be broken down into bits. + */ + mask = 0x01; + mask <<= (count - 1); + + ctrl = er32(CTRL); + + /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ + ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); + + while (mask) { + /* A "1" is shifted out to the PHY by setting the MDIO bit to + * "1" and then raising and lowering the Management Data Clock. + * A "0" is shifted out to the PHY by setting the MDIO bit to + * "0" and then raising and lowering the clock. + */ + if (data & mask) + ctrl |= E1000_CTRL_MDIO; + else + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + udelay(10); + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + mask = mask >> 1; + } +} + +/** + * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY + * @hw: Struct containing variables accessed by shared code + * + * Bits are shifted in MSB to LSB order. + */ +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) +{ + u32 ctrl; + u16 data = 0; + u8 i; + + /* In order to read a register from the PHY, we need to shift in a total + * of 18 bits from the PHY. The first two bit (turnaround) times are + * used to avoid contention on the MDIO pin when a read operation is + * performed. These two bits are ignored by us and thrown away. Bits are + * "shifted in" by raising the input to the Management Data Clock + * (setting the MDC bit), and then reading the value of the MDIO bit. + */ + ctrl = er32(CTRL); + + /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as + * input. + */ + ctrl &= ~E1000_CTRL_MDIO_DIR; + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + /* Raise and Lower the clock before reading in the data. This accounts + * for the turnaround bits. 
The first clock occurred when we clocked out + * the last bit of the Register Address. + */ + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + for (data = 0, i = 0; i < 16; i++) { + data = data << 1; + e1000_raise_mdi_clk(hw, &ctrl); + ctrl = er32(CTRL); + /* Check to see if we shifted in a "1". */ + if (ctrl & E1000_CTRL_MDIO) + data |= 1; + e1000_lower_mdi_clk(hw, &ctrl); + } + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + return data; +} + +/** + * e1000_read_phy_reg - read a phy register + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to read + * @phy_data: pointer to the value on the PHY register + * + * Reads the value from a PHY register, if the value is on a specific non zero + * page, sets the page first. + */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16) reg_addr); + if (ret_val) + goto out; + } + + ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); +out: + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, and register address in the MDI + * Control register. The MAC will take care of interfacing with + * the PHY to retrieve the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_READ) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + + mdic = readl(E1000_MDIO_STS); + if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) { + e_dbg("MDI Read Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16)mdic; + } else { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16)mdic; + } + } else { + /* We must first send a preamble through the MDIO pin to signal + * the beginning of an MII instruction. This is done by sending + * 32 consecutive "1" bits. + */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the next few fields that are required for a read + * operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine five different times. 
The + * format of a MII read instruction consists of a shift out of + * 14 bits and is defined as follows: + * + * followed by a shift in of 18 bits. This first two bits + * shifted in are TurnAround bits used to avoid contention on + * the MDIO pin when a READ operation is performed. These two + * bits are thrown away followed by a shift in of 16 bits which + * contains the desired data. + */ + mdic = ((reg_addr) | (phy_addr << 5) | + (PHY_OP_READ << 10) | (PHY_SOF << 12)); + + e1000_shift_out_mdi_bits(hw, mdic, 14); + + /* Now that we've shifted out the read command to the MII, we + * need to "shift in" the 16-bit value (18 total bits) of the + * requested PHY register address. + */ + *phy_data = e1000_shift_in_mdi_bits(hw); + } + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg - write a phy register + * + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to write + * @phy_data: data to write to the PHY + * + * Writes a value to a PHY register + */ +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16)reg_addr); + if (ret_val) { + spin_unlock_irqrestore(&e1000_phy_lock, flags); + return ret_val; + } + } + + ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, register address, and data + * intended for the PHY register in the MDI Control register. + * The MAC will take care of interfacing with the PHY to send + * the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = (((u32)phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_WRITE) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 640; i++) { + udelay(5); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } else { + mdic = (((u32)phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 641; i++) { + udelay(5); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } + } else { + /* We'll need to use the SW defined pins to shift the write + * command out to the PHY. We first send a preamble to the PHY + * to signal the beginning of the MII instruction. This is done + * by sending 32 consecutive "1" bits. 
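+ * The e1000_shift_out_mdi_bits() call below clocks out PHY_PREAMBLE_SIZE
+ * bits of the all-ones PHY_PREAMBLE pattern for exactly this purpose.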
+ */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the remaining required fields that will indicate + * a write operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine for each field in the + * command. The format of a MII write instruction is as follows: + * . + */ + mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | + (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); + mdic <<= 16; + mdic |= (u32)phy_data; + + e1000_shift_out_mdi_bits(hw, mdic, 32); + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - reset the phy, hardware style + * @hw: Struct containing variables accessed by shared code + * + * Returns the PHY to the power-on reset state + */ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext; + u32 led_ctrl; + + e_dbg("Resetting Phy...\n"); + + if (hw->mac_type > e1000_82543) { + /* Read the device control register and assert the + * E1000_CTRL_PHY_RST bit. Then, take it out of reset. + * For e1000 hardware, we delay for 10ms between the assert + * and de-assert. + */ + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); + E1000_WRITE_FLUSH(); + + msleep(10); + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + } else { + /* Read the Extended Device Control Register, assert the + * PHY_RESET_DIR bit to put the PHY into reset. Then, take it + * out of reset. + */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; + ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + msleep(10); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + } + udelay(150); + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Wait for FW to finish PHY configuration. */ + return e1000_get_phy_cfg_done(hw); +} + +/** + * e1000_phy_reset - reset the phy to commit settings + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY + * Sets bit 15 of the MII Control register + */ +s32 e1000_phy_reset(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + switch (hw->phy_type) { + case e1000_phy_igp: + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + break; + default: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= MII_CR_RESET; + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + break; + } + + if (hw->phy_type == e1000_phy_igp) + e1000_phy_init_script(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_detect_gig_phy - check the phy type + * @hw: Struct containing variables accessed by shared code + * + * Probes the expected PHY address for known PHY IDs + */ +static s32 e1000_detect_gig_phy(struct e1000_hw *hw) +{ + s32 phy_init_status, ret_val; + u16 phy_id_high, phy_id_low; + bool match = false; + + if (hw->phy_id != 0) + return E1000_SUCCESS; + + /* Read the PHY ID Registers to identify which PHY is onboard. 
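+ * PHY_ID1 supplies the upper 16 bits of hw->phy_id; PHY_ID2 supplies the
+ * lower bits, with its low-order revision bits split off into
+ * hw->phy_revision below.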
*/ + ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); + if (ret_val) + return ret_val; + + hw->phy_id = (u32)(phy_id_high << 16); + udelay(20); + ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); + if (ret_val) + return ret_val; + + hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK); + hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->phy_id == M88E1000_E_PHY_ID) + match = true; + break; + case e1000_82544: + if (hw->phy_id == M88E1000_I_PHY_ID) + match = true; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + if (hw->phy_id == M88E1011_I_PHY_ID) + match = true; + break; + case e1000_ce4100: + if ((hw->phy_id == RTL8211B_PHY_ID) || + (hw->phy_id == RTL8201N_PHY_ID) || + (hw->phy_id == M88E1118_E_PHY_ID)) + match = true; + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (hw->phy_id == IGP01E1000_I_PHY_ID) + match = true; + break; + default: + e_dbg("Invalid MAC type %d\n", hw->mac_type); + return -E1000_ERR_CONFIG; + } + phy_init_status = e1000_set_phy_type(hw); + + if ((match) && (phy_init_status == E1000_SUCCESS)) { + e_dbg("PHY ID 0x%X detected\n", hw->phy_id); + return E1000_SUCCESS; + } + e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id); + return -E1000_ERR_PHY; +} + +/** + * e1000_phy_reset_dsp - reset DSP + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY's DSP + */ +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + do { + ret_val = e1000_write_phy_reg(hw, 29, 0x001d); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x0000); + if (ret_val) + break; + ret_val = E1000_SUCCESS; + } while (0); + + return ret_val; +} + +/** + * e1000_phy_igp_get_info - get igp specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for igp PHY only. + */ +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data, min_length, max_length, average; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + /* IGP01E1000 does not need to support it. */ + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; + + /* IGP01E1000 always correct polarity reversal */ + phy_info->polarity_correction = e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >> + IGP01E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Local/Remote Receiver Information are only valid @ 1000 + * Mbps + */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + /* Get cable length */ + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + /* Translate to old method */ + average = (max_length + min_length) / 2; + + if (average <= e1000_igp_cable_length_50) + phy_info->cable_length = e1000_cable_length_50; + else if (average <= e1000_igp_cable_length_80) + phy_info->cable_length = e1000_cable_length_50_80; + else if (average <= e1000_igp_cable_length_110) + phy_info->cable_length = e1000_cable_length_80_110; + else if (average <= e1000_igp_cable_length_140) + phy_info->cable_length = e1000_cable_length_110_140; + else + phy_info->cable_length = e1000_cable_length_140; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_m88_get_info - get m88 specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for m88 PHY only. + */ +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_info->extended_10bt_distance = + ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> + M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ? + e1000_10bt_ext_dist_enable_lower : + e1000_10bt_ext_dist_enable_normal; + + phy_info->polarity_correction = + ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> + M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ? + e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >> + M88E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + /* Cable Length Estimation and Local/Remote Receiver Information + * are only valid at 1000 Mbps. + */ + phy_info->cable_length = + (e1000_cable_length) ((phy_data & + M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_get_info - request phy info + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers + */ +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + + phy_info->cable_length = e1000_cable_length_undefined; + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; + phy_info->cable_polarity = e1000_rev_polarity_undefined; + phy_info->downshift = e1000_downshift_undefined; + phy_info->polarity_correction = e1000_polarity_reversal_undefined; + phy_info->mdix_mode = e1000_auto_x_mode_undefined; + phy_info->local_rx = e1000_1000t_rx_status_undefined; + phy_info->remote_rx = e1000_1000t_rx_status_undefined; + + if (hw->media_type != e1000_media_type_copper) { + e_dbg("PHY info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { + e_dbg("PHY info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + if (hw->phy_type == e1000_phy_igp) + return e1000_phy_igp_get_info(hw, phy_info); + else if ((hw->phy_type == e1000_phy_8211) || + (hw->phy_type == e1000_phy_8201)) + return E1000_SUCCESS; + else + return e1000_phy_m88_get_info(hw, phy_info); +} + +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { + e_dbg("Invalid MDI setting detected\n"); + hw->mdix = 1; + return -E1000_ERR_CONFIG; + } + return E1000_SUCCESS; +} + +/** + * e1000_init_eeprom_params - initialize sw eeprom vars + * @hw: Struct containing variables accessed by shared code + * + * Sets up eeprom variables in the hw struct. Must be called after mac_type + * is configured. 
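+ * The EECD register is consulted below to work out, per MAC type, whether
+ * a Microwire or SPI EEPROM is fitted and what word size and address width
+ * the bit-banging helpers should use.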
+ */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd = er32(EECD); + s32 ret_val = E1000_SUCCESS; + u16 eeprom_size; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + eeprom->type = e1000_eeprom_microwire; + eeprom->word_size = 64; + eeprom->opcode_bits = 3; + eeprom->address_bits = 6; + eeprom->delay_usec = 50; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_SIZE) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (eecd & E1000_EECD_TYPE) { + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + } else { + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + } + break; + default: + break; + } + + if (eeprom->type == e1000_eeprom_spi) { + /* eeprom_size will be an enum [0..8] that maps to eeprom sizes + * 128B to 32KB (incremented by powers of 2). + */ + /* Set to default value for initial eeprom read. */ + eeprom->word_size = 64; + ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); + if (ret_val) + return ret_val; + eeprom_size = + (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; + /* 256B eeprom size was not supported in earlier hardware, so we + * bump eeprom_size up one to ensure that "1" (which maps to + * 256B) is never the result used in the shifting logic below. + */ + if (eeprom_size) + eeprom_size++; + + eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT); + } + return ret_val; +} + +/** + * e1000_raise_ee_clk - Raises the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Raise the clock input to the EEPROM (by setting the SK bit), and then + * wait microseconds. + */ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_lower_ee_clk - Lowers the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Lower the clock input to the EEPROM (by clearing the SK bit), and + * then wait 50 microseconds. + */ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM. 
+ * @hw: Struct containing variables accessed by shared code + * @data: data to send to the EEPROM + * @count: number of bits to shift out + */ +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u32 mask; + + /* We need to shift "count" bits out to the EEPROM. So, value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + */ + mask = 0x01 << (count - 1); + eecd = er32(EECD); + if (eeprom->type == e1000_eeprom_microwire) + eecd &= ~E1000_EECD_DO; + else if (eeprom->type == e1000_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + /* A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK bit + * controls the clock input to the EEPROM). A "0" is shifted + * out to the EEPROM by setting "DI" to "0" and then raising and + * then lowering the clock. + */ + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(eeprom->delay_usec); + + e1000_raise_ee_clk(hw, &eecd); + e1000_lower_ee_clk(hw, &eecd); + + mask = mask >> 1; + + } while (mask); + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM + * @hw: Struct containing variables accessed by shared code + * @count: number of bits to shift in + */ +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + /* In order to read a register from the EEPROM, we need to shift 'count' + * bits in from the EEPROM. Bits are "shifted in" by raising the clock + * input to the EEPROM (setting the SK bit), and then reading the value + * of the "DO" bit. During this "shifting in" process the "DI" bit + * should always be clear. + */ + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data = data << 1; + e1000_raise_ee_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DI); + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_ee_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_acquire_eeprom - Prepares EEPROM for access + * @hw: Struct containing variables accessed by shared code + * + * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This + * function should be called before issuing a command to the EEPROM. 
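+ * On MACs newer than the 82544 it first requests ownership of the EEPROM
+ * through the EECD REQ/GNT handshake and gives up with an error if the
+ * grant is not seen.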
+ */ +static s32 e1000_acquire_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd, i = 0; + + eecd = er32(EECD); + + /* Request EEPROM Access */ + if (hw->mac_type > e1000_82544) { + eecd |= E1000_EECD_REQ; + ew32(EECD, eecd); + eecd = er32(EECD); + while ((!(eecd & E1000_EECD_GNT)) && + (i < E1000_EEPROM_GRANT_ATTEMPTS)) { + i++; + udelay(5); + eecd = er32(EECD); + } + if (!(eecd & E1000_EECD_GNT)) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire EEPROM grant\n"); + return -E1000_ERR_EEPROM; + } + } + + /* Setup EEPROM for Read/Write */ + + if (eeprom->type == e1000_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); + ew32(EECD, eecd); + + /* Set CS */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(1); + } + + return E1000_SUCCESS; +} + +/** + * e1000_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_standby_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + + eecd = er32(EECD); + + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock high */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock low */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } +} + +/** + * e1000_release_eeprom - drop chip select + * @hw: Struct containing variables accessed by shared code + * + * Terminates a command by inverting the EEPROM's chip select pin + */ +static void e1000_release_eeprom(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + + if (hw->eeprom.type == e1000_eeprom_spi) { + eecd |= E1000_EECD_CS; /* Pull CS high */ + eecd &= ~E1000_EECD_SK; /* Lower SCK */ + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(hw->eeprom.delay_usec); + } else if (hw->eeprom.type == e1000_eeprom_microwire) { + /* cleanup eeprom */ + + /* CS on Microwire is active-high */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); + + ew32(EECD, eecd); + + /* Rising edge of clock */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + + /* Falling edge of clock */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + } + + /* Stop requesting EEPROM access */ + if (hw->mac_type > e1000_82544) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + } +} + +/** + * e1000_spi_eeprom_ready - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u8 spi_stat_reg; + + /* Read "Status Register" repeatedly until the LSB is cleared. 
The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + retry_count = 0; + do { + e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, + hw->eeprom.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8); + if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) + break; + + udelay(5); + retry_count += 5; + + e1000_standby_eeprom(hw); + } while (retry_count < EEPROM_MAX_RETRY_SPI); + + /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and + * only 0-5mSec on 5V devices) + */ + if (retry_count >= EEPROM_MAX_RETRY_SPI) { + e_dbg("SPI EEPROM Status error\n"); + return -E1000_ERR_EEPROM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_eeprom - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * @words: number of words to read + */ +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + + mutex_lock(&e1000_eeprom_lock); + ret = e1000_do_read_eeprom(hw, offset, words, data); + mutex_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 i = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. + */ + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { + e_dbg("\"words\" parameter out of bounds. Words = %d," + "size = %d\n", offset, eeprom->word_size); + return -E1000_ERR_EEPROM; + } + + /* EEPROM's that don't use EERD to read require us to bit-bang the SPI + * directly. In this case, we need to acquire the EEPROM so that + * FW or other port software does not interrupt. + */ + /* Prepare the EEPROM for bit-bang reading */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have + * acquired the EEPROM at this point, so any returns should release it + */ + if (eeprom->type == e1000_eeprom_spi) { + u16 word_in; + u8 read_opcode = EEPROM_READ_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) { + e1000_release_eeprom(hw); + return -E1000_ERR_EEPROM; + } + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + read_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16)(offset * 2), + eeprom->address_bits); + + /* Read the data. The address of the eeprom internally + * increments with each byte (spi) being read, saving on the + * overhead of eeprom setup and tear-down. The address counter + * will roll over if reading beyond the size of the eeprom, thus + * allowing the entire memory to be read starting from any + * offset. 
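+ * Note that each 16-bit word shifted in below is byte-swapped before it
+ * is stored in the caller's buffer.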
+ */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_ee_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + } else if (eeprom->type == e1000_eeprom_microwire) { + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, + EEPROM_READ_OPCODE_MICROWIRE, + eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16)(offset + i), + eeprom->address_bits); + + /* Read the data. For microwire, each word requires the + * overhead of eeprom setup and tear-down. + */ + data[i] = e1000_shift_in_ee_bits(hw, 16); + e1000_standby_eeprom(hw); + cond_resched(); + } + } + + /* End this read operation */ + e1000_release_eeprom(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum + * @hw: Struct containing variables accessed by shared code + * + * Reads the first 64 16 bit words of the EEPROM and sums the values read. + * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is + * valid. + */ +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + +#ifdef CONFIG_PARISC + /* This is a signature and not a checksum on HP c8000 */ + if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) + return E1000_SUCCESS; + +#endif + if (checksum == (u16)EEPROM_SUM) + return E1000_SUCCESS; + else { + e_dbg("EEPROM Checksum Invalid\n"); + return -E1000_ERR_EEPROM; + } +} + +/** + * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum + * @hw: Struct containing variables accessed by shared code + * + * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. + * Writes the difference to word offset 63 of the EEPROM. + */ +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + checksum = (u16)EEPROM_SUM - checksum; + if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { + e_dbg("EEPROM Write Error\n"); + return -E1000_ERR_EEPROM; + } + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom - write words to the different EEPROM types. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word to be written to the EEPROM + * + * If e1000_update_eeprom_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + */ +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + + mutex_lock(&e1000_eeprom_lock); + ret = e1000_do_write_eeprom(hw, offset, words, data); + mutex_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + s32 status = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. 
+ */ + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { + e_dbg("\"words\" parameter out of bounds\n"); + return -E1000_ERR_EEPROM; + } + + /* Prepare the EEPROM for writing */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + if (eeprom->type == e1000_eeprom_microwire) { + status = e1000_write_eeprom_microwire(hw, offset, words, data); + } else { + status = e1000_write_eeprom_spi(hw, offset, words, data); + msleep(10); + } + + /* Done with writing */ + e1000_release_eeprom(hw); + + return status; +} + +/** + * e1000_write_eeprom_spi - Writes a 16 bit word to a given offset in an SPI EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u16 widx = 0; + + while (widx < words) { + u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) + return -E1000_ERR_EEPROM; + + e1000_standby_eeprom(hw); + cond_resched(); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, + eeprom->opcode_bits); + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + write_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16)((offset + widx) * 2), + eeprom->address_bits); + + /* Send the data */ + + /* Loop to allow for up to whole page write (32 bytes) of + * eeprom + */ + while (widx < words) { + u16 word_out = data[widx]; + + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_ee_bits(hw, word_out, 16); + widx++; + + /* Some larger eeprom sizes are capable of a 32-byte + * PAGE WRITE operation, while the smaller eeproms are + * capable of an 8-byte PAGE WRITE operation. Break the + * inner loop to pass new address + */ + if ((((offset + widx) * 2) % eeprom->page_size) == 0) { + e1000_standby_eeprom(hw); + break; + } + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom_microwire - Writes a 16 bit word to a given offset in a Microwire EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u16 words_written = 0; + u16 i = 0; + + /* Send the write enable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 11). It's less work to include + * the 11 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This puts the + * EEPROM into write/erase mode. 
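+ * That is also why the first call below shifts out opcode_bits + 2 bits,
+ * and the dummy address that follows is only address_bits - 2 bits wide.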
+ */ + e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, + (u16)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); + + /* Prepare the EEPROM */ + e1000_standby_eeprom(hw); + + while (words_written < words) { + /* Send the Write command (3-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, + eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16)(offset + words_written), + eeprom->address_bits); + + /* Send the data */ + e1000_shift_out_ee_bits(hw, data[words_written], 16); + + /* Toggle the CS line. This in effect tells the EEPROM to + * execute the previous command. + */ + e1000_standby_eeprom(hw); + + /* Read DO repeatedly until it is high (equal to '1'). The + * EEPROM will signal that the command has been completed by + * raising the DO signal. If DO does not go high in 10 + * milliseconds, then error out. + */ + for (i = 0; i < 200; i++) { + eecd = er32(EECD); + if (eecd & E1000_EECD_DO) + break; + udelay(50); + } + if (i == 200) { + e_dbg("EEPROM Write did not complete\n"); + return -E1000_ERR_EEPROM; + } + + /* Recover from write */ + e1000_standby_eeprom(hw); + cond_resched(); + + words_written++; + } + + /* Send the write disable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 10). It's less work to include + * the 10 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This takes the + * EEPROM out of write/erase mode. + */ + e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, + (u16)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - read the adapters MAC from eeprom + * @hw: Struct containing variables accessed by shared code + * + * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the + * second function of dual function devices + */ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + u16 offset; + u16 eeprom_data, i; + + for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { + offset = i >> 1; + if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF); + hw->perm_mac_addr[i + 1] = (u8)(eeprom_data >> 8); + } + + switch (hw->mac_type) { + default: + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) + hw->perm_mac_addr[5] ^= 0x01; + break; + } + + for (i = 0; i < NODE_ADDRESS_SIZE; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return E1000_SUCCESS; +} + +/** + * e1000_init_rx_addrs - Initializes receive address filters. + * @hw: Struct containing variables accessed by shared code + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + */ +static void e1000_init_rx_addrs(struct e1000_hw *hw) +{ + u32 i; + u32 rar_num; + + /* Setup the receive address. */ + e_dbg("Programming MAC Address into RAR[0]\n"); + + e1000_rar_set(hw, hw->mac_addr, 0); + + rar_num = E1000_RAR_ENTRIES; + + /* Zero out the following 14 receive addresses. 
RAR[15] is for + * manageability + */ + e_dbg("Clearing RAR[1-14]\n"); + for (i = 1; i < rar_num; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table + * @hw: Struct containing variables accessed by shared code + * @mc_addr: the multicast address to hash + */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value = 0; + + /* The portion of the address that is used for the hash table is + * determined by the mc_filter_type setting. + */ + switch (hw->mc_filter_type) { + /* [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + */ + case 0: + /* [47:36] i.e. 0x563 for above example address */ + hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: + /* [46:35] i.e. 0xAC6 for above example address */ + hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: + /* [45:34] i.e. 0x5D8 for above example address */ + hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: + /* [43:32] i.e. 0x634 for above example address */ + hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + } + + hash_value &= 0xFFF; + return hash_value; +} + +/** + * e1000_rar_set - Puts an ethernet address into a receive address register. + * @hw: Struct containing variables accessed by shared code + * @addr: Address to put into receive address register + * @index: Receive address register to write + */ +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx + * unit hang. + * + * Description: + * If there are any Rx frames queued up or otherwise present in the HW + * before RSS is enabled, and then we enable RSS, the HW Rx unit will + * hang. To work around this issue, we have to disable receives and + * flush out all Rx frames before we enable RSS. To do so, we modify we + * redirect all Rx traffic to manageability and then reset the HW. + * This flushes away Rx frames, and (since the redirections to + * manageability persists across resets) keeps new ones from coming in + * while we work. Then, we clear the Address Valid AV bit for all MAC + * addresses and undo the re-direction to manageability. + * Now, frames are coming in again, but the MAC won't accept them, so + * far so good. We now proceed to initialize RSS (if necessary) and + * configure the Rx unit. Last, we re-enable the AV bits and continue + * on our merry way. + */ + switch (hw->mac_type) { + default: + /* Indicate to hardware the Address is Valid. */ + rar_high |= E1000_RAH_AV; + break; + } + + E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table. 
+ * @hw: Struct containing variables accessed by shared code + * @offset: Offset in VLAN filer table to write + * @value: Value to write into VLAN filter table + */ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + u32 temp; + + if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { + temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); + E1000_WRITE_FLUSH(); + } else { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_clear_vfta - Clears the VLAN filer table + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0); + E1000_WRITE_FLUSH(); + } +} + +static s32 e1000_id_led_init(struct e1000_hw *hw) +{ + u32 ledctl; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 eeprom_data, i, temp; + const u16 led_mask = 0x0F; + + if (hw->mac_type < e1000_82540) { + /* Nothing to do */ + return E1000_SUCCESS; + } + + ledctl = er32(LEDCTL); + hw->ledctl_default = ledctl; + hw->ledctl_mode1 = hw->ledctl_default; + hw->ledctl_mode2 = hw->ledctl_default; + + if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + + if ((eeprom_data == ID_LED_RESERVED_0000) || + (eeprom_data == ID_LED_RESERVED_FFFF)) { + eeprom_data = ID_LED_DEFAULT; + } + + for (i = 0; i < 4; i++) { + temp = (eeprom_data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_setup_led + * @hw: Struct containing variables accessed by shared code + * + * Prepares SW controlable LED for use and saves the current state of the LED. 
+ */ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + u32 ledctl; + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No setup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn off PHY Smart Power Down (if enabled) */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, + &hw->phy_spd_default); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + (u16)(hw->phy_spd_default & + ~IGP01E1000_GMII_SPD)); + if (ret_val) + return ret_val; + fallthrough; + default: + if (hw->media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + /* Save current LEDCTL settings */ + hw->ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->media_type == e1000_media_type_copper) + ew32(LEDCTL, hw->ledctl_mode1); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores the saved state of the SW controlable LED. + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No cleanup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn on PHY Smart Power Down (if previously enabled) */ + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + hw->phy_spd_default); + if (ret_val) + return ret_val; + fallthrough; + default: + /* Restore LEDCTL settings */ + ew32(LEDCTL, hw->ledctl_default); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turns on the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_on(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode2); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turns off the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_off(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == 
e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode1); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_hw_cntrs - Clears all hardware statistics counters. + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_hw_cntrs(struct e1000_hw *hw) +{ + er32(CRCERRS); + er32(SYMERRS); + er32(MPC); + er32(SCC); + er32(ECOL); + er32(MCC); + er32(LATECOL); + er32(COLC); + er32(DC); + er32(SEC); + er32(RLEC); + er32(XONRXC); + er32(XONTXC); + er32(XOFFRXC); + er32(XOFFTXC); + er32(FCRUC); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + + er32(GPRC); + er32(BPRC); + er32(MPRC); + er32(GPTC); + er32(GORCL); + er32(GORCH); + er32(GOTCL); + er32(GOTCH); + er32(RNBC); + er32(RUC); + er32(RFC); + er32(ROC); + er32(RJC); + er32(TORL); + er32(TORH); + er32(TOTL); + er32(TOTH); + er32(TPR); + er32(TPT); + + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(MPTC); + er32(BPTC); + + if (hw->mac_type < e1000_82543) + return; + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + if (hw->mac_type <= e1000_82544) + return; + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); +} + +/** + * e1000_reset_adaptive - Resets Adaptive IFS to its default state. + * @hw: Struct containing variables accessed by shared code + * + * Call this after e1000_init_hw. You may override the IFS defaults by setting + * hw->ifs_params_forced to true. However, you must initialize hw-> + * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio + * before calling this function. + */ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if (!hw->ifs_params_forced) { + hw->current_ifs_val = 0; + hw->ifs_min_val = IFS_MIN; + hw->ifs_max_val = IFS_MAX; + hw->ifs_step_size = IFS_STEP; + hw->ifs_ratio = IFS_RATIO; + } + hw->in_ifs_mode = false; + ew32(AIT, 0); + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_update_adaptive - update adaptive IFS + * @hw: Struct containing variables accessed by shared code + * + * Called during the callback/watchdog routine to update IFS value based on + * the ratio of transmits to collisions. 
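+/* For illustration (not part of the upstream driver), the update rule in the
+ * function below boils down to one decision per watchdog tick:
+ *
+ *     if (collision_delta * ifs_ratio > tx_packet_delta &&
+ *         tx_packet_delta > MIN_NUM_XMITS)
+ *             raise AIT by ifs_step_size, up to ifs_max_val;
+ *     else if (in_ifs_mode && tx_packet_delta <= MIN_NUM_XMITS)
+ *             write 0 to AIT and leave IFS mode;
+ *
+ * so extra inter-frame spacing is only inserted while collisions are high
+ * relative to the transmit rate, and is removed again once traffic drops off.
+ */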
+ */ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { + if (hw->tx_packet_delta > MIN_NUM_XMITS) { + hw->in_ifs_mode = true; + if (hw->current_ifs_val < hw->ifs_max_val) { + if (hw->current_ifs_val == 0) + hw->current_ifs_val = + hw->ifs_min_val; + else + hw->current_ifs_val += + hw->ifs_step_size; + ew32(AIT, hw->current_ifs_val); + } + } + } else { + if (hw->in_ifs_mode && + (hw->tx_packet_delta <= MIN_NUM_XMITS)) { + hw->current_ifs_val = 0; + hw->in_ifs_mode = false; + ew32(AIT, 0); + } + } + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_get_bus_info + * @hw: Struct containing variables accessed by shared code + * + * Gets the current PCI bus type, speed, and width of the hardware + */ +void e1000_get_bus_info(struct e1000_hw *hw) +{ + u32 status; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->bus_type = e1000_bus_type_pci; + hw->bus_speed = e1000_bus_speed_unknown; + hw->bus_width = e1000_bus_width_unknown; + break; + default: + status = er32(STATUS); + hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? + e1000_bus_type_pcix : e1000_bus_type_pci; + + if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { + hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? + e1000_bus_speed_66 : e1000_bus_speed_120; + } else if (hw->bus_type == e1000_bus_type_pci) { + hw->bus_speed = (status & E1000_STATUS_PCI66) ? + e1000_bus_speed_66 : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + hw->bus_speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + hw->bus_speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + hw->bus_speed = e1000_bus_speed_133; + break; + default: + hw->bus_speed = e1000_bus_speed_reserved; + break; + } + } + hw->bus_width = (status & E1000_STATUS_BUS64) ? + e1000_bus_width_64 : e1000_bus_width_32; + break; + } +} + +/** + * e1000_write_reg_io + * @hw: Struct containing variables accessed by shared code + * @offset: offset to write to + * @value: value to write + * + * Writes a value to one of the devices registers using port I/O (as opposed to + * memory mapped I/O). Only 82544 and newer devices support port I/O. + */ +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value) +{ + unsigned long io_addr = hw->io_base; + unsigned long io_data = hw->io_base + 4; + + e1000_io_write(hw, io_addr, offset); + e1000_io_write(hw, io_data, value); +} + +/** + * e1000_get_cable_length - Estimates the cable length. + * @hw: Struct containing variables accessed by shared code + * @min_length: The estimated minimum length + * @max_length: The estimated maximum length + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * This function always returns a ranged length (minimum & maximum). + * So for M88 phy's, this function interprets the one value returned from the + * register to the minimum and maximum range. + * For IGP phy's, the function calculates the range by the AGC registers. 
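+/* Worked example (not part of the upstream driver): for an M88 PHY the
+ * ranged result is a direct lookup, e.g. a PHY_SPEC_STATUS cable-length
+ * field of e1000_cable_length_80_110 is reported as
+ *
+ *     *min_length = e1000_igp_cable_length_80;    (80 m)
+ *     *max_length = e1000_igp_cable_length_110;   (110 m)
+ *
+ * For IGP PHYs the four AGC channel readings are averaged (dropping the
+ * smallest channel for short cables), and the average indexes
+ * e1000_igp_cable_length_table[], widened by +/- IGP01E1000_AGC_RANGE.
+ */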
+ */ +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length) +{ + s32 ret_val; + u16 agc_value = 0; + u16 i, phy_data; + u16 cable_length; + + *min_length = *max_length = 0; + + /* Use old method for Phy older than IGP */ + if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + /* Convert the enum value to ranged values */ + switch (cable_length) { + case e1000_cable_length_50: + *min_length = 0; + *max_length = e1000_igp_cable_length_50; + break; + case e1000_cable_length_50_80: + *min_length = e1000_igp_cable_length_50; + *max_length = e1000_igp_cable_length_80; + break; + case e1000_cable_length_80_110: + *min_length = e1000_igp_cable_length_80; + *max_length = e1000_igp_cable_length_110; + break; + case e1000_cable_length_110_140: + *min_length = e1000_igp_cable_length_110; + *max_length = e1000_igp_cable_length_140; + break; + case e1000_cable_length_140: + *min_length = e1000_igp_cable_length_140; + *max_length = e1000_igp_cable_length_170; + break; + default: + return -E1000_ERR_PHY; + } + } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ + u16 cur_agc_value; + u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_A, + IGP01E1000_PHY_AGC_B, + IGP01E1000_PHY_AGC_C, + IGP01E1000_PHY_AGC_D + }; + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; + + /* Value bound check. */ + if ((cur_agc_value >= + IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || + (cur_agc_value == 0)) + return -E1000_ERR_PHY; + + agc_value += cur_agc_value; + + /* Update minimal AGC value. */ + if (min_agc_value > cur_agc_value) + min_agc_value = cur_agc_value; + } + + /* Remove the minimal AGC result for length < 50m */ + if (agc_value < + IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { + agc_value -= min_agc_value; + + /* Get the average length of the remaining 3 channels */ + agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); + } else { + /* Get the average length of all the 4 channels. */ + agc_value /= IGP01E1000_PHY_CHANNEL_NUM; + } + + /* Set the range of the calculated length. */ + *min_length = ((e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) > 0) ? + (e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) : 0; + *max_length = e1000_igp_cable_length_table[agc_value] + + IGP01E1000_AGC_RANGE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_polarity - Check the cable polarity + * @hw: Struct containing variables accessed by shared code + * @polarity: output parameter : 0 - Polarity is not reversed + * 1 - Polarity is reversed. + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function simply reads the polarity bit in the + * Phy Status register. For IGP phy's, this bit is valid only if link speed is + * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will + * return 0. If the link speed is 1000 Mbps the polarity status is in the + * IGP01E1000_PHY_PCS_INIT_REG. 
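+/* Usage sketch (not part of the upstream driver): the result is normally
+ * just reported, e.g. when filling a struct e1000_phy_info:
+ *
+ *     e1000_rev_polarity polarity;
+ *     s32 ret_val = e1000_check_polarity(hw, &polarity);
+ *
+ *     if (ret_val == E1000_SUCCESS)
+ *             phy_info->cable_polarity = polarity;
+ *
+ * At 100 Mbps the reported value is always e1000_rev_polarity_normal, as
+ * described above.
+ */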
+ */ +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_m88) { + /* return the Polarity bit in the Status register. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >> + M88E1000_PSSR_REV_POLARITY_SHIFT) ? + e1000_rev_polarity_reversed : e1000_rev_polarity_normal; + + } else if (hw->phy_type == e1000_phy_igp) { + /* Read the Status register to check the speed */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + /* If speed is 1000 Mbps, must read the + * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status + */ + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Read the GIG initialization PCS register (0x00B4) */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, + &phy_data); + if (ret_val) + return ret_val; + + /* Check the polarity bits */ + *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } else { + /* For 10 Mbps, read the polarity bit in the status + * register. (for 100 Mbps this bit is always 0) + */ + *polarity = + (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_downshift - Check if Downshift occurred + * @hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function reads the Downshift bit in the Phy + * Specific Status register. For IGP phy's, it reads the Downgrade bit in the + * Link Health register. In IGP this bit is latched high, so the driver must + * read it immediately after link is established. + */ +static s32 e1000_check_downshift(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = + (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 
1 : 0; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> + M88E1000_PSSR_DOWNSHIFT_SHIFT; + } + + return E1000_SUCCESS; +} + +static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D +}; + +static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw) +{ + u16 min_length, max_length; + u16 phy_data, i; + s32 ret_val; + + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + if (hw->dsp_config_state != e1000_dsp_config_enabled) + return 0; + + if (min_length >= e1000_igp_cable_length_50) { + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + hw->dsp_config_state = e1000_dsp_config_activated; + } else { + u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; + u32 idle_errs = 0; + + /* clear previous idle error counts */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + udelay(1000); + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + hw->ffe_config_state = e1000_ffe_config_active; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if (ret_val) + return ret_val; + break; + } + + if (idle_errs) + ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } + + return 0; +} + +/** + * e1000_config_dsp_after_link_change + * @hw: Struct containing variables accessed by shared code + * @link_up: was link up at the time this was called + * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + * + * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a + * gigabit link is achieved to improve link quality. + */ + +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) +{ + s32 ret_val; + u16 phy_data, phy_saved_data, speed, duplex, i; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + if (link_up) { + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (speed == SPEED_1000) { + ret_val = e1000_1000Mb_check_cable_length(hw); + if (ret_val) + return ret_val; + } + } else { + if (hw->dsp_config_state == e1000_dsp_config_activated) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. 
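+/* For orientation (not part of the upstream driver): the link-down path
+ * below applies the same recipe twice, once for the DSP and once for the
+ * FFE configuration:
+ *
+ *     1. save PHY register 0x2F5B, then write 0x0003 to it to disable the
+ *        PHY transmitter;
+ *     2. force gigabit operation (IGP01E1000_IEEE_FORCE_GIGA) and write the
+ *        DSP/FFE registers back to their default values;
+ *     3. restart auto-negotiation (IGP01E1000_IEEE_RESTART_AUTONEG);
+ *     4. restore 0x2F5B to re-enable the transmitter.
+ *
+ * This undoes the tuning applied by e1000_1000Mb_check_cable_length() while
+ * the gigabit link was up.
+ */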
+ */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; + + ret_val = + e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->dsp_config_state = e1000_dsp_config_enabled; + } + + if (hw->ffe_config_state == e1000_ffe_config_active) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. + */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_DEFAULT); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->ffe_config_state = e1000_ffe_config_enabled; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_set_phy_mode - Set PHY to class A mode + * @hw: Struct containing variables accessed by shared code + * + * Assumes the following operations will follow to enable the new class mode. + * 1. Do a PHY soft reset + * 2. Restart auto-negotiation or force link. + */ +static s32 e1000_set_phy_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 eeprom_data; + + if ((hw->mac_type == e1000_82545_rev_3) && + (hw->media_type == e1000_media_type_copper)) { + ret_val = + e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, + &eeprom_data); + if (ret_val) + return ret_val; + + if ((eeprom_data != EEPROM_RESERVED_WORD) && + (eeprom_data & EEPROM_PHY_CLASS_A)) { + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, + 0x000B); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, + 0x8104); + if (ret_val) + return ret_val; + + hw->phy_reset_disable = false; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - set d3 link power state + * @hw: Struct containing variables accessed by shared code + * @active: true to enable lplu false to disable lplu. + * + * This function sets the lplu state according to the active flag. When + * activating lplu this function also disables smart speed and vise versa. + * lplu will not be activated unless the device autonegotiation advertisement + * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes. 
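+/* Usage sketch (not part of the upstream driver): the flag is driven from
+ * the power-management path, e.g.
+ *
+ *     e1000_set_d3_lplu_state(hw, true);    (entering D3/suspend: allow the
+ *                                            link to drop to 10 Mbps)
+ *     e1000_set_d3_lplu_state(hw, false);   (resume: LPLU off, and the
+ *                                            configured SmartSpeed setting
+ *                                            is restored)
+ *
+ * As noted above, LPLU and SmartSpeed are never left enabled together.
+ */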
+ * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + */ +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + /* During driver activity LPLU should not be used or it will attain link + * from the lowest speeds starting from 10Mbps. The capability is used + * for Dx transitions and states + */ + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); + if (ret_val) + return ret_val; + } + + if (!active) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (hw->smart_speed == e1000_smart_speed_on) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } else if (hw->smart_speed == e1000_smart_speed_off) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* When LPLU is enabled we should disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + return E1000_SUCCESS; +} + +/** + * e1000_set_vco_speed + * @hw: Struct containing variables accessed by shared code + * + * Change VCO speed register to improve Bit Error Rate performance of SERDES. 
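+/* For orientation (not part of the upstream driver): the sequence below
+ * rewrites M88E1000_PHY_GEN_CONTROL (PHY register 30) on two pages selected
+ * via M88E1000_PHY_PAGE_SELECT:
+ *
+ *     page 5: clear M88E1000_PHY_VCO_REG_BIT8
+ *     page 4: set   M88E1000_PHY_VCO_REG_BIT11
+ *
+ * and then restores the previously selected page, so the adjustment is
+ * transparent to other PHY accesses. It only applies to 82545 rev 3 and
+ * 82546 rev 3 parts; all other MAC types return immediately.
+ */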
+ */ +static s32 e1000_set_vco_speed(struct e1000_hw *hw) +{ + s32 ret_val; + u16 default_page = 0; + u16 phy_data; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + /* Set PHY register 30, page 5, bit 8 to 0 */ + + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Set PHY register 30, page 4, bit 11 to 1 */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PHY_VCO_REG_BIT11; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + +/** + * e1000_enable_mng_pass_thru - check for bmc pass through + * @hw: Struct containing variables accessed by shared code + * + * Verifies the hardware needs to allow ARPs to be processed by the host + * returns: - true/false + */ +u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + + if (hw->asf_firmware_present) { + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN) || + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + return false; + if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) + return true; + } + return false; +} + +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 i; + + /* Polarity reversal workaround for forced 10F/10H links. */ + + /* Disable the transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the NO link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be clear. 
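+/* For orientation (not part of the upstream driver): the workaround runs in
+ * two halves. First the PHY transmitter is disabled (page 0x0019,
+ * GEN_CONTROL = 0xFFFF) and PHY_STATUS is polled until the no-link condition
+ * is seen; after a one second settle time the transmitter is re-enabled in
+ * stages (0xFFF0 -> 0xFF00 -> 0x0000, 50 ms apart) and PHY_STATUS is polled
+ * until link returns. Each poll reads the status register twice because the
+ * MII Link Status bit is latching, so only the second read reflects the
+ * current state.
+ */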
+ */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) + break; + msleep(100); + } + + /* Recommended delay time after link has been lost */ + msleep(1000); + + /* Now we will re-enable th transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be set. + */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_get_auto_rd_done + * @hw: Struct containing variables accessed by shared code + * + * Check for EEPROM Auto Read bit done. + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) +{ + msleep(5); + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_cfg_done + * @hw: Struct containing variables accessed by shared code + * + * Checks if the PHY configuration is done + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) +{ + msleep(10); + return E1000_SUCCESS; +} diff --git a/devices/e1000/e1000_hw-5.14-ethercat.h b/devices/e1000/e1000_hw-5.14-ethercat.h new file mode 100644 index 00000000..86277722 --- /dev/null +++ b/devices/e1000/e1000_hw-5.14-ethercat.h @@ -0,0 +1,3085 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ + +/* e1000_hw.h + * Structures, enums, and macros for the MAC + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep-5.14-ethercat.h" + + +/* Forward declarations of structures used by the shared code */ +struct e1000_hw; +struct e1000_hw_stats; + +/* Enumerated types specific to the e1000 hardware */ +/* Media Access Controllers */ +typedef enum { + e1000_undefined = 0, + e1000_82542_rev2_0, + e1000_82542_rev2_1, + e1000_82543, + e1000_82544, + e1000_82540, + e1000_82545, + e1000_82545_rev_3, + e1000_82546, + e1000_ce4100, + e1000_82546_rev_3, + e1000_82541, + e1000_82541_rev_2, + e1000_82547, + e1000_82547_rev_2, + e1000_num_macs +} e1000_mac_type; + +typedef enum { + e1000_eeprom_uninitialized = 0, + e1000_eeprom_spi, + e1000_eeprom_microwire, + e1000_eeprom_flash, + e1000_eeprom_none, /* No NVM support */ + e1000_num_eeprom_types +} e1000_eeprom_type; + +/* Media Types */ +typedef enum { + e1000_media_type_copper = 0, + e1000_media_type_fiber = 1, + e1000_media_type_internal_serdes = 2, + e1000_num_media_types +} e1000_media_type; + +typedef enum { + e1000_10_half = 0, + e1000_10_full = 1, + e1000_100_half = 2, + e1000_100_full = 3 +} e1000_speed_duplex_type; + +/* Flow Control Settings */ +typedef enum { + E1000_FC_NONE = 0, + E1000_FC_RX_PAUSE = 1, + E1000_FC_TX_PAUSE = 2, + E1000_FC_FULL = 3, + E1000_FC_DEFAULT = 0xFF +} e1000_fc_type; + +struct e1000_shadow_ram { + u16 eeprom_word; + bool modified; +}; + +/* PCI bus types */ +typedef enum { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_reserved +} e1000_bus_type; + +/* PCI bus speeds */ +typedef enum { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_reserved +} e1000_bus_speed; + +/* PCI bus widths */ +typedef enum { + e1000_bus_width_unknown = 0, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +} e1000_bus_width; + +/* PHY status info structure and supporting enums */ +typedef enum { + e1000_cable_length_50 = 0, + e1000_cable_length_50_80, + e1000_cable_length_80_110, + e1000_cable_length_110_140, + e1000_cable_length_140, + e1000_cable_length_undefined = 0xFF +} e1000_cable_length; + +typedef enum { + e1000_gg_cable_length_60 = 0, + e1000_gg_cable_length_60_115 = 1, + e1000_gg_cable_length_115_150 = 2, + e1000_gg_cable_length_150 = 4 +} e1000_gg_cable_length; + +typedef enum { + e1000_igp_cable_length_10 = 10, + e1000_igp_cable_length_20 = 20, + e1000_igp_cable_length_30 = 30, + e1000_igp_cable_length_40 = 40, + e1000_igp_cable_length_50 = 50, + e1000_igp_cable_length_60 = 60, + e1000_igp_cable_length_70 = 70, + e1000_igp_cable_length_80 = 80, + e1000_igp_cable_length_90 = 90, + e1000_igp_cable_length_100 = 100, + e1000_igp_cable_length_110 = 110, + e1000_igp_cable_length_115 = 115, + e1000_igp_cable_length_120 = 120, + e1000_igp_cable_length_130 = 130, + e1000_igp_cable_length_140 = 140, + e1000_igp_cable_length_150 = 150, + e1000_igp_cable_length_160 = 160, + e1000_igp_cable_length_170 = 170, + e1000_igp_cable_length_180 = 180 +} e1000_igp_cable_length; + +typedef enum { + e1000_10bt_ext_dist_enable_normal = 0, + e1000_10bt_ext_dist_enable_lower, + e1000_10bt_ext_dist_enable_undefined = 0xFF +} e1000_10bt_ext_dist_enable; + +typedef enum { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +} e1000_rev_polarity; + +typedef enum { + e1000_downshift_normal = 0, + 
e1000_downshift_activated, + e1000_downshift_undefined = 0xFF +} e1000_downshift; + +typedef enum { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +} e1000_smart_speed; + +typedef enum { + e1000_polarity_reversal_enabled = 0, + e1000_polarity_reversal_disabled, + e1000_polarity_reversal_undefined = 0xFF +} e1000_polarity_reversal; + +typedef enum { + e1000_auto_x_mode_manual_mdi = 0, + e1000_auto_x_mode_manual_mdix, + e1000_auto_x_mode_auto1, + e1000_auto_x_mode_auto2, + e1000_auto_x_mode_undefined = 0xFF +} e1000_auto_x_mode; + +typedef enum { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +} e1000_1000t_rx_status; + +typedef enum { + e1000_phy_m88 = 0, + e1000_phy_igp, + e1000_phy_8211, + e1000_phy_8201, + e1000_phy_undefined = 0xFF +} e1000_phy_type; + +typedef enum { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +} e1000_ms_type; + +typedef enum { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active, + e1000_ffe_config_blocked +} e1000_ffe_config; + +typedef enum { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled, + e1000_dsp_config_activated, + e1000_dsp_config_undefined = 0xFF +} e1000_dsp_config; + +struct e1000_phy_info { + e1000_cable_length cable_length; + e1000_10bt_ext_dist_enable extended_10bt_distance; + e1000_rev_polarity cable_polarity; + e1000_downshift downshift; + e1000_polarity_reversal polarity_correction; + e1000_auto_x_mode mdix_mode; + e1000_1000t_rx_status local_rx; + e1000_1000t_rx_status remote_rx; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_eeprom_info { + e1000_eeprom_type type; + u16 word_size; + u16 opcode_bits; + u16 address_bits; + u16 delay_usec; + u16 page_size; +}; + +/* Flex ASF Information */ +#define E1000_HOST_IF_MAX_SIZE 2048 + +typedef enum { + e1000_byte_align = 0, + e1000_word_align = 1, + e1000_dword_align = 2 +} e1000_align_type; + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_EEPROM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_TYPE 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 + +#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \ + (((_value) & 0xff00) >> 8)) + +/* Function prototypes */ +/* Initialization */ +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_set_mac_type(struct e1000_hw *hw); +void e1000_set_media_type(struct e1000_hw *hw); + +/* Link Configuration */ +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex); +s32 e1000_force_mac_fc(struct e1000_hw *hw); + +/* PHY */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_reset(struct e1000_hw *hw); +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); + +/* EEPROM Functions */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw); + +/* MNG HOST IF functions */ +u32 
e1000_enable_mng_pass_thru(struct e1000_hw *hw); + +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ + +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_ICH_IAMT_MODE 0x2 +#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ + +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */ +#define E1000_VFTA_ENTRY_SHIFT 0x5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658 */ +}; +#ifdef __BIG_ENDIAN +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u16 vlan_id; + u8 reserved0; + u8 status; + u32 reserved1; + u8 checksum; + u8 reserved3; + u16 reserved2; +}; +#else +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; +#endif + +bool e1000_check_mng_mode(struct e1000_hw *hw); +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_read_mac_addr(struct e1000_hw *hw); + +/* Filters (multicast, vlan, receive) */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr); +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value); +void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); + +/* LED functions */ +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +s32 e1000_blink_led_start(struct e1000_hw *hw); + +/* Adaptive IFS Functions */ + +/* Everything else */ +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +void e1000_get_bus_info(struct e1000_hw *hw); +void e1000_pci_set_mwi(struct e1000_hw *hw); +void e1000_pci_clear_mwi(struct e1000_hw *hw); +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); +int e1000_pcix_get_mmrbc(struct e1000_hw *hw); +/* Port I/O is only supported on 82544 and newer */ +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value); + +#define E1000_READ_REG_IO(a, reg) \ + e1000_read_reg_io((a), E1000_##reg) +#define E1000_WRITE_REG_IO(a, reg, val) \ + e1000_write_reg_io((a), E1000_##reg, val) + +/* PCI Device IDs */ +#define E1000_DEV_ID_82542 0x1000 +#define E1000_DEV_ID_82543GC_FIBER 0x1001 +#define E1000_DEV_ID_82543GC_COPPER 0x1004 +#define E1000_DEV_ID_82544EI_COPPER 0x1008 +#define E1000_DEV_ID_82544EI_FIBER 0x1009 +#define E1000_DEV_ID_82544GC_COPPER 0x100C +#define E1000_DEV_ID_82544GC_LOM 0x100D +#define E1000_DEV_ID_82540EM 0x100E +#define 
E1000_DEV_ID_82540EM_LOM 0x1015 +#define E1000_DEV_ID_82540EP_LOM 0x1016 +#define E1000_DEV_ID_82540EP 0x1017 +#define E1000_DEV_ID_82540EP_LP 0x101E +#define E1000_DEV_ID_82545EM_COPPER 0x100F +#define E1000_DEV_ID_82545EM_FIBER 0x1011 +#define E1000_DEV_ID_82545GM_COPPER 0x1026 +#define E1000_DEV_ID_82545GM_FIBER 0x1027 +#define E1000_DEV_ID_82545GM_SERDES 0x1028 +#define E1000_DEV_ID_82546EB_COPPER 0x1010 +#define E1000_DEV_ID_82546EB_FIBER 0x1012 +#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define E1000_DEV_ID_82541EI 0x1013 +#define E1000_DEV_ID_82541EI_MOBILE 0x1018 +#define E1000_DEV_ID_82541ER_LOM 0x1014 +#define E1000_DEV_ID_82541ER 0x1078 +#define E1000_DEV_ID_82547GI 0x1075 +#define E1000_DEV_ID_82541GI 0x1076 +#define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82541GI_LF 0x107C +#define E1000_DEV_ID_82546GB_COPPER 0x1079 +#define E1000_DEV_ID_82546GB_FIBER 0x107A +#define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82547EI_MOBILE 0x101A +#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E + +#define NODE_ADDRESS_SIZE 6 + +/* MAC decode size is 128K - This is the size of BAR0 */ +#define MAC_DECODE_SIZE (128 * 1024) + +#define E1000_82542_2_0_REV_ID 2 +#define E1000_82542_2_1_REV_ID 3 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* The sizes (in bytes) of a ethernet packet */ +#define ENET_HEADER_SIZE 14 +#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ +#define ETHERNET_FCS_SIZE 4 +#define MINIMUM_ETHERNET_PACKET_SIZE \ + (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE) +#define CRC_LENGTH ETHERNET_FCS_SIZE +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* 802.1q VLAN Packet Sizes */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */ + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ +#define ETHERNET_IP_TYPE 0x0800 /* IP packets */ +#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */ + +/* Packet Header defines */ +#define IP_PROTOCOL_TCP 6 +#define IP_PROTOCOL_UDP 0x11 + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + */ +#define POLL_IMS_ENABLE_MASK ( \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ) + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. We + * reserve one of these spots for our directed address, allowing us room for + * E1000_RAR_ENTRIES - 1 multicast addresses. 
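+/* For illustration (not part of the upstream header): with E1000_RAR_ENTRIES
+ * defined as 15 just below, entry 0 carries the station address programmed
+ * via e1000_rar_set(), entries 1-14 remain for additional addresses, and
+ * RAR[15] is left untouched for manageability firmware, matching the
+ * "Clearing RAR[1-14]" code earlier in this patch. Likewise,
+ * MINIMUM_ETHERNET_PACKET_SIZE above works out to 64 - 4 = 60 bytes, i.e.
+ * the minimum frame length excluding the FCS.
+ */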
+ */ +#define E1000_RAR_ENTRIES 15 + +#define MIN_NUMBER_OF_DESCRIPTORS 8 +#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 + +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of buffers 1-3 */ + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ 
+ E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; /* */ + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; /* */ + u8 cmd; /* */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; /* */ + } fields; + } upper; +}; + +/* Filters */ +#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ +#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address Register */ +struct e1000_rar { + volatile __le32 low; /* receive address low */ + volatile __le32 high; /* receive address high */ +}; + +/* Number of entries in the 
Multicast Table Array (MTA). */ +#define E1000_NUM_MTA_REGISTERS 128 + +/* IPv4 Address Table Entry */ +struct e1000_ipv4_at_entry { + volatile u32 ipv4_addr; /* IP Address (RW) */ + volatile u32 reserved; +}; + +/* Four wakeup IP addresses are supported */ +#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 +#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX +#define E1000_IP6AT_SIZE 1 + +/* IPv6 Address Table Entry */ +struct e1000_ipv6_at_entry { + volatile u8 ipv6_addr[16]; +}; + +/* Flexible Filter Length Table Entry */ +struct e1000_fflt_entry { + volatile u32 length; /* Flexible Filter Length (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Mask Table Entry */ +struct e1000_ffmt_entry { + volatile u32 mask; /* Flexible Filter Mask (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Value Table Entry */ +struct e1000_ffvt_entry { + volatile u32 value; /* Flexible Filter Value (RW) */ + volatile u32 reserved; +}; + +/* Four Flexible Filters are supported */ +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX + +#define E1000_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Register Set. (82543, 82544) + * + * Registers are defined to be 32 bits and should be accessed as 32 bit values. + * These registers are physically located on the NIC, but are mapped into the + * host memory address space. + * + * RW - register is both readable and writable + * RO - register is read only + * WO - register is write only + * R/clr - register is read only and is cleared when read + * A - register array + */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ + +#define INTEL_CE_GBE_MDIO_RCOMP_BASE (hw->ce4100_gbe_mdio_base_virt) +#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) +#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) +#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) +#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC) +#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20) +#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24) + +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ + +/* Auxiliary Control Register. 
This register is CE4100 specific, + * RMII/RGMII function is switched by this register - RW + * Following are bits definitions of the Auxiliary Control Register + */ +#define E1000_CTL_AUX 0x000E0 +#define E1000_CTL_AUX_END_SEL_SHIFT 10 +#define E1000_CTL_AUX_ENDIANESS_SHIFT 8 +#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0 + +/* descriptor and packet transfer use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use CTL_AUX.ENDIANESS, packet use default */ +#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use default, packet use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT) +/* all use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT) + +#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT) +#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT) + +/* LW little endian, Byte big endian */ +#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT) + +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define FEXTNVM_SW_CONFIG 0x0001 +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_FLASH_UPDATES 1000 +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* RX Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* RX Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* RX 
Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* RX Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* RX Data FIFO Packet Count - RW */ +#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ +#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ +#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ +#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ +#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ +#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ +#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */ +#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ +#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ +#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ +#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ +#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ +#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ +#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ +#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ +#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */ +#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */ +#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */ +#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */ +#define E1000_TDT 0x03818 /* TX Descripotr Tail - RW */ +#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */ +#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ +#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ +#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ +#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ +#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ +#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ +#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ +#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ +#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define 
E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 
0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ + +#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ + +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* Register Set (82542) + * + * Some of the 82542 registers are located at different offsets than they are + * in more current versions of the 8254x. Despite the difference in location, + * the registers function in the same manner. 
+ */ +#define E1000_82542_CTL_AUX E1000_CTL_AUX +#define E1000_82542_CTRL E1000_CTRL +#define E1000_82542_CTRL_DUP E1000_CTRL_DUP +#define E1000_82542_STATUS E1000_STATUS +#define E1000_82542_EECD E1000_EECD +#define E1000_82542_EERD E1000_EERD +#define E1000_82542_CTRL_EXT E1000_CTRL_EXT +#define E1000_82542_FLA E1000_FLA +#define E1000_82542_MDIC E1000_MDIC +#define E1000_82542_SCTL E1000_SCTL +#define E1000_82542_FEXTNVM E1000_FEXTNVM +#define E1000_82542_FCAL E1000_FCAL +#define E1000_82542_FCAH E1000_FCAH +#define E1000_82542_FCT E1000_FCT +#define E1000_82542_VET E1000_VET +#define E1000_82542_RA 0x00040 +#define E1000_82542_ICR E1000_ICR +#define E1000_82542_ITR E1000_ITR +#define E1000_82542_ICS E1000_ICS +#define E1000_82542_IMS E1000_IMS +#define E1000_82542_IMC E1000_IMC +#define E1000_82542_RCTL E1000_RCTL +#define E1000_82542_RDTR 0x00108 +#define E1000_82542_RDFH E1000_RDFH +#define E1000_82542_RDFT E1000_RDFT +#define E1000_82542_RDFHS E1000_RDFHS +#define E1000_82542_RDFTS E1000_RDFTS +#define E1000_82542_RDFPC E1000_RDFPC +#define E1000_82542_RDBAL 0x00110 +#define E1000_82542_RDBAH 0x00114 +#define E1000_82542_RDLEN 0x00118 +#define E1000_82542_RDH 0x00120 +#define E1000_82542_RDT 0x00128 +#define E1000_82542_RDTR0 E1000_82542_RDTR +#define E1000_82542_RDBAL0 E1000_82542_RDBAL +#define E1000_82542_RDBAH0 E1000_82542_RDBAH +#define E1000_82542_RDLEN0 E1000_82542_RDLEN +#define E1000_82542_RDH0 E1000_82542_RDH +#define E1000_82542_RDT0 E1000_82542_RDT +#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication + * RX Control - RW */ +#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) +#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ +#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ +#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ +#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ +#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ +#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ +#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ +#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ +#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ +#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ +#define E1000_82542_RDTR1 0x00130 +#define E1000_82542_RDBAL1 0x00138 +#define E1000_82542_RDBAH1 0x0013C +#define E1000_82542_RDLEN1 0x00140 +#define E1000_82542_RDH1 0x00148 +#define E1000_82542_RDT1 0x00150 +#define E1000_82542_FCRTH 0x00160 +#define E1000_82542_FCRTL 0x00168 +#define E1000_82542_FCTTV E1000_FCTTV +#define E1000_82542_TXCW E1000_TXCW +#define E1000_82542_RXCW E1000_RXCW +#define E1000_82542_MTA 0x00200 +#define E1000_82542_TCTL E1000_TCTL +#define E1000_82542_TCTL_EXT E1000_TCTL_EXT +#define E1000_82542_TIPG E1000_TIPG +#define E1000_82542_TDBAL 0x00420 +#define E1000_82542_TDBAH 0x00424 +#define E1000_82542_TDLEN 0x00428 +#define E1000_82542_TDH 0x00430 +#define E1000_82542_TDT 0x00438 +#define E1000_82542_TIDV 0x00440 +#define E1000_82542_TBT E1000_TBT +#define E1000_82542_AIT E1000_AIT +#define E1000_82542_VFTA 0x00600 +#define E1000_82542_LEDCTL E1000_LEDCTL +#define E1000_82542_PBA E1000_PBA +#define E1000_82542_PBS E1000_PBS +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_EEARBC E1000_EEARBC +#define E1000_82542_FLASHT E1000_FLASHT +#define E1000_82542_EEWR E1000_EEWR +#define E1000_82542_FLSWCTL E1000_FLSWCTL +#define E1000_82542_FLSWDATA 
E1000_FLSWDATA +#define E1000_82542_FLSWCNT E1000_FLSWCNT +#define E1000_82542_FLOP E1000_FLOP +#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL +#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE +#define E1000_82542_PHY_CTRL E1000_PHY_CTRL +#define E1000_82542_ERT E1000_ERT +#define E1000_82542_RXDCTL E1000_RXDCTL +#define E1000_82542_RXDCTL1 E1000_RXDCTL1 +#define E1000_82542_RADV E1000_RADV +#define E1000_82542_RSRPD E1000_RSRPD +#define E1000_82542_TXDMAC E1000_TXDMAC +#define E1000_82542_KABGTXD E1000_KABGTXD +#define E1000_82542_TDFHS E1000_TDFHS +#define E1000_82542_TDFTS E1000_TDFTS +#define E1000_82542_TDFPC E1000_TDFPC +#define E1000_82542_TXDCTL E1000_TXDCTL +#define E1000_82542_TADV E1000_TADV +#define E1000_82542_TSPMT E1000_TSPMT +#define E1000_82542_CRCERRS E1000_CRCERRS +#define E1000_82542_ALGNERRC E1000_ALGNERRC +#define E1000_82542_SYMERRS E1000_SYMERRS +#define E1000_82542_RXERRC E1000_RXERRC +#define E1000_82542_MPC E1000_MPC +#define E1000_82542_SCC E1000_SCC +#define E1000_82542_ECOL E1000_ECOL +#define E1000_82542_MCC E1000_MCC +#define E1000_82542_LATECOL E1000_LATECOL +#define E1000_82542_COLC E1000_COLC +#define E1000_82542_DC E1000_DC +#define E1000_82542_TNCRS E1000_TNCRS +#define E1000_82542_SEC E1000_SEC +#define E1000_82542_CEXTERR E1000_CEXTERR +#define E1000_82542_RLEC E1000_RLEC +#define E1000_82542_XONRXC E1000_XONRXC +#define E1000_82542_XONTXC E1000_XONTXC +#define E1000_82542_XOFFRXC E1000_XOFFRXC +#define E1000_82542_XOFFTXC E1000_XOFFTXC +#define E1000_82542_FCRUC E1000_FCRUC +#define E1000_82542_PRC64 E1000_PRC64 +#define E1000_82542_PRC127 E1000_PRC127 +#define E1000_82542_PRC255 E1000_PRC255 +#define E1000_82542_PRC511 E1000_PRC511 +#define E1000_82542_PRC1023 E1000_PRC1023 +#define E1000_82542_PRC1522 E1000_PRC1522 +#define E1000_82542_GPRC E1000_GPRC +#define E1000_82542_BPRC E1000_BPRC +#define E1000_82542_MPRC E1000_MPRC +#define E1000_82542_GPTC E1000_GPTC +#define E1000_82542_GORCL E1000_GORCL +#define E1000_82542_GORCH E1000_GORCH +#define E1000_82542_GOTCL E1000_GOTCL +#define E1000_82542_GOTCH E1000_GOTCH +#define E1000_82542_RNBC E1000_RNBC +#define E1000_82542_RUC E1000_RUC +#define E1000_82542_RFC E1000_RFC +#define E1000_82542_ROC E1000_ROC +#define E1000_82542_RJC E1000_RJC +#define E1000_82542_MGTPRC E1000_MGTPRC +#define E1000_82542_MGTPDC E1000_MGTPDC +#define E1000_82542_MGTPTC E1000_MGTPTC +#define E1000_82542_TORL E1000_TORL +#define E1000_82542_TORH E1000_TORH +#define E1000_82542_TOTL E1000_TOTL +#define E1000_82542_TOTH E1000_TOTH +#define E1000_82542_TPR E1000_TPR +#define E1000_82542_TPT E1000_TPT +#define E1000_82542_PTC64 E1000_PTC64 +#define E1000_82542_PTC127 E1000_PTC127 +#define E1000_82542_PTC255 E1000_PTC255 +#define E1000_82542_PTC511 E1000_PTC511 +#define E1000_82542_PTC1023 E1000_PTC1023 +#define E1000_82542_PTC1522 E1000_PTC1522 +#define E1000_82542_MPTC E1000_MPTC +#define E1000_82542_BPTC E1000_BPTC +#define E1000_82542_TSCTC E1000_TSCTC +#define E1000_82542_TSCTFC E1000_TSCTFC +#define E1000_82542_RXCSUM E1000_RXCSUM +#define E1000_82542_WUC E1000_WUC +#define E1000_82542_WUFC E1000_WUFC +#define E1000_82542_WUS E1000_WUS +#define E1000_82542_MANC E1000_MANC +#define E1000_82542_IPAV E1000_IPAV +#define E1000_82542_IP4AT E1000_IP4AT +#define E1000_82542_IP6AT E1000_IP6AT +#define E1000_82542_WUPL E1000_WUPL +#define E1000_82542_WUPM E1000_WUPM +#define E1000_82542_FFLT E1000_FFLT +#define E1000_82542_TDFH 0x08010 +#define E1000_82542_TDFT 0x08018 +#define E1000_82542_FFMT E1000_FFMT +#define 
E1000_82542_FFVT E1000_FFVT +#define E1000_82542_HOST_IF E1000_HOST_IF +#define E1000_82542_IAM E1000_IAM +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_PSRCTL E1000_PSRCTL +#define E1000_82542_RAID E1000_RAID +#define E1000_82542_TARC0 E1000_TARC0 +#define E1000_82542_TDBAL1 E1000_TDBAL1 +#define E1000_82542_TDBAH1 E1000_TDBAH1 +#define E1000_82542_TDLEN1 E1000_TDLEN1 +#define E1000_82542_TDH1 E1000_TDH1 +#define E1000_82542_TDT1 E1000_TDT1 +#define E1000_82542_TXDCTL1 E1000_TXDCTL1 +#define E1000_82542_TARC1 E1000_TARC1 +#define E1000_82542_RFCTL E1000_RFCTL +#define E1000_82542_GCR E1000_GCR +#define E1000_82542_GSCL_1 E1000_GSCL_1 +#define E1000_82542_GSCL_2 E1000_GSCL_2 +#define E1000_82542_GSCL_3 E1000_GSCL_3 +#define E1000_82542_GSCL_4 E1000_GSCL_4 +#define E1000_82542_FACTPS E1000_FACTPS +#define E1000_82542_SWSM E1000_SWSM +#define E1000_82542_FWSM E1000_FWSM +#define E1000_82542_FFLT_DBG E1000_FFLT_DBG +#define E1000_82542_IAC E1000_IAC +#define E1000_82542_ICRXPTC E1000_ICRXPTC +#define E1000_82542_ICRXATC E1000_ICRXATC +#define E1000_82542_ICTXPTC E1000_ICTXPTC +#define E1000_82542_ICTXATC E1000_ICTXATC +#define E1000_82542_ICTXQEC E1000_ICTXQEC +#define E1000_82542_ICTXQMTC E1000_ICTXQMTC +#define E1000_82542_ICRXDMTC E1000_ICRXDMTC +#define E1000_82542_ICRXOC E1000_ICRXOC +#define E1000_82542_HICR E1000_HICR + +#define E1000_82542_CPUVEC E1000_CPUVEC +#define E1000_82542_MRQC E1000_MRQC +#define E1000_82542_RETA E1000_RETA +#define E1000_82542_RSSRK E1000_RSSRK +#define E1000_82542_RSSIM E1000_RSSIM +#define E1000_82542_RSSIR E1000_RSSIR +#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA +#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 txerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorcl; + u64 gorch; + u64 gotcl; + u64 gotch; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rlerrc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 torl; + u64 torh; + u64 totl; + u64 toth; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +/* Structure containing variables used by the shared code (e1000_hw.c) */ +struct e1000_hw { + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + void __iomem *ce4100_gbe_mdio_base_virt; + e1000_mac_type mac_type; + e1000_phy_type phy_type; + u32 phy_init_script; + e1000_media_type media_type; + void *back; + struct e1000_shadow_ram *eeprom_shadow_ram; + u32 flash_bank_size; + u32 flash_base_addr; + e1000_fc_type fc; + e1000_bus_speed bus_speed; + e1000_bus_width bus_width; + e1000_bus_type bus_type; + struct e1000_eeprom_info eeprom; + e1000_ms_type master_slave; + e1000_ms_type original_master_slave; + e1000_ffe_config ffe_config_state; + u32 asf_firmware_present; + u32 eeprom_semaphore_present; + unsigned long io_base; + u32 phy_id; + u32 phy_revision; + u32 phy_addr; + u32 original_fc; + u32 txcw; + u32 autoneg_failed; + u32 max_frame_size; + u32 
min_frame_size; + u32 mc_filter_type; + u32 num_mc_addrs; + u32 collision_delta; + u32 tx_packet_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + bool tx_pkt_filtering; + struct e1000_host_mng_dhcp_cookie mng_cookie; + u16 phy_spd_default; + u16 autoneg_advertised; + u16 pci_cmd_word; + u16 fc_high_water; + u16 fc_low_water; + u16 fc_pause_time; + u16 current_ifs_val; + u16 ifs_min_val; + u16 ifs_max_val; + u16 ifs_step_size; + u16 ifs_ratio; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 autoneg; + u8 mdix; + u8 forced_speed_duplex; + u8 wait_autoneg_complete; + u8 dma_fairness; + u8 mac_addr[NODE_ADDRESS_SIZE]; + u8 perm_mac_addr[NODE_ADDRESS_SIZE]; + bool disable_polarity_correction; + bool speed_downgraded; + e1000_smart_speed smart_speed; + e1000_dsp_config dsp_config_state; + bool get_link_status; + bool serdes_has_link; + bool tbi_compatibility_en; + bool tbi_compatibility_on; + bool laa_is_present; + bool phy_reset_disable; + bool initialize_hw_bits_disable; + bool fc_send_xon; + bool fc_strict_ieee; + bool report_tx_early; + bool adaptive_ifs; + bool ifs_params_forced; + bool in_ifs_mode; + bool mng_reg_access_disabled; + bool leave_av_bit_off; + bool bad_tx_carr_stats_fd; + bool has_smbus; +}; + +#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ +#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ +#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ +#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ +/* Register Bit Masks */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion + by EEPROM/Flash */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ + +/* EEPROM/Flash Control */ +#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ +#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ +#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ +#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_EEPROM_GRANT_ATTEMPTS +#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_STM_OPCODE 0xDB00 +#define E1000_HICR_FW_RESET 0xC0 + +#define E1000_SHADOW_RAM_WORDS 2048 +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC0 + +/* EEPROM Read */ +#define E1000_EERD_START 0x00000001 /* Start Read */ +#define E1000_EERD_DONE 0x00000010 /* Read Done */ +#define E1000_EERD_ADDR_SHIFT 8 +#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */ +#define E1000_EERD_DATA_SHIFT 16 +#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */ + +/* SPI EEPROM Status Register */ +#define EEPROM_STATUS_RDY_SPI 0x01 +#define EEPROM_STATUS_WEN_SPI 
0x02 +#define EEPROM_STATUS_BP0_SPI 0x04 +#define EEPROM_STATUS_BP1_SPI 0x08 +#define EEPROM_STATUS_WPEN_SPI 0x80 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 + +#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000 +#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000 +#define INTEL_CE_GBE_MDIC_GO 0x80000000 +#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000 + +#define E1000_KUMCTRLSTA_MASK 0x0000FFFF +#define E1000_KUMCTRLSTA_OFFSET 0x001F0000 +#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KUMCTRLSTA_REN 0x00200000 + +#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 +#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001 +#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 +#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003 +#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 
0x00000004 +#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 +#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 +#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E +#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F + +/* FIFO Control */ +#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008 +#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 + +/* In-Band Control */ +#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500 +#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 + +/* Half-Duplex Control */ +#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 +#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 + +#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E + +#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000 +#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000 + +#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000 +#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000 +#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 +#define E1000_PHY_CTRL_B2B_EN 0x00000080 + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Receive Address */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Clear */ +#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMC_SRPD E1000_ICR_SRPD +#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMC_DSW E1000_ICR_DSW +#define E1000_IMC_PHYINT E1000_ICR_PHYINT +#define E1000_IMC_EPRST E1000_ICR_EPRST + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define 
E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SW_W_SYNC definitions */ +#define E1000_SWFW_EEP_SM 0x0001 +#define E1000_SWFW_PHY0_SM 0x0002 +#define E1000_SWFW_PHY1_SM 0x0004 +#define E1000_SWFW_MAC_CSR_SM 0x0008 + +/* Receive Descriptor */ +#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ +#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ +#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */ +#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */ +#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */ + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Receive Descriptor Control */ +#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ +#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ +#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */ +#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 
0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. + still to be processed. */ +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ +/* Extended Transmit Control */ +#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ + +/* Receive Checksum Control */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Multiple Receive Queue Control */ +#define E1000_MRQC_ENABLE_MASK 0x00000003 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 
/* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */ +#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */ +#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */ +#define E1000_WUS_BC 0x00000010 /* Broadcast Received */ +#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */ +#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */ +#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */ +#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */ +#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */ +#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery + * Filtering */ +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host + * memory */ +#define 
E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address + * filtering */ +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* FW Semaphore Register */ +#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ + +#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ +#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ +#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ +#define E1000_FWSM_SKUEL_SHIFT 29 +#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ +#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ +#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ +#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */ + +/* FFLT Debug Register */ +#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ + +typedef enum { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_interface_only +} e1000_mng_mode; + +/* Host Interface Control Register */ +#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ +#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done + * to put command in RAM */ +#define E1000_HICR_SV 0x00000004 /* Status Validity */ +#define E1000_HICR_FWR 0x00000080 /* FW reset. 
Set by the Host */ + +/* Host Interface Command Interface - Address range 0x8800-0x8EFF */ +#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ + +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; /* I/F bits for command, status for return */ + u8 checksum; +}; +struct e1000_host_command_info { + struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ +}; + +/* Host SMB register #0 */ +#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ +#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ +#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ +#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ + +/* Host SMB register #1 */ +#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN +#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN +#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT +#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT + +/* FW Status Register */ +#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ + +/* Wake Up Packet Length */ +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ + +#define E1000_MDALIGN 4096 + +/* PCI-Ex registers*/ + +/* PCI-Ex Control Register */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +/* Function Active and Power State to MNG */ +#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 +#define E1000_FACTPS_LAN0_VALID 0x00000004 +#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008 +#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0 +#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6 +#define E1000_FACTPS_LAN1_VALID 0x00000100 +#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200 +#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000 +#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12 +#define E1000_FACTPS_IDE_ENABLE 0x00004000 +#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000 +#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000 +#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18 +#define E1000_FACTPS_SP_ENABLE 0x00100000 +#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000 +#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000 +#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24 +#define E1000_FACTPS_IPMI_ENABLE 0x04000000 +#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000 +#define E1000_FACTPS_MNGCG 0x20000000 +#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000 +#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000 + +/* PCI-Ex Config Space */ +#define PCI_EX_LINK_STATUS 0x12 +#define PCI_EX_LINK_WIDTH_MASK 0x3F0 +#define PCI_EX_LINK_WIDTH_SHIFT 4 + +/* EEPROM Commands - Microwire */ +#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode 
*/ +#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM Commands - SPI */ +#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ +#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ +#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ +#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ +#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Size definitions */ +#define EEPROM_WORD_SIZE_SHIFT 6 +#define EEPROM_SIZE_SHIFT 10 +#define EEPROM_SIZE_MASK 0x1C00 + +/* EEPROM Word Offsets */ +#define EEPROM_COMPAT 0x0003 +#define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 +#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ +#define EEPROM_PHY_CLASS_WORD 0x0007 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 +#define EEPROM_INIT_3GIO_3 0x001A +#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 +#define EEPROM_CFG 0x0012 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ +#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */ +#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F + +/* Mask bit for PHY class in Word 7 of the EEPROM */ +#define EEPROM_PHY_CLASS_A 0x8000 + +/* Mask bits for fields in Word 0x0a of the EEPROM */ +#define EEPROM_WORD0A_ILOS 0x0010 +#define EEPROM_WORD0A_SWDPIO 0x01E0 +#define EEPROM_WORD0A_LRST 0x0200 +#define EEPROM_WORD0A_FD 0x0400 +#define EEPROM_WORD0A_66MHZ 0x0800 + +/* Mask bits for fields in Word 0x0f of the EEPROM */ +#define EEPROM_WORD0F_PAUSE_MASK 0x3000 +#define EEPROM_WORD0F_PAUSE 0x1000 +#define EEPROM_WORD0F_ASM_DIR 0x2000 +#define EEPROM_WORD0F_ANE 0x0800 +#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 +#define EEPROM_WORD0F_LPLU 0x0001 + +/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */ +#define EEPROM_WORD1020_GIGA_DISABLE 0x0010 +#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008 + +/* Mask bits for fields in Word 0x1a of the EEPROM */ +#define EEPROM_WORD1A_ASPM_MASK 0x000C + 
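/* Illustrative sketch only -- not part of this patch or of the upstream
 * driver. It shows how the Word 0x0F PAUSE bits defined above select the
 * default flow-control mode, mirroring the decision later made in
 * e1000_setup_link(). The enum and the function name are hypothetical
 * stand-ins for the driver's E1000_FC_* constants and internal logic. */

enum fc_mode { FC_MODE_NONE, FC_MODE_TX_PAUSE, FC_MODE_FULL };

static enum fc_mode fc_mode_from_word0f(unsigned short word0f)
{
	/* Bits 13:12 of EEPROM word 0x0F: PAUSE (0x1000) and ASM_DIR (0x2000). */
	switch (word0f & EEPROM_WORD0F_PAUSE_MASK) {
	case 0:
		return FC_MODE_NONE;      /* neither bit set: no flow control */
	case EEPROM_WORD0F_ASM_DIR:
		return FC_MODE_TX_PAUSE;  /* asymmetric only: transmit PAUSE frames */
	default:
		return FC_MODE_FULL;      /* PAUSE advertised: full flow control */
	}
}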
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* EEPROM Map defines (WORD OFFSETS)*/ +#define EEPROM_NODE_ADDRESS_BYTE_0 0 +#define EEPROM_PBA_BYTE_1 8 + +#define EEPROM_RESERVED_WORD 0xFFFF + +/* EEPROM Map Sizes (Byte Counts) */ +#define PBA_SIZE 4 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +/* Collision distance is a 0-based value that applies to + * half-duplex-capable hardware only. */ +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLLISION_DISTANCE_82542 64 +#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_COLD_SHIFT 12 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define E1000_TXDMAC_DPP 0x00000001 + +/* Adaptive IFS defines */ +#define TX_THRESHOLD_START 8 +#define TX_THRESHOLD_INCREMENT 10 +#define TX_THRESHOLD_DECREMENT 1 +#define TX_THRESHOLD_STOP 190 +#define TX_THRESHOLD_DISABLE 0 +#define TX_THRESHOLD_TIMER_MS 10000 +#define MIN_NUM_XMITS 1000 +#define IFS_MAX 80 +#define IFS_STEP 10 +#define IFS_MIN 40 +#define IFS_RATIO 4 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 +#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 +#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 +#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000 + +#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF +#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ +#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ +#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ + +#define E1000_PBS_16K E1000_PBA_16K + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* The historical defaults for the flow control values are given below. 
*/ +#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ +#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ +#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ + +/* PCIX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 + +/* Number of bits required to shift right the "pause" bits from the + * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register. + */ +#define PAUSE_SHIFT 5 + +/* Number of bits required to shift left the "SWDPIO" bits from the + * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register. + */ +#define SWDPIO_SHIFT 17 + +/* Number of bits required to shift left the "SWDPIO_EXT" bits from the + * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register. + */ +#define SWDPIO__EXT_SHIFT 4 + +/* Number of bits required to shift left the "ILOS" bit from the EEPROM + * (bit 4) to the "ILOS" (bit 7) field in the CTRL register. + */ +#define ILOS_SHIFT 3 + +#define RECEIVE_BUFFER_ALIGN_SIZE (256) + +/* Number of milliseconds we wait for auto-negotiation to complete */ +#define LINK_UP_TIMEOUT 500 + +/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ +#define AUTO_READ_DONE_TIMEOUT 10 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 + +#define E1000_TX_BUFFER_SIZE ((u32)1514) + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +/* TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the RX descriptor with EOP set + * error = the 8 bit error field of the RX descriptor with EOP set + * length = the sum of all the length fields of the RX descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * max_frame_length = the maximum frame length we want to accept. + * min_frame_length = the minimum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \ + ((adapter)->tbi_compatibility_on && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= ((adapter)->max_frame_size + 1))) : \ + (((length) > (adapter)->min_frame_size) && \ + ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1))))) + +/* Structures, enums, and macros for the PHY */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CTRL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +#define IGP01E1000_IEEE_REGS_PAGE 0x0000 +#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 +#define IGP01E1000_IEEE_FORCE_GIGA 0x0140 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ + +/* IGP01E1000 AGC Registers - stores the cable length values*/ +#define IGP01E1000_PHY_AGC_A 0x1172 +#define IGP01E1000_PHY_AGC_B 0x1272 +#define IGP01E1000_PHY_AGC_C 0x1472 +#define IGP01E1000_PHY_AGC_D 0x1872 + +/* IGP02E1000 AGC Registers for cable length values */ +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +/* IGP01E1000 DSP Reset Register */ +#define IGP01E1000_PHY_DSP_RESET 0x1F33 +#define IGP01E1000_PHY_DSP_SET 0x1F71 +#define IGP01E1000_PHY_DSP_FFE 0x1F35 + +#define IGP01E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_CHANNEL_NUM 4 + +#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271 +#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 +#define IGP01E1000_PHY_AGC_PARAM_D 0x1871 + +#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000 +#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000 + +#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890 +#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000 +#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004 +#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069 + +#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A +/* IGP01E1000 PCS Initialization register - stores the polarity status when + * speed = 1000 Mbps. */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5 + +#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */ + +/* Next Page TX Register */ +#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* Link Partner Next Page Register */ +#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */ +#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 
/* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +/* Extended Status Register */ +#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */ +#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */ + +#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */ + /* (0=enable, 1=disable) */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. 
+ */ +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080 + /* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 + /* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5 +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; + * 3=110-140M;4=>140M */ +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_REV_POLARITY_SHIFT 1 +#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5 +#define M88E1000_PSSR_MDIX_SHIFT 6 +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* IGP01E1000 Specific Port Config Register - R/W */ +#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 +#define IGP01E1000_PSCFR_PRE_EN 0x0020 +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 +#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100 +#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400 +#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000 + +/* IGP01E1000 Specific Port Status Register - R/O */ +#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */ +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C +#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200 +#define IGP01E1000_PSSR_LINK_UP 0x0400 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */ +#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000 +#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */ +#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */ + +/* IGP01E1000 Specific Port Control Register - R/W */ +#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010 +#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200 +#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400 +#define IGP01E1000_PSCR_FLIP_CHIP 0x0800 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */ + +/* IGP01E1000 Specific Port Link Health Register */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000 +#define IGP01E1000_PLHR_MASTER_FAULT 0x2000 +#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000 +#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */ +#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_0 0x0100 +#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040 +#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010 +#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008 +#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004 +#define 
IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002 +#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001 + +/* IGP01E1000 Channel Quality Register */ +#define IGP01E1000_MSE_CHANNEL_D 0x000F +#define IGP01E1000_MSE_CHANNEL_C 0x00F0 +#define IGP01E1000_MSE_CHANNEL_B 0x0F00 +#define IGP01E1000_MSE_CHANNEL_A 0xF000 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ + +/* IGP01E1000 DSP reset macros */ +#define DSP_RESET_ENABLE 0x0 +#define DSP_RESET_DISABLE 0x2 +#define E1000_MAX_DSP_RESETS 10 + +/* IGP01E1000 & IGP02E1000 AGC Registers */ + +#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ + +/* IGP02E1000 AGC Register Length 9-bit mask */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F + +/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ +#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 +#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113 + +/* The precision error of the cable length is +/- 10 meters */ +#define IGP01E1000_AGC_RANGE 10 +#define IGP02E1000_AGC_RANGE 15 + +/* IGP01E1000 PCS Initialization register */ +/* bits 3:6 in the PCS registers stores the channels polarity */ +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +/* IGP01E1000 GMII FIFO Register */ +#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed + * on Link-Up */ +#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */ + +/* IGP01E1000 Analog Register */ +#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 +#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 +#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC +#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE + +#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 +#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 +#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 +#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 +#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 + +#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 +#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 +#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 +#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 + +/* Bit definitions for valid PHY IDs. */ +/* I = Integrated + * E = External + */ +#define M88_VENDOR 0x0141 +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID +#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1118_E_PHY_ID 0x01410E40 +#define L1LXT971A_PHY_ID 0x001378E0 + +#define RTL8211B_PHY_ID 0x001CC910 +#define RTL8201N_PHY_ID 0x8200 +#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */ +#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */ + +/* Bits... 
+ * 15-5: page + * 4-0: register offset + */ +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) \ + (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) + +#define IGP3_PHY_PORT_CTRL \ + PHY_REG(769, 17) /* Port General Configuration */ +#define IGP3_PHY_RATE_ADAPT_CTRL \ + PHY_REG(769, 25) /* Rate Adapter Control Register */ + +#define IGP3_KMRN_FIFO_CTRL_STATS \ + PHY_REG(770, 16) /* KMRN FIFO's control/status register */ +#define IGP3_KMRN_POWER_MNG_CTRL \ + PHY_REG(770, 17) /* KMRN Power Management Control Register */ +#define IGP3_KMRN_INBAND_CTRL \ + PHY_REG(770, 18) /* KMRN Inband Control Register */ +#define IGP3_KMRN_DIAG \ + PHY_REG(770, 19) /* KMRN Diagnostic register */ +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ +#define IGP3_KMRN_ACK_TIMEOUT \ + PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ + +#define IGP3_VR_CTRL \ + PHY_REG(776, 18) /* Voltage regulator control register */ +#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ +#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */ + +#define IGP3_CAPABILITY \ + PHY_REG(776, 19) /* IGP3 Capability Register */ + +/* Capabilities for SKU Control */ +#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ +#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ +#define IGP3_CAP_ASF 0x0004 /* Support ASF */ +#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ +#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ +#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ +#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ +#define IGP3_CAP_RSS 0x0080 /* Support RSS */ +#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ +#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ + +#define IGP3_PPC_JORDAN_EN 0x0001 +#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 + +#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001 +#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E +#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 +#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 + +#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. 
Ctrl register */ +#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ + +#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) +#define IGP3_KMRN_EC_DIS_INBAND 0x0080 + +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ +#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ +#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */ +#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ +#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ +#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ +#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ +#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ +#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ +#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ + +#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */ +#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ +#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ +#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ +#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ +#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ +#define IFE_PESC_POLARITY_REVERSED_SHIFT 8 + +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */ +#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ +#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ +#define IFE_PSC_FORCE_POLARITY_SHIFT 5 +#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 + +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ +#define IFE_PMC_MDIX_MODE_SHIFT 6 +#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ + +#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ +#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ +#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ +#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ +#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ +#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ +#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ +#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ +#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ +#define IFE_PSCL_PROBE_LEDS_OFF 
0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ +#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define ICH_CYCLE_READ 0x0 +#define ICH_CYCLE_RESERVED 0x1 +#define ICH_CYCLE_WRITE 0x2 +#define ICH_CYCLE_ERASE 0x3 + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_FRACC 0x0050 +#define ICH_FLASH_FREG0 0x0054 +#define ICH_FLASH_FREG1 0x0058 +#define ICH_FLASH_FREG2 0x005C +#define ICH_FLASH_FREG3 0x0060 +#define ICH_FLASH_FPR0 0x0074 +#define ICH_FLASH_FPR1 0x0078 +#define ICH_FLASH_SSFSTS 0x0090 +#define ICH_FLASH_SSFCTL 0x0092 +#define ICH_FLASH_PREOP 0x0094 +#define ICH_FLASH_OPTYPE 0x0096 +#define ICH_FLASH_OPMENU 0x0098 + +#define ICH_FLASH_REG_MAPSIZE 0x00A0 +#define ICH_FLASH_SECTOR_SIZE 4096 +#define ICH_GFPREG_BASE_MASK 0x1FFF +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF + +/* Miscellaneous PHY bit definitions. */ +#define PHY_PREAMBLE 0xFFFFFFFF +#define PHY_SOF 0x01 +#define PHY_OP_READ 0x02 +#define PHY_OP_WRITE 0x01 +#define PHY_TURNAROUND 0x02 +#define PHY_PREAMBLE_SIZE 32 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 +#define E1000_PHY_ADDRESS 0x01 +#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */ +#define REG4_SPEED_MASK 0x01E0 +#define REG9_SPEED_MASK 0x0300 +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 +#define ADVERTISE_1000_FULL 0x0020 +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ +#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ +#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ + +#endif /* _E1000_HW_H_ */ diff --git a/devices/e1000/e1000_hw-5.14-orig.c b/devices/e1000/e1000_hw-5.14-orig.c new file mode 100644 index 00000000..1042e79a --- /dev/null +++ b/devices/e1000/e1000_hw-5.14-orig.c @@ -0,0 +1,5636 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ + +/* e1000_hw.c + * Shared functions for accessing and configuring the MAC + */ + +#include "e1000.h" + +static s32 e1000_check_downshift(struct e1000_hw *hw); +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity); +static void e1000_clear_hw_cntrs(struct e1000_hw *hw); +static void e1000_clear_vfta(struct e1000_hw *hw); +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, + bool link_up); +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); +static s32 e1000_detect_gig_phy(struct e1000_hw *hw); +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length); +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static s32 e1000_id_led_init(struct e1000_hw *hw); +static void e1000_init_rx_addrs(struct e1000_hw *hw); +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value); +static s32 e1000_set_phy_type(struct e1000_hw *hw); +static void e1000_phy_init_script(struct e1000_hw *hw); +static s32 e1000_setup_copper_link(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw); +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw); +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count); +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count); +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data); +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data); +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); +static s32 e1000_acquire_eeprom(struct e1000_hw *hw); +static void e1000_release_eeprom(struct e1000_hw *hw); +static void e1000_standby_eeprom(struct e1000_hw *hw); +static s32 e1000_set_vco_speed(struct e1000_hw *hw); +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw); +static s32 e1000_set_phy_mode(struct e1000_hw *hw); +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); + +/* IGP cable length table */ +static const +u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = { + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, + 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, + 40, 50, 50, 50, 50, 
50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, + 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, + 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, + 110, 110, + 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, + 120, 120 +}; + +static DEFINE_MUTEX(e1000_eeprom_lock); +static DEFINE_SPINLOCK(e1000_phy_lock); + +/** + * e1000_set_phy_type - Set the phy type member in the hw struct. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_set_phy_type(struct e1000_hw *hw) +{ + if (hw->mac_type == e1000_undefined) + return -E1000_ERR_PHY_TYPE; + + switch (hw->phy_id) { + case M88E1000_E_PHY_ID: + case M88E1000_I_PHY_ID: + case M88E1011_I_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1118_E_PHY_ID: + hw->phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: + if (hw->mac_type == e1000_82541 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_type = e1000_phy_igp; + break; + case RTL8211B_PHY_ID: + hw->phy_type = e1000_phy_8211; + break; + case RTL8201N_PHY_ID: + hw->phy_type = e1000_phy_8201; + break; + default: + /* Should never have loaded on this device */ + hw->phy_type = e1000_phy_undefined; + return -E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_phy_init_script(struct e1000_hw *hw) +{ + u16 phy_saved_data; + + if (hw->phy_init_script) { + msleep(20); + + /* Save off the current value of register 0x2F5B to be restored + * at the end of this routine. 
+ */ + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disabled the PHY transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + msleep(20); + + e1000_write_phy_reg(hw, 0x0000, 0x0140); + msleep(5); + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + e1000_write_phy_reg(hw, 0x1F95, 0x0001); + e1000_write_phy_reg(hw, 0x1F71, 0xBD21); + e1000_write_phy_reg(hw, 0x1F79, 0x0018); + e1000_write_phy_reg(hw, 0x1F30, 0x1600); + e1000_write_phy_reg(hw, 0x1F31, 0x0014); + e1000_write_phy_reg(hw, 0x1F32, 0x161C); + e1000_write_phy_reg(hw, 0x1F94, 0x0003); + e1000_write_phy_reg(hw, 0x1F96, 0x003F); + e1000_write_phy_reg(hw, 0x2010, 0x0008); + break; + + case e1000_82541_rev_2: + case e1000_82547_rev_2: + e1000_write_phy_reg(hw, 0x1F73, 0x0099); + break; + default: + break; + } + + e1000_write_phy_reg(hw, 0x0000, 0x3300); + msleep(20); + + /* Now enable the transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (hw->mac_type == e1000_82547) { + u16 fused, fine, coarse; + + /* Move to analog registers page */ + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_SPARE_FUSE_STATUS, + &fused); + + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_STATUS, + &fused); + + fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; + coarse = + fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; + + if (coarse > + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + coarse -= + IGP01E1000_ANALOG_FUSE_COARSE_10; + fine -= IGP01E1000_ANALOG_FUSE_FINE_1; + } else if (coarse == + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + fine -= IGP01E1000_ANALOG_FUSE_FINE_10; + + fused = + (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | + (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | + (coarse & + IGP01E1000_ANALOG_FUSE_COARSE_MASK); + + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_CONTROL, + fused); + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_BYPASS, + IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); + } + } + } +} + +/** + * e1000_set_mac_type - Set the mac type member in the hw struct. 
+ * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + switch (hw->device_id) { + case E1000_DEV_ID_82542: + switch (hw->revision_id) { + case E1000_82542_2_0_REV_ID: + hw->mac_type = e1000_82542_rev2_0; + break; + case E1000_82542_2_1_REV_ID: + hw->mac_type = e1000_82542_rev2_1; + break; + default: + /* Invalid 82542 revision ID */ + return -E1000_ERR_MAC_TYPE; + } + break; + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + hw->mac_type = e1000_82543; + break; + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + hw->mac_type = e1000_82544; + break; + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + hw->mac_type = e1000_82540; + break; + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + hw->mac_type = e1000_82545; + break; + case E1000_DEV_ID_82545GM_COPPER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82545GM_SERDES: + hw->mac_type = e1000_82545_rev_3; + break; + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + hw->mac_type = e1000_82546; + break; + case E1000_DEV_ID_82546GB_COPPER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + hw->mac_type = e1000_82546_rev_3; + break; + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER_LOM: + hw->mac_type = e1000_82541; + break; + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + hw->mac_type = e1000_82541_rev_2; + break; + case E1000_DEV_ID_82547EI: + case E1000_DEV_ID_82547EI_MOBILE: + hw->mac_type = e1000_82547; + break; + case E1000_DEV_ID_82547GI: + hw->mac_type = e1000_82547_rev_2; + break; + case E1000_DEV_ID_INTEL_CE4100_GBE: + hw->mac_type = e1000_ce4100; + break; + default: + /* Should never have loaded on this device */ + return -E1000_ERR_MAC_TYPE; + } + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->asf_firmware_present = true; + break; + default: + break; + } + + /* The 82543 chip does not count tx_carrier_errors properly in + * FD mode + */ + if (hw->mac_type == e1000_82543) + hw->bad_tx_carr_stats_fd = true; + + if (hw->mac_type > e1000_82544) + hw->has_smbus = true; + + return E1000_SUCCESS; +} + +/** + * e1000_set_media_type - Set media type and TBI compatibility. 
+ * @hw: Struct containing variables accessed by shared code + */ +void e1000_set_media_type(struct e1000_hw *hw) +{ + u32 status; + + if (hw->mac_type != e1000_82543) { + /* tbi_compatibility is only valid on 82543 */ + hw->tbi_compatibility_en = false; + } + + switch (hw->device_id) { + case E1000_DEV_ID_82545GM_SERDES: + case E1000_DEV_ID_82546GB_SERDES: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->media_type = e1000_media_type_fiber; + break; + case e1000_ce4100: + hw->media_type = e1000_media_type_copper; + break; + default: + status = er32(STATUS); + if (status & E1000_STATUS_TBIMODE) { + hw->media_type = e1000_media_type_fiber; + /* tbi_compatibility not valid on fiber */ + hw->tbi_compatibility_en = false; + } else { + hw->media_type = e1000_media_type_copper; + } + break; + } + } +} + +/** + * e1000_reset_hw - reset the hardware completely + * @hw: Struct containing variables accessed by shared code + * + * Reset the transmit and receive units; mask and clear all interrupts. + */ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 ctrl_ext; + u32 manc; + u32 led_ctrl; + s32 ret_val; + + /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC with + * the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(); + + /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ + hw->tbi_compatibility_on = false; + + /* Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msleep(10); + + ctrl = er32(CTRL); + + /* Must reset the PHY before resetting the MAC */ + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Issue a global reset to the MAC. This will reset the chip's + * transmit, receive, DMA, and link units. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + e_dbg("Issuing a global reset to MAC\n"); + + switch (hw->mac_type) { + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82546: + case e1000_82541: + case e1000_82541_rev_2: + /* These controllers can't ack the 64-bit write when issuing the + * reset, so use IO-mapping as a workaround to issue the reset + */ + E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); + break; + case e1000_82545_rev_3: + case e1000_82546_rev_3: + /* Reset is performed on a shadow of the control register */ + ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); + break; + case e1000_ce4100: + default: + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + break; + } + + /* After MAC reset, force reload of EEPROM to restore power-on settings + * to device. Later controllers reload the EEPROM automatically, so + * just wait for reload to complete. 
+ */ + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* Wait for reset to complete */ + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + /* Wait for EEPROM reload */ + msleep(2); + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + /* Wait for EEPROM reload */ + msleep(20); + break; + default: + /* Auto read done will delay 5ms or poll based on mac type */ + ret_val = e1000_get_auto_rd_done(hw); + if (ret_val) + return ret_val; + break; + } + + /* Disable HW ARPs on ASF enabled adapters */ + if (hw->mac_type >= e1000_82540) { + manc = er32(MANC); + manc &= ~(E1000_MANC_ARP_EN); + ew32(MANC, manc); + } + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + e1000_phy_init_script(hw); + + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Clear any pending interrupt events. */ + er32(ICR); + + /* If MWI was previously enabled, reenable it. */ + if (hw->mac_type == e1000_82542_rev2_0) { + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw - Performs basic configuration of the adapter. + * @hw: Struct containing variables accessed by shared code + * + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes the receive address registers, + * multicast table, and VLAN filter table. Calls routines to setup link + * configuration and flow control settings. Clears all on-chip counters. Leaves + * the transmit and receive units disabled and uninitialized. + */ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + s32 ret_val; + u32 mta_size; + u32 ctrl_ext; + + /* Initialize Identification LED */ + ret_val = e1000_id_led_init(hw); + if (ret_val) { + e_dbg("Error Initializing Identification LED\n"); + return ret_val; + } + + /* Set the media type and TBI compatibility */ + e1000_set_media_type(hw); + + /* Disabling VLAN filtering. */ + e_dbg("Initializing the IEEE VLAN\n"); + if (hw->mac_type < e1000_82545_rev_3) + ew32(VET, 0); + e1000_clear_vfta(hw); + + /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + ew32(RCTL, E1000_RCTL_RST); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Setup the receive address. This involves initializing all of the + * Receive Address Registers (RARs 0 - 15). 
+ */ + e1000_init_rx_addrs(hw); + + /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ + if (hw->mac_type == e1000_82542_rev2_0) { + ew32(RCTL, 0); + E1000_WRITE_FLUSH(); + msleep(1); + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + mta_size = E1000_MC_TBL_SIZE; + for (i = 0; i < mta_size; i++) { + E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); + /* use write flush to prevent Memory Write Block (MWB) from + * occurring when accessing our register space + */ + E1000_WRITE_FLUSH(); + } + + /* Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. Valid only on + * 82542 and 82543 silicon. + */ + if (hw->dma_fairness && hw->mac_type <= e1000_82543) { + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PRIOR); + } + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + /* Workaround for PCI-X problem when BIOS sets MMRBC + * incorrectly. + */ + if (hw->bus_type == e1000_bus_type_pcix && + e1000_pcix_get_mmrbc(hw) > 2048) + e1000_pcix_set_mmrbc(hw, 2048); + break; + } + + /* Call a subroutine to configure the link and setup flow control. */ + ret_val = e1000_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + if (hw->mac_type > e1000_82544) { + ctrl = er32(TXDCTL); + ctrl = + (ctrl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + ew32(TXDCTL, ctrl); + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs(hw); + + if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || + hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { + ctrl_ext = er32(CTRL_EXT); + /* Relaxed ordering must be disabled to avoid a parity + * error crash in a PCI slot. + */ + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + } + + return ret_val; +} + +/** + * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting. + * @hw: Struct containing variables accessed by shared code. + */ +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) +{ + u16 eeprom_data; + s32 ret_val; + + if (hw->media_type != e1000_media_type_internal_serdes) + return E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, + &eeprom_data); + if (ret_val) + return ret_val; + + if (eeprom_data != EEPROM_RESERVED_WORD) { + /* Adjust SERDES output amplitude only. */ + eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link - Configures flow control and link settings. + * @hw: Struct containing variables accessed by shared code + * + * Determines which flow control settings to use. Calls the appropriate media- + * specific link configuration function. Configures the flow control settings. + * Assuming the adapter has a valid link partner, a valid link should be + * established. Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. 
+ */ +s32 e1000_setup_link(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 ret_val; + u16 eeprom_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->fc == E1000_FC_DEFAULT) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0) + hw->fc = E1000_FC_NONE; + else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == + EEPROM_WORD0F_ASM_DIR) + hw->fc = E1000_FC_TX_PAUSE; + else + hw->fc = E1000_FC_FULL; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + if (hw->mac_type == e1000_82542_rev2_0) + hw->fc &= (~E1000_FC_TX_PAUSE); + + if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1)) + hw->fc &= (~E1000_FC_RX_PAUSE); + + hw->original_fc = hw->fc; + + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc); + + /* Take the 4 bits from EEPROM word 0x0F that determine the initial + * polarity value for the SW controlled pins, and setup the + * Extended Device Control reg with that info. + * This is needed because one of the SW controlled pins is used for + * signal detection. So this should be done before e1000_setup_pcs_link() + * or e1000_phy_setup() is called. + */ + if (hw->mac_type == e1000_82543) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << + SWDPIO__EXT_SHIFT); + ew32(CTRL_EXT, ctrl_ext); + } + + /* Call the necessary subroutine to configure the link. */ + ret_val = (hw->media_type == e1000_media_type_copper) ? + e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw); + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + e_dbg("Initializing the Flow Control address, type and timer regs\n"); + + ew32(FCT, FLOW_CONTROL_TYPE); + ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); + ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); + + ew32(FCTTV, hw->fc_pause_time); + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames in not enabled, then these + * registers will be set to 0. + */ + if (!(hw->fc & E1000_FC_TX_PAUSE)) { + ew32(FCRTL, 0); + ew32(FCRTH, 0); + } else { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. 
+ */
+ if (hw->fc_send_xon) {
+ ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
+ ew32(FCRTH, hw->fc_high_water);
+ } else {
+ ew32(FCRTL, hw->fc_low_water);
+ ew32(FCRTH, hw->fc_high_water);
+ }
+ }
+ return ret_val;
+}
+
+/**
+ * e1000_setup_fiber_serdes_link - prepare fiber or serdes link
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Manipulates Physical Coding Sublayer functions in order to configure
+ * link. Assumes the hardware has been previously reset and the transmitter
+ * and receiver are not enabled.
+ */
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 status;
+ u32 txcw = 0;
+ u32 i;
+ u32 signal = 0;
+ s32 ret_val;
+
+ /* On adapters with a MAC newer than 82544, SWDP 1 will be
+ * set when the optics detect a signal. On older adapters, it will be
+ * cleared when there is a signal. This applies to fiber media only.
+ * If we're on serdes media, adjust the output amplitude to value
+ * set in the EEPROM.
+ */
+ ctrl = er32(CTRL);
+ if (hw->media_type == e1000_media_type_fiber)
+ signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+
+ ret_val = e1000_adjust_serdes_amplitude(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Take the link out of reset */
+ ctrl &= ~(E1000_CTRL_LRST);
+
+ /* Adjust VCO speed to improve BER performance */
+ ret_val = e1000_set_vco_speed(hw);
+ if (ret_val)
+ return ret_val;
+
+ e1000_config_collision_dist(hw);
+
+ /* Check for a software override of the flow control settings, and setup
+ * the device accordingly. If auto-negotiation is enabled, then
+ * software will have to set the "PAUSE" bits to the correct value in
+ * the Transmit Config Word Register (TXCW) and re-start
+ * auto-negotiation. However, if auto-negotiation is disabled, then
+ * software will have to manually configure the two flow control enable
+ * bits in the CTRL register.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames, but
+ * not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we do
+ * not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ */
+ switch (hw->fc) {
+ case E1000_FC_NONE:
+ /* Flow ctrl is completely disabled by a software over-ride */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+ break;
+ case E1000_FC_RX_PAUSE:
+ /* Rx Flow control is enabled and Tx Flow control is disabled by
+ * a software over-ride. Since there really isn't a way to
+ * advertise that we are capable of Rx Pause ONLY, we will
+ * advertise that we support both symmetric and asymmetric Rx
+ * PAUSE. Later, we will disable the adapter's ability to send
+ * PAUSE frames.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ case E1000_FC_TX_PAUSE:
+ /* Tx Flow control is enabled, and Rx Flow control is disabled,
+ * by a software over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+ break;
+ case E1000_FC_FULL:
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ default:
+ e_dbg("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Since auto-negotiation is enabled, take the link out of reset (the
+ * link will be in reset, because we previously reset the chip). This
+ * will restart auto-negotiation.
If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + e_dbg("Auto-negotiation enabled\n"); + + ew32(TXCW, txcw); + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + hw->txcw = txcw; + msleep(1); + + /* If we have a signal (the cable is plugged in) then poll for a + * "Link-Up" indication in the Device Status Register. Time-out if a + * link isn't seen in 500 milliseconds seconds (Auto-negotiation should + * complete in less than 500 milliseconds even if the other end is doing + * it in SW). For internal serdes, we just assume a signal is present, + * then poll. + */ + if (hw->media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { + e_dbg("Looking for Link\n"); + for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { + msleep(10); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == (LINK_UP_TIMEOUT / 10)) { + e_dbg("Never got a valid link from auto-neg!!!\n"); + hw->autoneg_failed = 1; + /* AutoNeg failed to achieve a link, so we'll call + * e1000_check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = e1000_check_for_link(hw); + if (ret_val) { + e_dbg("Error while checking for link\n"); + return ret_val; + } + hw->autoneg_failed = 0; + } else { + hw->autoneg_failed = 0; + e_dbg("Valid Link Found\n"); + } + } else { + e_dbg("No Signal Detected\n"); + } + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series. + * @hw: Struct containing variables accessed by shared code + * + * Commits changes to PHY configuration by calling e1000_phy_reset(). + */ +static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw) +{ + s32 ret_val; + + /* SW reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +static s32 gbe_dhg_phy_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u32 ctrl_aux; + + switch (hw->phy_type) { + case e1000_phy_8211: + ret_val = e1000_copper_link_rtl_setup(hw); + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + case e1000_phy_8201: + /* Set RMII mode */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= E1000_CTL_AUX_RMII; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + + /* Disable the J/K bits required for receive */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= 0x4; + ctrl_aux &= ~0x2; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + ret_val = e1000_copper_link_rtl_setup(hw); + + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + default: + e_dbg("Error Resetting the PHY\n"); + return E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_preconfig - early configuration for copper + * @hw: Struct containing variables accessed by shared code + * + * Make sure we have a valid PHY and change PHY mode before link setup. + */ +static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + ctrl = er32(CTRL); + /* With 82543, we need to force speed and duplex on the MAC equal to + * what the PHY speed and duplex configuration is. In addition, we need + * to perform a hardware reset on the PHY to take it out of reset. 
+ */ + if (hw->mac_type > e1000_82543) { + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + } else { + ctrl |= + (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); + ew32(CTRL, ctrl); + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + } + + /* Make sure we have a valid PHY */ + ret_val = e1000_detect_gig_phy(hw); + if (ret_val) { + e_dbg("Error, did not detect valid phy.\n"); + return ret_val; + } + e_dbg("Phy ID = %x\n", hw->phy_id); + + /* Set PHY to class A mode (if necessary) */ + ret_val = e1000_set_phy_mode(hw); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82545_rev_3) || + (hw->mac_type == e1000_82546_rev_3)) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + phy_data |= 0x00000008; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + } + + if (hw->mac_type <= e1000_82543 || + hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_reset_disable = false; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) +{ + u32 led_ctrl; + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Wait 15ms for MAC to configure PHY from eeprom settings */ + msleep(15); + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + + /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ + if (hw->phy_type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = e1000_set_d3_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D3\n"); + return ret_val; + } + } + + /* Configure mdi-mdix settings */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + hw->dsp_config_state = e1000_dsp_config_disabled; + /* Force MDI for earlier revs of the IGP PHY */ + phy_data &= + ~(IGP01E1000_PSCR_AUTO_MDIX | + IGP01E1000_PSCR_FORCE_MDI_MDIX); + hw->mdix = 1; + + } else { + hw->dsp_config_state = e1000_dsp_config_enabled; + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (hw->mdix) { + case 1: + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + phy_data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + } + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->autoneg) { + e1000_ms_type phy_ms_setting = hw->master_slave; + + if (hw->ffe_config_state == e1000_ffe_config_active) + hw->ffe_config_state = e1000_ffe_config_enabled; + + if (hw->dsp_config_state == e1000_dsp_config_activated) + hw->dsp_config_state = e1000_dsp_config_enabled; + + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
+ */ + if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + /* Set auto Master/Slave resolution process */ + ret_val = + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~CR_1000T_MS_ENABLE; + ret_val = + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (phy_ms_setting) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + break; + default: + break; + } + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (hw->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (hw->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (hw->phy_revision < M88E1011_I_REV_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. 
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((hw->phy_revision == E1000_REVISION_2) && + (hw->phy_id == M88E1111_I_PHY_ID)) { + /* Vidalia Phy, set the downshift counter to 5x */ + phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + } + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_autoneg - setup auto-neg + * @hw: Struct containing variables accessed by shared code + * + * Setup auto-negotiation and flow control advertisements, + * and then perform auto-negotiation. + */ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Perform some bounds checking on the hw->autoneg_advertised + * parameter. If this variable is zero, then set it to the default. + */ + hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (hw->autoneg_advertised == 0) + hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* IFE/RTL8201N PHY only supports 10/100 */ + if (hw->phy_type == e1000_phy_8201) + hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; + + e_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + e_dbg("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + e_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (hw->wait_autoneg_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + e_dbg + ("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->get_link_status = true; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_postconfig - post link setup + * @hw: Struct containing variables accessed by shared code + * + * Config the MAC and the PHY after link is up. + * 1) Set up the MAC to the current PHY speed/duplex + * if we are on 82543. If we + * are on newer silicon, we only need to configure + * collision distance in the Transmit Control Register. + * 2) Set up flow control on the MAC to that established with + * the link partner. + * 3) Config DSP to improve Gigabit link quality for some PHY revisions. 
+ */ +static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) +{ + s32 ret_val; + + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { + e1000_config_collision_dist(hw); + } else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error Configuring Flow Control\n"); + return ret_val; + } + + /* Config DSP to improve Giga link quality */ + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_config_dsp_after_link_change(hw, true); + if (ret_val) { + e_dbg("Error Configuring DSP after link up\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_copper_link - phy/speed/duplex setting + * @hw: Struct containing variables accessed by shared code + * + * Detects which PHY is present and sets up the speed and duplex + */ +static s32 e1000_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + /* Check if it is a valid PHY and set PHY mode if necessary. */ + ret_val = e1000_copper_link_preconfig(hw); + if (ret_val) + return ret_val; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_copper_link_igp_setup(hw); + if (ret_val) + return ret_val; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_copper_link_mgp_setup(hw); + if (ret_val) + return ret_val; + } else { + ret_val = gbe_dhg_phy_setup(hw); + if (ret_val) { + e_dbg("gbe_dhg_phy_setup failed!\n"); + return ret_val; + } + } + + if (hw->autoneg) { + /* Setup autoneg and flow control advertisement + * and perform autonegotiation + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H,or 100F + * depending on value from forced_speed_duplex. + */ + e_dbg("Forcing speed and duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + for (i = 0; i < 10; i++) { + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + /* Config the MAC and PHY after link is up */ + ret_val = e1000_copper_link_postconfig(hw); + if (ret_val) + return ret_val; + + e_dbg("Valid link established!!!\n"); + return E1000_SUCCESS; + } + udelay(10); + } + + e_dbg("Unable to establish link!!!\n"); + return E1000_SUCCESS; +} + +/** + * e1000_phy_setup_autoneg - phy settings + * @hw: Struct containing variables accessed by shared code + * + * Configures PHY autoneg and flow control advertisement settings + */ +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + else if (hw->phy_type == e1000_phy_8201) + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. 
First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { + e_dbg + ("Advertise 1000mb Half duplex requested, request denied!\n"); + } + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start + * auto-negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc) { + case E1000_FC_NONE: /* 0 */ + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_RX_PAUSE: /* 1 */ + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + */ + /* Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_TX_PAUSE: /* 2 */ + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case E1000_FC_FULL: /* 3 */ + /* Flow control (both RX and TX) is enabled by a software + * over-ride. 
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (hw->phy_type == e1000_phy_8201) { + mii_1000t_ctrl_reg = 0; + } else { + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex - force link settings + * @hw: Struct containing variables accessed by shared code + * + * Force PHY speed and duplex settings to hw->forced_speed_duplex + */ +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 mii_ctrl_reg; + u16 mii_status_reg; + u16 phy_data; + u16 i; + + /* Turn off Flow control if we are forcing speed and duplex. */ + hw->fc = E1000_FC_NONE; + + e_dbg("hw->fc = %d\n", hw->fc); + + /* Read the Device Control Register. */ + ctrl = er32(CTRL); + + /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(DEVICE_SPEED_MASK); + + /* Clear the Auto Speed Detect Enable bit. */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Read the MII Control Register. */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg); + if (ret_val) + return ret_val; + + /* We need to disable autoneg in order to force link and duplex. */ + + mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN; + + /* Are we forcing Full or Half Duplex? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_10_full) { + /* We want to force full duplex so we SET the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl |= E1000_CTRL_FD; + mii_ctrl_reg |= MII_CR_FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + /* We want to force half duplex so we CLEAR the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl &= ~E1000_CTRL_FD; + mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; + e_dbg("Half Duplex\n"); + } + + /* Are we forcing 100Mbps??? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_100_half) { + /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */ + ctrl |= E1000_CTRL_SPD_100; + mii_ctrl_reg |= MII_CR_SPEED_100; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + e_dbg("Forcing 100mb "); + } else { + /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + mii_ctrl_reg |= MII_CR_SPEED_10; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + e_dbg("Forcing 10mb "); + } + + e1000_config_collision_dist(hw); + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + + if (hw->phy_type == e1000_phy_m88) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires + * MDI forced whenever speed are duplex are forced. 
+ */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %x\n", phy_data); + + /* Need to reset the PHY or these changes will be ignored */ + mii_ctrl_reg |= MII_CR_RESET; + + /* Disable MDI-X support for 10/100 */ + } else { + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed or duplex are forced. + */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + /* Write back the modified PHY MII control register. */ + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg); + if (ret_val) + return ret_val; + + udelay(1); + + /* The wait_autoneg_complete flag may be a little misleading here. + * Since we are forcing speed and duplex, Auto-Neg is not enabled. + * But we do want to delay for a period while forcing only so we + * don't generate false No Link messages. So we will wait here + * only if the user has set wait_autoneg_complete to 1, which is + * the default. + */ + if (hw->wait_autoneg_complete) { + /* We will wait for autoneg to complete. */ + e_dbg("Waiting for forced speed/duplex link.\n"); + mii_status_reg = 0; + + /* Wait for autoneg to complete or 4.5 seconds to expire */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { + /* We didn't get link. Reset the DSP and wait again + * for link. + */ + ret_val = e1000_phy_reset_dsp(hw); + if (ret_val) { + e_dbg("Error Resetting PHY DSP\n"); + return ret_val; + } + } + /* This loop will early-out if the link condition has been + * met + */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + } + } + + if (hw->phy_type == e1000_phy_m88) { + /* Because we reset the PHY above, we need to re-force TX_CLK in + * the Extended PHY Specific Control Register to 25MHz clock. + * This value defaults back to a 2.5MHz clock when the PHY is + * reset. + */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + + /* In addition, because of the s/w reset above, we need to + * enable CRS on Tx. This must be set for both full and half + * duplex operation. 
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ret_val = e1000_polarity_reversal_workaround(hw); + if (ret_val) + return ret_val; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_config_collision_dist - set collision distance register + * @hw: Struct containing variables accessed by shared code + * + * Sets the collision distance in the Transmit Control register. + * Link should have been established previously. Reads the speed and duplex + * information from the Device Status register. + */ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl, coll_dist; + + if (hw->mac_type < e1000_82543) + coll_dist = E1000_COLLISION_DISTANCE_82542; + else + coll_dist = E1000_COLLISION_DISTANCE; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= coll_dist << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_config_mac_to_phy - sync phy and mac settings + * @hw: Struct containing variables accessed by shared code + * + * Sets MAC speed and duplex settings to reflect the those in the PHY + * The contents of the PHY register containing the needed information need to + * be passed in. + */ +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + /* 82544 or newer MAC, Auto Speed Detection takes care of + * MAC speed/duplex configuration. + */ + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) + return E1000_SUCCESS; + + /* Read the Device Control Register and set the bits to Force Speed + * and Duplex. + */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); + + switch (hw->phy_type) { + case e1000_phy_8201: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & RTL_PHY_CTRL_FD) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + if (phy_data & RTL_PHY_CTRL_SPD_100) + ctrl |= E1000_CTRL_SPD_100; + else + ctrl |= E1000_CTRL_SPD_10; + + e1000_config_collision_dist(hw); + break; + default: + /* Set up duplex in the Device Control and Transmit Control + * registers depending on negotiated values. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & M88E1000_PSSR_DPLX) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + e1000_config_collision_dist(hw); + + /* Set up speed in the Device Control register depending on + * negotiated values. + */ + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + ctrl |= E1000_CTRL_SPD_1000; + else if ((phy_data & M88E1000_PSSR_SPEED) == + M88E1000_PSSR_100MBS) + ctrl |= E1000_CTRL_SPD_100; + } + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_force_mac_fc - force flow control settings + * @hw: Struct containing variables accessed by shared code + * + * Forces the MAC's flow control settings. + * Sets the TFCE and RFCE bits in the device control register to reflect + * the adapter settings. 
TFCE and RFCE need to be explicitly set by
+ * software when a Copper PHY is used because autonegotiation is managed
+ * by the PHY rather than the MAC. Software must also configure these
+ * bits when link is forced on a fiber connection.
+ */
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ /* Get the current configuration of the Device Control Register */
+ ctrl = er32(CTRL);
+
+ /* Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disables flow control
+ * according to the "hw->fc" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not receive pause frames).
+ * 3: Both Rx and TX flow control (symmetric) is enabled.
+ * other: No other values should be possible at this point.
+ */
+
+ switch (hw->fc) {
+ case E1000_FC_NONE:
+ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+ break;
+ case E1000_FC_RX_PAUSE:
+ ctrl &= (~E1000_CTRL_TFCE);
+ ctrl |= E1000_CTRL_RFCE;
+ break;
+ case E1000_FC_TX_PAUSE:
+ ctrl &= (~E1000_CTRL_RFCE);
+ ctrl |= E1000_CTRL_TFCE;
+ break;
+ case E1000_FC_FULL:
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+ e_dbg("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Disable TX Flow Control for 82542 (rev 2.0) */
+ if (hw->mac_type == e1000_82542_rev2_0)
+ ctrl &= (~E1000_CTRL_TFCE);
+
+ ew32(CTRL, ctrl);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_config_fc_after_link_up - configure flow control after autoneg
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Configures flow control settings after link is established
+ * Should be called immediately after a valid link has been established.
+ * Forces MAC flow control settings if link was forced. When in MII/GMII mode
+ * and autonegotiation is enabled, the MAC flow control settings will be set
+ * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
+ * and RFCE bits will be automatically set to the negotiated flow control mode.
+ */
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_status_reg;
+ u16 mii_nway_adv_reg;
+ u16 mii_nway_lp_ability_reg;
+ u16 speed;
+ u16 duplex;
+
+ /* Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if (((hw->media_type == e1000_media_type_fiber) &&
+ (hw->autoneg_failed)) ||
+ ((hw->media_type == e1000_media_type_internal_serdes) &&
+ (hw->autoneg_failed)) ||
+ ((hw->media_type == e1000_media_type_copper) &&
+ (!hw->autoneg))) {
+ ret_val = e1000_force_mac_fc(hw);
+ if (ret_val) {
+ e_dbg("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed.
We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement Register + * (Address 4) and the Auto_Negotiation Base Page + * Ability Register (Address 5) to determine how flow + * control was negotiated. + */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement + * Register (Address 4) and two bits in the Auto + * Negotiation Base Page Ability Register (Address 5) + * determine flow control for both the PHY and the link + * partner. The following table, taken out of the IEEE + * 802.3ab/D6.0 dated March 25, 1999, describes these + * PAUSE resolution bits and how flow control is + * determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|------------------ + * 0 | 0 | DC | DC | E1000_FC_NONE + * 0 | 1 | 0 | DC | E1000_FC_NONE + * 0 | 1 | 1 | 0 | E1000_FC_NONE + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * 1 | 0 | 0 | DC | E1000_FC_NONE + * 1 | DC | 1 | DC | E1000_FC_FULL + * 1 | 1 | 0 | 0 | E1000_FC_NONE + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + /* Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | DC | 1 | DC | E1000_FC_FULL + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected Rx + * ONLY of pause frames. In this case, we had + * to advertise FULL flow control because we + * could not advertise Rx ONLY. Hence, we must + * now check to see if we need to turn OFF the + * TRANSMISSION of PAUSE frames. + */ + if (hw->original_fc == E1000_FC_FULL) { + hw->fc = E1000_FC_FULL; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = E1000_FC_TX_PAUSE; + e_dbg + ("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should + * be disabled. However, we want to consider that we + * could be connected to a legacy switch that doesn't + * advertise desired flow control, but can be forced on + * the link partner. So if we advertised no flow + * control, that is what we will resolve to. If we + * advertised some kind of receive capability (Rx Pause + * Only or Full Flow Control) and the link partner + * advertised none, we will configure ourselves to + * enable Rx Flow Control only. We can do this safely + * for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, + * no harm done since we won't be receiving any PAUSE + * frames anyway. If the intent on the link partner was + * to have flow control enabled, then by us enabling Rx + * only, we can at least receive pause frames and + * process them. This is a good idea because in most + * cases, since we are predominantly a server NIC, more + * times than not we will be asked to delay transmission + * of packets than asking our link partner to pause + * transmission of frames. + */ + else if ((hw->original_fc == E1000_FC_NONE || + hw->original_fc == E1000_FC_TX_PAUSE) || + hw->fc_strict_ieee) { + hw->fc = E1000_FC_NONE; + e_dbg("Flow Control = NONE.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc = E1000_FC_NONE; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg + ("Error forcing flow control settings\n"); + return ret_val; + } + } else { + e_dbg + ("Copper PHY and Auto Neg has not completed.\n"); + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + */ +static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. 
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+ if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
+ if (hw->autoneg_failed == 0) {
+ hw->autoneg_failed = 1;
+ goto out;
+ }
+ e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ ew32(CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if (ret_val) {
+ e_dbg("Error configuring flow control\n");
+ goto out;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /* If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ ew32(TXCW, hw->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ hw->serdes_has_link = true;
+ } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
+ /* If we force link for non-auto-negotiation switch, check
+ * link status based on MAC synchronization for internal
+ * serdes media type.
+ */
+ /* SYNCH bit and IV bit are sticky. */
+ udelay(10);
+ rxcw = er32(RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ hw->serdes_has_link = true;
+ e_dbg("SERDES: Link up - forced.\n");
+ }
+ } else {
+ hw->serdes_has_link = false;
+ e_dbg("SERDES: Link down - force failed.\n");
+ }
+ }
+
+ if (E1000_TXCW_ANE & er32(TXCW)) {
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU) {
+ /* SYNCH bit and IV bit are sticky, so reread rxcw. */
+ udelay(10);
+ rxcw = er32(RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ hw->serdes_has_link = true;
+ e_dbg("SERDES: Link up - autoneg "
+ "completed successfully.\n");
+ } else {
+ hw->serdes_has_link = false;
+ e_dbg("SERDES: Link down - invalid "
+ "codewords detected in autoneg.\n");
+ }
+ } else {
+ hw->serdes_has_link = false;
+ e_dbg("SERDES: Link down - no sync.\n");
+ }
+ } else {
+ hw->serdes_has_link = false;
+ e_dbg("SERDES: Link down - autoneg failed\n");
+ }
+ }
+
+ out:
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_link
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Checks to see if the link status of the hardware has changed.
+ * Called by any function that needs to check the link status of the adapter.
+ */
+s32 e1000_check_for_link(struct e1000_hw *hw)
+{
+ u32 status;
+ u32 rctl;
+ u32 icr;
+ s32 ret_val;
+ u16 phy_data;
+
+ er32(CTRL);
+ status = er32(STATUS);
+
+ /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
+ * set when the optics detect a signal. On older adapters, it will be
+ * cleared when there is a signal. This applies to fiber media only.
+ */
+ if ((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) {
+ er32(RXCW);
+
+ if (hw->media_type == e1000_media_type_fiber) {
+ if (status & E1000_STATUS_LU)
+ hw->get_link_status = false;
+ }
+ }
+
+ /* If we have a copper PHY then we only want to go out to the PHY
+ * registers to see if Auto-Neg has completed and/or if our link
+ * status has changed. The get_link_status flag will be set if we
+ * receive a Link Status Change interrupt or we have Rx Sequence
+ * Errors.
+ */ + if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + * Read the register twice since the link bit is sticky. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + hw->get_link_status = false; + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift(hw); + + /* If we are on 82544 or 82543 silicon and speed/duplex + * are forced to 10H or 10F, then we will implement the + * polarity reversal workaround. We disable interrupts + * first, and upon returning, place the devices + * interrupt state to its previous value except for the + * link status change interrupt which will + * happen due to the execution of this workaround. + */ + + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ew32(IMC, 0xffffffff); + ret_val = + e1000_polarity_reversal_workaround(hw); + icr = er32(ICR); + ew32(ICS, (icr & ~E1000_ICS_LSC)); + ew32(IMS, IMS_ENABLE_MASK); + } + + } else { + /* No link detected */ + e1000_config_dsp_after_link_change(hw, false); + return 0; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!hw->autoneg) + return -E1000_ERR_CONFIG; + + /* optimize the dsp settings for the igp phy */ + e1000_config_dsp_after_link_change(hw, true); + + /* We have a M88E1000 PHY and Auto-Neg is enabled. If we + * have Si on board that is 82544 or newer, Auto + * Speed Detection takes care of MAC speed/duplex + * configuration. So we only need to configure Collision + * Distance in the MAC. Otherwise, we need to force + * speed/duplex on the MAC to the current PHY speed/duplex + * settings. + */ + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_ce4100)) + e1000_config_collision_dist(hw); + else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg + ("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control settings + * because we may have had to re-autoneg with a different link + * partner. + */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + + /* At this point we know that we are on copper and we have + * auto-negotiated link. These are conditions for checking the + * link partner capability register. We use the link speed to + * determine if TBI compatibility needs to be turned on or off. + * If the link is not at gigabit speed, then TBI compatibility + * is not needed. If we are at gigabit speed, we turn on TBI + * compatibility. + */ + if (hw->tbi_compatibility_en) { + u16 speed, duplex; + + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + if (speed != SPEED_1000) { + /* If link speed is not set to gigabit speed, we + * do not need to enable TBI compatibility. 
+ */ + if (hw->tbi_compatibility_on) { + /* If we previously were in the mode, + * turn it off. + */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_SBP; + ew32(RCTL, rctl); + hw->tbi_compatibility_on = false; + } + } else { + /* If TBI compatibility is was previously off, + * turn it on. For compatibility with a TBI link + * partner, we will store bad packets. Some + * frames have an additional byte on the end and + * will look like CRC errors to the hardware. + */ + if (!hw->tbi_compatibility_on) { + hw->tbi_compatibility_on = true; + rctl = er32(RCTL); + rctl |= E1000_RCTL_SBP; + ew32(RCTL, rctl); + } + } + } + } + + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) + e1000_check_for_serdes_link_generic(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex + * @hw: Struct containing variables accessed by shared code + * @speed: Speed of the connection + * @duplex: Duplex setting of the connection + * + * Detects the current speed and duplex settings of the hardware. + */ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + u32 status; + s32 ret_val; + u16 phy_data; + + if (hw->mac_type >= e1000_82543) { + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + e_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + e_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + e_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + e_dbg(" Half Duplex\n"); + } + } else { + e_dbg("1000 Mbs, Full Duplex\n"); + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + } + + /* IGP01 PHY may advertise full duplex operation after speed downgrade + * even if it is operating at half duplex. Here we set the duplex + * settings to match the duplex in the link partner's capabilities. + */ + if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & NWAY_ER_LP_NWAY_CAPS)) + *duplex = HALF_DUPLEX; + else { + ret_val = + e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); + if (ret_val) + return ret_val; + if ((*speed == SPEED_100 && + !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) || + (*speed == SPEED_10 && + !(phy_data & NWAY_LPAR_10T_FD_CAPS))) + *duplex = HALF_DUPLEX; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_wait_autoneg + * @hw: Struct containing variables accessed by shared code + * + * Blocks until autoneg completes or times out (~4.5 seconds) + */ +static s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + e_dbg("Waiting for Auto-Neg to complete.\n"); + + /* We will wait for autoneg to complete or 4.5 seconds to expire. */ + for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. 
+ */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + if (phy_data & MII_SR_AUTONEG_COMPLETE) + return E1000_SUCCESS; + + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_raise_mdi_clk - Raises the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Raise the clock input to the Management Data Clock (by setting the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_lower_mdi_clk - Lowers the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Lower the clock input to the Management Data Clock (by clearing the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY + * @hw: Struct containing variables accessed by shared code + * @data: Data to send out to the PHY + * @count: Number of bits to shift out + * + * Bits are shifted out in MSB to LSB order. + */ +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) +{ + u32 ctrl; + u32 mask; + + /* We need to shift "count" number of bits out to the PHY. So, the value + * in the "data" parameter will be shifted out to the PHY one bit at a + * time. In order to do this, "data" must be broken down into bits. + */ + mask = 0x01; + mask <<= (count - 1); + + ctrl = er32(CTRL); + + /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ + ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); + + while (mask) { + /* A "1" is shifted out to the PHY by setting the MDIO bit to + * "1" and then raising and lowering the Management Data Clock. + * A "0" is shifted out to the PHY by setting the MDIO bit to + * "0" and then raising and lowering the clock. + */ + if (data & mask) + ctrl |= E1000_CTRL_MDIO; + else + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + udelay(10); + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + mask = mask >> 1; + } +} + +/** + * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY + * @hw: Struct containing variables accessed by shared code + * + * Bits are shifted in MSB to LSB order. + */ +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) +{ + u32 ctrl; + u16 data = 0; + u8 i; + + /* In order to read a register from the PHY, we need to shift in a total + * of 18 bits from the PHY. The first two bit (turnaround) times are + * used to avoid contention on the MDIO pin when a read operation is + * performed. These two bits are ignored by us and thrown away. Bits are + * "shifted in" by raising the input to the Management Data Clock + * (setting the MDC bit), and then reading the value of the MDIO bit. + */ + ctrl = er32(CTRL); + + /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as + * input. + */ + ctrl &= ~E1000_CTRL_MDIO_DIR; + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + /* Raise and Lower the clock before reading in the data. This accounts + * for the turnaround bits. 
The first clock occurred when we clocked out + * the last bit of the Register Address. + */ + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + for (data = 0, i = 0; i < 16; i++) { + data = data << 1; + e1000_raise_mdi_clk(hw, &ctrl); + ctrl = er32(CTRL); + /* Check to see if we shifted in a "1". */ + if (ctrl & E1000_CTRL_MDIO) + data |= 1; + e1000_lower_mdi_clk(hw, &ctrl); + } + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + return data; +} + +/** + * e1000_read_phy_reg - read a phy register + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to read + * @phy_data: pointer to the value on the PHY register + * + * Reads the value from a PHY register, if the value is on a specific non zero + * page, sets the page first. + */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16) reg_addr); + if (ret_val) + goto out; + } + + ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); +out: + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, and register address in the MDI + * Control register. The MAC will take care of interfacing with + * the PHY to retrieve the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_READ) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + + mdic = readl(E1000_MDIO_STS); + if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) { + e_dbg("MDI Read Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16)mdic; + } else { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16)mdic; + } + } else { + /* We must first send a preamble through the MDIO pin to signal + * the beginning of an MII instruction. This is done by sending + * 32 consecutive "1" bits. + */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the next few fields that are required for a read + * operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine five different times. 
The
+ * format of a MII read instruction consists of a shift out of
+ * 14 bits and is defined as follows:
+ *    <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
+ * followed by a shift in of 18 bits. This first two bits
+ * shifted in are TurnAround bits used to avoid contention on
+ * the MDIO pin when a READ operation is performed. These two
+ * bits are thrown away followed by a shift in of 16 bits which
+ * contains the desired data.
+ */
+ mdic = ((reg_addr) | (phy_addr << 5) |
+ (PHY_OP_READ << 10) | (PHY_SOF << 12));
+
+ e1000_shift_out_mdi_bits(hw, mdic, 14);
+
+ /* Now that we've shifted out the read command to the MII, we
+ * need to "shift in" the 16-bit value (18 total bits) of the
+ * requested PHY register address.
+ */
+ *phy_data = e1000_shift_in_mdi_bits(hw);
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg - write a phy register
+ *
+ * @hw: Struct containing variables accessed by shared code
+ * @reg_addr: address of the PHY register to write
+ * @phy_data: data to write to the PHY
+ *
+ * Writes a value to a PHY register
+ */
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
+{
+ u32 ret_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&e1000_phy_lock, flags);
+
+ if ((hw->phy_type == e1000_phy_igp) &&
+ (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+ ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (u16)reg_addr);
+ if (ret_val) {
+ spin_unlock_irqrestore(&e1000_phy_lock, flags);
+ return ret_val;
+ }
+ }
+
+ ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+ phy_data);
+ spin_unlock_irqrestore(&e1000_phy_lock, flags);
+
+ return ret_val;
+}
+
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 phy_data)
+{
+ u32 i;
+ u32 mdic = 0;
+ const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
+
+ if (reg_addr > MAX_PHY_REG_ADDRESS) {
+ e_dbg("PHY Address %d is out of range\n", reg_addr);
+ return -E1000_ERR_PARAM;
+ }
+
+ if (hw->mac_type > e1000_82543) {
+ /* Set up Op-code, Phy Address, register address, and data
+ * intended for the PHY register in the MDI Control register.
+ * The MAC will take care of interfacing with the PHY to send
+ * the desired data.
+ */
+ if (hw->mac_type == e1000_ce4100) {
+ mdic = (((u32)phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (INTEL_CE_GBE_MDIC_OP_WRITE) |
+ (INTEL_CE_GBE_MDIC_GO));
+
+ writel(mdic, E1000_MDIO_CMD);
+
+ /* Poll the ready bit to see if the MDI read
+ * completed
+ */
+ for (i = 0; i < 640; i++) {
+ udelay(5);
+ mdic = readl(E1000_MDIO_CMD);
+ if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+ break;
+ }
+ if (mdic & INTEL_CE_GBE_MDIC_GO) {
+ e_dbg("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ } else {
+ mdic = (((u32)phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read
+ * completed
+ */
+ for (i = 0; i < 641; i++) {
+ udelay(5);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ }
+ } else {
+ /* We'll need to use the SW defined pins to shift the write
+ * command out to the PHY. We first send a preamble to the PHY
+ * to signal the beginning of the MII instruction. This is done
+ * by sending 32 consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /* Now combine the remaining required fields that will indicate
+ * a write operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine for each field in the
+ * command. The format of a MII write instruction is as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+ */
+ mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
+ (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+ mdic <<= 16;
+ mdic |= (u32)phy_data;
+
+ e1000_shift_out_mdi_bits(hw, mdic, 32);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_hw_reset - reset the phy, hardware style
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Returns the PHY to the power-on reset state
+ */
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+ u32 ctrl, ctrl_ext;
+ u32 led_ctrl;
+
+ e_dbg("Resetting Phy...\n");
+
+ if (hw->mac_type > e1000_82543) {
+ /* Read the device control register and assert the
+ * E1000_CTRL_PHY_RST bit. Then, take it out of reset.
+ * For e1000 hardware, we delay for 10ms between the assert
+ * and de-assert.
+ */
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
+ E1000_WRITE_FLUSH();
+
+ msleep(10);
+
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ } else {
+ /* Read the Extended Device Control Register, assert the
+ * PHY_RESET_DIR bit to put the PHY into reset. Then, take it
+ * out of reset.
+ */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ msleep(10);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ }
+ udelay(150);
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+ }
+
+ /* Wait for FW to finish PHY configuration. */
+ return e1000_get_phy_cfg_done(hw);
+}
+
+/**
+ * e1000_phy_reset - reset the phy to commit settings
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Resets the PHY
+ * Sets bit 15 of the MII Control register
+ */
+s32 e1000_phy_reset(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ switch (hw->phy_type) {
+ case e1000_phy_igp:
+ ret_val = e1000_phy_hw_reset(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= MII_CR_RESET;
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+ break;
+ }
+
+ if (hw->phy_type == e1000_phy_igp)
+ e1000_phy_init_script(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_detect_gig_phy - check the phy type
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Probes the expected PHY address for known PHY IDs
+ */
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
+{
+ s32 phy_init_status, ret_val;
+ u16 phy_id_high, phy_id_low;
+ bool match = false;
+
+ if (hw->phy_id != 0)
+ return E1000_SUCCESS;
+
+ /* Read the PHY ID Registers to identify which PHY is onboard.
*/ + ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); + if (ret_val) + return ret_val; + + hw->phy_id = (u32)(phy_id_high << 16); + udelay(20); + ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); + if (ret_val) + return ret_val; + + hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK); + hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->phy_id == M88E1000_E_PHY_ID) + match = true; + break; + case e1000_82544: + if (hw->phy_id == M88E1000_I_PHY_ID) + match = true; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + if (hw->phy_id == M88E1011_I_PHY_ID) + match = true; + break; + case e1000_ce4100: + if ((hw->phy_id == RTL8211B_PHY_ID) || + (hw->phy_id == RTL8201N_PHY_ID) || + (hw->phy_id == M88E1118_E_PHY_ID)) + match = true; + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (hw->phy_id == IGP01E1000_I_PHY_ID) + match = true; + break; + default: + e_dbg("Invalid MAC type %d\n", hw->mac_type); + return -E1000_ERR_CONFIG; + } + phy_init_status = e1000_set_phy_type(hw); + + if ((match) && (phy_init_status == E1000_SUCCESS)) { + e_dbg("PHY ID 0x%X detected\n", hw->phy_id); + return E1000_SUCCESS; + } + e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id); + return -E1000_ERR_PHY; +} + +/** + * e1000_phy_reset_dsp - reset DSP + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY's DSP + */ +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + do { + ret_val = e1000_write_phy_reg(hw, 29, 0x001d); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x0000); + if (ret_val) + break; + ret_val = E1000_SUCCESS; + } while (0); + + return ret_val; +} + +/** + * e1000_phy_igp_get_info - get igp specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for igp PHY only. + */ +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data, min_length, max_length, average; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + /* IGP01E1000 does not need to support it. */ + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; + + /* IGP01E1000 always correct polarity reversal */ + phy_info->polarity_correction = e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >> + IGP01E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Local/Remote Receiver Information are only valid @ 1000 + * Mbps + */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + /* Get cable length */ + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + /* Translate to old method */ + average = (max_length + min_length) / 2; + + if (average <= e1000_igp_cable_length_50) + phy_info->cable_length = e1000_cable_length_50; + else if (average <= e1000_igp_cable_length_80) + phy_info->cable_length = e1000_cable_length_50_80; + else if (average <= e1000_igp_cable_length_110) + phy_info->cable_length = e1000_cable_length_80_110; + else if (average <= e1000_igp_cable_length_140) + phy_info->cable_length = e1000_cable_length_110_140; + else + phy_info->cable_length = e1000_cable_length_140; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_m88_get_info - get m88 specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for m88 PHY only. + */ +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_info->extended_10bt_distance = + ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> + M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ? + e1000_10bt_ext_dist_enable_lower : + e1000_10bt_ext_dist_enable_normal; + + phy_info->polarity_correction = + ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> + M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ? + e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >> + M88E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + /* Cable Length Estimation and Local/Remote Receiver Information + * are only valid at 1000 Mbps. + */ + phy_info->cable_length = + (e1000_cable_length) ((phy_data & + M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_get_info - request phy info + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers + */ +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + + phy_info->cable_length = e1000_cable_length_undefined; + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; + phy_info->cable_polarity = e1000_rev_polarity_undefined; + phy_info->downshift = e1000_downshift_undefined; + phy_info->polarity_correction = e1000_polarity_reversal_undefined; + phy_info->mdix_mode = e1000_auto_x_mode_undefined; + phy_info->local_rx = e1000_1000t_rx_status_undefined; + phy_info->remote_rx = e1000_1000t_rx_status_undefined; + + if (hw->media_type != e1000_media_type_copper) { + e_dbg("PHY info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { + e_dbg("PHY info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + if (hw->phy_type == e1000_phy_igp) + return e1000_phy_igp_get_info(hw, phy_info); + else if ((hw->phy_type == e1000_phy_8211) || + (hw->phy_type == e1000_phy_8201)) + return E1000_SUCCESS; + else + return e1000_phy_m88_get_info(hw, phy_info); +} + +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { + e_dbg("Invalid MDI setting detected\n"); + hw->mdix = 1; + return -E1000_ERR_CONFIG; + } + return E1000_SUCCESS; +} + +/** + * e1000_init_eeprom_params - initialize sw eeprom vars + * @hw: Struct containing variables accessed by shared code + * + * Sets up eeprom variables in the hw struct. Must be called after mac_type + * is configured. 
+ */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd = er32(EECD); + s32 ret_val = E1000_SUCCESS; + u16 eeprom_size; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + eeprom->type = e1000_eeprom_microwire; + eeprom->word_size = 64; + eeprom->opcode_bits = 3; + eeprom->address_bits = 6; + eeprom->delay_usec = 50; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_SIZE) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (eecd & E1000_EECD_TYPE) { + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + } else { + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + } + break; + default: + break; + } + + if (eeprom->type == e1000_eeprom_spi) { + /* eeprom_size will be an enum [0..8] that maps to eeprom sizes + * 128B to 32KB (incremented by powers of 2). + */ + /* Set to default value for initial eeprom read. */ + eeprom->word_size = 64; + ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); + if (ret_val) + return ret_val; + eeprom_size = + (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; + /* 256B eeprom size was not supported in earlier hardware, so we + * bump eeprom_size up one to ensure that "1" (which maps to + * 256B) is never the result used in the shifting logic below. + */ + if (eeprom_size) + eeprom_size++; + + eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT); + } + return ret_val; +} + +/** + * e1000_raise_ee_clk - Raises the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Raise the clock input to the EEPROM (by setting the SK bit), and then + * wait microseconds. + */ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_lower_ee_clk - Lowers the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Lower the clock input to the EEPROM (by clearing the SK bit), and + * then wait 50 microseconds. + */ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM. 
+ * @hw: Struct containing variables accessed by shared code + * @data: data to send to the EEPROM + * @count: number of bits to shift out + */ +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u32 mask; + + /* We need to shift "count" bits out to the EEPROM. So, value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + */ + mask = 0x01 << (count - 1); + eecd = er32(EECD); + if (eeprom->type == e1000_eeprom_microwire) + eecd &= ~E1000_EECD_DO; + else if (eeprom->type == e1000_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + /* A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK bit + * controls the clock input to the EEPROM). A "0" is shifted + * out to the EEPROM by setting "DI" to "0" and then raising and + * then lowering the clock. + */ + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(eeprom->delay_usec); + + e1000_raise_ee_clk(hw, &eecd); + e1000_lower_ee_clk(hw, &eecd); + + mask = mask >> 1; + + } while (mask); + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM + * @hw: Struct containing variables accessed by shared code + * @count: number of bits to shift in + */ +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + /* In order to read a register from the EEPROM, we need to shift 'count' + * bits in from the EEPROM. Bits are "shifted in" by raising the clock + * input to the EEPROM (setting the SK bit), and then reading the value + * of the "DO" bit. During this "shifting in" process the "DI" bit + * should always be clear. + */ + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data = data << 1; + e1000_raise_ee_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DI); + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_ee_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_acquire_eeprom - Prepares EEPROM for access + * @hw: Struct containing variables accessed by shared code + * + * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This + * function should be called before issuing a command to the EEPROM. 
+ */ +static s32 e1000_acquire_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd, i = 0; + + eecd = er32(EECD); + + /* Request EEPROM Access */ + if (hw->mac_type > e1000_82544) { + eecd |= E1000_EECD_REQ; + ew32(EECD, eecd); + eecd = er32(EECD); + while ((!(eecd & E1000_EECD_GNT)) && + (i < E1000_EEPROM_GRANT_ATTEMPTS)) { + i++; + udelay(5); + eecd = er32(EECD); + } + if (!(eecd & E1000_EECD_GNT)) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire EEPROM grant\n"); + return -E1000_ERR_EEPROM; + } + } + + /* Setup EEPROM for Read/Write */ + + if (eeprom->type == e1000_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); + ew32(EECD, eecd); + + /* Set CS */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(1); + } + + return E1000_SUCCESS; +} + +/** + * e1000_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_standby_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + + eecd = er32(EECD); + + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock high */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock low */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } +} + +/** + * e1000_release_eeprom - drop chip select + * @hw: Struct containing variables accessed by shared code + * + * Terminates a command by inverting the EEPROM's chip select pin + */ +static void e1000_release_eeprom(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + + if (hw->eeprom.type == e1000_eeprom_spi) { + eecd |= E1000_EECD_CS; /* Pull CS high */ + eecd &= ~E1000_EECD_SK; /* Lower SCK */ + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(hw->eeprom.delay_usec); + } else if (hw->eeprom.type == e1000_eeprom_microwire) { + /* cleanup eeprom */ + + /* CS on Microwire is active-high */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); + + ew32(EECD, eecd); + + /* Rising edge of clock */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + + /* Falling edge of clock */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + } + + /* Stop requesting EEPROM access */ + if (hw->mac_type > e1000_82544) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + } +} + +/** + * e1000_spi_eeprom_ready - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u8 spi_stat_reg; + + /* Read "Status Register" repeatedly until the LSB is cleared. 
The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + retry_count = 0; + do { + e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, + hw->eeprom.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8); + if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) + break; + + udelay(5); + retry_count += 5; + + e1000_standby_eeprom(hw); + } while (retry_count < EEPROM_MAX_RETRY_SPI); + + /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and + * only 0-5mSec on 5V devices) + */ + if (retry_count >= EEPROM_MAX_RETRY_SPI) { + e_dbg("SPI EEPROM Status error\n"); + return -E1000_ERR_EEPROM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_eeprom - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * @words: number of words to read + */ +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + + mutex_lock(&e1000_eeprom_lock); + ret = e1000_do_read_eeprom(hw, offset, words, data); + mutex_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 i = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. + */ + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { + e_dbg("\"words\" parameter out of bounds. Words = %d," + "size = %d\n", offset, eeprom->word_size); + return -E1000_ERR_EEPROM; + } + + /* EEPROM's that don't use EERD to read require us to bit-bang the SPI + * directly. In this case, we need to acquire the EEPROM so that + * FW or other port software does not interrupt. + */ + /* Prepare the EEPROM for bit-bang reading */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have + * acquired the EEPROM at this point, so any returns should release it + */ + if (eeprom->type == e1000_eeprom_spi) { + u16 word_in; + u8 read_opcode = EEPROM_READ_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) { + e1000_release_eeprom(hw); + return -E1000_ERR_EEPROM; + } + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + read_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16)(offset * 2), + eeprom->address_bits); + + /* Read the data. The address of the eeprom internally + * increments with each byte (spi) being read, saving on the + * overhead of eeprom setup and tear-down. The address counter + * will roll over if reading beyond the size of the eeprom, thus + * allowing the entire memory to be read starting from any + * offset. 
+ */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_ee_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + } else if (eeprom->type == e1000_eeprom_microwire) { + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, + EEPROM_READ_OPCODE_MICROWIRE, + eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16)(offset + i), + eeprom->address_bits); + + /* Read the data. For microwire, each word requires the + * overhead of eeprom setup and tear-down. + */ + data[i] = e1000_shift_in_ee_bits(hw, 16); + e1000_standby_eeprom(hw); + cond_resched(); + } + } + + /* End this read operation */ + e1000_release_eeprom(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum + * @hw: Struct containing variables accessed by shared code + * + * Reads the first 64 16 bit words of the EEPROM and sums the values read. + * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is + * valid. + */ +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + +#ifdef CONFIG_PARISC + /* This is a signature and not a checksum on HP c8000 */ + if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) + return E1000_SUCCESS; + +#endif + if (checksum == (u16)EEPROM_SUM) + return E1000_SUCCESS; + else { + e_dbg("EEPROM Checksum Invalid\n"); + return -E1000_ERR_EEPROM; + } +} + +/** + * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum + * @hw: Struct containing variables accessed by shared code + * + * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. + * Writes the difference to word offset 63 of the EEPROM. + */ +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + checksum = (u16)EEPROM_SUM - checksum; + if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { + e_dbg("EEPROM Write Error\n"); + return -E1000_ERR_EEPROM; + } + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom - write words to the different EEPROM types. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word to be written to the EEPROM + * + * If e1000_update_eeprom_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + */ +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + + mutex_lock(&e1000_eeprom_lock); + ret = e1000_do_write_eeprom(hw, offset, words, data); + mutex_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + s32 status = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. 
+ */ + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { + e_dbg("\"words\" parameter out of bounds\n"); + return -E1000_ERR_EEPROM; + } + + /* Prepare the EEPROM for writing */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + if (eeprom->type == e1000_eeprom_microwire) { + status = e1000_write_eeprom_microwire(hw, offset, words, data); + } else { + status = e1000_write_eeprom_spi(hw, offset, words, data); + msleep(10); + } + + /* Done with writing */ + e1000_release_eeprom(hw); + + return status; +} + +/** + * e1000_write_eeprom_spi - Writes a 16 bit word to a given offset in an SPI EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u16 widx = 0; + + while (widx < words) { + u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) + return -E1000_ERR_EEPROM; + + e1000_standby_eeprom(hw); + cond_resched(); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, + eeprom->opcode_bits); + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + write_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16)((offset + widx) * 2), + eeprom->address_bits); + + /* Send the data */ + + /* Loop to allow for up to whole page write (32 bytes) of + * eeprom + */ + while (widx < words) { + u16 word_out = data[widx]; + + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_ee_bits(hw, word_out, 16); + widx++; + + /* Some larger eeprom sizes are capable of a 32-byte + * PAGE WRITE operation, while the smaller eeproms are + * capable of an 8-byte PAGE WRITE operation. Break the + * inner loop to pass new address + */ + if ((((offset + widx) * 2) % eeprom->page_size) == 0) { + e1000_standby_eeprom(hw); + break; + } + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom_microwire - Writes a 16 bit word to a given offset in a Microwire EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u16 words_written = 0; + u16 i = 0; + + /* Send the write enable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 11). It's less work to include + * the 11 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This puts the + * EEPROM into write/erase mode. 
+ */ + e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, + (u16)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); + + /* Prepare the EEPROM */ + e1000_standby_eeprom(hw); + + while (words_written < words) { + /* Send the Write command (3-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, + eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16)(offset + words_written), + eeprom->address_bits); + + /* Send the data */ + e1000_shift_out_ee_bits(hw, data[words_written], 16); + + /* Toggle the CS line. This in effect tells the EEPROM to + * execute the previous command. + */ + e1000_standby_eeprom(hw); + + /* Read DO repeatedly until it is high (equal to '1'). The + * EEPROM will signal that the command has been completed by + * raising the DO signal. If DO does not go high in 10 + * milliseconds, then error out. + */ + for (i = 0; i < 200; i++) { + eecd = er32(EECD); + if (eecd & E1000_EECD_DO) + break; + udelay(50); + } + if (i == 200) { + e_dbg("EEPROM Write did not complete\n"); + return -E1000_ERR_EEPROM; + } + + /* Recover from write */ + e1000_standby_eeprom(hw); + cond_resched(); + + words_written++; + } + + /* Send the write disable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 10). It's less work to include + * the 10 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This takes the + * EEPROM out of write/erase mode. + */ + e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, + (u16)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - read the adapters MAC from eeprom + * @hw: Struct containing variables accessed by shared code + * + * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the + * second function of dual function devices + */ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + u16 offset; + u16 eeprom_data, i; + + for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { + offset = i >> 1; + if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF); + hw->perm_mac_addr[i + 1] = (u8)(eeprom_data >> 8); + } + + switch (hw->mac_type) { + default: + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) + hw->perm_mac_addr[5] ^= 0x01; + break; + } + + for (i = 0; i < NODE_ADDRESS_SIZE; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return E1000_SUCCESS; +} + +/** + * e1000_init_rx_addrs - Initializes receive address filters. + * @hw: Struct containing variables accessed by shared code + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + */ +static void e1000_init_rx_addrs(struct e1000_hw *hw) +{ + u32 i; + u32 rar_num; + + /* Setup the receive address. */ + e_dbg("Programming MAC Address into RAR[0]\n"); + + e1000_rar_set(hw, hw->mac_addr, 0); + + rar_num = E1000_RAR_ENTRIES; + + /* Zero out the following 14 receive addresses. 
RAR[15] is for + * manageability + */ + e_dbg("Clearing RAR[1-14]\n"); + for (i = 1; i < rar_num; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table + * @hw: Struct containing variables accessed by shared code + * @mc_addr: the multicast address to hash + */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value = 0; + + /* The portion of the address that is used for the hash table is + * determined by the mc_filter_type setting. + */ + switch (hw->mc_filter_type) { + /* [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + */ + case 0: + /* [47:36] i.e. 0x563 for above example address */ + hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: + /* [46:35] i.e. 0xAC6 for above example address */ + hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: + /* [45:34] i.e. 0x5D8 for above example address */ + hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: + /* [43:32] i.e. 0x634 for above example address */ + hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + } + + hash_value &= 0xFFF; + return hash_value; +} + +/** + * e1000_rar_set - Puts an ethernet address into a receive address register. + * @hw: Struct containing variables accessed by shared code + * @addr: Address to put into receive address register + * @index: Receive address register to write + */ +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx + * unit hang. + * + * Description: + * If there are any Rx frames queued up or otherwise present in the HW + * before RSS is enabled, and then we enable RSS, the HW Rx unit will + * hang. To work around this issue, we have to disable receives and + * flush out all Rx frames before we enable RSS. To do so, we modify we + * redirect all Rx traffic to manageability and then reset the HW. + * This flushes away Rx frames, and (since the redirections to + * manageability persists across resets) keeps new ones from coming in + * while we work. Then, we clear the Address Valid AV bit for all MAC + * addresses and undo the re-direction to manageability. + * Now, frames are coming in again, but the MAC won't accept them, so + * far so good. We now proceed to initialize RSS (if necessary) and + * configure the Rx unit. Last, we re-enable the AV bits and continue + * on our merry way. + */ + switch (hw->mac_type) { + default: + /* Indicate to hardware the Address is Valid. */ + rar_high |= E1000_RAH_AV; + break; + } + + E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table. 
+ * @hw: Struct containing variables accessed by shared code + * @offset: Offset in VLAN filer table to write + * @value: Value to write into VLAN filter table + */ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + u32 temp; + + if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { + temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); + E1000_WRITE_FLUSH(); + } else { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_clear_vfta - Clears the VLAN filer table + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0); + E1000_WRITE_FLUSH(); + } +} + +static s32 e1000_id_led_init(struct e1000_hw *hw) +{ + u32 ledctl; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 eeprom_data, i, temp; + const u16 led_mask = 0x0F; + + if (hw->mac_type < e1000_82540) { + /* Nothing to do */ + return E1000_SUCCESS; + } + + ledctl = er32(LEDCTL); + hw->ledctl_default = ledctl; + hw->ledctl_mode1 = hw->ledctl_default; + hw->ledctl_mode2 = hw->ledctl_default; + + if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + + if ((eeprom_data == ID_LED_RESERVED_0000) || + (eeprom_data == ID_LED_RESERVED_FFFF)) { + eeprom_data = ID_LED_DEFAULT; + } + + for (i = 0; i < 4; i++) { + temp = (eeprom_data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_setup_led + * @hw: Struct containing variables accessed by shared code + * + * Prepares SW controlable LED for use and saves the current state of the LED. 
+ */ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + u32 ledctl; + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No setup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn off PHY Smart Power Down (if enabled) */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, + &hw->phy_spd_default); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + (u16)(hw->phy_spd_default & + ~IGP01E1000_GMII_SPD)); + if (ret_val) + return ret_val; + fallthrough; + default: + if (hw->media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + /* Save current LEDCTL settings */ + hw->ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->media_type == e1000_media_type_copper) + ew32(LEDCTL, hw->ledctl_mode1); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores the saved state of the SW controlable LED. + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No cleanup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn on PHY Smart Power Down (if previously enabled) */ + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + hw->phy_spd_default); + if (ret_val) + return ret_val; + fallthrough; + default: + /* Restore LEDCTL settings */ + ew32(LEDCTL, hw->ledctl_default); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turns on the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_on(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode2); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turns off the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_off(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == 
e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode1); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_hw_cntrs - Clears all hardware statistics counters. + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_hw_cntrs(struct e1000_hw *hw) +{ + er32(CRCERRS); + er32(SYMERRS); + er32(MPC); + er32(SCC); + er32(ECOL); + er32(MCC); + er32(LATECOL); + er32(COLC); + er32(DC); + er32(SEC); + er32(RLEC); + er32(XONRXC); + er32(XONTXC); + er32(XOFFRXC); + er32(XOFFTXC); + er32(FCRUC); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + + er32(GPRC); + er32(BPRC); + er32(MPRC); + er32(GPTC); + er32(GORCL); + er32(GORCH); + er32(GOTCL); + er32(GOTCH); + er32(RNBC); + er32(RUC); + er32(RFC); + er32(ROC); + er32(RJC); + er32(TORL); + er32(TORH); + er32(TOTL); + er32(TOTH); + er32(TPR); + er32(TPT); + + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(MPTC); + er32(BPTC); + + if (hw->mac_type < e1000_82543) + return; + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + if (hw->mac_type <= e1000_82544) + return; + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); +} + +/** + * e1000_reset_adaptive - Resets Adaptive IFS to its default state. + * @hw: Struct containing variables accessed by shared code + * + * Call this after e1000_init_hw. You may override the IFS defaults by setting + * hw->ifs_params_forced to true. However, you must initialize hw-> + * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio + * before calling this function. + */ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if (!hw->ifs_params_forced) { + hw->current_ifs_val = 0; + hw->ifs_min_val = IFS_MIN; + hw->ifs_max_val = IFS_MAX; + hw->ifs_step_size = IFS_STEP; + hw->ifs_ratio = IFS_RATIO; + } + hw->in_ifs_mode = false; + ew32(AIT, 0); + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_update_adaptive - update adaptive IFS + * @hw: Struct containing variables accessed by shared code + * + * Called during the callback/watchdog routine to update IFS value based on + * the ratio of transmits to collisions. 
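+ *
+ * In outline, the rule implemented below is:
+ *
+ *	if (collision_delta * ifs_ratio > tx_packet_delta &&
+ *	    tx_packet_delta > MIN_NUM_XMITS)
+ *		step AIT up from ifs_min_val by ifs_step_size (capped at
+ *		ifs_max_val);
+ *	else if (in_ifs_mode && tx_packet_delta <= MIN_NUM_XMITS)
+ *		write 0 to AIT and leave adaptive IFS mode.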
+ */ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { + if (hw->tx_packet_delta > MIN_NUM_XMITS) { + hw->in_ifs_mode = true; + if (hw->current_ifs_val < hw->ifs_max_val) { + if (hw->current_ifs_val == 0) + hw->current_ifs_val = + hw->ifs_min_val; + else + hw->current_ifs_val += + hw->ifs_step_size; + ew32(AIT, hw->current_ifs_val); + } + } + } else { + if (hw->in_ifs_mode && + (hw->tx_packet_delta <= MIN_NUM_XMITS)) { + hw->current_ifs_val = 0; + hw->in_ifs_mode = false; + ew32(AIT, 0); + } + } + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_get_bus_info + * @hw: Struct containing variables accessed by shared code + * + * Gets the current PCI bus type, speed, and width of the hardware + */ +void e1000_get_bus_info(struct e1000_hw *hw) +{ + u32 status; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->bus_type = e1000_bus_type_pci; + hw->bus_speed = e1000_bus_speed_unknown; + hw->bus_width = e1000_bus_width_unknown; + break; + default: + status = er32(STATUS); + hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? + e1000_bus_type_pcix : e1000_bus_type_pci; + + if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { + hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? + e1000_bus_speed_66 : e1000_bus_speed_120; + } else if (hw->bus_type == e1000_bus_type_pci) { + hw->bus_speed = (status & E1000_STATUS_PCI66) ? + e1000_bus_speed_66 : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + hw->bus_speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + hw->bus_speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + hw->bus_speed = e1000_bus_speed_133; + break; + default: + hw->bus_speed = e1000_bus_speed_reserved; + break; + } + } + hw->bus_width = (status & E1000_STATUS_BUS64) ? + e1000_bus_width_64 : e1000_bus_width_32; + break; + } +} + +/** + * e1000_write_reg_io + * @hw: Struct containing variables accessed by shared code + * @offset: offset to write to + * @value: value to write + * + * Writes a value to one of the devices registers using port I/O (as opposed to + * memory mapped I/O). Only 82544 and newer devices support port I/O. + */ +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value) +{ + unsigned long io_addr = hw->io_base; + unsigned long io_data = hw->io_base + 4; + + e1000_io_write(hw, io_addr, offset); + e1000_io_write(hw, io_data, value); +} + +/** + * e1000_get_cable_length - Estimates the cable length. + * @hw: Struct containing variables accessed by shared code + * @min_length: The estimated minimum length + * @max_length: The estimated maximum length + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * This function always returns a ranged length (minimum & maximum). + * So for M88 phy's, this function interprets the one value returned from the + * register to the minimum and maximum range. + * For IGP phy's, the function calculates the range by the AGC registers. 
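+ *
+ * For example, an M88 PHY reporting e1000_cable_length_80_110 yields
+ * *min_length = 80 and *max_length = 110. For IGP PHYs, the four AGC channel
+ * readings are averaged (the shortest channel is dropped for cables under
+ * 50 m), looked up in e1000_igp_cable_length_table, and the range is that
+ * table value +/- IGP01E1000_AGC_RANGE.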
+ */ +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length) +{ + s32 ret_val; + u16 agc_value = 0; + u16 i, phy_data; + u16 cable_length; + + *min_length = *max_length = 0; + + /* Use old method for Phy older than IGP */ + if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + /* Convert the enum value to ranged values */ + switch (cable_length) { + case e1000_cable_length_50: + *min_length = 0; + *max_length = e1000_igp_cable_length_50; + break; + case e1000_cable_length_50_80: + *min_length = e1000_igp_cable_length_50; + *max_length = e1000_igp_cable_length_80; + break; + case e1000_cable_length_80_110: + *min_length = e1000_igp_cable_length_80; + *max_length = e1000_igp_cable_length_110; + break; + case e1000_cable_length_110_140: + *min_length = e1000_igp_cable_length_110; + *max_length = e1000_igp_cable_length_140; + break; + case e1000_cable_length_140: + *min_length = e1000_igp_cable_length_140; + *max_length = e1000_igp_cable_length_170; + break; + default: + return -E1000_ERR_PHY; + } + } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ + u16 cur_agc_value; + u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_A, + IGP01E1000_PHY_AGC_B, + IGP01E1000_PHY_AGC_C, + IGP01E1000_PHY_AGC_D + }; + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; + + /* Value bound check. */ + if ((cur_agc_value >= + IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || + (cur_agc_value == 0)) + return -E1000_ERR_PHY; + + agc_value += cur_agc_value; + + /* Update minimal AGC value. */ + if (min_agc_value > cur_agc_value) + min_agc_value = cur_agc_value; + } + + /* Remove the minimal AGC result for length < 50m */ + if (agc_value < + IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { + agc_value -= min_agc_value; + + /* Get the average length of the remaining 3 channels */ + agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); + } else { + /* Get the average length of all the 4 channels. */ + agc_value /= IGP01E1000_PHY_CHANNEL_NUM; + } + + /* Set the range of the calculated length. */ + *min_length = ((e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) > 0) ? + (e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) : 0; + *max_length = e1000_igp_cable_length_table[agc_value] + + IGP01E1000_AGC_RANGE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_polarity - Check the cable polarity + * @hw: Struct containing variables accessed by shared code + * @polarity: output parameter : 0 - Polarity is not reversed + * 1 - Polarity is reversed. + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function simply reads the polarity bit in the + * Phy Status register. For IGP phy's, this bit is valid only if link speed is + * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will + * return 0. If the link speed is 1000 Mbps the polarity status is in the + * IGP01E1000_PHY_PCS_INIT_REG. 
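+ *
+ * In short: M88 PHYs report polarity directly in M88E1000_PHY_SPEC_STATUS,
+ * while IGP PHYs report it in IGP01E1000_PHY_PCS_INIT_REG at 1000 Mbps, in
+ * the port status register at 10 Mbps, and always as "normal" at 100 Mbps.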
+ */ +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_m88) { + /* return the Polarity bit in the Status register. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >> + M88E1000_PSSR_REV_POLARITY_SHIFT) ? + e1000_rev_polarity_reversed : e1000_rev_polarity_normal; + + } else if (hw->phy_type == e1000_phy_igp) { + /* Read the Status register to check the speed */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + /* If speed is 1000 Mbps, must read the + * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status + */ + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Read the GIG initialization PCS register (0x00B4) */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, + &phy_data); + if (ret_val) + return ret_val; + + /* Check the polarity bits */ + *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } else { + /* For 10 Mbps, read the polarity bit in the status + * register. (for 100 Mbps this bit is always 0) + */ + *polarity = + (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_downshift - Check if Downshift occurred + * @hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function reads the Downshift bit in the Phy + * Specific Status register. For IGP phy's, it reads the Downgrade bit in the + * Link Health register. In IGP this bit is latched high, so the driver must + * read it immediately after link is established. + */ +static s32 e1000_check_downshift(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = + (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 
1 : 0; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> + M88E1000_PSSR_DOWNSHIFT_SHIFT; + } + + return E1000_SUCCESS; +} + +static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D +}; + +static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw) +{ + u16 min_length, max_length; + u16 phy_data, i; + s32 ret_val; + + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + if (hw->dsp_config_state != e1000_dsp_config_enabled) + return 0; + + if (min_length >= e1000_igp_cable_length_50) { + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + hw->dsp_config_state = e1000_dsp_config_activated; + } else { + u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; + u32 idle_errs = 0; + + /* clear previous idle error counts */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + udelay(1000); + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + hw->ffe_config_state = e1000_ffe_config_active; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if (ret_val) + return ret_val; + break; + } + + if (idle_errs) + ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } + + return 0; +} + +/** + * e1000_config_dsp_after_link_change + * @hw: Struct containing variables accessed by shared code + * @link_up: was link up at the time this was called + * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + * + * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a + * gigabit link is achieved to improve link quality. + */ + +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) +{ + s32 ret_val; + u16 phy_data, phy_saved_data, speed, duplex, i; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + if (link_up) { + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (speed == SPEED_1000) { + ret_val = e1000_1000Mb_check_cable_length(hw); + if (ret_val) + return ret_val; + } + } else { + if (hw->dsp_config_state == e1000_dsp_config_activated) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. 
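+			 *
+			 * In outline, the sequence that follows is: save
+			 * register 0x2F5B, write 0x0003 to it to disable the
+			 * PHY transmitter, force gigabit operation, rewrite
+			 * the DSP (EDAC) parameters, restart auto-negotiation
+			 * and finally restore 0x2F5B to re-enable the
+			 * transmitter.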
+ */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; + + ret_val = + e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->dsp_config_state = e1000_dsp_config_enabled; + } + + if (hw->ffe_config_state == e1000_ffe_config_active) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. + */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_DEFAULT); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->ffe_config_state = e1000_ffe_config_enabled; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_set_phy_mode - Set PHY to class A mode + * @hw: Struct containing variables accessed by shared code + * + * Assumes the following operations will follow to enable the new class mode. + * 1. Do a PHY soft reset + * 2. Restart auto-negotiation or force link. + */ +static s32 e1000_set_phy_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 eeprom_data; + + if ((hw->mac_type == e1000_82545_rev_3) && + (hw->media_type == e1000_media_type_copper)) { + ret_val = + e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, + &eeprom_data); + if (ret_val) + return ret_val; + + if ((eeprom_data != EEPROM_RESERVED_WORD) && + (eeprom_data & EEPROM_PHY_CLASS_A)) { + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, + 0x000B); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, + 0x8104); + if (ret_val) + return ret_val; + + hw->phy_reset_disable = false; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - set d3 link power state + * @hw: Struct containing variables accessed by shared code + * @active: true to enable lplu false to disable lplu. + * + * This function sets the lplu state according to the active flag. When + * activating lplu this function also disables smart speed and vise versa. + * lplu will not be activated unless the device autonegotiation advertisement + * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes. 
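+ *
+ * Concretely, on 82541_rev_2/82547_rev_2 the IGP01E1000_GMII_FLEX_SPD bit in
+ * IGP01E1000_GMII_FIFO is set (and SmartSpeed disabled) when @active is true,
+ * and cleared (with SmartSpeed restored according to hw->smart_speed) when
+ * @active is false.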
+ * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + */ +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + /* During driver activity LPLU should not be used or it will attain link + * from the lowest speeds starting from 10Mbps. The capability is used + * for Dx transitions and states + */ + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); + if (ret_val) + return ret_val; + } + + if (!active) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (hw->smart_speed == e1000_smart_speed_on) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } else if (hw->smart_speed == e1000_smart_speed_off) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* When LPLU is enabled we should disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + return E1000_SUCCESS; +} + +/** + * e1000_set_vco_speed + * @hw: Struct containing variables accessed by shared code + * + * Change VCO speed register to improve Bit Error Rate performance of SERDES. 
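+ *
+ * The sequence below (82545_rev_3 and 82546_rev_3 only) is: remember the
+ * current PHY page, clear bit 8 of PHY register 30 on page 5, set bit 11 of
+ * PHY register 30 on page 4, then restore the original page.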
+ */ +static s32 e1000_set_vco_speed(struct e1000_hw *hw) +{ + s32 ret_val; + u16 default_page = 0; + u16 phy_data; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + /* Set PHY register 30, page 5, bit 8 to 0 */ + + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Set PHY register 30, page 4, bit 11 to 1 */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PHY_VCO_REG_BIT11; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + +/** + * e1000_enable_mng_pass_thru - check for bmc pass through + * @hw: Struct containing variables accessed by shared code + * + * Verifies the hardware needs to allow ARPs to be processed by the host + * returns: - true/false + */ +u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + + if (hw->asf_firmware_present) { + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN) || + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + return false; + if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) + return true; + } + return false; +} + +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 i; + + /* Polarity reversal workaround for forced 10F/10H links. */ + + /* Disable the transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the NO link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be clear. 
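+		 * The register is read twice because the Link Status bit
+		 * is latched; the first read returns the latched value and
+		 * the second read returns the current link state.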
+ */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) + break; + msleep(100); + } + + /* Recommended delay time after link has been lost */ + msleep(1000); + + /* Now we will re-enable th transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be set. + */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_get_auto_rd_done + * @hw: Struct containing variables accessed by shared code + * + * Check for EEPROM Auto Read bit done. + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) +{ + msleep(5); + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_cfg_done + * @hw: Struct containing variables accessed by shared code + * + * Checks if the PHY configuration is done + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) +{ + msleep(10); + return E1000_SUCCESS; +} diff --git a/devices/e1000/e1000_hw-5.14-orig.h b/devices/e1000/e1000_hw-5.14-orig.h new file mode 100644 index 00000000..b57a0495 --- /dev/null +++ b/devices/e1000/e1000_hw-5.14-orig.h @@ -0,0 +1,3085 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ + +/* e1000_hw.h + * Structures, enums, and macros for the MAC + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep.h" + + +/* Forward declarations of structures used by the shared code */ +struct e1000_hw; +struct e1000_hw_stats; + +/* Enumerated types specific to the e1000 hardware */ +/* Media Access Controllers */ +typedef enum { + e1000_undefined = 0, + e1000_82542_rev2_0, + e1000_82542_rev2_1, + e1000_82543, + e1000_82544, + e1000_82540, + e1000_82545, + e1000_82545_rev_3, + e1000_82546, + e1000_ce4100, + e1000_82546_rev_3, + e1000_82541, + e1000_82541_rev_2, + e1000_82547, + e1000_82547_rev_2, + e1000_num_macs +} e1000_mac_type; + +typedef enum { + e1000_eeprom_uninitialized = 0, + e1000_eeprom_spi, + e1000_eeprom_microwire, + e1000_eeprom_flash, + e1000_eeprom_none, /* No NVM support */ + e1000_num_eeprom_types +} e1000_eeprom_type; + +/* Media Types */ +typedef enum { + e1000_media_type_copper = 0, + e1000_media_type_fiber = 1, + e1000_media_type_internal_serdes = 2, + e1000_num_media_types +} e1000_media_type; + +typedef enum { + e1000_10_half = 0, + e1000_10_full = 1, + e1000_100_half = 2, + e1000_100_full = 3 +} e1000_speed_duplex_type; + +/* Flow Control Settings */ +typedef enum { + E1000_FC_NONE = 0, + E1000_FC_RX_PAUSE = 1, + E1000_FC_TX_PAUSE = 2, + E1000_FC_FULL = 3, + E1000_FC_DEFAULT = 0xFF +} e1000_fc_type; + +struct e1000_shadow_ram { + u16 eeprom_word; + bool modified; +}; + +/* PCI bus types */ +typedef enum { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_reserved +} e1000_bus_type; + +/* PCI bus speeds */ +typedef enum { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_reserved +} e1000_bus_speed; + +/* PCI bus widths */ +typedef enum { + e1000_bus_width_unknown = 0, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +} e1000_bus_width; + +/* PHY status info structure and supporting enums */ +typedef enum { + e1000_cable_length_50 = 0, + e1000_cable_length_50_80, + e1000_cable_length_80_110, + e1000_cable_length_110_140, + e1000_cable_length_140, + e1000_cable_length_undefined = 0xFF +} e1000_cable_length; + +typedef enum { + e1000_gg_cable_length_60 = 0, + e1000_gg_cable_length_60_115 = 1, + e1000_gg_cable_length_115_150 = 2, + e1000_gg_cable_length_150 = 4 +} e1000_gg_cable_length; + +typedef enum { + e1000_igp_cable_length_10 = 10, + e1000_igp_cable_length_20 = 20, + e1000_igp_cable_length_30 = 30, + e1000_igp_cable_length_40 = 40, + e1000_igp_cable_length_50 = 50, + e1000_igp_cable_length_60 = 60, + e1000_igp_cable_length_70 = 70, + e1000_igp_cable_length_80 = 80, + e1000_igp_cable_length_90 = 90, + e1000_igp_cable_length_100 = 100, + e1000_igp_cable_length_110 = 110, + e1000_igp_cable_length_115 = 115, + e1000_igp_cable_length_120 = 120, + e1000_igp_cable_length_130 = 130, + e1000_igp_cable_length_140 = 140, + e1000_igp_cable_length_150 = 150, + e1000_igp_cable_length_160 = 160, + e1000_igp_cable_length_170 = 170, + e1000_igp_cable_length_180 = 180 +} e1000_igp_cable_length; + +typedef enum { + e1000_10bt_ext_dist_enable_normal = 0, + e1000_10bt_ext_dist_enable_lower, + e1000_10bt_ext_dist_enable_undefined = 0xFF +} e1000_10bt_ext_dist_enable; + +typedef enum { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +} e1000_rev_polarity; + +typedef enum { + e1000_downshift_normal = 0, + e1000_downshift_activated, 
+ e1000_downshift_undefined = 0xFF +} e1000_downshift; + +typedef enum { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +} e1000_smart_speed; + +typedef enum { + e1000_polarity_reversal_enabled = 0, + e1000_polarity_reversal_disabled, + e1000_polarity_reversal_undefined = 0xFF +} e1000_polarity_reversal; + +typedef enum { + e1000_auto_x_mode_manual_mdi = 0, + e1000_auto_x_mode_manual_mdix, + e1000_auto_x_mode_auto1, + e1000_auto_x_mode_auto2, + e1000_auto_x_mode_undefined = 0xFF +} e1000_auto_x_mode; + +typedef enum { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +} e1000_1000t_rx_status; + +typedef enum { + e1000_phy_m88 = 0, + e1000_phy_igp, + e1000_phy_8211, + e1000_phy_8201, + e1000_phy_undefined = 0xFF +} e1000_phy_type; + +typedef enum { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +} e1000_ms_type; + +typedef enum { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active, + e1000_ffe_config_blocked +} e1000_ffe_config; + +typedef enum { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled, + e1000_dsp_config_activated, + e1000_dsp_config_undefined = 0xFF +} e1000_dsp_config; + +struct e1000_phy_info { + e1000_cable_length cable_length; + e1000_10bt_ext_dist_enable extended_10bt_distance; + e1000_rev_polarity cable_polarity; + e1000_downshift downshift; + e1000_polarity_reversal polarity_correction; + e1000_auto_x_mode mdix_mode; + e1000_1000t_rx_status local_rx; + e1000_1000t_rx_status remote_rx; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_eeprom_info { + e1000_eeprom_type type; + u16 word_size; + u16 opcode_bits; + u16 address_bits; + u16 delay_usec; + u16 page_size; +}; + +/* Flex ASF Information */ +#define E1000_HOST_IF_MAX_SIZE 2048 + +typedef enum { + e1000_byte_align = 0, + e1000_word_align = 1, + e1000_dword_align = 2 +} e1000_align_type; + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_EEPROM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_TYPE 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 + +#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \ + (((_value) & 0xff00) >> 8)) + +/* Function prototypes */ +/* Initialization */ +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_set_mac_type(struct e1000_hw *hw); +void e1000_set_media_type(struct e1000_hw *hw); + +/* Link Configuration */ +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex); +s32 e1000_force_mac_fc(struct e1000_hw *hw); + +/* PHY */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_reset(struct e1000_hw *hw); +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); + +/* EEPROM Functions */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw); + +/* MNG HOST IF functions */ +u32 
e1000_enable_mng_pass_thru(struct e1000_hw *hw); + +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ + +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_ICH_IAMT_MODE 0x2 +#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ + +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */ +#define E1000_VFTA_ENTRY_SHIFT 0x5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658 */ +}; +#ifdef __BIG_ENDIAN +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u16 vlan_id; + u8 reserved0; + u8 status; + u32 reserved1; + u8 checksum; + u8 reserved3; + u16 reserved2; +}; +#else +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; +#endif + +bool e1000_check_mng_mode(struct e1000_hw *hw); +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_read_mac_addr(struct e1000_hw *hw); + +/* Filters (multicast, vlan, receive) */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr); +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value); +void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); + +/* LED functions */ +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +s32 e1000_blink_led_start(struct e1000_hw *hw); + +/* Adaptive IFS Functions */ + +/* Everything else */ +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +void e1000_get_bus_info(struct e1000_hw *hw); +void e1000_pci_set_mwi(struct e1000_hw *hw); +void e1000_pci_clear_mwi(struct e1000_hw *hw); +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); +int e1000_pcix_get_mmrbc(struct e1000_hw *hw); +/* Port I/O is only supported on 82544 and newer */ +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value); + +#define E1000_READ_REG_IO(a, reg) \ + e1000_read_reg_io((a), E1000_##reg) +#define E1000_WRITE_REG_IO(a, reg, val) \ + e1000_write_reg_io((a), E1000_##reg, val) + +/* PCI Device IDs */ +#define E1000_DEV_ID_82542 0x1000 +#define E1000_DEV_ID_82543GC_FIBER 0x1001 +#define E1000_DEV_ID_82543GC_COPPER 0x1004 +#define E1000_DEV_ID_82544EI_COPPER 0x1008 +#define E1000_DEV_ID_82544EI_FIBER 0x1009 +#define E1000_DEV_ID_82544GC_COPPER 0x100C +#define E1000_DEV_ID_82544GC_LOM 0x100D +#define E1000_DEV_ID_82540EM 0x100E +#define 
E1000_DEV_ID_82540EM_LOM 0x1015 +#define E1000_DEV_ID_82540EP_LOM 0x1016 +#define E1000_DEV_ID_82540EP 0x1017 +#define E1000_DEV_ID_82540EP_LP 0x101E +#define E1000_DEV_ID_82545EM_COPPER 0x100F +#define E1000_DEV_ID_82545EM_FIBER 0x1011 +#define E1000_DEV_ID_82545GM_COPPER 0x1026 +#define E1000_DEV_ID_82545GM_FIBER 0x1027 +#define E1000_DEV_ID_82545GM_SERDES 0x1028 +#define E1000_DEV_ID_82546EB_COPPER 0x1010 +#define E1000_DEV_ID_82546EB_FIBER 0x1012 +#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define E1000_DEV_ID_82541EI 0x1013 +#define E1000_DEV_ID_82541EI_MOBILE 0x1018 +#define E1000_DEV_ID_82541ER_LOM 0x1014 +#define E1000_DEV_ID_82541ER 0x1078 +#define E1000_DEV_ID_82547GI 0x1075 +#define E1000_DEV_ID_82541GI 0x1076 +#define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82541GI_LF 0x107C +#define E1000_DEV_ID_82546GB_COPPER 0x1079 +#define E1000_DEV_ID_82546GB_FIBER 0x107A +#define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82547EI_MOBILE 0x101A +#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E + +#define NODE_ADDRESS_SIZE 6 + +/* MAC decode size is 128K - This is the size of BAR0 */ +#define MAC_DECODE_SIZE (128 * 1024) + +#define E1000_82542_2_0_REV_ID 2 +#define E1000_82542_2_1_REV_ID 3 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* The sizes (in bytes) of a ethernet packet */ +#define ENET_HEADER_SIZE 14 +#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ +#define ETHERNET_FCS_SIZE 4 +#define MINIMUM_ETHERNET_PACKET_SIZE \ + (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE) +#define CRC_LENGTH ETHERNET_FCS_SIZE +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* 802.1q VLAN Packet Sizes */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */ + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ +#define ETHERNET_IP_TYPE 0x0800 /* IP packets */ +#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */ + +/* Packet Header defines */ +#define IP_PROTOCOL_TCP 6 +#define IP_PROTOCOL_UDP 0x11 + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + */ +#define POLL_IMS_ENABLE_MASK ( \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ) + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. We + * reserve one of these spots for our directed address, allowing us room for + * E1000_RAR_ENTRIES - 1 multicast addresses. 
+ */ +#define E1000_RAR_ENTRIES 15 + +#define MIN_NUMBER_OF_DESCRIPTORS 8 +#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 + +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of buffers 1-3 */ + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ 
+ E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; /* */ + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; /* */ + u8 cmd; /* */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; /* */ + } fields; + } upper; +}; + +/* Filters */ +#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ +#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address Register */ +struct e1000_rar { + volatile __le32 low; /* receive address low */ + volatile __le32 high; /* receive address high */ +}; + +/* Number of entries in the 
Multicast Table Array (MTA). */ +#define E1000_NUM_MTA_REGISTERS 128 + +/* IPv4 Address Table Entry */ +struct e1000_ipv4_at_entry { + volatile u32 ipv4_addr; /* IP Address (RW) */ + volatile u32 reserved; +}; + +/* Four wakeup IP addresses are supported */ +#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 +#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX +#define E1000_IP6AT_SIZE 1 + +/* IPv6 Address Table Entry */ +struct e1000_ipv6_at_entry { + volatile u8 ipv6_addr[16]; +}; + +/* Flexible Filter Length Table Entry */ +struct e1000_fflt_entry { + volatile u32 length; /* Flexible Filter Length (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Mask Table Entry */ +struct e1000_ffmt_entry { + volatile u32 mask; /* Flexible Filter Mask (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Value Table Entry */ +struct e1000_ffvt_entry { + volatile u32 value; /* Flexible Filter Value (RW) */ + volatile u32 reserved; +}; + +/* Four Flexible Filters are supported */ +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX + +#define E1000_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Register Set. (82543, 82544) + * + * Registers are defined to be 32 bits and should be accessed as 32 bit values. + * These registers are physically located on the NIC, but are mapped into the + * host memory address space. + * + * RW - register is both readable and writable + * RO - register is read only + * WO - register is write only + * R/clr - register is read only and is cleared when read + * A - register array + */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ + +#define INTEL_CE_GBE_MDIO_RCOMP_BASE (hw->ce4100_gbe_mdio_base_virt) +#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) +#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) +#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) +#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC) +#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20) +#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24) + +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ + +/* Auxiliary Control Register. 
This register is CE4100 specific, + * RMII/RGMII function is switched by this register - RW + * Following are bits definitions of the Auxiliary Control Register + */ +#define E1000_CTL_AUX 0x000E0 +#define E1000_CTL_AUX_END_SEL_SHIFT 10 +#define E1000_CTL_AUX_ENDIANESS_SHIFT 8 +#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0 + +/* descriptor and packet transfer use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use CTL_AUX.ENDIANESS, packet use default */ +#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use default, packet use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT) +/* all use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT) + +#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT) +#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT) + +/* LW little endian, Byte big endian */ +#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT) + +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define FEXTNVM_SW_CONFIG 0x0001 +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_FLASH_UPDATES 1000 +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* RX Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* RX Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* RX 
Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* RX Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* RX Data FIFO Packet Count - RW */ +#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ +#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ +#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ +#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ +#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ +#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ +#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */ +#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ +#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ +#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ +#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ +#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ +#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ +#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ +#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ +#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */ +#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */ +#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */ +#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */ +#define E1000_TDT 0x03818 /* TX Descripotr Tail - RW */ +#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */ +#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ +#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ +#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ +#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ +#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ +#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ +#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ +#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ +#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define 
E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 
0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ + +#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ + +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* Register Set (82542) + * + * Some of the 82542 registers are located at different offsets than they are + * in more current versions of the 8254x. Despite the difference in location, + * the registers function in the same manner. 
+ */ +#define E1000_82542_CTL_AUX E1000_CTL_AUX +#define E1000_82542_CTRL E1000_CTRL +#define E1000_82542_CTRL_DUP E1000_CTRL_DUP +#define E1000_82542_STATUS E1000_STATUS +#define E1000_82542_EECD E1000_EECD +#define E1000_82542_EERD E1000_EERD +#define E1000_82542_CTRL_EXT E1000_CTRL_EXT +#define E1000_82542_FLA E1000_FLA +#define E1000_82542_MDIC E1000_MDIC +#define E1000_82542_SCTL E1000_SCTL +#define E1000_82542_FEXTNVM E1000_FEXTNVM +#define E1000_82542_FCAL E1000_FCAL +#define E1000_82542_FCAH E1000_FCAH +#define E1000_82542_FCT E1000_FCT +#define E1000_82542_VET E1000_VET +#define E1000_82542_RA 0x00040 +#define E1000_82542_ICR E1000_ICR +#define E1000_82542_ITR E1000_ITR +#define E1000_82542_ICS E1000_ICS +#define E1000_82542_IMS E1000_IMS +#define E1000_82542_IMC E1000_IMC +#define E1000_82542_RCTL E1000_RCTL +#define E1000_82542_RDTR 0x00108 +#define E1000_82542_RDFH E1000_RDFH +#define E1000_82542_RDFT E1000_RDFT +#define E1000_82542_RDFHS E1000_RDFHS +#define E1000_82542_RDFTS E1000_RDFTS +#define E1000_82542_RDFPC E1000_RDFPC +#define E1000_82542_RDBAL 0x00110 +#define E1000_82542_RDBAH 0x00114 +#define E1000_82542_RDLEN 0x00118 +#define E1000_82542_RDH 0x00120 +#define E1000_82542_RDT 0x00128 +#define E1000_82542_RDTR0 E1000_82542_RDTR +#define E1000_82542_RDBAL0 E1000_82542_RDBAL +#define E1000_82542_RDBAH0 E1000_82542_RDBAH +#define E1000_82542_RDLEN0 E1000_82542_RDLEN +#define E1000_82542_RDH0 E1000_82542_RDH +#define E1000_82542_RDT0 E1000_82542_RDT +#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication + * RX Control - RW */ +#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) +#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ +#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ +#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ +#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ +#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ +#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ +#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ +#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ +#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ +#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ +#define E1000_82542_RDTR1 0x00130 +#define E1000_82542_RDBAL1 0x00138 +#define E1000_82542_RDBAH1 0x0013C +#define E1000_82542_RDLEN1 0x00140 +#define E1000_82542_RDH1 0x00148 +#define E1000_82542_RDT1 0x00150 +#define E1000_82542_FCRTH 0x00160 +#define E1000_82542_FCRTL 0x00168 +#define E1000_82542_FCTTV E1000_FCTTV +#define E1000_82542_TXCW E1000_TXCW +#define E1000_82542_RXCW E1000_RXCW +#define E1000_82542_MTA 0x00200 +#define E1000_82542_TCTL E1000_TCTL +#define E1000_82542_TCTL_EXT E1000_TCTL_EXT +#define E1000_82542_TIPG E1000_TIPG +#define E1000_82542_TDBAL 0x00420 +#define E1000_82542_TDBAH 0x00424 +#define E1000_82542_TDLEN 0x00428 +#define E1000_82542_TDH 0x00430 +#define E1000_82542_TDT 0x00438 +#define E1000_82542_TIDV 0x00440 +#define E1000_82542_TBT E1000_TBT +#define E1000_82542_AIT E1000_AIT +#define E1000_82542_VFTA 0x00600 +#define E1000_82542_LEDCTL E1000_LEDCTL +#define E1000_82542_PBA E1000_PBA +#define E1000_82542_PBS E1000_PBS +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_EEARBC E1000_EEARBC +#define E1000_82542_FLASHT E1000_FLASHT +#define E1000_82542_EEWR E1000_EEWR +#define E1000_82542_FLSWCTL E1000_FLSWCTL +#define E1000_82542_FLSWDATA 
E1000_FLSWDATA +#define E1000_82542_FLSWCNT E1000_FLSWCNT +#define E1000_82542_FLOP E1000_FLOP +#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL +#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE +#define E1000_82542_PHY_CTRL E1000_PHY_CTRL +#define E1000_82542_ERT E1000_ERT +#define E1000_82542_RXDCTL E1000_RXDCTL +#define E1000_82542_RXDCTL1 E1000_RXDCTL1 +#define E1000_82542_RADV E1000_RADV +#define E1000_82542_RSRPD E1000_RSRPD +#define E1000_82542_TXDMAC E1000_TXDMAC +#define E1000_82542_KABGTXD E1000_KABGTXD +#define E1000_82542_TDFHS E1000_TDFHS +#define E1000_82542_TDFTS E1000_TDFTS +#define E1000_82542_TDFPC E1000_TDFPC +#define E1000_82542_TXDCTL E1000_TXDCTL +#define E1000_82542_TADV E1000_TADV +#define E1000_82542_TSPMT E1000_TSPMT +#define E1000_82542_CRCERRS E1000_CRCERRS +#define E1000_82542_ALGNERRC E1000_ALGNERRC +#define E1000_82542_SYMERRS E1000_SYMERRS +#define E1000_82542_RXERRC E1000_RXERRC +#define E1000_82542_MPC E1000_MPC +#define E1000_82542_SCC E1000_SCC +#define E1000_82542_ECOL E1000_ECOL +#define E1000_82542_MCC E1000_MCC +#define E1000_82542_LATECOL E1000_LATECOL +#define E1000_82542_COLC E1000_COLC +#define E1000_82542_DC E1000_DC +#define E1000_82542_TNCRS E1000_TNCRS +#define E1000_82542_SEC E1000_SEC +#define E1000_82542_CEXTERR E1000_CEXTERR +#define E1000_82542_RLEC E1000_RLEC +#define E1000_82542_XONRXC E1000_XONRXC +#define E1000_82542_XONTXC E1000_XONTXC +#define E1000_82542_XOFFRXC E1000_XOFFRXC +#define E1000_82542_XOFFTXC E1000_XOFFTXC +#define E1000_82542_FCRUC E1000_FCRUC +#define E1000_82542_PRC64 E1000_PRC64 +#define E1000_82542_PRC127 E1000_PRC127 +#define E1000_82542_PRC255 E1000_PRC255 +#define E1000_82542_PRC511 E1000_PRC511 +#define E1000_82542_PRC1023 E1000_PRC1023 +#define E1000_82542_PRC1522 E1000_PRC1522 +#define E1000_82542_GPRC E1000_GPRC +#define E1000_82542_BPRC E1000_BPRC +#define E1000_82542_MPRC E1000_MPRC +#define E1000_82542_GPTC E1000_GPTC +#define E1000_82542_GORCL E1000_GORCL +#define E1000_82542_GORCH E1000_GORCH +#define E1000_82542_GOTCL E1000_GOTCL +#define E1000_82542_GOTCH E1000_GOTCH +#define E1000_82542_RNBC E1000_RNBC +#define E1000_82542_RUC E1000_RUC +#define E1000_82542_RFC E1000_RFC +#define E1000_82542_ROC E1000_ROC +#define E1000_82542_RJC E1000_RJC +#define E1000_82542_MGTPRC E1000_MGTPRC +#define E1000_82542_MGTPDC E1000_MGTPDC +#define E1000_82542_MGTPTC E1000_MGTPTC +#define E1000_82542_TORL E1000_TORL +#define E1000_82542_TORH E1000_TORH +#define E1000_82542_TOTL E1000_TOTL +#define E1000_82542_TOTH E1000_TOTH +#define E1000_82542_TPR E1000_TPR +#define E1000_82542_TPT E1000_TPT +#define E1000_82542_PTC64 E1000_PTC64 +#define E1000_82542_PTC127 E1000_PTC127 +#define E1000_82542_PTC255 E1000_PTC255 +#define E1000_82542_PTC511 E1000_PTC511 +#define E1000_82542_PTC1023 E1000_PTC1023 +#define E1000_82542_PTC1522 E1000_PTC1522 +#define E1000_82542_MPTC E1000_MPTC +#define E1000_82542_BPTC E1000_BPTC +#define E1000_82542_TSCTC E1000_TSCTC +#define E1000_82542_TSCTFC E1000_TSCTFC +#define E1000_82542_RXCSUM E1000_RXCSUM +#define E1000_82542_WUC E1000_WUC +#define E1000_82542_WUFC E1000_WUFC +#define E1000_82542_WUS E1000_WUS +#define E1000_82542_MANC E1000_MANC +#define E1000_82542_IPAV E1000_IPAV +#define E1000_82542_IP4AT E1000_IP4AT +#define E1000_82542_IP6AT E1000_IP6AT +#define E1000_82542_WUPL E1000_WUPL +#define E1000_82542_WUPM E1000_WUPM +#define E1000_82542_FFLT E1000_FFLT +#define E1000_82542_TDFH 0x08010 +#define E1000_82542_TDFT 0x08018 +#define E1000_82542_FFMT E1000_FFMT +#define 
E1000_82542_FFVT E1000_FFVT +#define E1000_82542_HOST_IF E1000_HOST_IF +#define E1000_82542_IAM E1000_IAM +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_PSRCTL E1000_PSRCTL +#define E1000_82542_RAID E1000_RAID +#define E1000_82542_TARC0 E1000_TARC0 +#define E1000_82542_TDBAL1 E1000_TDBAL1 +#define E1000_82542_TDBAH1 E1000_TDBAH1 +#define E1000_82542_TDLEN1 E1000_TDLEN1 +#define E1000_82542_TDH1 E1000_TDH1 +#define E1000_82542_TDT1 E1000_TDT1 +#define E1000_82542_TXDCTL1 E1000_TXDCTL1 +#define E1000_82542_TARC1 E1000_TARC1 +#define E1000_82542_RFCTL E1000_RFCTL +#define E1000_82542_GCR E1000_GCR +#define E1000_82542_GSCL_1 E1000_GSCL_1 +#define E1000_82542_GSCL_2 E1000_GSCL_2 +#define E1000_82542_GSCL_3 E1000_GSCL_3 +#define E1000_82542_GSCL_4 E1000_GSCL_4 +#define E1000_82542_FACTPS E1000_FACTPS +#define E1000_82542_SWSM E1000_SWSM +#define E1000_82542_FWSM E1000_FWSM +#define E1000_82542_FFLT_DBG E1000_FFLT_DBG +#define E1000_82542_IAC E1000_IAC +#define E1000_82542_ICRXPTC E1000_ICRXPTC +#define E1000_82542_ICRXATC E1000_ICRXATC +#define E1000_82542_ICTXPTC E1000_ICTXPTC +#define E1000_82542_ICTXATC E1000_ICTXATC +#define E1000_82542_ICTXQEC E1000_ICTXQEC +#define E1000_82542_ICTXQMTC E1000_ICTXQMTC +#define E1000_82542_ICRXDMTC E1000_ICRXDMTC +#define E1000_82542_ICRXOC E1000_ICRXOC +#define E1000_82542_HICR E1000_HICR + +#define E1000_82542_CPUVEC E1000_CPUVEC +#define E1000_82542_MRQC E1000_MRQC +#define E1000_82542_RETA E1000_RETA +#define E1000_82542_RSSRK E1000_RSSRK +#define E1000_82542_RSSIM E1000_RSSIM +#define E1000_82542_RSSIR E1000_RSSIR +#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA +#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 txerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorcl; + u64 gorch; + u64 gotcl; + u64 gotch; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rlerrc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 torl; + u64 torh; + u64 totl; + u64 toth; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +/* Structure containing variables used by the shared code (e1000_hw.c) */ +struct e1000_hw { + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + void __iomem *ce4100_gbe_mdio_base_virt; + e1000_mac_type mac_type; + e1000_phy_type phy_type; + u32 phy_init_script; + e1000_media_type media_type; + void *back; + struct e1000_shadow_ram *eeprom_shadow_ram; + u32 flash_bank_size; + u32 flash_base_addr; + e1000_fc_type fc; + e1000_bus_speed bus_speed; + e1000_bus_width bus_width; + e1000_bus_type bus_type; + struct e1000_eeprom_info eeprom; + e1000_ms_type master_slave; + e1000_ms_type original_master_slave; + e1000_ffe_config ffe_config_state; + u32 asf_firmware_present; + u32 eeprom_semaphore_present; + unsigned long io_base; + u32 phy_id; + u32 phy_revision; + u32 phy_addr; + u32 original_fc; + u32 txcw; + u32 autoneg_failed; + u32 max_frame_size; + u32 
min_frame_size; + u32 mc_filter_type; + u32 num_mc_addrs; + u32 collision_delta; + u32 tx_packet_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + bool tx_pkt_filtering; + struct e1000_host_mng_dhcp_cookie mng_cookie; + u16 phy_spd_default; + u16 autoneg_advertised; + u16 pci_cmd_word; + u16 fc_high_water; + u16 fc_low_water; + u16 fc_pause_time; + u16 current_ifs_val; + u16 ifs_min_val; + u16 ifs_max_val; + u16 ifs_step_size; + u16 ifs_ratio; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 autoneg; + u8 mdix; + u8 forced_speed_duplex; + u8 wait_autoneg_complete; + u8 dma_fairness; + u8 mac_addr[NODE_ADDRESS_SIZE]; + u8 perm_mac_addr[NODE_ADDRESS_SIZE]; + bool disable_polarity_correction; + bool speed_downgraded; + e1000_smart_speed smart_speed; + e1000_dsp_config dsp_config_state; + bool get_link_status; + bool serdes_has_link; + bool tbi_compatibility_en; + bool tbi_compatibility_on; + bool laa_is_present; + bool phy_reset_disable; + bool initialize_hw_bits_disable; + bool fc_send_xon; + bool fc_strict_ieee; + bool report_tx_early; + bool adaptive_ifs; + bool ifs_params_forced; + bool in_ifs_mode; + bool mng_reg_access_disabled; + bool leave_av_bit_off; + bool bad_tx_carr_stats_fd; + bool has_smbus; +}; + +#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ +#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ +#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ +#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ +/* Register Bit Masks */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion + by EEPROM/Flash */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ + +/* EEPROM/Flash Control */ +#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ +#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ +#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ +#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_EEPROM_GRANT_ATTEMPTS +#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_STM_OPCODE 0xDB00 +#define E1000_HICR_FW_RESET 0xC0 + +#define E1000_SHADOW_RAM_WORDS 2048 +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC0 + +/* EEPROM Read */ +#define E1000_EERD_START 0x00000001 /* Start Read */ +#define E1000_EERD_DONE 0x00000010 /* Read Done */ +#define E1000_EERD_ADDR_SHIFT 8 +#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */ +#define E1000_EERD_DATA_SHIFT 16 +#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */ + +/* SPI EEPROM Status Register */ +#define EEPROM_STATUS_RDY_SPI 0x01 +#define EEPROM_STATUS_WEN_SPI 
0x02 +#define EEPROM_STATUS_BP0_SPI 0x04 +#define EEPROM_STATUS_BP1_SPI 0x08 +#define EEPROM_STATUS_WPEN_SPI 0x80 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 + +#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000 +#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000 +#define INTEL_CE_GBE_MDIC_GO 0x80000000 +#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000 + +#define E1000_KUMCTRLSTA_MASK 0x0000FFFF +#define E1000_KUMCTRLSTA_OFFSET 0x001F0000 +#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KUMCTRLSTA_REN 0x00200000 + +#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 +#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001 +#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 +#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003 +#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 
0x00000004 +#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 +#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 +#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E +#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F + +/* FIFO Control */ +#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008 +#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 + +/* In-Band Control */ +#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500 +#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 + +/* Half-Duplex Control */ +#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 +#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 + +#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E + +#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000 +#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000 + +#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000 +#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000 +#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 +#define E1000_PHY_CTRL_B2B_EN 0x00000080 + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Receive Address */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Clear */ +#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMC_SRPD E1000_ICR_SRPD +#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMC_DSW E1000_ICR_DSW +#define E1000_IMC_PHYINT E1000_ICR_PHYINT +#define E1000_IMC_EPRST E1000_ICR_EPRST + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define 
E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SW_W_SYNC definitions */ +#define E1000_SWFW_EEP_SM 0x0001 +#define E1000_SWFW_PHY0_SM 0x0002 +#define E1000_SWFW_PHY1_SM 0x0004 +#define E1000_SWFW_MAC_CSR_SM 0x0008 + +/* Receive Descriptor */ +#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ +#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ +#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */ +#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */ +#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */ + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Receive Descriptor Control */ +#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ +#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ +#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */ +#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 
0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. + still to be processed. */ +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ +/* Extended Transmit Control */ +#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ + +/* Receive Checksum Control */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Multiple Receive Queue Control */ +#define E1000_MRQC_ENABLE_MASK 0x00000003 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 
/* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */ +#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */ +#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */ +#define E1000_WUS_BC 0x00000010 /* Broadcast Received */ +#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */ +#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */ +#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */ +#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */ +#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */ +#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery + * Filtering */ +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host + * memory */ +#define 
E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address + * filtering */ +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* FW Semaphore Register */ +#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ + +#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ +#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ +#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ +#define E1000_FWSM_SKUEL_SHIFT 29 +#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ +#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ +#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ +#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */ + +/* FFLT Debug Register */ +#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ + +typedef enum { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_interface_only +} e1000_mng_mode; + +/* Host Interface Control Register */ +#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ +#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done + * to put command in RAM */ +#define E1000_HICR_SV 0x00000004 /* Status Validity */ +#define E1000_HICR_FWR 0x00000080 /* FW reset. 
Set by the Host */ + +/* Host Interface Command Interface - Address range 0x8800-0x8EFF */ +#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ + +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; /* I/F bits for command, status for return */ + u8 checksum; +}; +struct e1000_host_command_info { + struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ +}; + +/* Host SMB register #0 */ +#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ +#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ +#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ +#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ + +/* Host SMB register #1 */ +#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN +#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN +#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT +#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT + +/* FW Status Register */ +#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ + +/* Wake Up Packet Length */ +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ + +#define E1000_MDALIGN 4096 + +/* PCI-Ex registers*/ + +/* PCI-Ex Control Register */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +/* Function Active and Power State to MNG */ +#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 +#define E1000_FACTPS_LAN0_VALID 0x00000004 +#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008 +#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0 +#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6 +#define E1000_FACTPS_LAN1_VALID 0x00000100 +#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200 +#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000 +#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12 +#define E1000_FACTPS_IDE_ENABLE 0x00004000 +#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000 +#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000 +#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18 +#define E1000_FACTPS_SP_ENABLE 0x00100000 +#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000 +#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000 +#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24 +#define E1000_FACTPS_IPMI_ENABLE 0x04000000 +#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000 +#define E1000_FACTPS_MNGCG 0x20000000 +#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000 +#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000 + +/* PCI-Ex Config Space */ +#define PCI_EX_LINK_STATUS 0x12 +#define PCI_EX_LINK_WIDTH_MASK 0x3F0 +#define PCI_EX_LINK_WIDTH_SHIFT 4 + +/* EEPROM Commands - Microwire */ +#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode 
*/ +#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM Commands - SPI */ +#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ +#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ +#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ +#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ +#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Size definitions */ +#define EEPROM_WORD_SIZE_SHIFT 6 +#define EEPROM_SIZE_SHIFT 10 +#define EEPROM_SIZE_MASK 0x1C00 + +/* EEPROM Word Offsets */ +#define EEPROM_COMPAT 0x0003 +#define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 +#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ +#define EEPROM_PHY_CLASS_WORD 0x0007 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 +#define EEPROM_INIT_3GIO_3 0x001A +#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 +#define EEPROM_CFG 0x0012 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ +#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */ +#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F + +/* Mask bit for PHY class in Word 7 of the EEPROM */ +#define EEPROM_PHY_CLASS_A 0x8000 + +/* Mask bits for fields in Word 0x0a of the EEPROM */ +#define EEPROM_WORD0A_ILOS 0x0010 +#define EEPROM_WORD0A_SWDPIO 0x01E0 +#define EEPROM_WORD0A_LRST 0x0200 +#define EEPROM_WORD0A_FD 0x0400 +#define EEPROM_WORD0A_66MHZ 0x0800 + +/* Mask bits for fields in Word 0x0f of the EEPROM */ +#define EEPROM_WORD0F_PAUSE_MASK 0x3000 +#define EEPROM_WORD0F_PAUSE 0x1000 +#define EEPROM_WORD0F_ASM_DIR 0x2000 +#define EEPROM_WORD0F_ANE 0x0800 +#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 +#define EEPROM_WORD0F_LPLU 0x0001 + +/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */ +#define EEPROM_WORD1020_GIGA_DISABLE 0x0010 +#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008 + +/* Mask bits for fields in Word 0x1a of the EEPROM */ +#define EEPROM_WORD1A_ASPM_MASK 0x000C + 
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* EEPROM Map defines (WORD OFFSETS)*/ +#define EEPROM_NODE_ADDRESS_BYTE_0 0 +#define EEPROM_PBA_BYTE_1 8 + +#define EEPROM_RESERVED_WORD 0xFFFF + +/* EEPROM Map Sizes (Byte Counts) */ +#define PBA_SIZE 4 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +/* Collision distance is a 0-based value that applies to + * half-duplex-capable hardware only. */ +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLLISION_DISTANCE_82542 64 +#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_COLD_SHIFT 12 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define E1000_TXDMAC_DPP 0x00000001 + +/* Adaptive IFS defines */ +#define TX_THRESHOLD_START 8 +#define TX_THRESHOLD_INCREMENT 10 +#define TX_THRESHOLD_DECREMENT 1 +#define TX_THRESHOLD_STOP 190 +#define TX_THRESHOLD_DISABLE 0 +#define TX_THRESHOLD_TIMER_MS 10000 +#define MIN_NUM_XMITS 1000 +#define IFS_MAX 80 +#define IFS_STEP 10 +#define IFS_MIN 40 +#define IFS_RATIO 4 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 +#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 +#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 +#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000 + +#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF +#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ +#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ +#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ + +#define E1000_PBS_16K E1000_PBA_16K + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* The historical defaults for the flow control values are given below. 
*/ +#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ +#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ +#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ + +/* PCIX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 + +/* Number of bits required to shift right the "pause" bits from the + * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register. + */ +#define PAUSE_SHIFT 5 + +/* Number of bits required to shift left the "SWDPIO" bits from the + * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register. + */ +#define SWDPIO_SHIFT 17 + +/* Number of bits required to shift left the "SWDPIO_EXT" bits from the + * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register. + */ +#define SWDPIO__EXT_SHIFT 4 + +/* Number of bits required to shift left the "ILOS" bit from the EEPROM + * (bit 4) to the "ILOS" (bit 7) field in the CTRL register. + */ +#define ILOS_SHIFT 3 + +#define RECEIVE_BUFFER_ALIGN_SIZE (256) + +/* Number of milliseconds we wait for auto-negotiation to complete */ +#define LINK_UP_TIMEOUT 500 + +/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ +#define AUTO_READ_DONE_TIMEOUT 10 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 + +#define E1000_TX_BUFFER_SIZE ((u32)1514) + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +/* TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the RX descriptor with EOP set + * error = the 8 bit error field of the RX descriptor with EOP set + * length = the sum of all the length fields of the RX descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * max_frame_length = the maximum frame length we want to accept. + * min_frame_length = the minimum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \ + ((adapter)->tbi_compatibility_on && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= ((adapter)->max_frame_size + 1))) : \ + (((length) > (adapter)->min_frame_size) && \ + ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1))))) + +/* Structures, enums, and macros for the PHY */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CTRL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +#define IGP01E1000_IEEE_REGS_PAGE 0x0000 +#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 +#define IGP01E1000_IEEE_FORCE_GIGA 0x0140 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ + +/* IGP01E1000 AGC Registers - stores the cable length values*/ +#define IGP01E1000_PHY_AGC_A 0x1172 +#define IGP01E1000_PHY_AGC_B 0x1272 +#define IGP01E1000_PHY_AGC_C 0x1472 +#define IGP01E1000_PHY_AGC_D 0x1872 + +/* IGP02E1000 AGC Registers for cable length values */ +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +/* IGP01E1000 DSP Reset Register */ +#define IGP01E1000_PHY_DSP_RESET 0x1F33 +#define IGP01E1000_PHY_DSP_SET 0x1F71 +#define IGP01E1000_PHY_DSP_FFE 0x1F35 + +#define IGP01E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_CHANNEL_NUM 4 + +#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271 +#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 +#define IGP01E1000_PHY_AGC_PARAM_D 0x1871 + +#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000 +#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000 + +#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890 +#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000 +#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004 +#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069 + +#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A +/* IGP01E1000 PCS Initialization register - stores the polarity status when + * speed = 1000 Mbps. */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5 + +#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */ + +/* Next Page TX Register */ +#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* Link Partner Next Page Register */ +#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */ +#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 
/* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +/* Extended Status Register */ +#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */ +#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */ + +#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */ + /* (0=enable, 1=disable) */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. 
+ */ +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080 + /* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 + /* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5 +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; + * 3=110-140M;4=>140M */ +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_REV_POLARITY_SHIFT 1 +#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5 +#define M88E1000_PSSR_MDIX_SHIFT 6 +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* IGP01E1000 Specific Port Config Register - R/W */ +#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 +#define IGP01E1000_PSCFR_PRE_EN 0x0020 +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 +#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100 +#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400 +#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000 + +/* IGP01E1000 Specific Port Status Register - R/O */ +#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */ +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C +#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200 +#define IGP01E1000_PSSR_LINK_UP 0x0400 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */ +#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000 +#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */ +#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */ + +/* IGP01E1000 Specific Port Control Register - R/W */ +#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010 +#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200 +#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400 +#define IGP01E1000_PSCR_FLIP_CHIP 0x0800 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */ + +/* IGP01E1000 Specific Port Link Health Register */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000 +#define IGP01E1000_PLHR_MASTER_FAULT 0x2000 +#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000 +#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */ +#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_0 0x0100 +#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040 +#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010 +#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008 +#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004 +#define 
IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002 +#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001 + +/* IGP01E1000 Channel Quality Register */ +#define IGP01E1000_MSE_CHANNEL_D 0x000F +#define IGP01E1000_MSE_CHANNEL_C 0x00F0 +#define IGP01E1000_MSE_CHANNEL_B 0x0F00 +#define IGP01E1000_MSE_CHANNEL_A 0xF000 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ + +/* IGP01E1000 DSP reset macros */ +#define DSP_RESET_ENABLE 0x0 +#define DSP_RESET_DISABLE 0x2 +#define E1000_MAX_DSP_RESETS 10 + +/* IGP01E1000 & IGP02E1000 AGC Registers */ + +#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ + +/* IGP02E1000 AGC Register Length 9-bit mask */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F + +/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ +#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 +#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113 + +/* The precision error of the cable length is +/- 10 meters */ +#define IGP01E1000_AGC_RANGE 10 +#define IGP02E1000_AGC_RANGE 15 + +/* IGP01E1000 PCS Initialization register */ +/* bits 3:6 in the PCS registers stores the channels polarity */ +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +/* IGP01E1000 GMII FIFO Register */ +#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed + * on Link-Up */ +#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */ + +/* IGP01E1000 Analog Register */ +#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 +#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 +#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC +#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE + +#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 +#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 +#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 +#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 +#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 + +#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 +#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 +#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 +#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 + +/* Bit definitions for valid PHY IDs. */ +/* I = Integrated + * E = External + */ +#define M88_VENDOR 0x0141 +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID +#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1118_E_PHY_ID 0x01410E40 +#define L1LXT971A_PHY_ID 0x001378E0 + +#define RTL8211B_PHY_ID 0x001CC910 +#define RTL8201N_PHY_ID 0x8200 +#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */ +#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */ + +/* Bits... 
+ * 15-5: page + * 4-0: register offset + */ +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) \ + (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) + +#define IGP3_PHY_PORT_CTRL \ + PHY_REG(769, 17) /* Port General Configuration */ +#define IGP3_PHY_RATE_ADAPT_CTRL \ + PHY_REG(769, 25) /* Rate Adapter Control Register */ + +#define IGP3_KMRN_FIFO_CTRL_STATS \ + PHY_REG(770, 16) /* KMRN FIFO's control/status register */ +#define IGP3_KMRN_POWER_MNG_CTRL \ + PHY_REG(770, 17) /* KMRN Power Management Control Register */ +#define IGP3_KMRN_INBAND_CTRL \ + PHY_REG(770, 18) /* KMRN Inband Control Register */ +#define IGP3_KMRN_DIAG \ + PHY_REG(770, 19) /* KMRN Diagnostic register */ +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ +#define IGP3_KMRN_ACK_TIMEOUT \ + PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ + +#define IGP3_VR_CTRL \ + PHY_REG(776, 18) /* Voltage regulator control register */ +#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ +#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */ + +#define IGP3_CAPABILITY \ + PHY_REG(776, 19) /* IGP3 Capability Register */ + +/* Capabilities for SKU Control */ +#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ +#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ +#define IGP3_CAP_ASF 0x0004 /* Support ASF */ +#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ +#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ +#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ +#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ +#define IGP3_CAP_RSS 0x0080 /* Support RSS */ +#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ +#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ + +#define IGP3_PPC_JORDAN_EN 0x0001 +#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 + +#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001 +#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E +#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 +#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 + +#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. 
Ctrl register */ +#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ + +#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) +#define IGP3_KMRN_EC_DIS_INBAND 0x0080 + +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ +#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ +#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */ +#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ +#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ +#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ +#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ +#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ +#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ +#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ + +#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */ +#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ +#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ +#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ +#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ +#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ +#define IFE_PESC_POLARITY_REVERSED_SHIFT 8 + +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */ +#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ +#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ +#define IFE_PSC_FORCE_POLARITY_SHIFT 5 +#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 + +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ +#define IFE_PMC_MDIX_MODE_SHIFT 6 +#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ + +#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ +#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ +#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ +#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ +#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ +#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ +#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ +#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ +#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ +#define IFE_PSCL_PROBE_LEDS_OFF 
0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ +#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define ICH_CYCLE_READ 0x0 +#define ICH_CYCLE_RESERVED 0x1 +#define ICH_CYCLE_WRITE 0x2 +#define ICH_CYCLE_ERASE 0x3 + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_FRACC 0x0050 +#define ICH_FLASH_FREG0 0x0054 +#define ICH_FLASH_FREG1 0x0058 +#define ICH_FLASH_FREG2 0x005C +#define ICH_FLASH_FREG3 0x0060 +#define ICH_FLASH_FPR0 0x0074 +#define ICH_FLASH_FPR1 0x0078 +#define ICH_FLASH_SSFSTS 0x0090 +#define ICH_FLASH_SSFCTL 0x0092 +#define ICH_FLASH_PREOP 0x0094 +#define ICH_FLASH_OPTYPE 0x0096 +#define ICH_FLASH_OPMENU 0x0098 + +#define ICH_FLASH_REG_MAPSIZE 0x00A0 +#define ICH_FLASH_SECTOR_SIZE 4096 +#define ICH_GFPREG_BASE_MASK 0x1FFF +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF + +/* Miscellaneous PHY bit definitions. */ +#define PHY_PREAMBLE 0xFFFFFFFF +#define PHY_SOF 0x01 +#define PHY_OP_READ 0x02 +#define PHY_OP_WRITE 0x01 +#define PHY_TURNAROUND 0x02 +#define PHY_PREAMBLE_SIZE 32 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 +#define E1000_PHY_ADDRESS 0x01 +#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */ +#define REG4_SPEED_MASK 0x01E0 +#define REG9_SPEED_MASK 0x0300 +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 +#define ADVERTISE_1000_FULL 0x0020 +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ +#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ +#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ + +#endif /* _E1000_HW_H_ */ diff --git a/devices/e1000/e1000_hw-5.15-ethercat.c b/devices/e1000/e1000_hw-5.15-ethercat.c new file mode 100644 index 00000000..438bc741 --- /dev/null +++ b/devices/e1000/e1000_hw-5.15-ethercat.c @@ -0,0 +1,5636 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ + +/* e1000_hw.c + * Shared functions for accessing and configuring the MAC + */ + +#include "e1000-5.15-ethercat.h" + +static s32 e1000_check_downshift(struct e1000_hw *hw); +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity); +static void e1000_clear_hw_cntrs(struct e1000_hw *hw); +static void e1000_clear_vfta(struct e1000_hw *hw); +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, + bool link_up); +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); +static s32 e1000_detect_gig_phy(struct e1000_hw *hw); +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length); +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static s32 e1000_id_led_init(struct e1000_hw *hw); +static void e1000_init_rx_addrs(struct e1000_hw *hw); +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value); +static s32 e1000_set_phy_type(struct e1000_hw *hw); +static void e1000_phy_init_script(struct e1000_hw *hw); +static s32 e1000_setup_copper_link(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw); +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw); +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count); +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count); +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data); +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data); +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); +static s32 e1000_acquire_eeprom(struct e1000_hw *hw); +static void e1000_release_eeprom(struct e1000_hw *hw); +static void e1000_standby_eeprom(struct e1000_hw *hw); +static s32 e1000_set_vco_speed(struct e1000_hw *hw); +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw); +static s32 e1000_set_phy_mode(struct e1000_hw *hw); +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); + +/* IGP cable length table */ +static const +u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = { + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, + 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, + 40, 50, 
50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, + 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, + 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, + 110, 110, + 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, + 120, 120 +}; + +static DEFINE_MUTEX(e1000_eeprom_lock); +static DEFINE_SPINLOCK(e1000_phy_lock); + +/** + * e1000_set_phy_type - Set the phy type member in the hw struct. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_set_phy_type(struct e1000_hw *hw) +{ + if (hw->mac_type == e1000_undefined) + return -E1000_ERR_PHY_TYPE; + + switch (hw->phy_id) { + case M88E1000_E_PHY_ID: + case M88E1000_I_PHY_ID: + case M88E1011_I_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1118_E_PHY_ID: + hw->phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: + if (hw->mac_type == e1000_82541 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_type = e1000_phy_igp; + break; + case RTL8211B_PHY_ID: + hw->phy_type = e1000_phy_8211; + break; + case RTL8201N_PHY_ID: + hw->phy_type = e1000_phy_8201; + break; + default: + /* Should never have loaded on this device */ + hw->phy_type = e1000_phy_undefined; + return -E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_phy_init_script(struct e1000_hw *hw) +{ + u16 phy_saved_data; + + if (hw->phy_init_script) { + msleep(20); + + /* Save off the current value of register 0x2F5B to be restored + * at the end of this routine. 
+ */ + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disabled the PHY transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + msleep(20); + + e1000_write_phy_reg(hw, 0x0000, 0x0140); + msleep(5); + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + e1000_write_phy_reg(hw, 0x1F95, 0x0001); + e1000_write_phy_reg(hw, 0x1F71, 0xBD21); + e1000_write_phy_reg(hw, 0x1F79, 0x0018); + e1000_write_phy_reg(hw, 0x1F30, 0x1600); + e1000_write_phy_reg(hw, 0x1F31, 0x0014); + e1000_write_phy_reg(hw, 0x1F32, 0x161C); + e1000_write_phy_reg(hw, 0x1F94, 0x0003); + e1000_write_phy_reg(hw, 0x1F96, 0x003F); + e1000_write_phy_reg(hw, 0x2010, 0x0008); + break; + + case e1000_82541_rev_2: + case e1000_82547_rev_2: + e1000_write_phy_reg(hw, 0x1F73, 0x0099); + break; + default: + break; + } + + e1000_write_phy_reg(hw, 0x0000, 0x3300); + msleep(20); + + /* Now enable the transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (hw->mac_type == e1000_82547) { + u16 fused, fine, coarse; + + /* Move to analog registers page */ + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_SPARE_FUSE_STATUS, + &fused); + + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_STATUS, + &fused); + + fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; + coarse = + fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; + + if (coarse > + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + coarse -= + IGP01E1000_ANALOG_FUSE_COARSE_10; + fine -= IGP01E1000_ANALOG_FUSE_FINE_1; + } else if (coarse == + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + fine -= IGP01E1000_ANALOG_FUSE_FINE_10; + + fused = + (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | + (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | + (coarse & + IGP01E1000_ANALOG_FUSE_COARSE_MASK); + + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_CONTROL, + fused); + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_BYPASS, + IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); + } + } + } +} + +/** + * e1000_set_mac_type - Set the mac type member in the hw struct. 
+ * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + switch (hw->device_id) { + case E1000_DEV_ID_82542: + switch (hw->revision_id) { + case E1000_82542_2_0_REV_ID: + hw->mac_type = e1000_82542_rev2_0; + break; + case E1000_82542_2_1_REV_ID: + hw->mac_type = e1000_82542_rev2_1; + break; + default: + /* Invalid 82542 revision ID */ + return -E1000_ERR_MAC_TYPE; + } + break; + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + hw->mac_type = e1000_82543; + break; + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + hw->mac_type = e1000_82544; + break; + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + hw->mac_type = e1000_82540; + break; + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + hw->mac_type = e1000_82545; + break; + case E1000_DEV_ID_82545GM_COPPER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82545GM_SERDES: + hw->mac_type = e1000_82545_rev_3; + break; + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + hw->mac_type = e1000_82546; + break; + case E1000_DEV_ID_82546GB_COPPER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + hw->mac_type = e1000_82546_rev_3; + break; + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER_LOM: + hw->mac_type = e1000_82541; + break; + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + hw->mac_type = e1000_82541_rev_2; + break; + case E1000_DEV_ID_82547EI: + case E1000_DEV_ID_82547EI_MOBILE: + hw->mac_type = e1000_82547; + break; + case E1000_DEV_ID_82547GI: + hw->mac_type = e1000_82547_rev_2; + break; + case E1000_DEV_ID_INTEL_CE4100_GBE: + hw->mac_type = e1000_ce4100; + break; + default: + /* Should never have loaded on this device */ + return -E1000_ERR_MAC_TYPE; + } + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->asf_firmware_present = true; + break; + default: + break; + } + + /* The 82543 chip does not count tx_carrier_errors properly in + * FD mode + */ + if (hw->mac_type == e1000_82543) + hw->bad_tx_carr_stats_fd = true; + + if (hw->mac_type > e1000_82544) + hw->has_smbus = true; + + return E1000_SUCCESS; +} + +/** + * e1000_set_media_type - Set media type and TBI compatibility. 
+ * @hw: Struct containing variables accessed by shared code + */ +void e1000_set_media_type(struct e1000_hw *hw) +{ + u32 status; + + if (hw->mac_type != e1000_82543) { + /* tbi_compatibility is only valid on 82543 */ + hw->tbi_compatibility_en = false; + } + + switch (hw->device_id) { + case E1000_DEV_ID_82545GM_SERDES: + case E1000_DEV_ID_82546GB_SERDES: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->media_type = e1000_media_type_fiber; + break; + case e1000_ce4100: + hw->media_type = e1000_media_type_copper; + break; + default: + status = er32(STATUS); + if (status & E1000_STATUS_TBIMODE) { + hw->media_type = e1000_media_type_fiber; + /* tbi_compatibility not valid on fiber */ + hw->tbi_compatibility_en = false; + } else { + hw->media_type = e1000_media_type_copper; + } + break; + } + } +} + +/** + * e1000_reset_hw - reset the hardware completely + * @hw: Struct containing variables accessed by shared code + * + * Reset the transmit and receive units; mask and clear all interrupts. + */ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 ctrl_ext; + u32 manc; + u32 led_ctrl; + s32 ret_val; + + /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC with + * the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(); + + /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ + hw->tbi_compatibility_on = false; + + /* Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msleep(10); + + ctrl = er32(CTRL); + + /* Must reset the PHY before resetting the MAC */ + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Issue a global reset to the MAC. This will reset the chip's + * transmit, receive, DMA, and link units. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + e_dbg("Issuing a global reset to MAC\n"); + + switch (hw->mac_type) { + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82546: + case e1000_82541: + case e1000_82541_rev_2: + /* These controllers can't ack the 64-bit write when issuing the + * reset, so use IO-mapping as a workaround to issue the reset + */ + E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); + break; + case e1000_82545_rev_3: + case e1000_82546_rev_3: + /* Reset is performed on a shadow of the control register */ + ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); + break; + case e1000_ce4100: + default: + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + break; + } + + /* After MAC reset, force reload of EEPROM to restore power-on settings + * to device. Later controllers reload the EEPROM automatically, so + * just wait for reload to complete. 
+ */ + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* Wait for reset to complete */ + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + /* Wait for EEPROM reload */ + msleep(2); + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + /* Wait for EEPROM reload */ + msleep(20); + break; + default: + /* Auto read done will delay 5ms or poll based on mac type */ + ret_val = e1000_get_auto_rd_done(hw); + if (ret_val) + return ret_val; + break; + } + + /* Disable HW ARPs on ASF enabled adapters */ + if (hw->mac_type >= e1000_82540) { + manc = er32(MANC); + manc &= ~(E1000_MANC_ARP_EN); + ew32(MANC, manc); + } + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + e1000_phy_init_script(hw); + + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Clear any pending interrupt events. */ + er32(ICR); + + /* If MWI was previously enabled, reenable it. */ + if (hw->mac_type == e1000_82542_rev2_0) { + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw - Performs basic configuration of the adapter. + * @hw: Struct containing variables accessed by shared code + * + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes the receive address registers, + * multicast table, and VLAN filter table. Calls routines to setup link + * configuration and flow control settings. Clears all on-chip counters. Leaves + * the transmit and receive units disabled and uninitialized. + */ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + s32 ret_val; + u32 mta_size; + u32 ctrl_ext; + + /* Initialize Identification LED */ + ret_val = e1000_id_led_init(hw); + if (ret_val) { + e_dbg("Error Initializing Identification LED\n"); + return ret_val; + } + + /* Set the media type and TBI compatibility */ + e1000_set_media_type(hw); + + /* Disabling VLAN filtering. */ + e_dbg("Initializing the IEEE VLAN\n"); + if (hw->mac_type < e1000_82545_rev_3) + ew32(VET, 0); + e1000_clear_vfta(hw); + + /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + ew32(RCTL, E1000_RCTL_RST); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Setup the receive address. This involves initializing all of the + * Receive Address Registers (RARs 0 - 15). 
+ */ + e1000_init_rx_addrs(hw); + + /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ + if (hw->mac_type == e1000_82542_rev2_0) { + ew32(RCTL, 0); + E1000_WRITE_FLUSH(); + msleep(1); + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + mta_size = E1000_MC_TBL_SIZE; + for (i = 0; i < mta_size; i++) { + E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); + /* use write flush to prevent Memory Write Block (MWB) from + * occurring when accessing our register space + */ + E1000_WRITE_FLUSH(); + } + + /* Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. Valid only on + * 82542 and 82543 silicon. + */ + if (hw->dma_fairness && hw->mac_type <= e1000_82543) { + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PRIOR); + } + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + /* Workaround for PCI-X problem when BIOS sets MMRBC + * incorrectly. + */ + if (hw->bus_type == e1000_bus_type_pcix && + e1000_pcix_get_mmrbc(hw) > 2048) + e1000_pcix_set_mmrbc(hw, 2048); + break; + } + + /* Call a subroutine to configure the link and setup flow control. */ + ret_val = e1000_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + if (hw->mac_type > e1000_82544) { + ctrl = er32(TXDCTL); + ctrl = + (ctrl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + ew32(TXDCTL, ctrl); + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs(hw); + + if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || + hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { + ctrl_ext = er32(CTRL_EXT); + /* Relaxed ordering must be disabled to avoid a parity + * error crash in a PCI slot. + */ + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + } + + return ret_val; +} + +/** + * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting. + * @hw: Struct containing variables accessed by shared code. + */ +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) +{ + u16 eeprom_data; + s32 ret_val; + + if (hw->media_type != e1000_media_type_internal_serdes) + return E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, + &eeprom_data); + if (ret_val) + return ret_val; + + if (eeprom_data != EEPROM_RESERVED_WORD) { + /* Adjust SERDES output amplitude only. */ + eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link - Configures flow control and link settings. + * @hw: Struct containing variables accessed by shared code + * + * Determines which flow control settings to use. Calls the appropriate media- + * specific link configuration function. Configures the flow control settings. + * Assuming the adapter has a valid link partner, a valid link should be + * established. Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. 
+ */ +s32 e1000_setup_link(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 ret_val; + u16 eeprom_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->fc == E1000_FC_DEFAULT) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0) + hw->fc = E1000_FC_NONE; + else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == + EEPROM_WORD0F_ASM_DIR) + hw->fc = E1000_FC_TX_PAUSE; + else + hw->fc = E1000_FC_FULL; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + if (hw->mac_type == e1000_82542_rev2_0) + hw->fc &= (~E1000_FC_TX_PAUSE); + + if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1)) + hw->fc &= (~E1000_FC_RX_PAUSE); + + hw->original_fc = hw->fc; + + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc); + + /* Take the 4 bits from EEPROM word 0x0F that determine the initial + * polarity value for the SW controlled pins, and setup the + * Extended Device Control reg with that info. + * This is needed because one of the SW controlled pins is used for + * signal detection. So this should be done before e1000_setup_pcs_link() + * or e1000_phy_setup() is called. + */ + if (hw->mac_type == e1000_82543) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << + SWDPIO__EXT_SHIFT); + ew32(CTRL_EXT, ctrl_ext); + } + + /* Call the necessary subroutine to configure the link. */ + ret_val = (hw->media_type == e1000_media_type_copper) ? + e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw); + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + e_dbg("Initializing the Flow Control address, type and timer regs\n"); + + ew32(FCT, FLOW_CONTROL_TYPE); + ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); + ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); + + ew32(FCTTV, hw->fc_pause_time); + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames in not enabled, then these + * registers will be set to 0. + */ + if (!(hw->fc & E1000_FC_TX_PAUSE)) { + ew32(FCRTL, 0); + ew32(FCRTH, 0); + } else { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. 
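+ *
+ * A minimal sketch of how a caller might derive these thresholds from the
+ * receive FIFO size (rx_fifo_size, max_frame and the concrete numbers are
+ * only assumptions for the example; the companion driver chooses its own
+ * policy before calling e1000_setup_link()):
+ *
+ *   u32 rx_fifo_size = 48 * 1024;                   /* example FIFO size */
+ *   hw->fc_high_water = rx_fifo_size - max_frame;   /* send XOFF above this */
+ *   hw->fc_low_water  = hw->fc_high_water - 8;      /* send XON when drained */
+ *   hw->fc_pause_time = 0xFFFF;                     /* longest pause quanta */
+ *   hw->fc_send_xon   = 1;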
+ */
+ if (hw->fc_send_xon) {
+ ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
+ ew32(FCRTH, hw->fc_high_water);
+ } else {
+ ew32(FCRTL, hw->fc_low_water);
+ ew32(FCRTH, hw->fc_high_water);
+ }
+ return ret_val;
+}
+
+/**
+ * e1000_setup_fiber_serdes_link - prepare fiber or serdes link
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Manipulates Physical Coding Sublayer functions in order to configure
+ * link. Assumes the hardware has been previously reset and the transmitter
+ * and receiver are not enabled.
+ */
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 status;
+ u32 txcw = 0;
+ u32 i;
+ u32 signal = 0;
+ s32 ret_val;
+
+ /* On adapters with a MAC newer than 82544, SWDP 1 will be
+ * set when the optics detect a signal. On older adapters, it will be
+ * cleared when there is a signal. This applies to fiber media only.
+ * If we're on serdes media, adjust the output amplitude to value
+ * set in the EEPROM.
+ */
+ ctrl = er32(CTRL);
+ if (hw->media_type == e1000_media_type_fiber)
+ signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+
+ ret_val = e1000_adjust_serdes_amplitude(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Take the link out of reset */
+ ctrl &= ~(E1000_CTRL_LRST);
+
+ /* Adjust VCO speed to improve BER performance */
+ ret_val = e1000_set_vco_speed(hw);
+ if (ret_val)
+ return ret_val;
+
+ e1000_config_collision_dist(hw);
+
+ /* Check for a software override of the flow control settings, and setup
+ * the device accordingly. If auto-negotiation is enabled, then
+ * software will have to set the "PAUSE" bits to the correct value in
+ * the Transmit Config Word Register (TXCW) and re-start
+ * auto-negotiation. However, if auto-negotiation is disabled, then
+ * software will have to manually configure the two flow control enable
+ * bits in the CTRL register.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames, but
+ * not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we do
+ * not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ */
+ switch (hw->fc) {
+ case E1000_FC_NONE:
+ /* Flow ctrl is completely disabled by a software over-ride */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+ break;
+ case E1000_FC_RX_PAUSE:
+ /* Rx Flow control is enabled and Tx Flow control is disabled by
+ * a software over-ride. Since there really isn't a way to
+ * advertise that we are capable of Rx Pause ONLY, we will
+ * advertise that we support both symmetric and asymmetric Rx
+ * PAUSE. Later, we will disable the adapter's ability to send
+ * PAUSE frames.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ case E1000_FC_TX_PAUSE:
+ /* Tx Flow control is enabled, and Rx Flow control is disabled,
+ * by a software over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+ break;
+ case E1000_FC_FULL:
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ default:
+ e_dbg("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Since auto-negotiation is enabled, take the link out of reset (the
+ * link will be in reset, because we previously reset the chip). This
+ * will restart auto-negotiation.
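+ *
+ * For reference, the switch above boils down to the following TXCW
+ * advertisement (E1000_TXCW_ANE and E1000_TXCW_FD are set in every case;
+ * only the pause bits differ):
+ *
+ *   hw->fc              extra TXCW bits advertised
+ *   ------------------  ------------------------------------------
+ *   E1000_FC_NONE       none
+ *   E1000_FC_RX_PAUSE   E1000_TXCW_PAUSE_MASK (PAUSE and ASM_DIR)
+ *   E1000_FC_TX_PAUSE   E1000_TXCW_ASM_DIR
+ *   E1000_FC_FULL       E1000_TXCW_PAUSE_MASK (PAUSE and ASM_DIR)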
If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + e_dbg("Auto-negotiation enabled\n"); + + ew32(TXCW, txcw); + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + hw->txcw = txcw; + msleep(1); + + /* If we have a signal (the cable is plugged in) then poll for a + * "Link-Up" indication in the Device Status Register. Time-out if a + * link isn't seen in 500 milliseconds seconds (Auto-negotiation should + * complete in less than 500 milliseconds even if the other end is doing + * it in SW). For internal serdes, we just assume a signal is present, + * then poll. + */ + if (hw->media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { + e_dbg("Looking for Link\n"); + for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { + msleep(10); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == (LINK_UP_TIMEOUT / 10)) { + e_dbg("Never got a valid link from auto-neg!!!\n"); + hw->autoneg_failed = 1; + /* AutoNeg failed to achieve a link, so we'll call + * e1000_check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = e1000_check_for_link(hw); + if (ret_val) { + e_dbg("Error while checking for link\n"); + return ret_val; + } + hw->autoneg_failed = 0; + } else { + hw->autoneg_failed = 0; + e_dbg("Valid Link Found\n"); + } + } else { + e_dbg("No Signal Detected\n"); + } + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series. + * @hw: Struct containing variables accessed by shared code + * + * Commits changes to PHY configuration by calling e1000_phy_reset(). + */ +static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw) +{ + s32 ret_val; + + /* SW reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +static s32 gbe_dhg_phy_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u32 ctrl_aux; + + switch (hw->phy_type) { + case e1000_phy_8211: + ret_val = e1000_copper_link_rtl_setup(hw); + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + case e1000_phy_8201: + /* Set RMII mode */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= E1000_CTL_AUX_RMII; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + + /* Disable the J/K bits required for receive */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= 0x4; + ctrl_aux &= ~0x2; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + ret_val = e1000_copper_link_rtl_setup(hw); + + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + default: + e_dbg("Error Resetting the PHY\n"); + return E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_preconfig - early configuration for copper + * @hw: Struct containing variables accessed by shared code + * + * Make sure we have a valid PHY and change PHY mode before link setup. + */ +static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + ctrl = er32(CTRL); + /* With 82543, we need to force speed and duplex on the MAC equal to + * what the PHY speed and duplex configuration is. In addition, we need + * to perform a hardware reset on the PHY to take it out of reset. 
+ */ + if (hw->mac_type > e1000_82543) { + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + } else { + ctrl |= + (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); + ew32(CTRL, ctrl); + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + } + + /* Make sure we have a valid PHY */ + ret_val = e1000_detect_gig_phy(hw); + if (ret_val) { + e_dbg("Error, did not detect valid phy.\n"); + return ret_val; + } + e_dbg("Phy ID = %x\n", hw->phy_id); + + /* Set PHY to class A mode (if necessary) */ + ret_val = e1000_set_phy_mode(hw); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82545_rev_3) || + (hw->mac_type == e1000_82546_rev_3)) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + phy_data |= 0x00000008; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + } + + if (hw->mac_type <= e1000_82543 || + hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_reset_disable = false; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) +{ + u32 led_ctrl; + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Wait 15ms for MAC to configure PHY from eeprom settings */ + msleep(15); + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + + /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ + if (hw->phy_type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = e1000_set_d3_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D3\n"); + return ret_val; + } + } + + /* Configure mdi-mdix settings */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + hw->dsp_config_state = e1000_dsp_config_disabled; + /* Force MDI for earlier revs of the IGP PHY */ + phy_data &= + ~(IGP01E1000_PSCR_AUTO_MDIX | + IGP01E1000_PSCR_FORCE_MDI_MDIX); + hw->mdix = 1; + + } else { + hw->dsp_config_state = e1000_dsp_config_enabled; + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (hw->mdix) { + case 1: + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + phy_data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + } + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->autoneg) { + e1000_ms_type phy_ms_setting = hw->master_slave; + + if (hw->ffe_config_state == e1000_ffe_config_active) + hw->ffe_config_state = e1000_ffe_config_enabled; + + if (hw->dsp_config_state == e1000_dsp_config_activated) + hw->dsp_config_state = e1000_dsp_config_enabled; + + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
+ */ + if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + /* Set auto Master/Slave resolution process */ + ret_val = + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~CR_1000T_MS_ENABLE; + ret_val = + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (phy_ms_setting) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + break; + default: + break; + } + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (hw->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (hw->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (hw->phy_revision < M88E1011_I_REV_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. 
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((hw->phy_revision == E1000_REVISION_2) && + (hw->phy_id == M88E1111_I_PHY_ID)) { + /* Vidalia Phy, set the downshift counter to 5x */ + phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + } + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_autoneg - setup auto-neg + * @hw: Struct containing variables accessed by shared code + * + * Setup auto-negotiation and flow control advertisements, + * and then perform auto-negotiation. + */ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Perform some bounds checking on the hw->autoneg_advertised + * parameter. If this variable is zero, then set it to the default. + */ + hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (hw->autoneg_advertised == 0) + hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* IFE/RTL8201N PHY only supports 10/100 */ + if (hw->phy_type == e1000_phy_8201) + hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; + + e_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + e_dbg("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + e_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (hw->wait_autoneg_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + e_dbg + ("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->get_link_status = true; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_postconfig - post link setup + * @hw: Struct containing variables accessed by shared code + * + * Config the MAC and the PHY after link is up. + * 1) Set up the MAC to the current PHY speed/duplex + * if we are on 82543. If we + * are on newer silicon, we only need to configure + * collision distance in the Transmit Control Register. + * 2) Set up flow control on the MAC to that established with + * the link partner. + * 3) Config DSP to improve Gigabit link quality for some PHY revisions. 
+ */ +static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) +{ + s32 ret_val; + + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { + e1000_config_collision_dist(hw); + } else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error Configuring Flow Control\n"); + return ret_val; + } + + /* Config DSP to improve Giga link quality */ + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_config_dsp_after_link_change(hw, true); + if (ret_val) { + e_dbg("Error Configuring DSP after link up\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_copper_link - phy/speed/duplex setting + * @hw: Struct containing variables accessed by shared code + * + * Detects which PHY is present and sets up the speed and duplex + */ +static s32 e1000_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + /* Check if it is a valid PHY and set PHY mode if necessary. */ + ret_val = e1000_copper_link_preconfig(hw); + if (ret_val) + return ret_val; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_copper_link_igp_setup(hw); + if (ret_val) + return ret_val; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_copper_link_mgp_setup(hw); + if (ret_val) + return ret_val; + } else { + ret_val = gbe_dhg_phy_setup(hw); + if (ret_val) { + e_dbg("gbe_dhg_phy_setup failed!\n"); + return ret_val; + } + } + + if (hw->autoneg) { + /* Setup autoneg and flow control advertisement + * and perform autonegotiation + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H,or 100F + * depending on value from forced_speed_duplex. + */ + e_dbg("Forcing speed and duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + for (i = 0; i < 10; i++) { + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + /* Config the MAC and PHY after link is up */ + ret_val = e1000_copper_link_postconfig(hw); + if (ret_val) + return ret_val; + + e_dbg("Valid link established!!!\n"); + return E1000_SUCCESS; + } + udelay(10); + } + + e_dbg("Unable to establish link!!!\n"); + return E1000_SUCCESS; +} + +/** + * e1000_phy_setup_autoneg - phy settings + * @hw: Struct containing variables accessed by shared code + * + * Configures PHY autoneg and flow control advertisement settings + */ +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + else if (hw->phy_type == e1000_phy_8201) + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. 
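+ *
+ * As a concrete example of that parsing: a caller requesting
+ * hw->autoneg_advertised = ADVERTISE_100_FULL | ADVERTISE_1000_FULL
+ * ends up setting only the matching capability bits,
+ *
+ *   mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;  /* 100BASE-TX FD */
+ *   mii_1000t_ctrl_reg  |= CR_1000T_FD_CAPS;       /* 1000BASE-T FD */
+ *
+ * while the remaining speed bits in both registers stay cleared.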
First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { + e_dbg + ("Advertise 1000mb Half duplex requested, request denied!\n"); + } + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start + * auto-negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc) { + case E1000_FC_NONE: /* 0 */ + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_RX_PAUSE: /* 1 */ + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + */ + /* Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_TX_PAUSE: /* 2 */ + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case E1000_FC_FULL: /* 3 */ + /* Flow control (both RX and TX) is enabled by a software + * over-ride. 
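+ *
+ * Summarized, the four cases of this switch advertise the following
+ * MII pause bits:
+ *
+ *   hw->fc              NWAY_AR_PAUSE  NWAY_AR_ASM_DIR
+ *   ------------------  -------------  ---------------
+ *   E1000_FC_NONE             0               0
+ *   E1000_FC_RX_PAUSE         1               1
+ *   E1000_FC_TX_PAUSE         0               1
+ *   E1000_FC_FULL             1               1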
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (hw->phy_type == e1000_phy_8201) { + mii_1000t_ctrl_reg = 0; + } else { + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex - force link settings + * @hw: Struct containing variables accessed by shared code + * + * Force PHY speed and duplex settings to hw->forced_speed_duplex + */ +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 mii_ctrl_reg; + u16 mii_status_reg; + u16 phy_data; + u16 i; + + /* Turn off Flow control if we are forcing speed and duplex. */ + hw->fc = E1000_FC_NONE; + + e_dbg("hw->fc = %d\n", hw->fc); + + /* Read the Device Control Register. */ + ctrl = er32(CTRL); + + /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(DEVICE_SPEED_MASK); + + /* Clear the Auto Speed Detect Enable bit. */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Read the MII Control Register. */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg); + if (ret_val) + return ret_val; + + /* We need to disable autoneg in order to force link and duplex. */ + + mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN; + + /* Are we forcing Full or Half Duplex? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_10_full) { + /* We want to force full duplex so we SET the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl |= E1000_CTRL_FD; + mii_ctrl_reg |= MII_CR_FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + /* We want to force half duplex so we CLEAR the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl &= ~E1000_CTRL_FD; + mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; + e_dbg("Half Duplex\n"); + } + + /* Are we forcing 100Mbps??? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_100_half) { + /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */ + ctrl |= E1000_CTRL_SPD_100; + mii_ctrl_reg |= MII_CR_SPEED_100; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + e_dbg("Forcing 100mb "); + } else { + /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + mii_ctrl_reg |= MII_CR_SPEED_10; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + e_dbg("Forcing 10mb "); + } + + e1000_config_collision_dist(hw); + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + + if (hw->phy_type == e1000_phy_m88) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires + * MDI forced whenever speed are duplex are forced. 
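+ *
+ * As a worked example of the register set-up above: forcing 100 Mb/s full
+ * duplex (hw->forced_speed_duplex == e1000_100_full) leaves
+ *
+ *   ctrl         |= E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX |
+ *                   E1000_CTRL_FD | E1000_CTRL_SPD_100;     /* ASDE cleared */
+ *   mii_ctrl_reg |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_100;  /* autoneg off */
+ *
+ * before both values are written back below.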
+ */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %x\n", phy_data); + + /* Need to reset the PHY or these changes will be ignored */ + mii_ctrl_reg |= MII_CR_RESET; + + /* Disable MDI-X support for 10/100 */ + } else { + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed or duplex are forced. + */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + /* Write back the modified PHY MII control register. */ + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg); + if (ret_val) + return ret_val; + + udelay(1); + + /* The wait_autoneg_complete flag may be a little misleading here. + * Since we are forcing speed and duplex, Auto-Neg is not enabled. + * But we do want to delay for a period while forcing only so we + * don't generate false No Link messages. So we will wait here + * only if the user has set wait_autoneg_complete to 1, which is + * the default. + */ + if (hw->wait_autoneg_complete) { + /* We will wait for autoneg to complete. */ + e_dbg("Waiting for forced speed/duplex link.\n"); + mii_status_reg = 0; + + /* Wait for autoneg to complete or 4.5 seconds to expire */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { + /* We didn't get link. Reset the DSP and wait again + * for link. + */ + ret_val = e1000_phy_reset_dsp(hw); + if (ret_val) { + e_dbg("Error Resetting PHY DSP\n"); + return ret_val; + } + } + /* This loop will early-out if the link condition has been + * met + */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + } + } + + if (hw->phy_type == e1000_phy_m88) { + /* Because we reset the PHY above, we need to re-force TX_CLK in + * the Extended PHY Specific Control Register to 25MHz clock. + * This value defaults back to a 2.5MHz clock when the PHY is + * reset. + */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + + /* In addition, because of the s/w reset above, we need to + * enable CRS on Tx. This must be set for both full and half + * duplex operation. 
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ret_val = e1000_polarity_reversal_workaround(hw); + if (ret_val) + return ret_val; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_config_collision_dist - set collision distance register + * @hw: Struct containing variables accessed by shared code + * + * Sets the collision distance in the Transmit Control register. + * Link should have been established previously. Reads the speed and duplex + * information from the Device Status register. + */ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl, coll_dist; + + if (hw->mac_type < e1000_82543) + coll_dist = E1000_COLLISION_DISTANCE_82542; + else + coll_dist = E1000_COLLISION_DISTANCE; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= coll_dist << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_config_mac_to_phy - sync phy and mac settings + * @hw: Struct containing variables accessed by shared code + * + * Sets MAC speed and duplex settings to reflect the those in the PHY + * The contents of the PHY register containing the needed information need to + * be passed in. + */ +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + /* 82544 or newer MAC, Auto Speed Detection takes care of + * MAC speed/duplex configuration. + */ + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) + return E1000_SUCCESS; + + /* Read the Device Control Register and set the bits to Force Speed + * and Duplex. + */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); + + switch (hw->phy_type) { + case e1000_phy_8201: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & RTL_PHY_CTRL_FD) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + if (phy_data & RTL_PHY_CTRL_SPD_100) + ctrl |= E1000_CTRL_SPD_100; + else + ctrl |= E1000_CTRL_SPD_10; + + e1000_config_collision_dist(hw); + break; + default: + /* Set up duplex in the Device Control and Transmit Control + * registers depending on negotiated values. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & M88E1000_PSSR_DPLX) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + e1000_config_collision_dist(hw); + + /* Set up speed in the Device Control register depending on + * negotiated values. + */ + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + ctrl |= E1000_CTRL_SPD_1000; + else if ((phy_data & M88E1000_PSSR_SPEED) == + M88E1000_PSSR_100MBS) + ctrl |= E1000_CTRL_SPD_100; + } + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_force_mac_fc - force flow control settings + * @hw: Struct containing variables accessed by shared code + * + * Forces the MAC's flow control settings. + * Sets the TFCE and RFCE bits in the device control register to reflect + * the adapter settings. 
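+ *
+ * For example, with hw->fc == E1000_FC_RX_PAUSE the routine below leaves
+ * E1000_CTRL_RFCE set and E1000_CTRL_TFCE cleared, so the MAC honours
+ * PAUSE frames it receives but never transmits any.
+ *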
TFCE and RFCE need to be explicitly set by
+ * software when a Copper PHY is used because autonegotiation is managed
+ * by the PHY rather than the MAC. Software must also configure these
+ * bits when link is forced on a fiber connection.
+ */
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ /* Get the current configuration of the Device Control Register */
+ ctrl = er32(CTRL);
+
+ /* Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disables flow control
+ * according to the "hw->fc" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not receive pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * other: No other values should be possible at this point.
+ */
+
+ switch (hw->fc) {
+ case E1000_FC_NONE:
+ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+ break;
+ case E1000_FC_RX_PAUSE:
+ ctrl &= (~E1000_CTRL_TFCE);
+ ctrl |= E1000_CTRL_RFCE;
+ break;
+ case E1000_FC_TX_PAUSE:
+ ctrl &= (~E1000_CTRL_RFCE);
+ ctrl |= E1000_CTRL_TFCE;
+ break;
+ case E1000_FC_FULL:
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+ e_dbg("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Disable TX Flow Control for 82542 (rev 2.0) */
+ if (hw->mac_type == e1000_82542_rev2_0)
+ ctrl &= (~E1000_CTRL_TFCE);
+
+ ew32(CTRL, ctrl);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_config_fc_after_link_up - configure flow control after autoneg
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Configures flow control settings after link is established.
+ * Should be called immediately after a valid link has been established.
+ * Forces MAC flow control settings if link was forced. When in MII/GMII mode
+ * and autonegotiation is enabled, the MAC flow control settings will be set
+ * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
+ * and RFCE bits will be automatically set to the negotiated flow control mode.
+ */
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_status_reg;
+ u16 mii_nway_adv_reg;
+ u16 mii_nway_lp_ability_reg;
+ u16 speed;
+ u16 duplex;
+
+ /* Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if (((hw->media_type == e1000_media_type_fiber) &&
+ (hw->autoneg_failed)) ||
+ ((hw->media_type == e1000_media_type_internal_serdes) &&
+ (hw->autoneg_failed)) ||
+ ((hw->media_type == e1000_media_type_copper) &&
+ (!hw->autoneg))) {
+ ret_val = e1000_force_mac_fc(hw);
+ if (ret_val) {
+ e_dbg("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner have
+ * flow control configured.
+ */
+ if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed.
We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement Register + * (Address 4) and the Auto_Negotiation Base Page + * Ability Register (Address 5) to determine how flow + * control was negotiated. + */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement + * Register (Address 4) and two bits in the Auto + * Negotiation Base Page Ability Register (Address 5) + * determine flow control for both the PHY and the link + * partner. The following table, taken out of the IEEE + * 802.3ab/D6.0 dated March 25, 1999, describes these + * PAUSE resolution bits and how flow control is + * determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|------------------ + * 0 | 0 | DC | DC | E1000_FC_NONE + * 0 | 1 | 0 | DC | E1000_FC_NONE + * 0 | 1 | 1 | 0 | E1000_FC_NONE + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * 1 | 0 | 0 | DC | E1000_FC_NONE + * 1 | DC | 1 | DC | E1000_FC_FULL + * 1 | 1 | 0 | 0 | E1000_FC_NONE + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + /* Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | DC | 1 | DC | E1000_FC_FULL + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected Rx + * ONLY of pause frames. In this case, we had + * to advertise FULL flow control because we + * could not advertise Rx ONLY. Hence, we must + * now check to see if we need to turn OFF the + * TRANSMISSION of PAUSE frames. + */ + if (hw->original_fc == E1000_FC_FULL) { + hw->fc = E1000_FC_FULL; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = E1000_FC_TX_PAUSE; + e_dbg + ("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should + * be disabled. However, we want to consider that we + * could be connected to a legacy switch that doesn't + * advertise desired flow control, but can be forced on + * the link partner. So if we advertised no flow + * control, that is what we will resolve to. If we + * advertised some kind of receive capability (Rx Pause + * Only or Full Flow Control) and the link partner + * advertised none, we will configure ourselves to + * enable Rx Flow Control only. We can do this safely + * for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, + * no harm done since we won't be receiving any PAUSE + * frames anyway. If the intent on the link partner was + * to have flow control enabled, then by us enabling Rx + * only, we can at least receive pause frames and + * process them. This is a good idea because in most + * cases, since we are predominantly a server NIC, more + * times than not we will be asked to delay transmission + * of packets than asking our link partner to pause + * transmission of frames. + */ + else if ((hw->original_fc == E1000_FC_NONE || + hw->original_fc == E1000_FC_TX_PAUSE) || + hw->fc_strict_ieee) { + hw->fc = E1000_FC_NONE; + e_dbg("Flow Control = NONE.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc = E1000_FC_NONE; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg + ("Error forcing flow control settings\n"); + return ret_val; + } + } else { + e_dbg + ("Copper PHY and Auto Neg has not completed.\n"); + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + */ +static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. 
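+ *
+ * The checks below resolve, roughly, to the following cases (a summary of
+ * the logic that follows, not additional behaviour):
+ *
+ *   !STATUS.LU and no /C/ received  ->  first pass only records the
+ *                                       failure; afterwards force link
+ *                                       (SLU | FD) and drop ANE from TXCW
+ *   CTRL.SLU set and /C/ received   ->  partner negotiates again: restore
+ *                                       TXCW and stop forcing link
+ *   TXCW.ANE clear (forced link)    ->  use RXCW SYNCH/IV to decide
+ *                                       whether serdes_has_link is set
+ *   TXCW.ANE set                    ->  with STATUS.LU, recheck RXCW
+ *                                       SYNCH/IV to confirm the autoneg
+ *                                       result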
+ */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (hw->autoneg_failed == 0) { + hw->autoneg_failed = 1; + goto out; + } + e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, hw->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + hw->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - forced.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - autoneg " + "completed successfully.\n"); + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - invalid" + "codewords detected in autoneg.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - no sync.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - autoneg failed\n"); + } + } + + out: + return ret_val; +} + +/** + * e1000_check_for_link + * @hw: Struct containing variables accessed by shared code + * + * Checks to see if the link status of the hardware has changed. + * Called by any function that needs to check the link status of the adapter. + */ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + u32 status; + u32 rctl; + u32 icr; + s32 ret_val; + u16 phy_data; + + er32(CTRL); + status = er32(STATUS); + + /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + */ + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) { + er32(RXCW); + + if (hw->media_type == e1000_media_type_fiber) { + if (status & E1000_STATUS_LU) + hw->get_link_status = false; + } + } + + /* If we have a copper PHY then we only want to go out to the PHY + * registers to see if Auto-Neg has completed and/or if our link + * status has changed. The get_link_status flag will be set if we + * receive a Link Status Change interrupt or we have Rx Sequence + * Errors. 
+ */ + if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + * Read the register twice since the link bit is sticky. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + hw->get_link_status = false; + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift(hw); + + /* If we are on 82544 or 82543 silicon and speed/duplex + * are forced to 10H or 10F, then we will implement the + * polarity reversal workaround. We disable interrupts + * first, and upon returning, place the devices + * interrupt state to its previous value except for the + * link status change interrupt which will + * happen due to the execution of this workaround. + */ + + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ew32(IMC, 0xffffffff); + ret_val = + e1000_polarity_reversal_workaround(hw); + icr = er32(ICR); + ew32(ICS, (icr & ~E1000_ICS_LSC)); + ew32(IMS, IMS_ENABLE_MASK); + } + + } else { + /* No link detected */ + e1000_config_dsp_after_link_change(hw, false); + return 0; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!hw->autoneg) + return -E1000_ERR_CONFIG; + + /* optimize the dsp settings for the igp phy */ + e1000_config_dsp_after_link_change(hw, true); + + /* We have a M88E1000 PHY and Auto-Neg is enabled. If we + * have Si on board that is 82544 or newer, Auto + * Speed Detection takes care of MAC speed/duplex + * configuration. So we only need to configure Collision + * Distance in the MAC. Otherwise, we need to force + * speed/duplex on the MAC to the current PHY speed/duplex + * settings. + */ + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_ce4100)) + e1000_config_collision_dist(hw); + else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg + ("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control settings + * because we may have had to re-autoneg with a different link + * partner. + */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + + /* At this point we know that we are on copper and we have + * auto-negotiated link. These are conditions for checking the + * link partner capability register. We use the link speed to + * determine if TBI compatibility needs to be turned on or off. + * If the link is not at gigabit speed, then TBI compatibility + * is not needed. If we are at gigabit speed, we turn on TBI + * compatibility. + */ + if (hw->tbi_compatibility_en) { + u16 speed, duplex; + + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + if (speed != SPEED_1000) { + /* If link speed is not set to gigabit speed, we + * do not need to enable TBI compatibility. 
+ */ + if (hw->tbi_compatibility_on) { + /* If we previously were in the mode, + * turn it off. + */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_SBP; + ew32(RCTL, rctl); + hw->tbi_compatibility_on = false; + } + } else { + /* If TBI compatibility is was previously off, + * turn it on. For compatibility with a TBI link + * partner, we will store bad packets. Some + * frames have an additional byte on the end and + * will look like CRC errors to the hardware. + */ + if (!hw->tbi_compatibility_on) { + hw->tbi_compatibility_on = true; + rctl = er32(RCTL); + rctl |= E1000_RCTL_SBP; + ew32(RCTL, rctl); + } + } + } + } + + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) + e1000_check_for_serdes_link_generic(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex + * @hw: Struct containing variables accessed by shared code + * @speed: Speed of the connection + * @duplex: Duplex setting of the connection + * + * Detects the current speed and duplex settings of the hardware. + */ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + u32 status; + s32 ret_val; + u16 phy_data; + + if (hw->mac_type >= e1000_82543) { + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + e_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + e_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + e_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + e_dbg(" Half Duplex\n"); + } + } else { + e_dbg("1000 Mbs, Full Duplex\n"); + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + } + + /* IGP01 PHY may advertise full duplex operation after speed downgrade + * even if it is operating at half duplex. Here we set the duplex + * settings to match the duplex in the link partner's capabilities. + */ + if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & NWAY_ER_LP_NWAY_CAPS)) + *duplex = HALF_DUPLEX; + else { + ret_val = + e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); + if (ret_val) + return ret_val; + if ((*speed == SPEED_100 && + !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) || + (*speed == SPEED_10 && + !(phy_data & NWAY_LPAR_10T_FD_CAPS))) + *duplex = HALF_DUPLEX; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_wait_autoneg + * @hw: Struct containing variables accessed by shared code + * + * Blocks until autoneg completes or times out (~4.5 seconds) + */ +static s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + e_dbg("Waiting for Auto-Neg to complete.\n"); + + /* We will wait for autoneg to complete or 4.5 seconds to expire. */ + for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. 
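+ *
+ * The back-to-back reads below are the usual idiom for these latched
+ * status bits. As a stand-alone sketch (a hypothetical helper, not one
+ * provided by this file):
+ *
+ *   static s32 e1000_autoneg_done(struct e1000_hw *hw, bool *done)
+ *   {
+ *           u16 bmsr;
+ *           s32 ret = e1000_read_phy_reg(hw, PHY_STATUS, &bmsr);
+ *
+ *           if (!ret)                /* second read returns live status */
+ *                   ret = e1000_read_phy_reg(hw, PHY_STATUS, &bmsr);
+ *           *done = !ret && (bmsr & MII_SR_AUTONEG_COMPLETE);
+ *           return ret;
+ *   }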
+ */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + if (phy_data & MII_SR_AUTONEG_COMPLETE) + return E1000_SUCCESS; + + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_raise_mdi_clk - Raises the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Raise the clock input to the Management Data Clock (by setting the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_lower_mdi_clk - Lowers the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Lower the clock input to the Management Data Clock (by clearing the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY + * @hw: Struct containing variables accessed by shared code + * @data: Data to send out to the PHY + * @count: Number of bits to shift out + * + * Bits are shifted out in MSB to LSB order. + */ +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) +{ + u32 ctrl; + u32 mask; + + /* We need to shift "count" number of bits out to the PHY. So, the value + * in the "data" parameter will be shifted out to the PHY one bit at a + * time. In order to do this, "data" must be broken down into bits. + */ + mask = 0x01; + mask <<= (count - 1); + + ctrl = er32(CTRL); + + /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ + ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); + + while (mask) { + /* A "1" is shifted out to the PHY by setting the MDIO bit to + * "1" and then raising and lowering the Management Data Clock. + * A "0" is shifted out to the PHY by setting the MDIO bit to + * "0" and then raising and lowering the clock. + */ + if (data & mask) + ctrl |= E1000_CTRL_MDIO; + else + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + udelay(10); + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + mask = mask >> 1; + } +} + +/** + * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY + * @hw: Struct containing variables accessed by shared code + * + * Bits are shifted in MSB to LSB order. + */ +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) +{ + u32 ctrl; + u16 data = 0; + u8 i; + + /* In order to read a register from the PHY, we need to shift in a total + * of 18 bits from the PHY. The first two bit (turnaround) times are + * used to avoid contention on the MDIO pin when a read operation is + * performed. These two bits are ignored by us and thrown away. Bits are + * "shifted in" by raising the input to the Management Data Clock + * (setting the MDC bit), and then reading the value of the MDIO bit. + */ + ctrl = er32(CTRL); + + /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as + * input. + */ + ctrl &= ~E1000_CTRL_MDIO_DIR; + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + /* Raise and Lower the clock before reading in the data. This accounts + * for the turnaround bits. 
The first clock occurred when we clocked out + * the last bit of the Register Address. + */ + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + for (data = 0, i = 0; i < 16; i++) { + data = data << 1; + e1000_raise_mdi_clk(hw, &ctrl); + ctrl = er32(CTRL); + /* Check to see if we shifted in a "1". */ + if (ctrl & E1000_CTRL_MDIO) + data |= 1; + e1000_lower_mdi_clk(hw, &ctrl); + } + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + return data; +} + +/** + * e1000_read_phy_reg - read a phy register + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to read + * @phy_data: pointer to the value on the PHY register + * + * Reads the value from a PHY register, if the value is on a specific non zero + * page, sets the page first. + */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16) reg_addr); + if (ret_val) + goto out; + } + + ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); +out: + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, and register address in the MDI + * Control register. The MAC will take care of interfacing with + * the PHY to retrieve the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_READ) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + + mdic = readl(E1000_MDIO_STS); + if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) { + e_dbg("MDI Read Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16)mdic; + } else { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16)mdic; + } + } else { + /* We must first send a preamble through the MDIO pin to signal + * the beginning of an MII instruction. This is done by sending + * 32 consecutive "1" bits. + */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the next few fields that are required for a read + * operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine five different times. 
The + * format of a MII read instruction consists of a shift out of + * 14 bits and is defined as follows: + * + * followed by a shift in of 18 bits. This first two bits + * shifted in are TurnAround bits used to avoid contention on + * the MDIO pin when a READ operation is performed. These two + * bits are thrown away followed by a shift in of 16 bits which + * contains the desired data. + */ + mdic = ((reg_addr) | (phy_addr << 5) | + (PHY_OP_READ << 10) | (PHY_SOF << 12)); + + e1000_shift_out_mdi_bits(hw, mdic, 14); + + /* Now that we've shifted out the read command to the MII, we + * need to "shift in" the 16-bit value (18 total bits) of the + * requested PHY register address. + */ + *phy_data = e1000_shift_in_mdi_bits(hw); + } + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg - write a phy register + * + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to write + * @phy_data: data to write to the PHY + * + * Writes a value to a PHY register + */ +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16)reg_addr); + if (ret_val) { + spin_unlock_irqrestore(&e1000_phy_lock, flags); + return ret_val; + } + } + + ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, register address, and data + * intended for the PHY register in the MDI Control register. + * The MAC will take care of interfacing with the PHY to send + * the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = (((u32)phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_WRITE) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 640; i++) { + udelay(5); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } else { + mdic = (((u32)phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 641; i++) { + udelay(5); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } + } else { + /* We'll need to use the SW defined pins to shift the write + * command out to the PHY. We first send a preamble to the PHY + * to signal the beginning of the MII instruction. This is done + * by sending 32 consecutive "1" bits. 
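For orientation, the framing bit-banged in this branch (and in the read path above) is standard Clause-22 MDIO: a preamble of 32 one bits, a 14-bit command made of start-of-frame, opcode, PHY address and register address, then either 16 data bits shifted out (write) or two turnaround bit times followed by 16 data bits shifted in (read). Below is a rough, generic model of the read transaction; the three callbacks are hypothetical stand-ins for driving MDIO, sampling MDIO and cycling MDC, which in this driver correspond to the E1000_CTRL_MDIO/E1000_CTRL_MDC manipulation and the e1000_raise_mdi_clk()/e1000_lower_mdi_clk() helpers. Clock-edge and turnaround details are simplified.

struct sw_mdio {
	void (*drive)(int bit);	/* drive MDIO while it is an output */
	int (*sample)(void);	/* sample MDIO while it is an input */
	void (*clock)(void);	/* one full MDC high/low cycle */
};

static u16 sw_mdio_c22_read(const struct sw_mdio *bus, unsigned int phy,
			    unsigned int reg)
{
	/* SOF (01), opcode 10 = read, 5-bit PHY address, 5-bit register
	 * address: the same layout as the mdic word assembled above from
	 * PHY_SOF, PHY_OP_READ, phy_addr and reg_addr.
	 */
	const u32 cmd = (0x1u << 12) | (0x2u << 10) |
			((phy & 0x1f) << 5) | (reg & 0x1f);
	u16 data = 0;
	int i;

	/* Preamble: 32 consecutive "1" bits. */
	for (i = 0; i < 32; i++) {
		bus->drive(1);
		bus->clock();
	}

	/* 14 command bits, most significant bit first. */
	for (i = 13; i >= 0; i--) {
		bus->drive((cmd >> i) & 1);
		bus->clock();
	}

	/* Two turnaround bit times: the MAC releases MDIO and the PHY
	 * takes over the line.
	 */
	bus->clock();
	bus->clock();

	/* 16 data bits, most significant bit first.  The driver samples
	 * between the rising and falling MDC edges; a full-cycle callback
	 * blurs that detail.
	 */
	for (i = 0; i < 16; i++) {
		bus->clock();
		data = (u16)((data << 1) | (bus->sample() & 1));
	}

	return data;
}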
+ */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the remaining required fields that will indicate + * a write operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine for each field in the + * command. The format of a MII write instruction is as follows: + * . + */ + mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | + (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); + mdic <<= 16; + mdic |= (u32)phy_data; + + e1000_shift_out_mdi_bits(hw, mdic, 32); + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - reset the phy, hardware style + * @hw: Struct containing variables accessed by shared code + * + * Returns the PHY to the power-on reset state + */ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext; + u32 led_ctrl; + + e_dbg("Resetting Phy...\n"); + + if (hw->mac_type > e1000_82543) { + /* Read the device control register and assert the + * E1000_CTRL_PHY_RST bit. Then, take it out of reset. + * For e1000 hardware, we delay for 10ms between the assert + * and de-assert. + */ + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); + E1000_WRITE_FLUSH(); + + msleep(10); + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + } else { + /* Read the Extended Device Control Register, assert the + * PHY_RESET_DIR bit to put the PHY into reset. Then, take it + * out of reset. + */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; + ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + msleep(10); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + } + udelay(150); + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Wait for FW to finish PHY configuration. */ + return e1000_get_phy_cfg_done(hw); +} + +/** + * e1000_phy_reset - reset the phy to commit settings + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY + * Sets bit 15 of the MII Control register + */ +s32 e1000_phy_reset(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + switch (hw->phy_type) { + case e1000_phy_igp: + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + break; + default: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= MII_CR_RESET; + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + break; + } + + if (hw->phy_type == e1000_phy_igp) + e1000_phy_init_script(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_detect_gig_phy - check the phy type + * @hw: Struct containing variables accessed by shared code + * + * Probes the expected PHY address for known PHY IDs + */ +static s32 e1000_detect_gig_phy(struct e1000_hw *hw) +{ + s32 phy_init_status, ret_val; + u16 phy_id_high, phy_id_low; + bool match = false; + + if (hw->phy_id != 0) + return E1000_SUCCESS; + + /* Read the PHY ID Registers to identify which PHY is onboard. 
*/ + ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); + if (ret_val) + return ret_val; + + hw->phy_id = (u32)(phy_id_high << 16); + udelay(20); + ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); + if (ret_val) + return ret_val; + + hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK); + hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->phy_id == M88E1000_E_PHY_ID) + match = true; + break; + case e1000_82544: + if (hw->phy_id == M88E1000_I_PHY_ID) + match = true; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + if (hw->phy_id == M88E1011_I_PHY_ID) + match = true; + break; + case e1000_ce4100: + if ((hw->phy_id == RTL8211B_PHY_ID) || + (hw->phy_id == RTL8201N_PHY_ID) || + (hw->phy_id == M88E1118_E_PHY_ID)) + match = true; + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (hw->phy_id == IGP01E1000_I_PHY_ID) + match = true; + break; + default: + e_dbg("Invalid MAC type %d\n", hw->mac_type); + return -E1000_ERR_CONFIG; + } + phy_init_status = e1000_set_phy_type(hw); + + if ((match) && (phy_init_status == E1000_SUCCESS)) { + e_dbg("PHY ID 0x%X detected\n", hw->phy_id); + return E1000_SUCCESS; + } + e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id); + return -E1000_ERR_PHY; +} + +/** + * e1000_phy_reset_dsp - reset DSP + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY's DSP + */ +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + do { + ret_val = e1000_write_phy_reg(hw, 29, 0x001d); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x0000); + if (ret_val) + break; + ret_val = E1000_SUCCESS; + } while (0); + + return ret_val; +} + +/** + * e1000_phy_igp_get_info - get igp specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for igp PHY only. + */ +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data, min_length, max_length, average; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + /* IGP01E1000 does not need to support it. */ + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; + + /* IGP01E1000 always correct polarity reversal */ + phy_info->polarity_correction = e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >> + IGP01E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Local/Remote Receiver Information are only valid @ 1000 + * Mbps + */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + /* Get cable length */ + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + /* Translate to old method */ + average = (max_length + min_length) / 2; + + if (average <= e1000_igp_cable_length_50) + phy_info->cable_length = e1000_cable_length_50; + else if (average <= e1000_igp_cable_length_80) + phy_info->cable_length = e1000_cable_length_50_80; + else if (average <= e1000_igp_cable_length_110) + phy_info->cable_length = e1000_cable_length_80_110; + else if (average <= e1000_igp_cable_length_140) + phy_info->cable_length = e1000_cable_length_110_140; + else + phy_info->cable_length = e1000_cable_length_140; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_m88_get_info - get m88 specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for m88 PHY only. + */ +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_info->extended_10bt_distance = + ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> + M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ? + e1000_10bt_ext_dist_enable_lower : + e1000_10bt_ext_dist_enable_normal; + + phy_info->polarity_correction = + ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> + M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ? + e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >> + M88E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + /* Cable Length Estimation and Local/Remote Receiver Information + * are only valid at 1000 Mbps. + */ + phy_info->cable_length = + (e1000_cable_length) ((phy_data & + M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_get_info - request phy info + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers + */ +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + + phy_info->cable_length = e1000_cable_length_undefined; + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; + phy_info->cable_polarity = e1000_rev_polarity_undefined; + phy_info->downshift = e1000_downshift_undefined; + phy_info->polarity_correction = e1000_polarity_reversal_undefined; + phy_info->mdix_mode = e1000_auto_x_mode_undefined; + phy_info->local_rx = e1000_1000t_rx_status_undefined; + phy_info->remote_rx = e1000_1000t_rx_status_undefined; + + if (hw->media_type != e1000_media_type_copper) { + e_dbg("PHY info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { + e_dbg("PHY info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + if (hw->phy_type == e1000_phy_igp) + return e1000_phy_igp_get_info(hw, phy_info); + else if ((hw->phy_type == e1000_phy_8211) || + (hw->phy_type == e1000_phy_8201)) + return E1000_SUCCESS; + else + return e1000_phy_m88_get_info(hw, phy_info); +} + +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { + e_dbg("Invalid MDI setting detected\n"); + hw->mdix = 1; + return -E1000_ERR_CONFIG; + } + return E1000_SUCCESS; +} + +/** + * e1000_init_eeprom_params - initialize sw eeprom vars + * @hw: Struct containing variables accessed by shared code + * + * Sets up eeprom variables in the hw struct. Must be called after mac_type + * is configured. 
+ */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd = er32(EECD); + s32 ret_val = E1000_SUCCESS; + u16 eeprom_size; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + eeprom->type = e1000_eeprom_microwire; + eeprom->word_size = 64; + eeprom->opcode_bits = 3; + eeprom->address_bits = 6; + eeprom->delay_usec = 50; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_SIZE) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (eecd & E1000_EECD_TYPE) { + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + } else { + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + } + break; + default: + break; + } + + if (eeprom->type == e1000_eeprom_spi) { + /* eeprom_size will be an enum [0..8] that maps to eeprom sizes + * 128B to 32KB (incremented by powers of 2). + */ + /* Set to default value for initial eeprom read. */ + eeprom->word_size = 64; + ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); + if (ret_val) + return ret_val; + eeprom_size = + (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; + /* 256B eeprom size was not supported in earlier hardware, so we + * bump eeprom_size up one to ensure that "1" (which maps to + * 256B) is never the result used in the shifting logic below. + */ + if (eeprom_size) + eeprom_size++; + + eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT); + } + return ret_val; +} + +/** + * e1000_raise_ee_clk - Raises the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Raise the clock input to the EEPROM (by setting the SK bit), and then + * wait microseconds. + */ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_lower_ee_clk - Lowers the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Lower the clock input to the EEPROM (by clearing the SK bit), and + * then wait 50 microseconds. + */ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM. 
+ * @hw: Struct containing variables accessed by shared code + * @data: data to send to the EEPROM + * @count: number of bits to shift out + */ +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u32 mask; + + /* We need to shift "count" bits out to the EEPROM. So, value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + */ + mask = 0x01 << (count - 1); + eecd = er32(EECD); + if (eeprom->type == e1000_eeprom_microwire) + eecd &= ~E1000_EECD_DO; + else if (eeprom->type == e1000_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + /* A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK bit + * controls the clock input to the EEPROM). A "0" is shifted + * out to the EEPROM by setting "DI" to "0" and then raising and + * then lowering the clock. + */ + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(eeprom->delay_usec); + + e1000_raise_ee_clk(hw, &eecd); + e1000_lower_ee_clk(hw, &eecd); + + mask = mask >> 1; + + } while (mask); + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM + * @hw: Struct containing variables accessed by shared code + * @count: number of bits to shift in + */ +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + /* In order to read a register from the EEPROM, we need to shift 'count' + * bits in from the EEPROM. Bits are "shifted in" by raising the clock + * input to the EEPROM (setting the SK bit), and then reading the value + * of the "DO" bit. During this "shifting in" process the "DI" bit + * should always be clear. + */ + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data = data << 1; + e1000_raise_ee_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DI); + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_ee_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_acquire_eeprom - Prepares EEPROM for access + * @hw: Struct containing variables accessed by shared code + * + * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This + * function should be called before issuing a command to the EEPROM. 
+ */ +static s32 e1000_acquire_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd, i = 0; + + eecd = er32(EECD); + + /* Request EEPROM Access */ + if (hw->mac_type > e1000_82544) { + eecd |= E1000_EECD_REQ; + ew32(EECD, eecd); + eecd = er32(EECD); + while ((!(eecd & E1000_EECD_GNT)) && + (i < E1000_EEPROM_GRANT_ATTEMPTS)) { + i++; + udelay(5); + eecd = er32(EECD); + } + if (!(eecd & E1000_EECD_GNT)) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire EEPROM grant\n"); + return -E1000_ERR_EEPROM; + } + } + + /* Setup EEPROM for Read/Write */ + + if (eeprom->type == e1000_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); + ew32(EECD, eecd); + + /* Set CS */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(1); + } + + return E1000_SUCCESS; +} + +/** + * e1000_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_standby_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + + eecd = er32(EECD); + + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock high */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock low */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } +} + +/** + * e1000_release_eeprom - drop chip select + * @hw: Struct containing variables accessed by shared code + * + * Terminates a command by inverting the EEPROM's chip select pin + */ +static void e1000_release_eeprom(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + + if (hw->eeprom.type == e1000_eeprom_spi) { + eecd |= E1000_EECD_CS; /* Pull CS high */ + eecd &= ~E1000_EECD_SK; /* Lower SCK */ + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(hw->eeprom.delay_usec); + } else if (hw->eeprom.type == e1000_eeprom_microwire) { + /* cleanup eeprom */ + + /* CS on Microwire is active-high */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); + + ew32(EECD, eecd); + + /* Rising edge of clock */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + + /* Falling edge of clock */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + } + + /* Stop requesting EEPROM access */ + if (hw->mac_type > e1000_82544) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + } +} + +/** + * e1000_spi_eeprom_ready - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u8 spi_stat_reg; + + /* Read "Status Register" repeatedly until the LSB is cleared. 
The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + retry_count = 0; + do { + e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, + hw->eeprom.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8); + if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) + break; + + udelay(5); + retry_count += 5; + + e1000_standby_eeprom(hw); + } while (retry_count < EEPROM_MAX_RETRY_SPI); + + /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and + * only 0-5mSec on 5V devices) + */ + if (retry_count >= EEPROM_MAX_RETRY_SPI) { + e_dbg("SPI EEPROM Status error\n"); + return -E1000_ERR_EEPROM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_eeprom - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * @words: number of words to read + */ +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + + mutex_lock(&e1000_eeprom_lock); + ret = e1000_do_read_eeprom(hw, offset, words, data); + mutex_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 i = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. + */ + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { + e_dbg("\"words\" parameter out of bounds. Words = %d," + "size = %d\n", offset, eeprom->word_size); + return -E1000_ERR_EEPROM; + } + + /* EEPROM's that don't use EERD to read require us to bit-bang the SPI + * directly. In this case, we need to acquire the EEPROM so that + * FW or other port software does not interrupt. + */ + /* Prepare the EEPROM for bit-bang reading */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have + * acquired the EEPROM at this point, so any returns should release it + */ + if (eeprom->type == e1000_eeprom_spi) { + u16 word_in; + u8 read_opcode = EEPROM_READ_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) { + e1000_release_eeprom(hw); + return -E1000_ERR_EEPROM; + } + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + read_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16)(offset * 2), + eeprom->address_bits); + + /* Read the data. The address of the eeprom internally + * increments with each byte (spi) being read, saving on the + * overhead of eeprom setup and tear-down. The address counter + * will roll over if reading beyond the size of the eeprom, thus + * allowing the entire memory to be read starting from any + * offset. 
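A short note on the byte swap in the loop below: e1000_shift_in_ee_bits() packs the first bit it receives into the most significant position, and the SPI part streams the byte at the lower address first, so a raw 16-clock read of word N arrives as (byte[2N] << 8) | byte[2N + 1]. The swap turns that into the word value the rest of the driver expects (the Microwire path needs no swap because those parts are word-organized). A small host-side model over a plain byte image; the helpers are hypothetical and purely illustrative:

static u16 eeprom_word_as_streamed(const u8 *image, u16 word_offset)
{
	/* What 16 raw clocks starting at byte address 2 * word_offset
	 * return when assembled MSB-first, as e1000_shift_in_ee_bits()
	 * does.
	 */
	return (u16)((image[2 * word_offset] << 8) |
		     image[2 * word_offset + 1]);
}

static u16 eeprom_word(const u8 *image, u16 word_offset)
{
	u16 raw = eeprom_word_as_streamed(image, word_offset);

	/* Same transformation as data[i] = (word_in >> 8) | (word_in << 8)
	 * in the loop below.
	 */
	return (u16)((raw >> 8) | (raw << 8));
}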
+ */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_ee_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + } else if (eeprom->type == e1000_eeprom_microwire) { + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, + EEPROM_READ_OPCODE_MICROWIRE, + eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16)(offset + i), + eeprom->address_bits); + + /* Read the data. For microwire, each word requires the + * overhead of eeprom setup and tear-down. + */ + data[i] = e1000_shift_in_ee_bits(hw, 16); + e1000_standby_eeprom(hw); + cond_resched(); + } + } + + /* End this read operation */ + e1000_release_eeprom(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum + * @hw: Struct containing variables accessed by shared code + * + * Reads the first 64 16 bit words of the EEPROM and sums the values read. + * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is + * valid. + */ +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + +#ifdef CONFIG_PARISC + /* This is a signature and not a checksum on HP c8000 */ + if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) + return E1000_SUCCESS; + +#endif + if (checksum == (u16)EEPROM_SUM) + return E1000_SUCCESS; + else { + e_dbg("EEPROM Checksum Invalid\n"); + return -E1000_ERR_EEPROM; + } +} + +/** + * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum + * @hw: Struct containing variables accessed by shared code + * + * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. + * Writes the difference to word offset 63 of the EEPROM. + */ +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + checksum = (u16)EEPROM_SUM - checksum; + if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { + e_dbg("EEPROM Write Error\n"); + return -E1000_ERR_EEPROM; + } + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom - write words to the different EEPROM types. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word to be written to the EEPROM + * + * If e1000_update_eeprom_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + */ +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + + mutex_lock(&e1000_eeprom_lock); + ret = e1000_do_write_eeprom(hw, offset, words, data); + mutex_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + s32 status = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. 
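Tying together the two checksum routines above: the convention is that the 16-bit sum of EEPROM words 0 through 63 equals 0xBABA, so a valid image carries, at word offset 63, the value 0xBABA minus the sum (mod 2^16) of words 0 through 62. A host-side sketch over a plain 64-word image, purely for illustration:

#define EEPROM_IMAGE_WORDS	64

static u16 eeprom_image_checksum_word(const u16 *image)
{
	u16 sum = 0;
	int i;

	/* Sum of words 0..62; word 63 is the checksum slot itself. */
	for (i = 0; i < EEPROM_IMAGE_WORDS - 1; i++)
		sum += image[i];

	return (u16)(0xBABA - sum);
}

static bool eeprom_image_checksum_ok(const u16 *image)
{
	u16 sum = 0;
	int i;

	for (i = 0; i < EEPROM_IMAGE_WORDS; i++)
		sum += image[i];

	return sum == 0xBABA;
}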
+ */ + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { + e_dbg("\"words\" parameter out of bounds\n"); + return -E1000_ERR_EEPROM; + } + + /* Prepare the EEPROM for writing */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + if (eeprom->type == e1000_eeprom_microwire) { + status = e1000_write_eeprom_microwire(hw, offset, words, data); + } else { + status = e1000_write_eeprom_spi(hw, offset, words, data); + msleep(10); + } + + /* Done with writing */ + e1000_release_eeprom(hw); + + return status; +} + +/** + * e1000_write_eeprom_spi - Writes a 16 bit word to a given offset in an SPI EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u16 widx = 0; + + while (widx < words) { + u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) + return -E1000_ERR_EEPROM; + + e1000_standby_eeprom(hw); + cond_resched(); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, + eeprom->opcode_bits); + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + write_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16)((offset + widx) * 2), + eeprom->address_bits); + + /* Send the data */ + + /* Loop to allow for up to whole page write (32 bytes) of + * eeprom + */ + while (widx < words) { + u16 word_out = data[widx]; + + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_ee_bits(hw, word_out, 16); + widx++; + + /* Some larger eeprom sizes are capable of a 32-byte + * PAGE WRITE operation, while the smaller eeproms are + * capable of an 8-byte PAGE WRITE operation. Break the + * inner loop to pass new address + */ + if ((((offset + widx) * 2) % eeprom->page_size) == 0) { + e1000_standby_eeprom(hw); + break; + } + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom_microwire - Writes a 16 bit word to a given offset in a Microwire EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u16 words_written = 0; + u16 i = 0; + + /* Send the write enable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 11). It's less work to include + * the 11 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This puts the + * EEPROM into write/erase mode. 
+ */ + e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, + (u16)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); + + /* Prepare the EEPROM */ + e1000_standby_eeprom(hw); + + while (words_written < words) { + /* Send the Write command (3-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, + eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16)(offset + words_written), + eeprom->address_bits); + + /* Send the data */ + e1000_shift_out_ee_bits(hw, data[words_written], 16); + + /* Toggle the CS line. This in effect tells the EEPROM to + * execute the previous command. + */ + e1000_standby_eeprom(hw); + + /* Read DO repeatedly until it is high (equal to '1'). The + * EEPROM will signal that the command has been completed by + * raising the DO signal. If DO does not go high in 10 + * milliseconds, then error out. + */ + for (i = 0; i < 200; i++) { + eecd = er32(EECD); + if (eecd & E1000_EECD_DO) + break; + udelay(50); + } + if (i == 200) { + e_dbg("EEPROM Write did not complete\n"); + return -E1000_ERR_EEPROM; + } + + /* Recover from write */ + e1000_standby_eeprom(hw); + cond_resched(); + + words_written++; + } + + /* Send the write disable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 10). It's less work to include + * the 10 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This takes the + * EEPROM out of write/erase mode. + */ + e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, + (u16)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - read the adapters MAC from eeprom + * @hw: Struct containing variables accessed by shared code + * + * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the + * second function of dual function devices + */ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + u16 offset; + u16 eeprom_data, i; + + for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { + offset = i >> 1; + if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF); + hw->perm_mac_addr[i + 1] = (u8)(eeprom_data >> 8); + } + + switch (hw->mac_type) { + default: + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) + hw->perm_mac_addr[5] ^= 0x01; + break; + } + + for (i = 0; i < NODE_ADDRESS_SIZE; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return E1000_SUCCESS; +} + +/** + * e1000_init_rx_addrs - Initializes receive address filters. + * @hw: Struct containing variables accessed by shared code + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + */ +static void e1000_init_rx_addrs(struct e1000_hw *hw) +{ + u32 i; + u32 rar_num; + + /* Setup the receive address. */ + e_dbg("Programming MAC Address into RAR[0]\n"); + + e1000_rar_set(hw, hw->mac_addr, 0); + + rar_num = E1000_RAR_ENTRIES; + + /* Zero out the following 14 receive addresses. 
RAR[15] is for + * manageability + */ + e_dbg("Clearing RAR[1-14]\n"); + for (i = 1; i < rar_num; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table + * @hw: Struct containing variables accessed by shared code + * @mc_addr: the multicast address to hash + */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value = 0; + + /* The portion of the address that is used for the hash table is + * determined by the mc_filter_type setting. + */ + switch (hw->mc_filter_type) { + /* [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + */ + case 0: + /* [47:36] i.e. 0x563 for above example address */ + hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: + /* [46:35] i.e. 0xAC6 for above example address */ + hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: + /* [45:34] i.e. 0x5D8 for above example address */ + hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: + /* [43:32] i.e. 0x634 for above example address */ + hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + } + + hash_value &= 0xFFF; + return hash_value; +} + +/** + * e1000_rar_set - Puts an ethernet address into a receive address register. + * @hw: Struct containing variables accessed by shared code + * @addr: Address to put into receive address register + * @index: Receive address register to write + */ +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx + * unit hang. + * + * Description: + * If there are any Rx frames queued up or otherwise present in the HW + * before RSS is enabled, and then we enable RSS, the HW Rx unit will + * hang. To work around this issue, we have to disable receives and + * flush out all Rx frames before we enable RSS. To do so, we modify we + * redirect all Rx traffic to manageability and then reset the HW. + * This flushes away Rx frames, and (since the redirections to + * manageability persists across resets) keeps new ones from coming in + * while we work. Then, we clear the Address Valid AV bit for all MAC + * addresses and undo the re-direction to manageability. + * Now, frames are coming in again, but the MAC won't accept them, so + * far so good. We now proceed to initialize RSS (if necessary) and + * configure the Rx unit. Last, we re-enable the AV bits and continue + * on our merry way. + */ + switch (hw->mac_type) { + default: + /* Indicate to hardware the Address is Valid. */ + rar_high |= E1000_RAH_AV; + break; + } + + E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table. 
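A worked example for e1000_hash_mc_addr() above, using the address from its comment (01:AA:00:12:34:56) and the default filter type 0: mc_addr[4] is 0x34 and mc_addr[5] is 0x56, so (0x34 >> 4) | (0x56 << 4) = 0x003 | 0x560 = 0x563, matching the [47:36] figure in the comment. The same selection written out as a tiny self-contained helper (hypothetical, not part of the driver):

static u32 mc_hash_type0(const u8 *mc_addr)
{
	/* Bits [47:36] of the address: the top four bits of byte 4 plus
	 * all of byte 5, masked to the 12-bit table index.
	 */
	return (u32)(((mc_addr[4] >> 4) | ((u16)mc_addr[5] << 4)) & 0xFFF);
}

/* mc_hash_type0((u8 []){ 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 }) == 0x563 */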
+ * @hw: Struct containing variables accessed by shared code + * @offset: Offset in VLAN filer table to write + * @value: Value to write into VLAN filter table + */ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + u32 temp; + + if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { + temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); + E1000_WRITE_FLUSH(); + } else { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_clear_vfta - Clears the VLAN filer table + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0); + E1000_WRITE_FLUSH(); + } +} + +static s32 e1000_id_led_init(struct e1000_hw *hw) +{ + u32 ledctl; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 eeprom_data, i, temp; + const u16 led_mask = 0x0F; + + if (hw->mac_type < e1000_82540) { + /* Nothing to do */ + return E1000_SUCCESS; + } + + ledctl = er32(LEDCTL); + hw->ledctl_default = ledctl; + hw->ledctl_mode1 = hw->ledctl_default; + hw->ledctl_mode2 = hw->ledctl_default; + + if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + + if ((eeprom_data == ID_LED_RESERVED_0000) || + (eeprom_data == ID_LED_RESERVED_FFFF)) { + eeprom_data = ID_LED_DEFAULT; + } + + for (i = 0; i < 4; i++) { + temp = (eeprom_data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_setup_led + * @hw: Struct containing variables accessed by shared code + * + * Prepares SW controlable LED for use and saves the current state of the LED. 
+ */ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + u32 ledctl; + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No setup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn off PHY Smart Power Down (if enabled) */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, + &hw->phy_spd_default); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + (u16)(hw->phy_spd_default & + ~IGP01E1000_GMII_SPD)); + if (ret_val) + return ret_val; + fallthrough; + default: + if (hw->media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + /* Save current LEDCTL settings */ + hw->ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->media_type == e1000_media_type_copper) + ew32(LEDCTL, hw->ledctl_mode1); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores the saved state of the SW controlable LED. + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No cleanup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn on PHY Smart Power Down (if previously enabled) */ + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + hw->phy_spd_default); + if (ret_val) + return ret_val; + fallthrough; + default: + /* Restore LEDCTL settings */ + ew32(LEDCTL, hw->ledctl_default); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turns on the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_on(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode2); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turns off the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_off(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == 
e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode1); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_hw_cntrs - Clears all hardware statistics counters. + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_hw_cntrs(struct e1000_hw *hw) +{ + er32(CRCERRS); + er32(SYMERRS); + er32(MPC); + er32(SCC); + er32(ECOL); + er32(MCC); + er32(LATECOL); + er32(COLC); + er32(DC); + er32(SEC); + er32(RLEC); + er32(XONRXC); + er32(XONTXC); + er32(XOFFRXC); + er32(XOFFTXC); + er32(FCRUC); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + + er32(GPRC); + er32(BPRC); + er32(MPRC); + er32(GPTC); + er32(GORCL); + er32(GORCH); + er32(GOTCL); + er32(GOTCH); + er32(RNBC); + er32(RUC); + er32(RFC); + er32(ROC); + er32(RJC); + er32(TORL); + er32(TORH); + er32(TOTL); + er32(TOTH); + er32(TPR); + er32(TPT); + + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(MPTC); + er32(BPTC); + + if (hw->mac_type < e1000_82543) + return; + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + if (hw->mac_type <= e1000_82544) + return; + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); +} + +/** + * e1000_reset_adaptive - Resets Adaptive IFS to its default state. + * @hw: Struct containing variables accessed by shared code + * + * Call this after e1000_init_hw. You may override the IFS defaults by setting + * hw->ifs_params_forced to true. However, you must initialize hw-> + * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio + * before calling this function. + */ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if (!hw->ifs_params_forced) { + hw->current_ifs_val = 0; + hw->ifs_min_val = IFS_MIN; + hw->ifs_max_val = IFS_MAX; + hw->ifs_step_size = IFS_STEP; + hw->ifs_ratio = IFS_RATIO; + } + hw->in_ifs_mode = false; + ew32(AIT, 0); + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_update_adaptive - update adaptive IFS + * @hw: Struct containing variables accessed by shared code + * + * Called during the callback/watchdog routine to update IFS value based on + * the ratio of transmits to collisions. 
+ */ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { + if (hw->tx_packet_delta > MIN_NUM_XMITS) { + hw->in_ifs_mode = true; + if (hw->current_ifs_val < hw->ifs_max_val) { + if (hw->current_ifs_val == 0) + hw->current_ifs_val = + hw->ifs_min_val; + else + hw->current_ifs_val += + hw->ifs_step_size; + ew32(AIT, hw->current_ifs_val); + } + } + } else { + if (hw->in_ifs_mode && + (hw->tx_packet_delta <= MIN_NUM_XMITS)) { + hw->current_ifs_val = 0; + hw->in_ifs_mode = false; + ew32(AIT, 0); + } + } + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_get_bus_info + * @hw: Struct containing variables accessed by shared code + * + * Gets the current PCI bus type, speed, and width of the hardware + */ +void e1000_get_bus_info(struct e1000_hw *hw) +{ + u32 status; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->bus_type = e1000_bus_type_pci; + hw->bus_speed = e1000_bus_speed_unknown; + hw->bus_width = e1000_bus_width_unknown; + break; + default: + status = er32(STATUS); + hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? + e1000_bus_type_pcix : e1000_bus_type_pci; + + if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { + hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? + e1000_bus_speed_66 : e1000_bus_speed_120; + } else if (hw->bus_type == e1000_bus_type_pci) { + hw->bus_speed = (status & E1000_STATUS_PCI66) ? + e1000_bus_speed_66 : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + hw->bus_speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + hw->bus_speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + hw->bus_speed = e1000_bus_speed_133; + break; + default: + hw->bus_speed = e1000_bus_speed_reserved; + break; + } + } + hw->bus_width = (status & E1000_STATUS_BUS64) ? + e1000_bus_width_64 : e1000_bus_width_32; + break; + } +} + +/** + * e1000_write_reg_io + * @hw: Struct containing variables accessed by shared code + * @offset: offset to write to + * @value: value to write + * + * Writes a value to one of the devices registers using port I/O (as opposed to + * memory mapped I/O). Only 82544 and newer devices support port I/O. + */ +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value) +{ + unsigned long io_addr = hw->io_base; + unsigned long io_data = hw->io_base + 4; + + e1000_io_write(hw, io_addr, offset); + e1000_io_write(hw, io_data, value); +} + +/** + * e1000_get_cable_length - Estimates the cable length. + * @hw: Struct containing variables accessed by shared code + * @min_length: The estimated minimum length + * @max_length: The estimated maximum length + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * This function always returns a ranged length (minimum & maximum). + * So for M88 phy's, this function interprets the one value returned from the + * register to the minimum and maximum range. + * For IGP phy's, the function calculates the range by the AGC registers. 
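/*
 * [Editor's note: illustrative sketch, not part of this patch.  It shows a
 * typical use of the ranged estimate described above.  Because
 * e1000_get_cable_length() is file-local, a caller like this would have to
 * live in this same file, below that function; the helper name is
 * hypothetical.]
 */
static void example_report_cable_length(struct e1000_hw *hw)
{
	u16 min_m = 0, max_m = 0;

	/* Only the copper PHY types handled below (M88, IGP) can estimate a
	 * length; other PHYs return an error. */
	if (e1000_get_cable_length(hw, &min_m, &max_m) == E1000_SUCCESS)
		e_dbg("estimated cable length: %d - %d m\n", min_m, max_m);
}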
+ */ +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length) +{ + s32 ret_val; + u16 agc_value = 0; + u16 i, phy_data; + u16 cable_length; + + *min_length = *max_length = 0; + + /* Use old method for Phy older than IGP */ + if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + /* Convert the enum value to ranged values */ + switch (cable_length) { + case e1000_cable_length_50: + *min_length = 0; + *max_length = e1000_igp_cable_length_50; + break; + case e1000_cable_length_50_80: + *min_length = e1000_igp_cable_length_50; + *max_length = e1000_igp_cable_length_80; + break; + case e1000_cable_length_80_110: + *min_length = e1000_igp_cable_length_80; + *max_length = e1000_igp_cable_length_110; + break; + case e1000_cable_length_110_140: + *min_length = e1000_igp_cable_length_110; + *max_length = e1000_igp_cable_length_140; + break; + case e1000_cable_length_140: + *min_length = e1000_igp_cable_length_140; + *max_length = e1000_igp_cable_length_170; + break; + default: + return -E1000_ERR_PHY; + } + } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ + u16 cur_agc_value; + u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_A, + IGP01E1000_PHY_AGC_B, + IGP01E1000_PHY_AGC_C, + IGP01E1000_PHY_AGC_D + }; + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; + + /* Value bound check. */ + if ((cur_agc_value >= + IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || + (cur_agc_value == 0)) + return -E1000_ERR_PHY; + + agc_value += cur_agc_value; + + /* Update minimal AGC value. */ + if (min_agc_value > cur_agc_value) + min_agc_value = cur_agc_value; + } + + /* Remove the minimal AGC result for length < 50m */ + if (agc_value < + IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { + agc_value -= min_agc_value; + + /* Get the average length of the remaining 3 channels */ + agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); + } else { + /* Get the average length of all the 4 channels. */ + agc_value /= IGP01E1000_PHY_CHANNEL_NUM; + } + + /* Set the range of the calculated length. */ + *min_length = ((e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) > 0) ? + (e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) : 0; + *max_length = e1000_igp_cable_length_table[agc_value] + + IGP01E1000_AGC_RANGE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_polarity - Check the cable polarity + * @hw: Struct containing variables accessed by shared code + * @polarity: output parameter : 0 - Polarity is not reversed + * 1 - Polarity is reversed. + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function simply reads the polarity bit in the + * Phy Status register. For IGP phy's, this bit is valid only if link speed is + * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will + * return 0. If the link speed is 1000 Mbps the polarity status is in the + * IGP01E1000_PHY_PCS_INIT_REG. 
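/*
 * [Editor's note: illustrative sketch, not part of this patch.  It shows the
 * intended use of the file-local polarity check defined below; the wrapper
 * name is hypothetical.]
 */
static void example_report_polarity(struct e1000_hw *hw)
{
	e1000_rev_polarity polarity;

	if (e1000_check_polarity(hw, &polarity) == E1000_SUCCESS)
		e_dbg("cable polarity is %s\n",
		      polarity == e1000_rev_polarity_reversed ?
		      "reversed" : "normal");
}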
+ */ +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_m88) { + /* return the Polarity bit in the Status register. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >> + M88E1000_PSSR_REV_POLARITY_SHIFT) ? + e1000_rev_polarity_reversed : e1000_rev_polarity_normal; + + } else if (hw->phy_type == e1000_phy_igp) { + /* Read the Status register to check the speed */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + /* If speed is 1000 Mbps, must read the + * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status + */ + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Read the GIG initialization PCS register (0x00B4) */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, + &phy_data); + if (ret_val) + return ret_val; + + /* Check the polarity bits */ + *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } else { + /* For 10 Mbps, read the polarity bit in the status + * register. (for 100 Mbps this bit is always 0) + */ + *polarity = + (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_downshift - Check if Downshift occurred + * @hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function reads the Downshift bit in the Phy + * Specific Status register. For IGP phy's, it reads the Downgrade bit in the + * Link Health register. In IGP this bit is latched high, so the driver must + * read it immediately after link is established. + */ +static s32 e1000_check_downshift(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = + (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 
1 : 0; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> + M88E1000_PSSR_DOWNSHIFT_SHIFT; + } + + return E1000_SUCCESS; +} + +static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D +}; + +static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw) +{ + u16 min_length, max_length; + u16 phy_data, i; + s32 ret_val; + + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + if (hw->dsp_config_state != e1000_dsp_config_enabled) + return 0; + + if (min_length >= e1000_igp_cable_length_50) { + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + hw->dsp_config_state = e1000_dsp_config_activated; + } else { + u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; + u32 idle_errs = 0; + + /* clear previous idle error counts */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + udelay(1000); + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + hw->ffe_config_state = e1000_ffe_config_active; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if (ret_val) + return ret_val; + break; + } + + if (idle_errs) + ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } + + return 0; +} + +/** + * e1000_config_dsp_after_link_change + * @hw: Struct containing variables accessed by shared code + * @link_up: was link up at the time this was called + * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + * + * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a + * gigabit link is achieved to improve link quality. + */ + +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) +{ + s32 ret_val; + u16 phy_data, phy_saved_data, speed, duplex, i; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + if (link_up) { + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (speed == SPEED_1000) { + ret_val = e1000_1000Mb_check_cable_length(hw); + if (ret_val) + return ret_val; + } + } else { + if (hw->dsp_config_state == e1000_dsp_config_activated) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. 
+ */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; + + ret_val = + e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->dsp_config_state = e1000_dsp_config_enabled; + } + + if (hw->ffe_config_state == e1000_ffe_config_active) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. + */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_DEFAULT); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->ffe_config_state = e1000_ffe_config_enabled; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_set_phy_mode - Set PHY to class A mode + * @hw: Struct containing variables accessed by shared code + * + * Assumes the following operations will follow to enable the new class mode. + * 1. Do a PHY soft reset + * 2. Restart auto-negotiation or force link. + */ +static s32 e1000_set_phy_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 eeprom_data; + + if ((hw->mac_type == e1000_82545_rev_3) && + (hw->media_type == e1000_media_type_copper)) { + ret_val = + e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, + &eeprom_data); + if (ret_val) + return ret_val; + + if ((eeprom_data != EEPROM_RESERVED_WORD) && + (eeprom_data & EEPROM_PHY_CLASS_A)) { + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, + 0x000B); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, + 0x8104); + if (ret_val) + return ret_val; + + hw->phy_reset_disable = false; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - set d3 link power state + * @hw: Struct containing variables accessed by shared code + * @active: true to enable lplu false to disable lplu. + * + * This function sets the lplu state according to the active flag. When + * activating lplu this function also disables smart speed and vise versa. + * lplu will not be activated unless the device autonegotiation advertisement + * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes. 
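/*
 * [Editor's note: illustrative sketch, not part of this patch.  It shows how
 * a power-management path might toggle LPLU around a D3 transition, per the
 * description above.  The wrapper name is hypothetical; the driver's own
 * suspend/resume code is the authoritative caller of this helper.]
 */
static s32 example_toggle_lplu_for_d3(struct e1000_hw *hw, bool entering_d3)
{
	/* Hypothetical usage: enable LPLU (which also turns SmartSpeed off)
	 * before dropping to D3, and disable it again on resume so that
	 * full-speed autonegotiation is available during normal operation.
	 * Only IGP PHYs are affected; others return E1000_SUCCESS. */
	return e1000_set_d3_lplu_state(hw, entering_d3);
}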
+ * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + */ +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + /* During driver activity LPLU should not be used or it will attain link + * from the lowest speeds starting from 10Mbps. The capability is used + * for Dx transitions and states + */ + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); + if (ret_val) + return ret_val; + } + + if (!active) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (hw->smart_speed == e1000_smart_speed_on) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } else if (hw->smart_speed == e1000_smart_speed_off) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* When LPLU is enabled we should disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + return E1000_SUCCESS; +} + +/** + * e1000_set_vco_speed + * @hw: Struct containing variables accessed by shared code + * + * Change VCO speed register to improve Bit Error Rate performance of SERDES. 
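/*
 * [Editor's note: illustrative sketch, not part of this patch.  It factors
 * out the save-page / select-page / read-modify-write / restore-page pattern
 * that e1000_set_vco_speed() below applies to PHY register 30 on two pages.
 * The helper name and parameters are assumptions for this example only.]
 */
static s32 example_m88_paged_rmw(struct e1000_hw *hw, u16 page,
				 u16 clear_bits, u16 set_bits)
{
	u16 saved_page, data;
	s32 ret_val;

	/* Remember the currently selected page so it can be restored. */
	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &saved_page);
	if (ret_val)
		return ret_val;

	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, page);
	if (ret_val)
		return ret_val;

	/* Read-modify-write register 30 (M88E1000_PHY_GEN_CONTROL). */
	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &data);
	if (ret_val)
		return ret_val;

	data = (data & ~clear_bits) | set_bits;
	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, data);
	if (ret_val)
		return ret_val;

	/* Restore the page that was selected on entry. */
	return e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, saved_page);
}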
+ */ +static s32 e1000_set_vco_speed(struct e1000_hw *hw) +{ + s32 ret_val; + u16 default_page = 0; + u16 phy_data; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + /* Set PHY register 30, page 5, bit 8 to 0 */ + + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Set PHY register 30, page 4, bit 11 to 1 */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PHY_VCO_REG_BIT11; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + +/** + * e1000_enable_mng_pass_thru - check for bmc pass through + * @hw: Struct containing variables accessed by shared code + * + * Verifies the hardware needs to allow ARPs to be processed by the host + * returns: - true/false + */ +u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + + if (hw->asf_firmware_present) { + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN) || + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + return false; + if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) + return true; + } + return false; +} + +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 i; + + /* Polarity reversal workaround for forced 10F/10H links. */ + + /* Disable the transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the NO link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be clear. 
+ */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) + break; + msleep(100); + } + + /* Recommended delay time after link has been lost */ + msleep(1000); + + /* Now we will re-enable th transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be set. + */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_get_auto_rd_done + * @hw: Struct containing variables accessed by shared code + * + * Check for EEPROM Auto Read bit done. + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) +{ + msleep(5); + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_cfg_done + * @hw: Struct containing variables accessed by shared code + * + * Checks if the PHY configuration is done + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) +{ + msleep(10); + return E1000_SUCCESS; +} diff --git a/devices/e1000/e1000_hw-5.15-ethercat.h b/devices/e1000/e1000_hw-5.15-ethercat.h new file mode 100644 index 00000000..ca335f35 --- /dev/null +++ b/devices/e1000/e1000_hw-5.15-ethercat.h @@ -0,0 +1,3085 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ + +/* e1000_hw.h + * Structures, enums, and macros for the MAC + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep-5.15-ethercat.h" + + +/* Forward declarations of structures used by the shared code */ +struct e1000_hw; +struct e1000_hw_stats; + +/* Enumerated types specific to the e1000 hardware */ +/* Media Access Controllers */ +typedef enum { + e1000_undefined = 0, + e1000_82542_rev2_0, + e1000_82542_rev2_1, + e1000_82543, + e1000_82544, + e1000_82540, + e1000_82545, + e1000_82545_rev_3, + e1000_82546, + e1000_ce4100, + e1000_82546_rev_3, + e1000_82541, + e1000_82541_rev_2, + e1000_82547, + e1000_82547_rev_2, + e1000_num_macs +} e1000_mac_type; + +typedef enum { + e1000_eeprom_uninitialized = 0, + e1000_eeprom_spi, + e1000_eeprom_microwire, + e1000_eeprom_flash, + e1000_eeprom_none, /* No NVM support */ + e1000_num_eeprom_types +} e1000_eeprom_type; + +/* Media Types */ +typedef enum { + e1000_media_type_copper = 0, + e1000_media_type_fiber = 1, + e1000_media_type_internal_serdes = 2, + e1000_num_media_types +} e1000_media_type; + +typedef enum { + e1000_10_half = 0, + e1000_10_full = 1, + e1000_100_half = 2, + e1000_100_full = 3 +} e1000_speed_duplex_type; + +/* Flow Control Settings */ +typedef enum { + E1000_FC_NONE = 0, + E1000_FC_RX_PAUSE = 1, + E1000_FC_TX_PAUSE = 2, + E1000_FC_FULL = 3, + E1000_FC_DEFAULT = 0xFF +} e1000_fc_type; + +struct e1000_shadow_ram { + u16 eeprom_word; + bool modified; +}; + +/* PCI bus types */ +typedef enum { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_reserved +} e1000_bus_type; + +/* PCI bus speeds */ +typedef enum { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_reserved +} e1000_bus_speed; + +/* PCI bus widths */ +typedef enum { + e1000_bus_width_unknown = 0, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +} e1000_bus_width; + +/* PHY status info structure and supporting enums */ +typedef enum { + e1000_cable_length_50 = 0, + e1000_cable_length_50_80, + e1000_cable_length_80_110, + e1000_cable_length_110_140, + e1000_cable_length_140, + e1000_cable_length_undefined = 0xFF +} e1000_cable_length; + +typedef enum { + e1000_gg_cable_length_60 = 0, + e1000_gg_cable_length_60_115 = 1, + e1000_gg_cable_length_115_150 = 2, + e1000_gg_cable_length_150 = 4 +} e1000_gg_cable_length; + +typedef enum { + e1000_igp_cable_length_10 = 10, + e1000_igp_cable_length_20 = 20, + e1000_igp_cable_length_30 = 30, + e1000_igp_cable_length_40 = 40, + e1000_igp_cable_length_50 = 50, + e1000_igp_cable_length_60 = 60, + e1000_igp_cable_length_70 = 70, + e1000_igp_cable_length_80 = 80, + e1000_igp_cable_length_90 = 90, + e1000_igp_cable_length_100 = 100, + e1000_igp_cable_length_110 = 110, + e1000_igp_cable_length_115 = 115, + e1000_igp_cable_length_120 = 120, + e1000_igp_cable_length_130 = 130, + e1000_igp_cable_length_140 = 140, + e1000_igp_cable_length_150 = 150, + e1000_igp_cable_length_160 = 160, + e1000_igp_cable_length_170 = 170, + e1000_igp_cable_length_180 = 180 +} e1000_igp_cable_length; + +typedef enum { + e1000_10bt_ext_dist_enable_normal = 0, + e1000_10bt_ext_dist_enable_lower, + e1000_10bt_ext_dist_enable_undefined = 0xFF +} e1000_10bt_ext_dist_enable; + +typedef enum { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +} e1000_rev_polarity; + +typedef enum { + e1000_downshift_normal = 0, + 
e1000_downshift_activated, + e1000_downshift_undefined = 0xFF +} e1000_downshift; + +typedef enum { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +} e1000_smart_speed; + +typedef enum { + e1000_polarity_reversal_enabled = 0, + e1000_polarity_reversal_disabled, + e1000_polarity_reversal_undefined = 0xFF +} e1000_polarity_reversal; + +typedef enum { + e1000_auto_x_mode_manual_mdi = 0, + e1000_auto_x_mode_manual_mdix, + e1000_auto_x_mode_auto1, + e1000_auto_x_mode_auto2, + e1000_auto_x_mode_undefined = 0xFF +} e1000_auto_x_mode; + +typedef enum { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +} e1000_1000t_rx_status; + +typedef enum { + e1000_phy_m88 = 0, + e1000_phy_igp, + e1000_phy_8211, + e1000_phy_8201, + e1000_phy_undefined = 0xFF +} e1000_phy_type; + +typedef enum { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +} e1000_ms_type; + +typedef enum { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active, + e1000_ffe_config_blocked +} e1000_ffe_config; + +typedef enum { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled, + e1000_dsp_config_activated, + e1000_dsp_config_undefined = 0xFF +} e1000_dsp_config; + +struct e1000_phy_info { + e1000_cable_length cable_length; + e1000_10bt_ext_dist_enable extended_10bt_distance; + e1000_rev_polarity cable_polarity; + e1000_downshift downshift; + e1000_polarity_reversal polarity_correction; + e1000_auto_x_mode mdix_mode; + e1000_1000t_rx_status local_rx; + e1000_1000t_rx_status remote_rx; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_eeprom_info { + e1000_eeprom_type type; + u16 word_size; + u16 opcode_bits; + u16 address_bits; + u16 delay_usec; + u16 page_size; +}; + +/* Flex ASF Information */ +#define E1000_HOST_IF_MAX_SIZE 2048 + +typedef enum { + e1000_byte_align = 0, + e1000_word_align = 1, + e1000_dword_align = 2 +} e1000_align_type; + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_EEPROM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_TYPE 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 + +#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \ + (((_value) & 0xff00) >> 8)) + +/* Function prototypes */ +/* Initialization */ +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_set_mac_type(struct e1000_hw *hw); +void e1000_set_media_type(struct e1000_hw *hw); + +/* Link Configuration */ +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex); +s32 e1000_force_mac_fc(struct e1000_hw *hw); + +/* PHY */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_reset(struct e1000_hw *hw); +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); + +/* EEPROM Functions */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw); + +/* MNG HOST IF functions */ +u32 
e1000_enable_mng_pass_thru(struct e1000_hw *hw); + +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ + +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_ICH_IAMT_MODE 0x2 +#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ + +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */ +#define E1000_VFTA_ENTRY_SHIFT 0x5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658 */ +}; +#ifdef __BIG_ENDIAN +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u16 vlan_id; + u8 reserved0; + u8 status; + u32 reserved1; + u8 checksum; + u8 reserved3; + u16 reserved2; +}; +#else +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; +#endif + +bool e1000_check_mng_mode(struct e1000_hw *hw); +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_read_mac_addr(struct e1000_hw *hw); + +/* Filters (multicast, vlan, receive) */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr); +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value); +void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); + +/* LED functions */ +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +s32 e1000_blink_led_start(struct e1000_hw *hw); + +/* Adaptive IFS Functions */ + +/* Everything else */ +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +void e1000_get_bus_info(struct e1000_hw *hw); +void e1000_pci_set_mwi(struct e1000_hw *hw); +void e1000_pci_clear_mwi(struct e1000_hw *hw); +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); +int e1000_pcix_get_mmrbc(struct e1000_hw *hw); +/* Port I/O is only supported on 82544 and newer */ +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value); + +#define E1000_READ_REG_IO(a, reg) \ + e1000_read_reg_io((a), E1000_##reg) +#define E1000_WRITE_REG_IO(a, reg, val) \ + e1000_write_reg_io((a), E1000_##reg, val) + +/* PCI Device IDs */ +#define E1000_DEV_ID_82542 0x1000 +#define E1000_DEV_ID_82543GC_FIBER 0x1001 +#define E1000_DEV_ID_82543GC_COPPER 0x1004 +#define E1000_DEV_ID_82544EI_COPPER 0x1008 +#define E1000_DEV_ID_82544EI_FIBER 0x1009 +#define E1000_DEV_ID_82544GC_COPPER 0x100C +#define E1000_DEV_ID_82544GC_LOM 0x100D +#define E1000_DEV_ID_82540EM 0x100E +#define 
E1000_DEV_ID_82540EM_LOM 0x1015 +#define E1000_DEV_ID_82540EP_LOM 0x1016 +#define E1000_DEV_ID_82540EP 0x1017 +#define E1000_DEV_ID_82540EP_LP 0x101E +#define E1000_DEV_ID_82545EM_COPPER 0x100F +#define E1000_DEV_ID_82545EM_FIBER 0x1011 +#define E1000_DEV_ID_82545GM_COPPER 0x1026 +#define E1000_DEV_ID_82545GM_FIBER 0x1027 +#define E1000_DEV_ID_82545GM_SERDES 0x1028 +#define E1000_DEV_ID_82546EB_COPPER 0x1010 +#define E1000_DEV_ID_82546EB_FIBER 0x1012 +#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define E1000_DEV_ID_82541EI 0x1013 +#define E1000_DEV_ID_82541EI_MOBILE 0x1018 +#define E1000_DEV_ID_82541ER_LOM 0x1014 +#define E1000_DEV_ID_82541ER 0x1078 +#define E1000_DEV_ID_82547GI 0x1075 +#define E1000_DEV_ID_82541GI 0x1076 +#define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82541GI_LF 0x107C +#define E1000_DEV_ID_82546GB_COPPER 0x1079 +#define E1000_DEV_ID_82546GB_FIBER 0x107A +#define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82547EI_MOBILE 0x101A +#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E + +#define NODE_ADDRESS_SIZE 6 + +/* MAC decode size is 128K - This is the size of BAR0 */ +#define MAC_DECODE_SIZE (128 * 1024) + +#define E1000_82542_2_0_REV_ID 2 +#define E1000_82542_2_1_REV_ID 3 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* The sizes (in bytes) of a ethernet packet */ +#define ENET_HEADER_SIZE 14 +#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ +#define ETHERNET_FCS_SIZE 4 +#define MINIMUM_ETHERNET_PACKET_SIZE \ + (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE) +#define CRC_LENGTH ETHERNET_FCS_SIZE +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* 802.1q VLAN Packet Sizes */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */ + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ +#define ETHERNET_IP_TYPE 0x0800 /* IP packets */ +#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */ + +/* Packet Header defines */ +#define IP_PROTOCOL_TCP 6 +#define IP_PROTOCOL_UDP 0x11 + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + */ +#define POLL_IMS_ENABLE_MASK ( \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ) + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. We + * reserve one of these spots for our directed address, allowing us room for + * E1000_RAR_ENTRIES - 1 multicast addresses. 
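/*
 * [Editor's note: illustrative sketch, not part of this patch.  It shows the
 * receive-address policy described above: RAR slot 0 holds the station
 * address, the remaining E1000_RAR_ENTRIES - 1 slots (the constant is
 * defined just below) take perfect-match addresses, and anything beyond that
 * falls back to the multicast hash table (MTA).  A caller like this would
 * live in the driver's .c code; the helper name, its parameters and the use
 * of hw->mac_addr are assumptions for this example only.]
 */
static void example_program_rx_addrs(struct e1000_hw *hw,
				     u8 (*mc_list)[NODE_ADDRESS_SIZE],
				     u32 mc_count)
{
	u32 i, rar_used = 1;	/* entry 0 is reserved for the station address */

	e1000_rar_set(hw, hw->mac_addr, 0);

	for (i = 0; i < mc_count; i++) {
		if (rar_used < E1000_RAR_ENTRIES)
			/* perfect-match filtering while RAR slots remain */
			e1000_rar_set(hw, mc_list[i], rar_used++);
		else
			/* otherwise hash the address into the MTA */
			e1000_mta_set(hw, e1000_hash_mc_addr(hw, mc_list[i]));
	}
}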
+ */ +#define E1000_RAR_ENTRIES 15 + +#define MIN_NUMBER_OF_DESCRIPTORS 8 +#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 + +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of buffers 1-3 */ + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ 
+ E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; /* */ + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; /* */ + u8 cmd; /* */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; /* */ + } fields; + } upper; +}; + +/* Filters */ +#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ +#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address Register */ +struct e1000_rar { + volatile __le32 low; /* receive address low */ + volatile __le32 high; /* receive address high */ +}; + +/* Number of entries in the 
Multicast Table Array (MTA). */ +#define E1000_NUM_MTA_REGISTERS 128 + +/* IPv4 Address Table Entry */ +struct e1000_ipv4_at_entry { + volatile u32 ipv4_addr; /* IP Address (RW) */ + volatile u32 reserved; +}; + +/* Four wakeup IP addresses are supported */ +#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 +#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX +#define E1000_IP6AT_SIZE 1 + +/* IPv6 Address Table Entry */ +struct e1000_ipv6_at_entry { + volatile u8 ipv6_addr[16]; +}; + +/* Flexible Filter Length Table Entry */ +struct e1000_fflt_entry { + volatile u32 length; /* Flexible Filter Length (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Mask Table Entry */ +struct e1000_ffmt_entry { + volatile u32 mask; /* Flexible Filter Mask (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Value Table Entry */ +struct e1000_ffvt_entry { + volatile u32 value; /* Flexible Filter Value (RW) */ + volatile u32 reserved; +}; + +/* Four Flexible Filters are supported */ +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX + +#define E1000_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Register Set. (82543, 82544) + * + * Registers are defined to be 32 bits and should be accessed as 32 bit values. + * These registers are physically located on the NIC, but are mapped into the + * host memory address space. + * + * RW - register is both readable and writable + * RO - register is read only + * WO - register is write only + * R/clr - register is read only and is cleared when read + * A - register array + */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ + +#define INTEL_CE_GBE_MDIO_RCOMP_BASE (hw->ce4100_gbe_mdio_base_virt) +#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) +#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) +#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) +#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC) +#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20) +#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24) + +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ + +/* Auxiliary Control Register. 
This register is CE4100 specific, + * RMII/RGMII function is switched by this register - RW + * Following are bits definitions of the Auxiliary Control Register + */ +#define E1000_CTL_AUX 0x000E0 +#define E1000_CTL_AUX_END_SEL_SHIFT 10 +#define E1000_CTL_AUX_ENDIANESS_SHIFT 8 +#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0 + +/* descriptor and packet transfer use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use CTL_AUX.ENDIANESS, packet use default */ +#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use default, packet use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT) +/* all use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT) + +#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT) +#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT) + +/* LW little endian, Byte big endian */ +#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT) + +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define FEXTNVM_SW_CONFIG 0x0001 +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_FLASH_UPDATES 1000 +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* RX Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* RX Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* RX 
Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* RX Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* RX Data FIFO Packet Count - RW */ +#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ +#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ +#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ +#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ +#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ +#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ +#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */ +#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ +#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ +#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ +#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ +#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ +#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ +#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ +#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ +#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */ +#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */ +#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */ +#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */ +#define E1000_TDT 0x03818 /* TX Descripotr Tail - RW */ +#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */ +#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ +#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ +#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ +#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ +#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ +#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ +#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ +#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ +#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define 
E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 
0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ + +#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ + +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* Register Set (82542) + * + * Some of the 82542 registers are located at different offsets than they are + * in more current versions of the 8254x. Despite the difference in location, + * the registers function in the same manner. 
+ */ +#define E1000_82542_CTL_AUX E1000_CTL_AUX +#define E1000_82542_CTRL E1000_CTRL +#define E1000_82542_CTRL_DUP E1000_CTRL_DUP +#define E1000_82542_STATUS E1000_STATUS +#define E1000_82542_EECD E1000_EECD +#define E1000_82542_EERD E1000_EERD +#define E1000_82542_CTRL_EXT E1000_CTRL_EXT +#define E1000_82542_FLA E1000_FLA +#define E1000_82542_MDIC E1000_MDIC +#define E1000_82542_SCTL E1000_SCTL +#define E1000_82542_FEXTNVM E1000_FEXTNVM +#define E1000_82542_FCAL E1000_FCAL +#define E1000_82542_FCAH E1000_FCAH +#define E1000_82542_FCT E1000_FCT +#define E1000_82542_VET E1000_VET +#define E1000_82542_RA 0x00040 +#define E1000_82542_ICR E1000_ICR +#define E1000_82542_ITR E1000_ITR +#define E1000_82542_ICS E1000_ICS +#define E1000_82542_IMS E1000_IMS +#define E1000_82542_IMC E1000_IMC +#define E1000_82542_RCTL E1000_RCTL +#define E1000_82542_RDTR 0x00108 +#define E1000_82542_RDFH E1000_RDFH +#define E1000_82542_RDFT E1000_RDFT +#define E1000_82542_RDFHS E1000_RDFHS +#define E1000_82542_RDFTS E1000_RDFTS +#define E1000_82542_RDFPC E1000_RDFPC +#define E1000_82542_RDBAL 0x00110 +#define E1000_82542_RDBAH 0x00114 +#define E1000_82542_RDLEN 0x00118 +#define E1000_82542_RDH 0x00120 +#define E1000_82542_RDT 0x00128 +#define E1000_82542_RDTR0 E1000_82542_RDTR +#define E1000_82542_RDBAL0 E1000_82542_RDBAL +#define E1000_82542_RDBAH0 E1000_82542_RDBAH +#define E1000_82542_RDLEN0 E1000_82542_RDLEN +#define E1000_82542_RDH0 E1000_82542_RDH +#define E1000_82542_RDT0 E1000_82542_RDT +#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication + * RX Control - RW */ +#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) +#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ +#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ +#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ +#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ +#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ +#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ +#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ +#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ +#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ +#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ +#define E1000_82542_RDTR1 0x00130 +#define E1000_82542_RDBAL1 0x00138 +#define E1000_82542_RDBAH1 0x0013C +#define E1000_82542_RDLEN1 0x00140 +#define E1000_82542_RDH1 0x00148 +#define E1000_82542_RDT1 0x00150 +#define E1000_82542_FCRTH 0x00160 +#define E1000_82542_FCRTL 0x00168 +#define E1000_82542_FCTTV E1000_FCTTV +#define E1000_82542_TXCW E1000_TXCW +#define E1000_82542_RXCW E1000_RXCW +#define E1000_82542_MTA 0x00200 +#define E1000_82542_TCTL E1000_TCTL +#define E1000_82542_TCTL_EXT E1000_TCTL_EXT +#define E1000_82542_TIPG E1000_TIPG +#define E1000_82542_TDBAL 0x00420 +#define E1000_82542_TDBAH 0x00424 +#define E1000_82542_TDLEN 0x00428 +#define E1000_82542_TDH 0x00430 +#define E1000_82542_TDT 0x00438 +#define E1000_82542_TIDV 0x00440 +#define E1000_82542_TBT E1000_TBT +#define E1000_82542_AIT E1000_AIT +#define E1000_82542_VFTA 0x00600 +#define E1000_82542_LEDCTL E1000_LEDCTL +#define E1000_82542_PBA E1000_PBA +#define E1000_82542_PBS E1000_PBS +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_EEARBC E1000_EEARBC +#define E1000_82542_FLASHT E1000_FLASHT +#define E1000_82542_EEWR E1000_EEWR +#define E1000_82542_FLSWCTL E1000_FLSWCTL +#define E1000_82542_FLSWDATA 
E1000_FLSWDATA +#define E1000_82542_FLSWCNT E1000_FLSWCNT +#define E1000_82542_FLOP E1000_FLOP +#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL +#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE +#define E1000_82542_PHY_CTRL E1000_PHY_CTRL +#define E1000_82542_ERT E1000_ERT +#define E1000_82542_RXDCTL E1000_RXDCTL +#define E1000_82542_RXDCTL1 E1000_RXDCTL1 +#define E1000_82542_RADV E1000_RADV +#define E1000_82542_RSRPD E1000_RSRPD +#define E1000_82542_TXDMAC E1000_TXDMAC +#define E1000_82542_KABGTXD E1000_KABGTXD +#define E1000_82542_TDFHS E1000_TDFHS +#define E1000_82542_TDFTS E1000_TDFTS +#define E1000_82542_TDFPC E1000_TDFPC +#define E1000_82542_TXDCTL E1000_TXDCTL +#define E1000_82542_TADV E1000_TADV +#define E1000_82542_TSPMT E1000_TSPMT +#define E1000_82542_CRCERRS E1000_CRCERRS +#define E1000_82542_ALGNERRC E1000_ALGNERRC +#define E1000_82542_SYMERRS E1000_SYMERRS +#define E1000_82542_RXERRC E1000_RXERRC +#define E1000_82542_MPC E1000_MPC +#define E1000_82542_SCC E1000_SCC +#define E1000_82542_ECOL E1000_ECOL +#define E1000_82542_MCC E1000_MCC +#define E1000_82542_LATECOL E1000_LATECOL +#define E1000_82542_COLC E1000_COLC +#define E1000_82542_DC E1000_DC +#define E1000_82542_TNCRS E1000_TNCRS +#define E1000_82542_SEC E1000_SEC +#define E1000_82542_CEXTERR E1000_CEXTERR +#define E1000_82542_RLEC E1000_RLEC +#define E1000_82542_XONRXC E1000_XONRXC +#define E1000_82542_XONTXC E1000_XONTXC +#define E1000_82542_XOFFRXC E1000_XOFFRXC +#define E1000_82542_XOFFTXC E1000_XOFFTXC +#define E1000_82542_FCRUC E1000_FCRUC +#define E1000_82542_PRC64 E1000_PRC64 +#define E1000_82542_PRC127 E1000_PRC127 +#define E1000_82542_PRC255 E1000_PRC255 +#define E1000_82542_PRC511 E1000_PRC511 +#define E1000_82542_PRC1023 E1000_PRC1023 +#define E1000_82542_PRC1522 E1000_PRC1522 +#define E1000_82542_GPRC E1000_GPRC +#define E1000_82542_BPRC E1000_BPRC +#define E1000_82542_MPRC E1000_MPRC +#define E1000_82542_GPTC E1000_GPTC +#define E1000_82542_GORCL E1000_GORCL +#define E1000_82542_GORCH E1000_GORCH +#define E1000_82542_GOTCL E1000_GOTCL +#define E1000_82542_GOTCH E1000_GOTCH +#define E1000_82542_RNBC E1000_RNBC +#define E1000_82542_RUC E1000_RUC +#define E1000_82542_RFC E1000_RFC +#define E1000_82542_ROC E1000_ROC +#define E1000_82542_RJC E1000_RJC +#define E1000_82542_MGTPRC E1000_MGTPRC +#define E1000_82542_MGTPDC E1000_MGTPDC +#define E1000_82542_MGTPTC E1000_MGTPTC +#define E1000_82542_TORL E1000_TORL +#define E1000_82542_TORH E1000_TORH +#define E1000_82542_TOTL E1000_TOTL +#define E1000_82542_TOTH E1000_TOTH +#define E1000_82542_TPR E1000_TPR +#define E1000_82542_TPT E1000_TPT +#define E1000_82542_PTC64 E1000_PTC64 +#define E1000_82542_PTC127 E1000_PTC127 +#define E1000_82542_PTC255 E1000_PTC255 +#define E1000_82542_PTC511 E1000_PTC511 +#define E1000_82542_PTC1023 E1000_PTC1023 +#define E1000_82542_PTC1522 E1000_PTC1522 +#define E1000_82542_MPTC E1000_MPTC +#define E1000_82542_BPTC E1000_BPTC +#define E1000_82542_TSCTC E1000_TSCTC +#define E1000_82542_TSCTFC E1000_TSCTFC +#define E1000_82542_RXCSUM E1000_RXCSUM +#define E1000_82542_WUC E1000_WUC +#define E1000_82542_WUFC E1000_WUFC +#define E1000_82542_WUS E1000_WUS +#define E1000_82542_MANC E1000_MANC +#define E1000_82542_IPAV E1000_IPAV +#define E1000_82542_IP4AT E1000_IP4AT +#define E1000_82542_IP6AT E1000_IP6AT +#define E1000_82542_WUPL E1000_WUPL +#define E1000_82542_WUPM E1000_WUPM +#define E1000_82542_FFLT E1000_FFLT +#define E1000_82542_TDFH 0x08010 +#define E1000_82542_TDFT 0x08018 +#define E1000_82542_FFMT E1000_FFMT +#define 
E1000_82542_FFVT E1000_FFVT +#define E1000_82542_HOST_IF E1000_HOST_IF +#define E1000_82542_IAM E1000_IAM +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_PSRCTL E1000_PSRCTL +#define E1000_82542_RAID E1000_RAID +#define E1000_82542_TARC0 E1000_TARC0 +#define E1000_82542_TDBAL1 E1000_TDBAL1 +#define E1000_82542_TDBAH1 E1000_TDBAH1 +#define E1000_82542_TDLEN1 E1000_TDLEN1 +#define E1000_82542_TDH1 E1000_TDH1 +#define E1000_82542_TDT1 E1000_TDT1 +#define E1000_82542_TXDCTL1 E1000_TXDCTL1 +#define E1000_82542_TARC1 E1000_TARC1 +#define E1000_82542_RFCTL E1000_RFCTL +#define E1000_82542_GCR E1000_GCR +#define E1000_82542_GSCL_1 E1000_GSCL_1 +#define E1000_82542_GSCL_2 E1000_GSCL_2 +#define E1000_82542_GSCL_3 E1000_GSCL_3 +#define E1000_82542_GSCL_4 E1000_GSCL_4 +#define E1000_82542_FACTPS E1000_FACTPS +#define E1000_82542_SWSM E1000_SWSM +#define E1000_82542_FWSM E1000_FWSM +#define E1000_82542_FFLT_DBG E1000_FFLT_DBG +#define E1000_82542_IAC E1000_IAC +#define E1000_82542_ICRXPTC E1000_ICRXPTC +#define E1000_82542_ICRXATC E1000_ICRXATC +#define E1000_82542_ICTXPTC E1000_ICTXPTC +#define E1000_82542_ICTXATC E1000_ICTXATC +#define E1000_82542_ICTXQEC E1000_ICTXQEC +#define E1000_82542_ICTXQMTC E1000_ICTXQMTC +#define E1000_82542_ICRXDMTC E1000_ICRXDMTC +#define E1000_82542_ICRXOC E1000_ICRXOC +#define E1000_82542_HICR E1000_HICR + +#define E1000_82542_CPUVEC E1000_CPUVEC +#define E1000_82542_MRQC E1000_MRQC +#define E1000_82542_RETA E1000_RETA +#define E1000_82542_RSSRK E1000_RSSRK +#define E1000_82542_RSSIM E1000_RSSIM +#define E1000_82542_RSSIR E1000_RSSIR +#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA +#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 txerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorcl; + u64 gorch; + u64 gotcl; + u64 gotch; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rlerrc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 torl; + u64 torh; + u64 totl; + u64 toth; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +/* Structure containing variables used by the shared code (e1000_hw.c) */ +struct e1000_hw { + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + void __iomem *ce4100_gbe_mdio_base_virt; + e1000_mac_type mac_type; + e1000_phy_type phy_type; + u32 phy_init_script; + e1000_media_type media_type; + void *back; + struct e1000_shadow_ram *eeprom_shadow_ram; + u32 flash_bank_size; + u32 flash_base_addr; + e1000_fc_type fc; + e1000_bus_speed bus_speed; + e1000_bus_width bus_width; + e1000_bus_type bus_type; + struct e1000_eeprom_info eeprom; + e1000_ms_type master_slave; + e1000_ms_type original_master_slave; + e1000_ffe_config ffe_config_state; + u32 asf_firmware_present; + u32 eeprom_semaphore_present; + unsigned long io_base; + u32 phy_id; + u32 phy_revision; + u32 phy_addr; + u32 original_fc; + u32 txcw; + u32 autoneg_failed; + u32 max_frame_size; + u32 
min_frame_size; + u32 mc_filter_type; + u32 num_mc_addrs; + u32 collision_delta; + u32 tx_packet_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + bool tx_pkt_filtering; + struct e1000_host_mng_dhcp_cookie mng_cookie; + u16 phy_spd_default; + u16 autoneg_advertised; + u16 pci_cmd_word; + u16 fc_high_water; + u16 fc_low_water; + u16 fc_pause_time; + u16 current_ifs_val; + u16 ifs_min_val; + u16 ifs_max_val; + u16 ifs_step_size; + u16 ifs_ratio; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 autoneg; + u8 mdix; + u8 forced_speed_duplex; + u8 wait_autoneg_complete; + u8 dma_fairness; + u8 mac_addr[NODE_ADDRESS_SIZE]; + u8 perm_mac_addr[NODE_ADDRESS_SIZE]; + bool disable_polarity_correction; + bool speed_downgraded; + e1000_smart_speed smart_speed; + e1000_dsp_config dsp_config_state; + bool get_link_status; + bool serdes_has_link; + bool tbi_compatibility_en; + bool tbi_compatibility_on; + bool laa_is_present; + bool phy_reset_disable; + bool initialize_hw_bits_disable; + bool fc_send_xon; + bool fc_strict_ieee; + bool report_tx_early; + bool adaptive_ifs; + bool ifs_params_forced; + bool in_ifs_mode; + bool mng_reg_access_disabled; + bool leave_av_bit_off; + bool bad_tx_carr_stats_fd; + bool has_smbus; +}; + +#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ +#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ +#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ +#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ +/* Register Bit Masks */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion + by EEPROM/Flash */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ + +/* EEPROM/Flash Control */ +#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ +#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ +#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ +#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_EEPROM_GRANT_ATTEMPTS +#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_STM_OPCODE 0xDB00 +#define E1000_HICR_FW_RESET 0xC0 + +#define E1000_SHADOW_RAM_WORDS 2048 +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC0 + +/* EEPROM Read */ +#define E1000_EERD_START 0x00000001 /* Start Read */ +#define E1000_EERD_DONE 0x00000010 /* Read Done */ +#define E1000_EERD_ADDR_SHIFT 8 +#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */ +#define E1000_EERD_DATA_SHIFT 16 +#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */ + +/* SPI EEPROM Status Register */ +#define EEPROM_STATUS_RDY_SPI 0x01 +#define EEPROM_STATUS_WEN_SPI 
0x02 +#define EEPROM_STATUS_BP0_SPI 0x04 +#define EEPROM_STATUS_BP1_SPI 0x08 +#define EEPROM_STATUS_WPEN_SPI 0x80 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 + +#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000 +#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000 +#define INTEL_CE_GBE_MDIC_GO 0x80000000 +#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000 + +#define E1000_KUMCTRLSTA_MASK 0x0000FFFF +#define E1000_KUMCTRLSTA_OFFSET 0x001F0000 +#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KUMCTRLSTA_REN 0x00200000 + +#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 +#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001 +#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 +#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003 +#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 
0x00000004 +#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 +#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 +#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E +#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F + +/* FIFO Control */ +#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008 +#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 + +/* In-Band Control */ +#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500 +#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 + +/* Half-Duplex Control */ +#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 +#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 + +#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E + +#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000 +#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000 + +#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000 +#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000 +#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 +#define E1000_PHY_CTRL_B2B_EN 0x00000080 + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Receive Address */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Clear */ +#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMC_SRPD E1000_ICR_SRPD +#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMC_DSW E1000_ICR_DSW +#define E1000_IMC_PHYINT E1000_ICR_PHYINT +#define E1000_IMC_EPRST E1000_ICR_EPRST + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define 
E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SW_W_SYNC definitions */ +#define E1000_SWFW_EEP_SM 0x0001 +#define E1000_SWFW_PHY0_SM 0x0002 +#define E1000_SWFW_PHY1_SM 0x0004 +#define E1000_SWFW_MAC_CSR_SM 0x0008 + +/* Receive Descriptor */ +#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ +#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ +#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */ +#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */ +#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */ + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Receive Descriptor Control */ +#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ +#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ +#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */ +#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 
0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. + still to be processed. */ +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ +/* Extended Transmit Control */ +#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ + +/* Receive Checksum Control */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Multiple Receive Queue Control */ +#define E1000_MRQC_ENABLE_MASK 0x00000003 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 
/* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */ +#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */ +#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */ +#define E1000_WUS_BC 0x00000010 /* Broadcast Received */ +#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */ +#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */ +#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */ +#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */ +#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */ +#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery + * Filtering */ +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host + * memory */ +#define 
E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address + * filtering */ +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* FW Semaphore Register */ +#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ + +#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ +#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ +#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ +#define E1000_FWSM_SKUEL_SHIFT 29 +#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ +#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ +#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ +#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */ + +/* FFLT Debug Register */ +#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ + +typedef enum { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_interface_only +} e1000_mng_mode; + +/* Host Interface Control Register */ +#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ +#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done + * to put command in RAM */ +#define E1000_HICR_SV 0x00000004 /* Status Validity */ +#define E1000_HICR_FWR 0x00000080 /* FW reset. 
Set by the Host */ + +/* Host Interface Command Interface - Address range 0x8800-0x8EFF */ +#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ + +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; /* I/F bits for command, status for return */ + u8 checksum; +}; +struct e1000_host_command_info { + struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ +}; + +/* Host SMB register #0 */ +#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ +#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ +#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ +#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ + +/* Host SMB register #1 */ +#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN +#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN +#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT +#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT + +/* FW Status Register */ +#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ + +/* Wake Up Packet Length */ +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ + +#define E1000_MDALIGN 4096 + +/* PCI-Ex registers*/ + +/* PCI-Ex Control Register */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +/* Function Active and Power State to MNG */ +#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 +#define E1000_FACTPS_LAN0_VALID 0x00000004 +#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008 +#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0 +#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6 +#define E1000_FACTPS_LAN1_VALID 0x00000100 +#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200 +#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000 +#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12 +#define E1000_FACTPS_IDE_ENABLE 0x00004000 +#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000 +#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000 +#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18 +#define E1000_FACTPS_SP_ENABLE 0x00100000 +#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000 +#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000 +#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24 +#define E1000_FACTPS_IPMI_ENABLE 0x04000000 +#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000 +#define E1000_FACTPS_MNGCG 0x20000000 +#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000 +#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000 + +/* PCI-Ex Config Space */ +#define PCI_EX_LINK_STATUS 0x12 +#define PCI_EX_LINK_WIDTH_MASK 0x3F0 +#define PCI_EX_LINK_WIDTH_SHIFT 4 + +/* EEPROM Commands - Microwire */ +#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode 
*/ +#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM Commands - SPI */ +#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ +#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ +#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ +#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ +#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Size definitions */ +#define EEPROM_WORD_SIZE_SHIFT 6 +#define EEPROM_SIZE_SHIFT 10 +#define EEPROM_SIZE_MASK 0x1C00 + +/* EEPROM Word Offsets */ +#define EEPROM_COMPAT 0x0003 +#define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 +#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ +#define EEPROM_PHY_CLASS_WORD 0x0007 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 +#define EEPROM_INIT_3GIO_3 0x001A +#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 +#define EEPROM_CFG 0x0012 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ +#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */ +#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F + +/* Mask bit for PHY class in Word 7 of the EEPROM */ +#define EEPROM_PHY_CLASS_A 0x8000 + +/* Mask bits for fields in Word 0x0a of the EEPROM */ +#define EEPROM_WORD0A_ILOS 0x0010 +#define EEPROM_WORD0A_SWDPIO 0x01E0 +#define EEPROM_WORD0A_LRST 0x0200 +#define EEPROM_WORD0A_FD 0x0400 +#define EEPROM_WORD0A_66MHZ 0x0800 + +/* Mask bits for fields in Word 0x0f of the EEPROM */ +#define EEPROM_WORD0F_PAUSE_MASK 0x3000 +#define EEPROM_WORD0F_PAUSE 0x1000 +#define EEPROM_WORD0F_ASM_DIR 0x2000 +#define EEPROM_WORD0F_ANE 0x0800 +#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 +#define EEPROM_WORD0F_LPLU 0x0001 + +/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */ +#define EEPROM_WORD1020_GIGA_DISABLE 0x0010 +#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008 + +/* Mask bits for fields in Word 0x1a of the EEPROM */ +#define EEPROM_WORD1A_ASPM_MASK 0x000C + 
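+/* Illustrative sketch (not part of the original header): the EEPROM word
+ * offsets above end at EEPROM_CHECKSUM_REG, and the comment that follows
+ * states that all words up to and including it must sum to EEPROM_SUM
+ * (0xBABA). Assuming a hypothetical read_eeprom_word(hw, offset) helper
+ * that returns the 16-bit word at the given offset, a validation routine
+ * could look like:
+ *
+ *     u16 offset, checksum = 0;
+ *     for (offset = 0; offset <= EEPROM_CHECKSUM_REG; offset++)
+ *         checksum += read_eeprom_word(hw, offset);
+ *     if (checksum != EEPROM_SUM)
+ *         return -1;    (error code assumed)
+ */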
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* EEPROM Map defines (WORD OFFSETS)*/ +#define EEPROM_NODE_ADDRESS_BYTE_0 0 +#define EEPROM_PBA_BYTE_1 8 + +#define EEPROM_RESERVED_WORD 0xFFFF + +/* EEPROM Map Sizes (Byte Counts) */ +#define PBA_SIZE 4 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +/* Collision distance is a 0-based value that applies to + * half-duplex-capable hardware only. */ +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLLISION_DISTANCE_82542 64 +#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_COLD_SHIFT 12 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define E1000_TXDMAC_DPP 0x00000001 + +/* Adaptive IFS defines */ +#define TX_THRESHOLD_START 8 +#define TX_THRESHOLD_INCREMENT 10 +#define TX_THRESHOLD_DECREMENT 1 +#define TX_THRESHOLD_STOP 190 +#define TX_THRESHOLD_DISABLE 0 +#define TX_THRESHOLD_TIMER_MS 10000 +#define MIN_NUM_XMITS 1000 +#define IFS_MAX 80 +#define IFS_STEP 10 +#define IFS_MIN 40 +#define IFS_RATIO 4 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 +#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 +#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 +#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000 + +#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF +#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ +#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ +#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ + +#define E1000_PBS_16K E1000_PBA_16K + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* The historical defaults for the flow control values are given below. 
*/ +#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ +#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ +#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ + +/* PCIX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 + +/* Number of bits required to shift right the "pause" bits from the + * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register. + */ +#define PAUSE_SHIFT 5 + +/* Number of bits required to shift left the "SWDPIO" bits from the + * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register. + */ +#define SWDPIO_SHIFT 17 + +/* Number of bits required to shift left the "SWDPIO_EXT" bits from the + * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register. + */ +#define SWDPIO__EXT_SHIFT 4 + +/* Number of bits required to shift left the "ILOS" bit from the EEPROM + * (bit 4) to the "ILOS" (bit 7) field in the CTRL register. + */ +#define ILOS_SHIFT 3 + +#define RECEIVE_BUFFER_ALIGN_SIZE (256) + +/* Number of milliseconds we wait for auto-negotiation to complete */ +#define LINK_UP_TIMEOUT 500 + +/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ +#define AUTO_READ_DONE_TIMEOUT 10 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 + +#define E1000_TX_BUFFER_SIZE ((u32)1514) + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +/* TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the RX descriptor with EOP set + * error = the 8 bit error field of the RX descriptor with EOP set + * length = the sum of all the length fields of the RX descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * max_frame_length = the maximum frame length we want to accept. + * min_frame_length = the minimum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \ + ((adapter)->tbi_compatibility_on && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= ((adapter)->max_frame_size + 1))) : \ + (((length) > (adapter)->min_frame_size) && \ + ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1))))) + +/* Structures, enums, and macros for the PHY */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CTRL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +#define IGP01E1000_IEEE_REGS_PAGE 0x0000 +#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 +#define IGP01E1000_IEEE_FORCE_GIGA 0x0140 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ + +/* IGP01E1000 AGC Registers - stores the cable length values*/ +#define IGP01E1000_PHY_AGC_A 0x1172 +#define IGP01E1000_PHY_AGC_B 0x1272 +#define IGP01E1000_PHY_AGC_C 0x1472 +#define IGP01E1000_PHY_AGC_D 0x1872 + +/* IGP02E1000 AGC Registers for cable length values */ +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +/* IGP01E1000 DSP Reset Register */ +#define IGP01E1000_PHY_DSP_RESET 0x1F33 +#define IGP01E1000_PHY_DSP_SET 0x1F71 +#define IGP01E1000_PHY_DSP_FFE 0x1F35 + +#define IGP01E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_CHANNEL_NUM 4 + +#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271 +#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 +#define IGP01E1000_PHY_AGC_PARAM_D 0x1871 + +#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000 +#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000 + +#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890 +#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000 +#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004 +#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069 + +#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A +/* IGP01E1000 PCS Initialization register - stores the polarity status when + * speed = 1000 Mbps. */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5 + +#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */ + +/* Next Page TX Register */ +#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* Link Partner Next Page Register */ +#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */ +#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 
/* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +/* Extended Status Register */ +#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */ +#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */ + +#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */ + /* (0=enable, 1=disable) */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. 
+ */ +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080 + /* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 + /* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5 +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; + * 3=110-140M;4=>140M */ +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_REV_POLARITY_SHIFT 1 +#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5 +#define M88E1000_PSSR_MDIX_SHIFT 6 +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* IGP01E1000 Specific Port Config Register - R/W */ +#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 +#define IGP01E1000_PSCFR_PRE_EN 0x0020 +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 +#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100 +#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400 +#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000 + +/* IGP01E1000 Specific Port Status Register - R/O */ +#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */ +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C +#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200 +#define IGP01E1000_PSSR_LINK_UP 0x0400 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */ +#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000 +#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */ +#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */ + +/* IGP01E1000 Specific Port Control Register - R/W */ +#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010 +#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200 +#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400 +#define IGP01E1000_PSCR_FLIP_CHIP 0x0800 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */ + +/* IGP01E1000 Specific Port Link Health Register */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000 +#define IGP01E1000_PLHR_MASTER_FAULT 0x2000 +#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000 +#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */ +#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_0 0x0100 +#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040 +#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010 +#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008 +#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004 +#define 
IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002 +#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001 + +/* IGP01E1000 Channel Quality Register */ +#define IGP01E1000_MSE_CHANNEL_D 0x000F +#define IGP01E1000_MSE_CHANNEL_C 0x00F0 +#define IGP01E1000_MSE_CHANNEL_B 0x0F00 +#define IGP01E1000_MSE_CHANNEL_A 0xF000 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ + +/* IGP01E1000 DSP reset macros */ +#define DSP_RESET_ENABLE 0x0 +#define DSP_RESET_DISABLE 0x2 +#define E1000_MAX_DSP_RESETS 10 + +/* IGP01E1000 & IGP02E1000 AGC Registers */ + +#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ + +/* IGP02E1000 AGC Register Length 9-bit mask */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F + +/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ +#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 +#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113 + +/* The precision error of the cable length is +/- 10 meters */ +#define IGP01E1000_AGC_RANGE 10 +#define IGP02E1000_AGC_RANGE 15 + +/* IGP01E1000 PCS Initialization register */ +/* bits 3:6 in the PCS registers stores the channels polarity */ +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +/* IGP01E1000 GMII FIFO Register */ +#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed + * on Link-Up */ +#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */ + +/* IGP01E1000 Analog Register */ +#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 +#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 +#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC +#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE + +#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 +#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 +#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 +#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 +#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 + +#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 +#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 +#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 +#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 + +/* Bit definitions for valid PHY IDs. */ +/* I = Integrated + * E = External + */ +#define M88_VENDOR 0x0141 +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID +#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1118_E_PHY_ID 0x01410E40 +#define L1LXT971A_PHY_ID 0x001378E0 + +#define RTL8211B_PHY_ID 0x001CC910 +#define RTL8201N_PHY_ID 0x8200 +#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */ +#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */ + +/* Bits... 
+ * 15-5: page + * 4-0: register offset + */ +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) \ + (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) + +#define IGP3_PHY_PORT_CTRL \ + PHY_REG(769, 17) /* Port General Configuration */ +#define IGP3_PHY_RATE_ADAPT_CTRL \ + PHY_REG(769, 25) /* Rate Adapter Control Register */ + +#define IGP3_KMRN_FIFO_CTRL_STATS \ + PHY_REG(770, 16) /* KMRN FIFO's control/status register */ +#define IGP3_KMRN_POWER_MNG_CTRL \ + PHY_REG(770, 17) /* KMRN Power Management Control Register */ +#define IGP3_KMRN_INBAND_CTRL \ + PHY_REG(770, 18) /* KMRN Inband Control Register */ +#define IGP3_KMRN_DIAG \ + PHY_REG(770, 19) /* KMRN Diagnostic register */ +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ +#define IGP3_KMRN_ACK_TIMEOUT \ + PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ + +#define IGP3_VR_CTRL \ + PHY_REG(776, 18) /* Voltage regulator control register */ +#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ +#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */ + +#define IGP3_CAPABILITY \ + PHY_REG(776, 19) /* IGP3 Capability Register */ + +/* Capabilities for SKU Control */ +#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ +#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ +#define IGP3_CAP_ASF 0x0004 /* Support ASF */ +#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ +#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ +#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ +#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ +#define IGP3_CAP_RSS 0x0080 /* Support RSS */ +#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ +#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ + +#define IGP3_PPC_JORDAN_EN 0x0001 +#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 + +#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001 +#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E +#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 +#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 + +#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. 
Ctrl register */ +#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ + +#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) +#define IGP3_KMRN_EC_DIS_INBAND 0x0080 + +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ +#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ +#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */ +#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ +#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ +#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ +#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ +#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ +#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ +#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ + +#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */ +#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ +#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ +#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ +#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ +#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ +#define IFE_PESC_POLARITY_REVERSED_SHIFT 8 + +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */ +#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ +#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ +#define IFE_PSC_FORCE_POLARITY_SHIFT 5 +#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 + +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ +#define IFE_PMC_MDIX_MODE_SHIFT 6 +#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ + +#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ +#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ +#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ +#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ +#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ +#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ +#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ +#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ +#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ +#define IFE_PSCL_PROBE_LEDS_OFF 
0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ +#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define ICH_CYCLE_READ 0x0 +#define ICH_CYCLE_RESERVED 0x1 +#define ICH_CYCLE_WRITE 0x2 +#define ICH_CYCLE_ERASE 0x3 + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_FRACC 0x0050 +#define ICH_FLASH_FREG0 0x0054 +#define ICH_FLASH_FREG1 0x0058 +#define ICH_FLASH_FREG2 0x005C +#define ICH_FLASH_FREG3 0x0060 +#define ICH_FLASH_FPR0 0x0074 +#define ICH_FLASH_FPR1 0x0078 +#define ICH_FLASH_SSFSTS 0x0090 +#define ICH_FLASH_SSFCTL 0x0092 +#define ICH_FLASH_PREOP 0x0094 +#define ICH_FLASH_OPTYPE 0x0096 +#define ICH_FLASH_OPMENU 0x0098 + +#define ICH_FLASH_REG_MAPSIZE 0x00A0 +#define ICH_FLASH_SECTOR_SIZE 4096 +#define ICH_GFPREG_BASE_MASK 0x1FFF +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF + +/* Miscellaneous PHY bit definitions. */ +#define PHY_PREAMBLE 0xFFFFFFFF +#define PHY_SOF 0x01 +#define PHY_OP_READ 0x02 +#define PHY_OP_WRITE 0x01 +#define PHY_TURNAROUND 0x02 +#define PHY_PREAMBLE_SIZE 32 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 +#define E1000_PHY_ADDRESS 0x01 +#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */ +#define REG4_SPEED_MASK 0x01E0 +#define REG9_SPEED_MASK 0x0300 +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 +#define ADVERTISE_1000_FULL 0x0020 +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ +#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ +#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ + +#endif /* _E1000_HW_H_ */ diff --git a/devices/e1000/e1000_hw-5.15-orig.c b/devices/e1000/e1000_hw-5.15-orig.c new file mode 100644 index 00000000..1042e79a --- /dev/null +++ b/devices/e1000/e1000_hw-5.15-orig.c @@ -0,0 +1,5636 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ + +/* e1000_hw.c + * Shared functions for accessing and configuring the MAC + */ + +#include "e1000.h" + +static s32 e1000_check_downshift(struct e1000_hw *hw); +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity); +static void e1000_clear_hw_cntrs(struct e1000_hw *hw); +static void e1000_clear_vfta(struct e1000_hw *hw); +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, + bool link_up); +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); +static s32 e1000_detect_gig_phy(struct e1000_hw *hw); +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length); +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static s32 e1000_id_led_init(struct e1000_hw *hw); +static void e1000_init_rx_addrs(struct e1000_hw *hw); +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value); +static s32 e1000_set_phy_type(struct e1000_hw *hw); +static void e1000_phy_init_script(struct e1000_hw *hw); +static s32 e1000_setup_copper_link(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw); +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw); +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count); +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count); +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data); +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data); +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); +static s32 e1000_acquire_eeprom(struct e1000_hw *hw); +static void e1000_release_eeprom(struct e1000_hw *hw); +static void e1000_standby_eeprom(struct e1000_hw *hw); +static s32 e1000_set_vco_speed(struct e1000_hw *hw); +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw); +static s32 e1000_set_phy_mode(struct e1000_hw *hw); +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); + +/* IGP cable length table */ +static const +u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = { + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, + 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, + 40, 50, 50, 50, 50, 
50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, + 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, + 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, + 110, 110, + 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, + 120, 120 +}; + +static DEFINE_MUTEX(e1000_eeprom_lock); +static DEFINE_SPINLOCK(e1000_phy_lock); + +/** + * e1000_set_phy_type - Set the phy type member in the hw struct. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_set_phy_type(struct e1000_hw *hw) +{ + if (hw->mac_type == e1000_undefined) + return -E1000_ERR_PHY_TYPE; + + switch (hw->phy_id) { + case M88E1000_E_PHY_ID: + case M88E1000_I_PHY_ID: + case M88E1011_I_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1118_E_PHY_ID: + hw->phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: + if (hw->mac_type == e1000_82541 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_type = e1000_phy_igp; + break; + case RTL8211B_PHY_ID: + hw->phy_type = e1000_phy_8211; + break; + case RTL8201N_PHY_ID: + hw->phy_type = e1000_phy_8201; + break; + default: + /* Should never have loaded on this device */ + hw->phy_type = e1000_phy_undefined; + return -E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_phy_init_script(struct e1000_hw *hw) +{ + u16 phy_saved_data; + + if (hw->phy_init_script) { + msleep(20); + + /* Save off the current value of register 0x2F5B to be restored + * at the end of this routine. 
+ */ + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disabled the PHY transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + msleep(20); + + e1000_write_phy_reg(hw, 0x0000, 0x0140); + msleep(5); + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + e1000_write_phy_reg(hw, 0x1F95, 0x0001); + e1000_write_phy_reg(hw, 0x1F71, 0xBD21); + e1000_write_phy_reg(hw, 0x1F79, 0x0018); + e1000_write_phy_reg(hw, 0x1F30, 0x1600); + e1000_write_phy_reg(hw, 0x1F31, 0x0014); + e1000_write_phy_reg(hw, 0x1F32, 0x161C); + e1000_write_phy_reg(hw, 0x1F94, 0x0003); + e1000_write_phy_reg(hw, 0x1F96, 0x003F); + e1000_write_phy_reg(hw, 0x2010, 0x0008); + break; + + case e1000_82541_rev_2: + case e1000_82547_rev_2: + e1000_write_phy_reg(hw, 0x1F73, 0x0099); + break; + default: + break; + } + + e1000_write_phy_reg(hw, 0x0000, 0x3300); + msleep(20); + + /* Now enable the transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (hw->mac_type == e1000_82547) { + u16 fused, fine, coarse; + + /* Move to analog registers page */ + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_SPARE_FUSE_STATUS, + &fused); + + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_STATUS, + &fused); + + fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; + coarse = + fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; + + if (coarse > + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + coarse -= + IGP01E1000_ANALOG_FUSE_COARSE_10; + fine -= IGP01E1000_ANALOG_FUSE_FINE_1; + } else if (coarse == + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + fine -= IGP01E1000_ANALOG_FUSE_FINE_10; + + fused = + (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | + (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | + (coarse & + IGP01E1000_ANALOG_FUSE_COARSE_MASK); + + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_CONTROL, + fused); + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_BYPASS, + IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); + } + } + } +} + +/** + * e1000_set_mac_type - Set the mac type member in the hw struct. 
+ * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + switch (hw->device_id) { + case E1000_DEV_ID_82542: + switch (hw->revision_id) { + case E1000_82542_2_0_REV_ID: + hw->mac_type = e1000_82542_rev2_0; + break; + case E1000_82542_2_1_REV_ID: + hw->mac_type = e1000_82542_rev2_1; + break; + default: + /* Invalid 82542 revision ID */ + return -E1000_ERR_MAC_TYPE; + } + break; + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + hw->mac_type = e1000_82543; + break; + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + hw->mac_type = e1000_82544; + break; + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + hw->mac_type = e1000_82540; + break; + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + hw->mac_type = e1000_82545; + break; + case E1000_DEV_ID_82545GM_COPPER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82545GM_SERDES: + hw->mac_type = e1000_82545_rev_3; + break; + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + hw->mac_type = e1000_82546; + break; + case E1000_DEV_ID_82546GB_COPPER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + hw->mac_type = e1000_82546_rev_3; + break; + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER_LOM: + hw->mac_type = e1000_82541; + break; + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + hw->mac_type = e1000_82541_rev_2; + break; + case E1000_DEV_ID_82547EI: + case E1000_DEV_ID_82547EI_MOBILE: + hw->mac_type = e1000_82547; + break; + case E1000_DEV_ID_82547GI: + hw->mac_type = e1000_82547_rev_2; + break; + case E1000_DEV_ID_INTEL_CE4100_GBE: + hw->mac_type = e1000_ce4100; + break; + default: + /* Should never have loaded on this device */ + return -E1000_ERR_MAC_TYPE; + } + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->asf_firmware_present = true; + break; + default: + break; + } + + /* The 82543 chip does not count tx_carrier_errors properly in + * FD mode + */ + if (hw->mac_type == e1000_82543) + hw->bad_tx_carr_stats_fd = true; + + if (hw->mac_type > e1000_82544) + hw->has_smbus = true; + + return E1000_SUCCESS; +} + +/** + * e1000_set_media_type - Set media type and TBI compatibility. 
+ * @hw: Struct containing variables accessed by shared code + */ +void e1000_set_media_type(struct e1000_hw *hw) +{ + u32 status; + + if (hw->mac_type != e1000_82543) { + /* tbi_compatibility is only valid on 82543 */ + hw->tbi_compatibility_en = false; + } + + switch (hw->device_id) { + case E1000_DEV_ID_82545GM_SERDES: + case E1000_DEV_ID_82546GB_SERDES: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->media_type = e1000_media_type_fiber; + break; + case e1000_ce4100: + hw->media_type = e1000_media_type_copper; + break; + default: + status = er32(STATUS); + if (status & E1000_STATUS_TBIMODE) { + hw->media_type = e1000_media_type_fiber; + /* tbi_compatibility not valid on fiber */ + hw->tbi_compatibility_en = false; + } else { + hw->media_type = e1000_media_type_copper; + } + break; + } + } +} + +/** + * e1000_reset_hw - reset the hardware completely + * @hw: Struct containing variables accessed by shared code + * + * Reset the transmit and receive units; mask and clear all interrupts. + */ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 ctrl_ext; + u32 manc; + u32 led_ctrl; + s32 ret_val; + + /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC with + * the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(); + + /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ + hw->tbi_compatibility_on = false; + + /* Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msleep(10); + + ctrl = er32(CTRL); + + /* Must reset the PHY before resetting the MAC */ + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Issue a global reset to the MAC. This will reset the chip's + * transmit, receive, DMA, and link units. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + e_dbg("Issuing a global reset to MAC\n"); + + switch (hw->mac_type) { + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82546: + case e1000_82541: + case e1000_82541_rev_2: + /* These controllers can't ack the 64-bit write when issuing the + * reset, so use IO-mapping as a workaround to issue the reset + */ + E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); + break; + case e1000_82545_rev_3: + case e1000_82546_rev_3: + /* Reset is performed on a shadow of the control register */ + ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); + break; + case e1000_ce4100: + default: + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + break; + } + + /* After MAC reset, force reload of EEPROM to restore power-on settings + * to device. Later controllers reload the EEPROM automatically, so + * just wait for reload to complete. 
+ */ + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* Wait for reset to complete */ + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + /* Wait for EEPROM reload */ + msleep(2); + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + /* Wait for EEPROM reload */ + msleep(20); + break; + default: + /* Auto read done will delay 5ms or poll based on mac type */ + ret_val = e1000_get_auto_rd_done(hw); + if (ret_val) + return ret_val; + break; + } + + /* Disable HW ARPs on ASF enabled adapters */ + if (hw->mac_type >= e1000_82540) { + manc = er32(MANC); + manc &= ~(E1000_MANC_ARP_EN); + ew32(MANC, manc); + } + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + e1000_phy_init_script(hw); + + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Clear any pending interrupt events. */ + er32(ICR); + + /* If MWI was previously enabled, reenable it. */ + if (hw->mac_type == e1000_82542_rev2_0) { + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw - Performs basic configuration of the adapter. + * @hw: Struct containing variables accessed by shared code + * + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes the receive address registers, + * multicast table, and VLAN filter table. Calls routines to setup link + * configuration and flow control settings. Clears all on-chip counters. Leaves + * the transmit and receive units disabled and uninitialized. + */ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + s32 ret_val; + u32 mta_size; + u32 ctrl_ext; + + /* Initialize Identification LED */ + ret_val = e1000_id_led_init(hw); + if (ret_val) { + e_dbg("Error Initializing Identification LED\n"); + return ret_val; + } + + /* Set the media type and TBI compatibility */ + e1000_set_media_type(hw); + + /* Disabling VLAN filtering. */ + e_dbg("Initializing the IEEE VLAN\n"); + if (hw->mac_type < e1000_82545_rev_3) + ew32(VET, 0); + e1000_clear_vfta(hw); + + /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + ew32(RCTL, E1000_RCTL_RST); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Setup the receive address. This involves initializing all of the + * Receive Address Registers (RARs 0 - 15). 
+ */ + e1000_init_rx_addrs(hw); + + /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ + if (hw->mac_type == e1000_82542_rev2_0) { + ew32(RCTL, 0); + E1000_WRITE_FLUSH(); + msleep(1); + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + mta_size = E1000_MC_TBL_SIZE; + for (i = 0; i < mta_size; i++) { + E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); + /* use write flush to prevent Memory Write Block (MWB) from + * occurring when accessing our register space + */ + E1000_WRITE_FLUSH(); + } + + /* Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. Valid only on + * 82542 and 82543 silicon. + */ + if (hw->dma_fairness && hw->mac_type <= e1000_82543) { + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PRIOR); + } + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + /* Workaround for PCI-X problem when BIOS sets MMRBC + * incorrectly. + */ + if (hw->bus_type == e1000_bus_type_pcix && + e1000_pcix_get_mmrbc(hw) > 2048) + e1000_pcix_set_mmrbc(hw, 2048); + break; + } + + /* Call a subroutine to configure the link and setup flow control. */ + ret_val = e1000_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + if (hw->mac_type > e1000_82544) { + ctrl = er32(TXDCTL); + ctrl = + (ctrl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + ew32(TXDCTL, ctrl); + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs(hw); + + if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || + hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { + ctrl_ext = er32(CTRL_EXT); + /* Relaxed ordering must be disabled to avoid a parity + * error crash in a PCI slot. + */ + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + } + + return ret_val; +} + +/** + * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting. + * @hw: Struct containing variables accessed by shared code. + */ +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) +{ + u16 eeprom_data; + s32 ret_val; + + if (hw->media_type != e1000_media_type_internal_serdes) + return E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, + &eeprom_data); + if (ret_val) + return ret_val; + + if (eeprom_data != EEPROM_RESERVED_WORD) { + /* Adjust SERDES output amplitude only. */ + eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link - Configures flow control and link settings. + * @hw: Struct containing variables accessed by shared code + * + * Determines which flow control settings to use. Calls the appropriate media- + * specific link configuration function. Configures the flow control settings. + * Assuming the adapter has a valid link partner, a valid link should be + * established. Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. 
+ */ +s32 e1000_setup_link(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 ret_val; + u16 eeprom_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->fc == E1000_FC_DEFAULT) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0) + hw->fc = E1000_FC_NONE; + else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == + EEPROM_WORD0F_ASM_DIR) + hw->fc = E1000_FC_TX_PAUSE; + else + hw->fc = E1000_FC_FULL; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + if (hw->mac_type == e1000_82542_rev2_0) + hw->fc &= (~E1000_FC_TX_PAUSE); + + if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1)) + hw->fc &= (~E1000_FC_RX_PAUSE); + + hw->original_fc = hw->fc; + + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc); + + /* Take the 4 bits from EEPROM word 0x0F that determine the initial + * polarity value for the SW controlled pins, and setup the + * Extended Device Control reg with that info. + * This is needed because one of the SW controlled pins is used for + * signal detection. So this should be done before e1000_setup_pcs_link() + * or e1000_phy_setup() is called. + */ + if (hw->mac_type == e1000_82543) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << + SWDPIO__EXT_SHIFT); + ew32(CTRL_EXT, ctrl_ext); + } + + /* Call the necessary subroutine to configure the link. */ + ret_val = (hw->media_type == e1000_media_type_copper) ? + e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw); + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + e_dbg("Initializing the Flow Control address, type and timer regs\n"); + + ew32(FCT, FLOW_CONTROL_TYPE); + ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); + ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); + + ew32(FCTTV, hw->fc_pause_time); + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames in not enabled, then these + * registers will be set to 0. + */ + if (!(hw->fc & E1000_FC_TX_PAUSE)) { + ew32(FCRTL, 0); + ew32(FCRTH, 0); + } else { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. 
+ */ + if (hw->fc_send_xon) { + ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); + ew32(FCRTH, hw->fc_high_water); + } else { + ew32(FCRTL, hw->fc_low_water); + ew32(FCRTH, hw->fc_high_water); + } + } + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link - prepare fiber or serdes link + * @hw: Struct containing variables accessed by shared code + * + * Manipulates Physical Coding Sublayer functions in order to configure + * link. Assumes the hardware has been previously reset and the transmitter + * and receiver are not enabled. + */ +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) +{ + u32 ctrl; + u32 status; + u32 txcw = 0; + u32 i; + u32 signal = 0; + s32 ret_val; + + /* On adapters with a MAC newer than 82544, SWDP 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + * If we're on serdes media, adjust the output amplitude to value + * set in the EEPROM. + */ + ctrl = er32(CTRL); + if (hw->media_type == e1000_media_type_fiber) + signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; + + ret_val = e1000_adjust_serdes_amplitude(hw); + if (ret_val) + return ret_val; + + /* Take the link out of reset */ + ctrl &= ~(E1000_CTRL_LRST); + + /* Adjust VCO speed to improve BER performance */ + ret_val = e1000_set_vco_speed(hw); + if (ret_val) + return ret_val; + + e1000_config_collision_dist(hw); + + /* Check for a software override of the flow control settings, and setup + * the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Tranmsit Config Word Register (TXCW) and re-start + * auto-negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, but + * not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we do + * not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + */ + switch (hw->fc) { + case E1000_FC_NONE: + /* Flow ctrl is completely disabled by a software over-ride */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case E1000_FC_RX_PAUSE: + /* Rx Flow control is enabled and Tx Flow control is disabled by + * a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case E1000_FC_TX_PAUSE: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case E1000_FC_FULL: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. 
If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + e_dbg("Auto-negotiation enabled\n"); + + ew32(TXCW, txcw); + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + hw->txcw = txcw; + msleep(1); + + /* If we have a signal (the cable is plugged in) then poll for a + * "Link-Up" indication in the Device Status Register. Time-out if a + * link isn't seen in 500 milliseconds seconds (Auto-negotiation should + * complete in less than 500 milliseconds even if the other end is doing + * it in SW). For internal serdes, we just assume a signal is present, + * then poll. + */ + if (hw->media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { + e_dbg("Looking for Link\n"); + for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { + msleep(10); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == (LINK_UP_TIMEOUT / 10)) { + e_dbg("Never got a valid link from auto-neg!!!\n"); + hw->autoneg_failed = 1; + /* AutoNeg failed to achieve a link, so we'll call + * e1000_check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = e1000_check_for_link(hw); + if (ret_val) { + e_dbg("Error while checking for link\n"); + return ret_val; + } + hw->autoneg_failed = 0; + } else { + hw->autoneg_failed = 0; + e_dbg("Valid Link Found\n"); + } + } else { + e_dbg("No Signal Detected\n"); + } + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series. + * @hw: Struct containing variables accessed by shared code + * + * Commits changes to PHY configuration by calling e1000_phy_reset(). + */ +static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw) +{ + s32 ret_val; + + /* SW reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +static s32 gbe_dhg_phy_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u32 ctrl_aux; + + switch (hw->phy_type) { + case e1000_phy_8211: + ret_val = e1000_copper_link_rtl_setup(hw); + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + case e1000_phy_8201: + /* Set RMII mode */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= E1000_CTL_AUX_RMII; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + + /* Disable the J/K bits required for receive */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= 0x4; + ctrl_aux &= ~0x2; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + ret_val = e1000_copper_link_rtl_setup(hw); + + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + default: + e_dbg("Error Resetting the PHY\n"); + return E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_preconfig - early configuration for copper + * @hw: Struct containing variables accessed by shared code + * + * Make sure we have a valid PHY and change PHY mode before link setup. + */ +static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + ctrl = er32(CTRL); + /* With 82543, we need to force speed and duplex on the MAC equal to + * what the PHY speed and duplex configuration is. In addition, we need + * to perform a hardware reset on the PHY to take it out of reset. 
+ */ + if (hw->mac_type > e1000_82543) { + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + } else { + ctrl |= + (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); + ew32(CTRL, ctrl); + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + } + + /* Make sure we have a valid PHY */ + ret_val = e1000_detect_gig_phy(hw); + if (ret_val) { + e_dbg("Error, did not detect valid phy.\n"); + return ret_val; + } + e_dbg("Phy ID = %x\n", hw->phy_id); + + /* Set PHY to class A mode (if necessary) */ + ret_val = e1000_set_phy_mode(hw); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82545_rev_3) || + (hw->mac_type == e1000_82546_rev_3)) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + phy_data |= 0x00000008; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + } + + if (hw->mac_type <= e1000_82543 || + hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_reset_disable = false; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) +{ + u32 led_ctrl; + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Wait 15ms for MAC to configure PHY from eeprom settings */ + msleep(15); + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + + /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ + if (hw->phy_type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = e1000_set_d3_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D3\n"); + return ret_val; + } + } + + /* Configure mdi-mdix settings */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + hw->dsp_config_state = e1000_dsp_config_disabled; + /* Force MDI for earlier revs of the IGP PHY */ + phy_data &= + ~(IGP01E1000_PSCR_AUTO_MDIX | + IGP01E1000_PSCR_FORCE_MDI_MDIX); + hw->mdix = 1; + + } else { + hw->dsp_config_state = e1000_dsp_config_enabled; + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (hw->mdix) { + case 1: + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + phy_data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + } + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->autoneg) { + e1000_ms_type phy_ms_setting = hw->master_slave; + + if (hw->ffe_config_state == e1000_ffe_config_active) + hw->ffe_config_state = e1000_ffe_config_enabled; + + if (hw->dsp_config_state == e1000_dsp_config_activated) + hw->dsp_config_state = e1000_dsp_config_enabled; + + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
+ */ + if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + /* Set auto Master/Slave resolution process */ + ret_val = + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~CR_1000T_MS_ENABLE; + ret_val = + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (phy_ms_setting) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + break; + default: + break; + } + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (hw->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (hw->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (hw->phy_revision < M88E1011_I_REV_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. 
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((hw->phy_revision == E1000_REVISION_2) && + (hw->phy_id == M88E1111_I_PHY_ID)) { + /* Vidalia Phy, set the downshift counter to 5x */ + phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + } + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_autoneg - setup auto-neg + * @hw: Struct containing variables accessed by shared code + * + * Setup auto-negotiation and flow control advertisements, + * and then perform auto-negotiation. + */ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Perform some bounds checking on the hw->autoneg_advertised + * parameter. If this variable is zero, then set it to the default. + */ + hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (hw->autoneg_advertised == 0) + hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* IFE/RTL8201N PHY only supports 10/100 */ + if (hw->phy_type == e1000_phy_8201) + hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; + + e_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + e_dbg("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + e_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (hw->wait_autoneg_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + e_dbg + ("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->get_link_status = true; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_postconfig - post link setup + * @hw: Struct containing variables accessed by shared code + * + * Config the MAC and the PHY after link is up. + * 1) Set up the MAC to the current PHY speed/duplex + * if we are on 82543. If we + * are on newer silicon, we only need to configure + * collision distance in the Transmit Control Register. + * 2) Set up flow control on the MAC to that established with + * the link partner. + * 3) Config DSP to improve Gigabit link quality for some PHY revisions. 
+ */ +static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) +{ + s32 ret_val; + + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { + e1000_config_collision_dist(hw); + } else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error Configuring Flow Control\n"); + return ret_val; + } + + /* Config DSP to improve Giga link quality */ + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_config_dsp_after_link_change(hw, true); + if (ret_val) { + e_dbg("Error Configuring DSP after link up\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_copper_link - phy/speed/duplex setting + * @hw: Struct containing variables accessed by shared code + * + * Detects which PHY is present and sets up the speed and duplex + */ +static s32 e1000_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + /* Check if it is a valid PHY and set PHY mode if necessary. */ + ret_val = e1000_copper_link_preconfig(hw); + if (ret_val) + return ret_val; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_copper_link_igp_setup(hw); + if (ret_val) + return ret_val; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_copper_link_mgp_setup(hw); + if (ret_val) + return ret_val; + } else { + ret_val = gbe_dhg_phy_setup(hw); + if (ret_val) { + e_dbg("gbe_dhg_phy_setup failed!\n"); + return ret_val; + } + } + + if (hw->autoneg) { + /* Setup autoneg and flow control advertisement + * and perform autonegotiation + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H,or 100F + * depending on value from forced_speed_duplex. + */ + e_dbg("Forcing speed and duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + for (i = 0; i < 10; i++) { + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + /* Config the MAC and PHY after link is up */ + ret_val = e1000_copper_link_postconfig(hw); + if (ret_val) + return ret_val; + + e_dbg("Valid link established!!!\n"); + return E1000_SUCCESS; + } + udelay(10); + } + + e_dbg("Unable to establish link!!!\n"); + return E1000_SUCCESS; +} + +/** + * e1000_phy_setup_autoneg - phy settings + * @hw: Struct containing variables accessed by shared code + * + * Configures PHY autoneg and flow control advertisement settings + */ +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + else if (hw->phy_type == e1000_phy_8201) + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. 
First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { + e_dbg + ("Advertise 1000mb Half duplex requested, request denied!\n"); + } + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start + * auto-negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc) { + case E1000_FC_NONE: /* 0 */ + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_RX_PAUSE: /* 1 */ + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + */ + /* Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_TX_PAUSE: /* 2 */ + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case E1000_FC_FULL: /* 3 */ + /* Flow control (both RX and TX) is enabled by a software + * over-ride. 
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (hw->phy_type == e1000_phy_8201) { + mii_1000t_ctrl_reg = 0; + } else { + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex - force link settings + * @hw: Struct containing variables accessed by shared code + * + * Force PHY speed and duplex settings to hw->forced_speed_duplex + */ +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 mii_ctrl_reg; + u16 mii_status_reg; + u16 phy_data; + u16 i; + + /* Turn off Flow control if we are forcing speed and duplex. */ + hw->fc = E1000_FC_NONE; + + e_dbg("hw->fc = %d\n", hw->fc); + + /* Read the Device Control Register. */ + ctrl = er32(CTRL); + + /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(DEVICE_SPEED_MASK); + + /* Clear the Auto Speed Detect Enable bit. */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Read the MII Control Register. */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg); + if (ret_val) + return ret_val; + + /* We need to disable autoneg in order to force link and duplex. */ + + mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN; + + /* Are we forcing Full or Half Duplex? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_10_full) { + /* We want to force full duplex so we SET the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl |= E1000_CTRL_FD; + mii_ctrl_reg |= MII_CR_FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + /* We want to force half duplex so we CLEAR the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl &= ~E1000_CTRL_FD; + mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; + e_dbg("Half Duplex\n"); + } + + /* Are we forcing 100Mbps??? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_100_half) { + /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */ + ctrl |= E1000_CTRL_SPD_100; + mii_ctrl_reg |= MII_CR_SPEED_100; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + e_dbg("Forcing 100mb "); + } else { + /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + mii_ctrl_reg |= MII_CR_SPEED_10; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + e_dbg("Forcing 10mb "); + } + + e1000_config_collision_dist(hw); + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + + if (hw->phy_type == e1000_phy_m88) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires + * MDI forced whenever speed are duplex are forced. 
+ */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %x\n", phy_data); + + /* Need to reset the PHY or these changes will be ignored */ + mii_ctrl_reg |= MII_CR_RESET; + + /* Disable MDI-X support for 10/100 */ + } else { + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed or duplex are forced. + */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + /* Write back the modified PHY MII control register. */ + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg); + if (ret_val) + return ret_val; + + udelay(1); + + /* The wait_autoneg_complete flag may be a little misleading here. + * Since we are forcing speed and duplex, Auto-Neg is not enabled. + * But we do want to delay for a period while forcing only so we + * don't generate false No Link messages. So we will wait here + * only if the user has set wait_autoneg_complete to 1, which is + * the default. + */ + if (hw->wait_autoneg_complete) { + /* We will wait for autoneg to complete. */ + e_dbg("Waiting for forced speed/duplex link.\n"); + mii_status_reg = 0; + + /* Wait for autoneg to complete or 4.5 seconds to expire */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { + /* We didn't get link. Reset the DSP and wait again + * for link. + */ + ret_val = e1000_phy_reset_dsp(hw); + if (ret_val) { + e_dbg("Error Resetting PHY DSP\n"); + return ret_val; + } + } + /* This loop will early-out if the link condition has been + * met + */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + } + } + + if (hw->phy_type == e1000_phy_m88) { + /* Because we reset the PHY above, we need to re-force TX_CLK in + * the Extended PHY Specific Control Register to 25MHz clock. + * This value defaults back to a 2.5MHz clock when the PHY is + * reset. + */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + + /* In addition, because of the s/w reset above, we need to + * enable CRS on Tx. This must be set for both full and half + * duplex operation. 
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ret_val = e1000_polarity_reversal_workaround(hw); + if (ret_val) + return ret_val; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_config_collision_dist - set collision distance register + * @hw: Struct containing variables accessed by shared code + * + * Sets the collision distance in the Transmit Control register. + * Link should have been established previously. Reads the speed and duplex + * information from the Device Status register. + */ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl, coll_dist; + + if (hw->mac_type < e1000_82543) + coll_dist = E1000_COLLISION_DISTANCE_82542; + else + coll_dist = E1000_COLLISION_DISTANCE; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= coll_dist << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_config_mac_to_phy - sync phy and mac settings + * @hw: Struct containing variables accessed by shared code + * + * Sets MAC speed and duplex settings to reflect the those in the PHY + * The contents of the PHY register containing the needed information need to + * be passed in. + */ +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + /* 82544 or newer MAC, Auto Speed Detection takes care of + * MAC speed/duplex configuration. + */ + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) + return E1000_SUCCESS; + + /* Read the Device Control Register and set the bits to Force Speed + * and Duplex. + */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); + + switch (hw->phy_type) { + case e1000_phy_8201: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & RTL_PHY_CTRL_FD) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + if (phy_data & RTL_PHY_CTRL_SPD_100) + ctrl |= E1000_CTRL_SPD_100; + else + ctrl |= E1000_CTRL_SPD_10; + + e1000_config_collision_dist(hw); + break; + default: + /* Set up duplex in the Device Control and Transmit Control + * registers depending on negotiated values. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & M88E1000_PSSR_DPLX) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + e1000_config_collision_dist(hw); + + /* Set up speed in the Device Control register depending on + * negotiated values. + */ + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + ctrl |= E1000_CTRL_SPD_1000; + else if ((phy_data & M88E1000_PSSR_SPEED) == + M88E1000_PSSR_100MBS) + ctrl |= E1000_CTRL_SPD_100; + } + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_force_mac_fc - force flow control settings + * @hw: Struct containing variables accessed by shared code + * + * Forces the MAC's flow control settings. + * Sets the TFCE and RFCE bits in the device control register to reflect + * the adapter settings. 
TFCE and RFCE need to be explicitly set by + * software when a Copper PHY is used because autonegotiation is managed + * by the PHY rather than the MAC. Software must also configure these + * bits when link is forced on a fiber connection. + */ +s32 e1000_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + + /* Get the current configuration of the Device Control Register */ + ctrl = er32(CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * frames but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + + switch (hw->fc) { + case E1000_FC_NONE: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case E1000_FC_RX_PAUSE: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case E1000_FC_TX_PAUSE: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case E1000_FC_FULL: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + /* Disable TX Flow Control for 82542 (rev 2.0) */ + if (hw->mac_type == e1000_82542_rev2_0) + ctrl &= (~E1000_CTRL_TFCE); + + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_config_fc_after_link_up - configure flow control after autoneg + * @hw: Struct containing variables accessed by shared code + * + * Configures flow control settings after link is established + * Should be called immediately after a valid link has been established. + * Forces MAC flow control settings if link was forced. When in MII/GMII mode + * and autonegotiation is enabled, the MAC flow control settings will be set + * based on the flow control negotiated by the PHY. In TBI mode, the TFCE + * and RFCE bits will be automatically set to the negotiated flow control mode. + */ +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 mii_nway_adv_reg; + u16 mii_nway_lp_ability_reg; + u16 speed; + u16 duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (((hw->media_type == e1000_media_type_fiber) && + (hw->autoneg_failed)) || + ((hw->media_type == e1000_media_type_internal_serdes) && + (hw->autoneg_failed)) || + ((hw->media_type == e1000_media_type_copper) && + (!hw->autoneg))) { + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. 
We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement Register + * (Address 4) and the Auto_Negotiation Base Page + * Ability Register (Address 5) to determine how flow + * control was negotiated. + */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement + * Register (Address 4) and two bits in the Auto + * Negotiation Base Page Ability Register (Address 5) + * determine flow control for both the PHY and the link + * partner. The following table, taken out of the IEEE + * 802.3ab/D6.0 dated March 25, 1999, describes these + * PAUSE resolution bits and how flow control is + * determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|------------------ + * 0 | 0 | DC | DC | E1000_FC_NONE + * 0 | 1 | 0 | DC | E1000_FC_NONE + * 0 | 1 | 1 | 0 | E1000_FC_NONE + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * 1 | 0 | 0 | DC | E1000_FC_NONE + * 1 | DC | 1 | DC | E1000_FC_FULL + * 1 | 1 | 0 | 0 | E1000_FC_NONE + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + /* Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | DC | 1 | DC | E1000_FC_FULL + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected Rx + * ONLY of pause frames. In this case, we had + * to advertise FULL flow control because we + * could not advertise Rx ONLY. Hence, we must + * now check to see if we need to turn OFF the + * TRANSMISSION of PAUSE frames. + */ + if (hw->original_fc == E1000_FC_FULL) { + hw->fc = E1000_FC_FULL; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = E1000_FC_TX_PAUSE; + e_dbg + ("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should + * be disabled. However, we want to consider that we + * could be connected to a legacy switch that doesn't + * advertise desired flow control, but can be forced on + * the link partner. So if we advertised no flow + * control, that is what we will resolve to. If we + * advertised some kind of receive capability (Rx Pause + * Only or Full Flow Control) and the link partner + * advertised none, we will configure ourselves to + * enable Rx Flow Control only. We can do this safely + * for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, + * no harm done since we won't be receiving any PAUSE + * frames anyway. If the intent on the link partner was + * to have flow control enabled, then by us enabling Rx + * only, we can at least receive pause frames and + * process them. This is a good idea because in most + * cases, since we are predominantly a server NIC, more + * times than not we will be asked to delay transmission + * of packets than asking our link partner to pause + * transmission of frames. + */ + else if ((hw->original_fc == E1000_FC_NONE || + hw->original_fc == E1000_FC_TX_PAUSE) || + hw->fc_strict_ieee) { + hw->fc = E1000_FC_NONE; + e_dbg("Flow Control = NONE.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc = E1000_FC_NONE; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg + ("Error forcing flow control settings\n"); + return ret_val; + } + } else { + e_dbg + ("Copper PHY and Auto Neg has not completed.\n"); + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + */ +static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. 
+ */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (hw->autoneg_failed == 0) { + hw->autoneg_failed = 1; + goto out; + } + e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, hw->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + hw->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - forced.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - autoneg " + "completed successfully.\n"); + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - invalid" + "codewords detected in autoneg.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - no sync.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - autoneg failed\n"); + } + } + + out: + return ret_val; +} + +/** + * e1000_check_for_link + * @hw: Struct containing variables accessed by shared code + * + * Checks to see if the link status of the hardware has changed. + * Called by any function that needs to check the link status of the adapter. + */ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + u32 status; + u32 rctl; + u32 icr; + s32 ret_val; + u16 phy_data; + + er32(CTRL); + status = er32(STATUS); + + /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + */ + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) { + er32(RXCW); + + if (hw->media_type == e1000_media_type_fiber) { + if (status & E1000_STATUS_LU) + hw->get_link_status = false; + } + } + + /* If we have a copper PHY then we only want to go out to the PHY + * registers to see if Auto-Neg has completed and/or if our link + * status has changed. The get_link_status flag will be set if we + * receive a Link Status Change interrupt or we have Rx Sequence + * Errors. 
+ */ + if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + * Read the register twice since the link bit is sticky. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + hw->get_link_status = false; + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift(hw); + + /* If we are on 82544 or 82543 silicon and speed/duplex + * are forced to 10H or 10F, then we will implement the + * polarity reversal workaround. We disable interrupts + * first, and upon returning, place the devices + * interrupt state to its previous value except for the + * link status change interrupt which will + * happen due to the execution of this workaround. + */ + + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ew32(IMC, 0xffffffff); + ret_val = + e1000_polarity_reversal_workaround(hw); + icr = er32(ICR); + ew32(ICS, (icr & ~E1000_ICS_LSC)); + ew32(IMS, IMS_ENABLE_MASK); + } + + } else { + /* No link detected */ + e1000_config_dsp_after_link_change(hw, false); + return 0; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!hw->autoneg) + return -E1000_ERR_CONFIG; + + /* optimize the dsp settings for the igp phy */ + e1000_config_dsp_after_link_change(hw, true); + + /* We have a M88E1000 PHY and Auto-Neg is enabled. If we + * have Si on board that is 82544 or newer, Auto + * Speed Detection takes care of MAC speed/duplex + * configuration. So we only need to configure Collision + * Distance in the MAC. Otherwise, we need to force + * speed/duplex on the MAC to the current PHY speed/duplex + * settings. + */ + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_ce4100)) + e1000_config_collision_dist(hw); + else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg + ("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control settings + * because we may have had to re-autoneg with a different link + * partner. + */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + + /* At this point we know that we are on copper and we have + * auto-negotiated link. These are conditions for checking the + * link partner capability register. We use the link speed to + * determine if TBI compatibility needs to be turned on or off. + * If the link is not at gigabit speed, then TBI compatibility + * is not needed. If we are at gigabit speed, we turn on TBI + * compatibility. + */ + if (hw->tbi_compatibility_en) { + u16 speed, duplex; + + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + if (speed != SPEED_1000) { + /* If link speed is not set to gigabit speed, we + * do not need to enable TBI compatibility. 
+ */ + if (hw->tbi_compatibility_on) { + /* If we previously were in the mode, + * turn it off. + */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_SBP; + ew32(RCTL, rctl); + hw->tbi_compatibility_on = false; + } + } else { + /* If TBI compatibility is was previously off, + * turn it on. For compatibility with a TBI link + * partner, we will store bad packets. Some + * frames have an additional byte on the end and + * will look like CRC errors to the hardware. + */ + if (!hw->tbi_compatibility_on) { + hw->tbi_compatibility_on = true; + rctl = er32(RCTL); + rctl |= E1000_RCTL_SBP; + ew32(RCTL, rctl); + } + } + } + } + + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) + e1000_check_for_serdes_link_generic(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex + * @hw: Struct containing variables accessed by shared code + * @speed: Speed of the connection + * @duplex: Duplex setting of the connection + * + * Detects the current speed and duplex settings of the hardware. + */ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + u32 status; + s32 ret_val; + u16 phy_data; + + if (hw->mac_type >= e1000_82543) { + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + e_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + e_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + e_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + e_dbg(" Half Duplex\n"); + } + } else { + e_dbg("1000 Mbs, Full Duplex\n"); + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + } + + /* IGP01 PHY may advertise full duplex operation after speed downgrade + * even if it is operating at half duplex. Here we set the duplex + * settings to match the duplex in the link partner's capabilities. + */ + if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & NWAY_ER_LP_NWAY_CAPS)) + *duplex = HALF_DUPLEX; + else { + ret_val = + e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); + if (ret_val) + return ret_val; + if ((*speed == SPEED_100 && + !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) || + (*speed == SPEED_10 && + !(phy_data & NWAY_LPAR_10T_FD_CAPS))) + *duplex = HALF_DUPLEX; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_wait_autoneg + * @hw: Struct containing variables accessed by shared code + * + * Blocks until autoneg completes or times out (~4.5 seconds) + */ +static s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + e_dbg("Waiting for Auto-Neg to complete.\n"); + + /* We will wait for autoneg to complete or 4.5 seconds to expire. */ + for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. 
+ */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + if (phy_data & MII_SR_AUTONEG_COMPLETE) + return E1000_SUCCESS; + + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_raise_mdi_clk - Raises the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Raise the clock input to the Management Data Clock (by setting the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_lower_mdi_clk - Lowers the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Lower the clock input to the Management Data Clock (by clearing the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY + * @hw: Struct containing variables accessed by shared code + * @data: Data to send out to the PHY + * @count: Number of bits to shift out + * + * Bits are shifted out in MSB to LSB order. + */ +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) +{ + u32 ctrl; + u32 mask; + + /* We need to shift "count" number of bits out to the PHY. So, the value + * in the "data" parameter will be shifted out to the PHY one bit at a + * time. In order to do this, "data" must be broken down into bits. + */ + mask = 0x01; + mask <<= (count - 1); + + ctrl = er32(CTRL); + + /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ + ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); + + while (mask) { + /* A "1" is shifted out to the PHY by setting the MDIO bit to + * "1" and then raising and lowering the Management Data Clock. + * A "0" is shifted out to the PHY by setting the MDIO bit to + * "0" and then raising and lowering the clock. + */ + if (data & mask) + ctrl |= E1000_CTRL_MDIO; + else + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + udelay(10); + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + mask = mask >> 1; + } +} + +/** + * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY + * @hw: Struct containing variables accessed by shared code + * + * Bits are shifted in MSB to LSB order. + */ +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) +{ + u32 ctrl; + u16 data = 0; + u8 i; + + /* In order to read a register from the PHY, we need to shift in a total + * of 18 bits from the PHY. The first two bit (turnaround) times are + * used to avoid contention on the MDIO pin when a read operation is + * performed. These two bits are ignored by us and thrown away. Bits are + * "shifted in" by raising the input to the Management Data Clock + * (setting the MDC bit), and then reading the value of the MDIO bit. + */ + ctrl = er32(CTRL); + + /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as + * input. + */ + ctrl &= ~E1000_CTRL_MDIO_DIR; + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + /* Raise and Lower the clock before reading in the data. This accounts + * for the turnaround bits. 
The first clock occurred when we clocked out + * the last bit of the Register Address. + */ + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + for (data = 0, i = 0; i < 16; i++) { + data = data << 1; + e1000_raise_mdi_clk(hw, &ctrl); + ctrl = er32(CTRL); + /* Check to see if we shifted in a "1". */ + if (ctrl & E1000_CTRL_MDIO) + data |= 1; + e1000_lower_mdi_clk(hw, &ctrl); + } + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + return data; +} + +/** + * e1000_read_phy_reg - read a phy register + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to read + * @phy_data: pointer to the value on the PHY register + * + * Reads the value from a PHY register, if the value is on a specific non zero + * page, sets the page first. + */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16) reg_addr); + if (ret_val) + goto out; + } + + ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); +out: + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, and register address in the MDI + * Control register. The MAC will take care of interfacing with + * the PHY to retrieve the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_READ) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + + mdic = readl(E1000_MDIO_STS); + if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) { + e_dbg("MDI Read Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16)mdic; + } else { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16)mdic; + } + } else { + /* We must first send a preamble through the MDIO pin to signal + * the beginning of an MII instruction. This is done by sending + * 32 consecutive "1" bits. + */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the next few fields that are required for a read + * operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine five different times. 
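+	 * (Those fields are the start-of-frame pattern, the read opcode,
+	 * the PHY address and the register address, packed into 'mdic'
+	 * below.)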
The + * format of a MII read instruction consists of a shift out of + * 14 bits and is defined as follows: + * + * followed by a shift in of 18 bits. This first two bits + * shifted in are TurnAround bits used to avoid contention on + * the MDIO pin when a READ operation is performed. These two + * bits are thrown away followed by a shift in of 16 bits which + * contains the desired data. + */ + mdic = ((reg_addr) | (phy_addr << 5) | + (PHY_OP_READ << 10) | (PHY_SOF << 12)); + + e1000_shift_out_mdi_bits(hw, mdic, 14); + + /* Now that we've shifted out the read command to the MII, we + * need to "shift in" the 16-bit value (18 total bits) of the + * requested PHY register address. + */ + *phy_data = e1000_shift_in_mdi_bits(hw); + } + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg - write a phy register + * + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to write + * @phy_data: data to write to the PHY + * + * Writes a value to a PHY register + */ +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16)reg_addr); + if (ret_val) { + spin_unlock_irqrestore(&e1000_phy_lock, flags); + return ret_val; + } + } + + ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, register address, and data + * intended for the PHY register in the MDI Control register. + * The MAC will take care of interfacing with the PHY to send + * the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = (((u32)phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_WRITE) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 640; i++) { + udelay(5); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } else { + mdic = (((u32)phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 641; i++) { + udelay(5); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } + } else { + /* We'll need to use the SW defined pins to shift the write + * command out to the PHY. We first send a preamble to the PHY + * to signal the beginning of the MII instruction. This is done + * by sending 32 consecutive "1" bits. 
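+		 * The preamble is followed by a single 32-bit frame assembled
+		 * below: start-of-frame, the write opcode, the PHY and register
+		 * addresses, the turnaround pattern and finally the 16 data bits.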
+ */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the remaining required fields that will indicate + * a write operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine for each field in the + * command. The format of a MII write instruction is as follows: + * . + */ + mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | + (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); + mdic <<= 16; + mdic |= (u32)phy_data; + + e1000_shift_out_mdi_bits(hw, mdic, 32); + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - reset the phy, hardware style + * @hw: Struct containing variables accessed by shared code + * + * Returns the PHY to the power-on reset state + */ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext; + u32 led_ctrl; + + e_dbg("Resetting Phy...\n"); + + if (hw->mac_type > e1000_82543) { + /* Read the device control register and assert the + * E1000_CTRL_PHY_RST bit. Then, take it out of reset. + * For e1000 hardware, we delay for 10ms between the assert + * and de-assert. + */ + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); + E1000_WRITE_FLUSH(); + + msleep(10); + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + } else { + /* Read the Extended Device Control Register, assert the + * PHY_RESET_DIR bit to put the PHY into reset. Then, take it + * out of reset. + */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; + ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + msleep(10); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + } + udelay(150); + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Wait for FW to finish PHY configuration. */ + return e1000_get_phy_cfg_done(hw); +} + +/** + * e1000_phy_reset - reset the phy to commit settings + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY + * Sets bit 15 of the MII Control register + */ +s32 e1000_phy_reset(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + switch (hw->phy_type) { + case e1000_phy_igp: + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + break; + default: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= MII_CR_RESET; + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + break; + } + + if (hw->phy_type == e1000_phy_igp) + e1000_phy_init_script(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_detect_gig_phy - check the phy type + * @hw: Struct containing variables accessed by shared code + * + * Probes the expected PHY address for known PHY IDs + */ +static s32 e1000_detect_gig_phy(struct e1000_hw *hw) +{ + s32 phy_init_status, ret_val; + u16 phy_id_high, phy_id_low; + bool match = false; + + if (hw->phy_id != 0) + return E1000_SUCCESS; + + /* Read the PHY ID Registers to identify which PHY is onboard. 
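+	 * The 32-bit identifier is assembled from PHY_ID1 (upper 16 bits)
+	 * and PHY_ID2; the low-order revision bits of PHY_ID2 are masked
+	 * off and kept separately in hw->phy_revision.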
*/ + ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); + if (ret_val) + return ret_val; + + hw->phy_id = (u32)(phy_id_high << 16); + udelay(20); + ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); + if (ret_val) + return ret_val; + + hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK); + hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->phy_id == M88E1000_E_PHY_ID) + match = true; + break; + case e1000_82544: + if (hw->phy_id == M88E1000_I_PHY_ID) + match = true; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + if (hw->phy_id == M88E1011_I_PHY_ID) + match = true; + break; + case e1000_ce4100: + if ((hw->phy_id == RTL8211B_PHY_ID) || + (hw->phy_id == RTL8201N_PHY_ID) || + (hw->phy_id == M88E1118_E_PHY_ID)) + match = true; + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (hw->phy_id == IGP01E1000_I_PHY_ID) + match = true; + break; + default: + e_dbg("Invalid MAC type %d\n", hw->mac_type); + return -E1000_ERR_CONFIG; + } + phy_init_status = e1000_set_phy_type(hw); + + if ((match) && (phy_init_status == E1000_SUCCESS)) { + e_dbg("PHY ID 0x%X detected\n", hw->phy_id); + return E1000_SUCCESS; + } + e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id); + return -E1000_ERR_PHY; +} + +/** + * e1000_phy_reset_dsp - reset DSP + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY's DSP + */ +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + do { + ret_val = e1000_write_phy_reg(hw, 29, 0x001d); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x0000); + if (ret_val) + break; + ret_val = E1000_SUCCESS; + } while (0); + + return ret_val; +} + +/** + * e1000_phy_igp_get_info - get igp specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for igp PHY only. + */ +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data, min_length, max_length, average; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + /* IGP01E1000 does not need to support it. */ + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; + + /* IGP01E1000 always correct polarity reversal */ + phy_info->polarity_correction = e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >> + IGP01E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Local/Remote Receiver Information are only valid @ 1000 + * Mbps + */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + /* Get cable length */ + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + /* Translate to old method */ + average = (max_length + min_length) / 2; + + if (average <= e1000_igp_cable_length_50) + phy_info->cable_length = e1000_cable_length_50; + else if (average <= e1000_igp_cable_length_80) + phy_info->cable_length = e1000_cable_length_50_80; + else if (average <= e1000_igp_cable_length_110) + phy_info->cable_length = e1000_cable_length_80_110; + else if (average <= e1000_igp_cable_length_140) + phy_info->cable_length = e1000_cable_length_110_140; + else + phy_info->cable_length = e1000_cable_length_140; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_m88_get_info - get m88 specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for m88 PHY only. + */ +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_info->extended_10bt_distance = + ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> + M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ? + e1000_10bt_ext_dist_enable_lower : + e1000_10bt_ext_dist_enable_normal; + + phy_info->polarity_correction = + ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> + M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ? + e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >> + M88E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + /* Cable Length Estimation and Local/Remote Receiver Information + * are only valid at 1000 Mbps. + */ + phy_info->cable_length = + (e1000_cable_length) ((phy_data & + M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_get_info - request phy info + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers + */ +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + + phy_info->cable_length = e1000_cable_length_undefined; + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; + phy_info->cable_polarity = e1000_rev_polarity_undefined; + phy_info->downshift = e1000_downshift_undefined; + phy_info->polarity_correction = e1000_polarity_reversal_undefined; + phy_info->mdix_mode = e1000_auto_x_mode_undefined; + phy_info->local_rx = e1000_1000t_rx_status_undefined; + phy_info->remote_rx = e1000_1000t_rx_status_undefined; + + if (hw->media_type != e1000_media_type_copper) { + e_dbg("PHY info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { + e_dbg("PHY info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + if (hw->phy_type == e1000_phy_igp) + return e1000_phy_igp_get_info(hw, phy_info); + else if ((hw->phy_type == e1000_phy_8211) || + (hw->phy_type == e1000_phy_8201)) + return E1000_SUCCESS; + else + return e1000_phy_m88_get_info(hw, phy_info); +} + +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { + e_dbg("Invalid MDI setting detected\n"); + hw->mdix = 1; + return -E1000_ERR_CONFIG; + } + return E1000_SUCCESS; +} + +/** + * e1000_init_eeprom_params - initialize sw eeprom vars + * @hw: Struct containing variables accessed by shared code + * + * Sets up eeprom variables in the hw struct. Must be called after mac_type + * is configured. 
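+ * Depending on the MAC type the part is either a Microwire or an SPI
+ * EEPROM; word size, opcode and address widths and the clock delay are
+ * derived from the MAC type and the EECD register bits, and for SPI
+ * parts the actual word size is then read back from the EEPROM itself.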
+ */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd = er32(EECD); + s32 ret_val = E1000_SUCCESS; + u16 eeprom_size; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + eeprom->type = e1000_eeprom_microwire; + eeprom->word_size = 64; + eeprom->opcode_bits = 3; + eeprom->address_bits = 6; + eeprom->delay_usec = 50; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_SIZE) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (eecd & E1000_EECD_TYPE) { + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + } else { + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + } + break; + default: + break; + } + + if (eeprom->type == e1000_eeprom_spi) { + /* eeprom_size will be an enum [0..8] that maps to eeprom sizes + * 128B to 32KB (incremented by powers of 2). + */ + /* Set to default value for initial eeprom read. */ + eeprom->word_size = 64; + ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); + if (ret_val) + return ret_val; + eeprom_size = + (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; + /* 256B eeprom size was not supported in earlier hardware, so we + * bump eeprom_size up one to ensure that "1" (which maps to + * 256B) is never the result used in the shifting logic below. + */ + if (eeprom_size) + eeprom_size++; + + eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT); + } + return ret_val; +} + +/** + * e1000_raise_ee_clk - Raises the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Raise the clock input to the EEPROM (by setting the SK bit), and then + * wait microseconds. + */ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_lower_ee_clk - Lowers the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Lower the clock input to the EEPROM (by clearing the SK bit), and + * then wait 50 microseconds. + */ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM. 
+ * @hw: Struct containing variables accessed by shared code + * @data: data to send to the EEPROM + * @count: number of bits to shift out + */ +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u32 mask; + + /* We need to shift "count" bits out to the EEPROM. So, value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + */ + mask = 0x01 << (count - 1); + eecd = er32(EECD); + if (eeprom->type == e1000_eeprom_microwire) + eecd &= ~E1000_EECD_DO; + else if (eeprom->type == e1000_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + /* A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK bit + * controls the clock input to the EEPROM). A "0" is shifted + * out to the EEPROM by setting "DI" to "0" and then raising and + * then lowering the clock. + */ + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(eeprom->delay_usec); + + e1000_raise_ee_clk(hw, &eecd); + e1000_lower_ee_clk(hw, &eecd); + + mask = mask >> 1; + + } while (mask); + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM + * @hw: Struct containing variables accessed by shared code + * @count: number of bits to shift in + */ +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + /* In order to read a register from the EEPROM, we need to shift 'count' + * bits in from the EEPROM. Bits are "shifted in" by raising the clock + * input to the EEPROM (setting the SK bit), and then reading the value + * of the "DO" bit. During this "shifting in" process the "DI" bit + * should always be clear. + */ + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data = data << 1; + e1000_raise_ee_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DI); + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_ee_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_acquire_eeprom - Prepares EEPROM for access + * @hw: Struct containing variables accessed by shared code + * + * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This + * function should be called before issuing a command to the EEPROM. 
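+ * On MACs newer than the 82544, software must first request access by
+ * setting the REQ bit in EECD and waiting for the GNT bit before the
+ * bit-banged interface may be driven.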
+ */ +static s32 e1000_acquire_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd, i = 0; + + eecd = er32(EECD); + + /* Request EEPROM Access */ + if (hw->mac_type > e1000_82544) { + eecd |= E1000_EECD_REQ; + ew32(EECD, eecd); + eecd = er32(EECD); + while ((!(eecd & E1000_EECD_GNT)) && + (i < E1000_EEPROM_GRANT_ATTEMPTS)) { + i++; + udelay(5); + eecd = er32(EECD); + } + if (!(eecd & E1000_EECD_GNT)) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire EEPROM grant\n"); + return -E1000_ERR_EEPROM; + } + } + + /* Setup EEPROM for Read/Write */ + + if (eeprom->type == e1000_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); + ew32(EECD, eecd); + + /* Set CS */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(1); + } + + return E1000_SUCCESS; +} + +/** + * e1000_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_standby_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + + eecd = er32(EECD); + + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock high */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock low */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } +} + +/** + * e1000_release_eeprom - drop chip select + * @hw: Struct containing variables accessed by shared code + * + * Terminates a command by inverting the EEPROM's chip select pin + */ +static void e1000_release_eeprom(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + + if (hw->eeprom.type == e1000_eeprom_spi) { + eecd |= E1000_EECD_CS; /* Pull CS high */ + eecd &= ~E1000_EECD_SK; /* Lower SCK */ + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(hw->eeprom.delay_usec); + } else if (hw->eeprom.type == e1000_eeprom_microwire) { + /* cleanup eeprom */ + + /* CS on Microwire is active-high */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); + + ew32(EECD, eecd); + + /* Rising edge of clock */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + + /* Falling edge of clock */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + } + + /* Stop requesting EEPROM access */ + if (hw->mac_type > e1000_82544) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + } +} + +/** + * e1000_spi_eeprom_ready - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u8 spi_stat_reg; + + /* Read "Status Register" repeatedly until the LSB is cleared. 
The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + retry_count = 0; + do { + e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, + hw->eeprom.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8); + if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) + break; + + udelay(5); + retry_count += 5; + + e1000_standby_eeprom(hw); + } while (retry_count < EEPROM_MAX_RETRY_SPI); + + /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and + * only 0-5mSec on 5V devices) + */ + if (retry_count >= EEPROM_MAX_RETRY_SPI) { + e_dbg("SPI EEPROM Status error\n"); + return -E1000_ERR_EEPROM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_eeprom - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * @words: number of words to read + */ +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + + mutex_lock(&e1000_eeprom_lock); + ret = e1000_do_read_eeprom(hw, offset, words, data); + mutex_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 i = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. + */ + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { + e_dbg("\"words\" parameter out of bounds. Words = %d," + "size = %d\n", offset, eeprom->word_size); + return -E1000_ERR_EEPROM; + } + + /* EEPROM's that don't use EERD to read require us to bit-bang the SPI + * directly. In this case, we need to acquire the EEPROM so that + * FW or other port software does not interrupt. + */ + /* Prepare the EEPROM for bit-bang reading */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have + * acquired the EEPROM at this point, so any returns should release it + */ + if (eeprom->type == e1000_eeprom_spi) { + u16 word_in; + u8 read_opcode = EEPROM_READ_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) { + e1000_release_eeprom(hw); + return -E1000_ERR_EEPROM; + } + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + read_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16)(offset * 2), + eeprom->address_bits); + + /* Read the data. The address of the eeprom internally + * increments with each byte (spi) being read, saving on the + * overhead of eeprom setup and tear-down. The address counter + * will roll over if reading beyond the size of the eeprom, thus + * allowing the entire memory to be read starting from any + * offset. 
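+		 * Each word is clocked in most-significant byte first, so the
+		 * two bytes are swapped below before being stored.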
+ */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_ee_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + } else if (eeprom->type == e1000_eeprom_microwire) { + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, + EEPROM_READ_OPCODE_MICROWIRE, + eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16)(offset + i), + eeprom->address_bits); + + /* Read the data. For microwire, each word requires the + * overhead of eeprom setup and tear-down. + */ + data[i] = e1000_shift_in_ee_bits(hw, 16); + e1000_standby_eeprom(hw); + cond_resched(); + } + } + + /* End this read operation */ + e1000_release_eeprom(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum + * @hw: Struct containing variables accessed by shared code + * + * Reads the first 64 16 bit words of the EEPROM and sums the values read. + * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is + * valid. + */ +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + +#ifdef CONFIG_PARISC + /* This is a signature and not a checksum on HP c8000 */ + if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) + return E1000_SUCCESS; + +#endif + if (checksum == (u16)EEPROM_SUM) + return E1000_SUCCESS; + else { + e_dbg("EEPROM Checksum Invalid\n"); + return -E1000_ERR_EEPROM; + } +} + +/** + * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum + * @hw: Struct containing variables accessed by shared code + * + * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. + * Writes the difference to word offset 63 of the EEPROM. + */ +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + checksum = (u16)EEPROM_SUM - checksum; + if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { + e_dbg("EEPROM Write Error\n"); + return -E1000_ERR_EEPROM; + } + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom - write words to the different EEPROM types. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word to be written to the EEPROM + * + * If e1000_update_eeprom_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + */ +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + + mutex_lock(&e1000_eeprom_lock); + ret = e1000_do_write_eeprom(hw, offset, words, data); + mutex_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + s32 status = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. 
+ */ + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { + e_dbg("\"words\" parameter out of bounds\n"); + return -E1000_ERR_EEPROM; + } + + /* Prepare the EEPROM for writing */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + if (eeprom->type == e1000_eeprom_microwire) { + status = e1000_write_eeprom_microwire(hw, offset, words, data); + } else { + status = e1000_write_eeprom_spi(hw, offset, words, data); + msleep(10); + } + + /* Done with writing */ + e1000_release_eeprom(hw); + + return status; +} + +/** + * e1000_write_eeprom_spi - Writes a 16 bit word to a given offset in an SPI EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u16 widx = 0; + + while (widx < words) { + u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) + return -E1000_ERR_EEPROM; + + e1000_standby_eeprom(hw); + cond_resched(); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, + eeprom->opcode_bits); + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + write_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16)((offset + widx) * 2), + eeprom->address_bits); + + /* Send the data */ + + /* Loop to allow for up to whole page write (32 bytes) of + * eeprom + */ + while (widx < words) { + u16 word_out = data[widx]; + + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_ee_bits(hw, word_out, 16); + widx++; + + /* Some larger eeprom sizes are capable of a 32-byte + * PAGE WRITE operation, while the smaller eeproms are + * capable of an 8-byte PAGE WRITE operation. Break the + * inner loop to pass new address + */ + if ((((offset + widx) * 2) % eeprom->page_size) == 0) { + e1000_standby_eeprom(hw); + break; + } + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom_microwire - Writes a 16 bit word to a given offset in a Microwire EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u16 words_written = 0; + u16 i = 0; + + /* Send the write enable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 11). It's less work to include + * the 11 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This puts the + * EEPROM into write/erase mode. 
+ */ + e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, + (u16)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); + + /* Prepare the EEPROM */ + e1000_standby_eeprom(hw); + + while (words_written < words) { + /* Send the Write command (3-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, + eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16)(offset + words_written), + eeprom->address_bits); + + /* Send the data */ + e1000_shift_out_ee_bits(hw, data[words_written], 16); + + /* Toggle the CS line. This in effect tells the EEPROM to + * execute the previous command. + */ + e1000_standby_eeprom(hw); + + /* Read DO repeatedly until it is high (equal to '1'). The + * EEPROM will signal that the command has been completed by + * raising the DO signal. If DO does not go high in 10 + * milliseconds, then error out. + */ + for (i = 0; i < 200; i++) { + eecd = er32(EECD); + if (eecd & E1000_EECD_DO) + break; + udelay(50); + } + if (i == 200) { + e_dbg("EEPROM Write did not complete\n"); + return -E1000_ERR_EEPROM; + } + + /* Recover from write */ + e1000_standby_eeprom(hw); + cond_resched(); + + words_written++; + } + + /* Send the write disable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 10). It's less work to include + * the 10 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This takes the + * EEPROM out of write/erase mode. + */ + e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, + (u16)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - read the adapters MAC from eeprom + * @hw: Struct containing variables accessed by shared code + * + * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the + * second function of dual function devices + */ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + u16 offset; + u16 eeprom_data, i; + + for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { + offset = i >> 1; + if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF); + hw->perm_mac_addr[i + 1] = (u8)(eeprom_data >> 8); + } + + switch (hw->mac_type) { + default: + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) + hw->perm_mac_addr[5] ^= 0x01; + break; + } + + for (i = 0; i < NODE_ADDRESS_SIZE; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return E1000_SUCCESS; +} + +/** + * e1000_init_rx_addrs - Initializes receive address filters. + * @hw: Struct containing variables accessed by shared code + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + */ +static void e1000_init_rx_addrs(struct e1000_hw *hw) +{ + u32 i; + u32 rar_num; + + /* Setup the receive address. */ + e_dbg("Programming MAC Address into RAR[0]\n"); + + e1000_rar_set(hw, hw->mac_addr, 0); + + rar_num = E1000_RAR_ENTRIES; + + /* Zero out the following 14 receive addresses. 
RAR[15] is for + * manageability + */ + e_dbg("Clearing RAR[1-14]\n"); + for (i = 1; i < rar_num; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table + * @hw: Struct containing variables accessed by shared code + * @mc_addr: the multicast address to hash + */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value = 0; + + /* The portion of the address that is used for the hash table is + * determined by the mc_filter_type setting. + */ + switch (hw->mc_filter_type) { + /* [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + */ + case 0: + /* [47:36] i.e. 0x563 for above example address */ + hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: + /* [46:35] i.e. 0xAC6 for above example address */ + hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: + /* [45:34] i.e. 0x5D8 for above example address */ + hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: + /* [43:32] i.e. 0x634 for above example address */ + hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + } + + hash_value &= 0xFFF; + return hash_value; +} + +/** + * e1000_rar_set - Puts an ethernet address into a receive address register. + * @hw: Struct containing variables accessed by shared code + * @addr: Address to put into receive address register + * @index: Receive address register to write + */ +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx + * unit hang. + * + * Description: + * If there are any Rx frames queued up or otherwise present in the HW + * before RSS is enabled, and then we enable RSS, the HW Rx unit will + * hang. To work around this issue, we have to disable receives and + * flush out all Rx frames before we enable RSS. To do so, we modify we + * redirect all Rx traffic to manageability and then reset the HW. + * This flushes away Rx frames, and (since the redirections to + * manageability persists across resets) keeps new ones from coming in + * while we work. Then, we clear the Address Valid AV bit for all MAC + * addresses and undo the re-direction to manageability. + * Now, frames are coming in again, but the MAC won't accept them, so + * far so good. We now proceed to initialize RSS (if necessary) and + * configure the Rx unit. Last, we re-enable the AV bits and continue + * on our merry way. + */ + switch (hw->mac_type) { + default: + /* Indicate to hardware the Address is Valid. */ + rar_high |= E1000_RAH_AV; + break; + } + + E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table. 
+ * @hw: Struct containing variables accessed by shared code + * @offset: Offset in VLAN filer table to write + * @value: Value to write into VLAN filter table + */ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + u32 temp; + + if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { + temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); + E1000_WRITE_FLUSH(); + } else { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_clear_vfta - Clears the VLAN filer table + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0); + E1000_WRITE_FLUSH(); + } +} + +static s32 e1000_id_led_init(struct e1000_hw *hw) +{ + u32 ledctl; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 eeprom_data, i, temp; + const u16 led_mask = 0x0F; + + if (hw->mac_type < e1000_82540) { + /* Nothing to do */ + return E1000_SUCCESS; + } + + ledctl = er32(LEDCTL); + hw->ledctl_default = ledctl; + hw->ledctl_mode1 = hw->ledctl_default; + hw->ledctl_mode2 = hw->ledctl_default; + + if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + + if ((eeprom_data == ID_LED_RESERVED_0000) || + (eeprom_data == ID_LED_RESERVED_FFFF)) { + eeprom_data = ID_LED_DEFAULT; + } + + for (i = 0; i < 4; i++) { + temp = (eeprom_data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_setup_led + * @hw: Struct containing variables accessed by shared code + * + * Prepares SW controlable LED for use and saves the current state of the LED. 
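+ * On 82541/82547 parts, PHY Smart Power Down is disabled first (and
+ * restored later by e1000_cleanup_led). Fiber links then force LED0
+ * off via LEDCTL, while copper links load the mode1 pattern prepared
+ * by e1000_id_led_init().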
+ */ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + u32 ledctl; + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No setup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn off PHY Smart Power Down (if enabled) */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, + &hw->phy_spd_default); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + (u16)(hw->phy_spd_default & + ~IGP01E1000_GMII_SPD)); + if (ret_val) + return ret_val; + fallthrough; + default: + if (hw->media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + /* Save current LEDCTL settings */ + hw->ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->media_type == e1000_media_type_copper) + ew32(LEDCTL, hw->ledctl_mode1); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores the saved state of the SW controlable LED. + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No cleanup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn on PHY Smart Power Down (if previously enabled) */ + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + hw->phy_spd_default); + if (ret_val) + return ret_val; + fallthrough; + default: + /* Restore LEDCTL settings */ + ew32(LEDCTL, hw->ledctl_default); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turns on the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_on(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode2); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turns off the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_off(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == 
e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode1); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_hw_cntrs - Clears all hardware statistics counters. + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_hw_cntrs(struct e1000_hw *hw) +{ + er32(CRCERRS); + er32(SYMERRS); + er32(MPC); + er32(SCC); + er32(ECOL); + er32(MCC); + er32(LATECOL); + er32(COLC); + er32(DC); + er32(SEC); + er32(RLEC); + er32(XONRXC); + er32(XONTXC); + er32(XOFFRXC); + er32(XOFFTXC); + er32(FCRUC); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + + er32(GPRC); + er32(BPRC); + er32(MPRC); + er32(GPTC); + er32(GORCL); + er32(GORCH); + er32(GOTCL); + er32(GOTCH); + er32(RNBC); + er32(RUC); + er32(RFC); + er32(ROC); + er32(RJC); + er32(TORL); + er32(TORH); + er32(TOTL); + er32(TOTH); + er32(TPR); + er32(TPT); + + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(MPTC); + er32(BPTC); + + if (hw->mac_type < e1000_82543) + return; + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + if (hw->mac_type <= e1000_82544) + return; + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); +} + +/** + * e1000_reset_adaptive - Resets Adaptive IFS to its default state. + * @hw: Struct containing variables accessed by shared code + * + * Call this after e1000_init_hw. You may override the IFS defaults by setting + * hw->ifs_params_forced to true. However, you must initialize hw-> + * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio + * before calling this function. + */ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if (!hw->ifs_params_forced) { + hw->current_ifs_val = 0; + hw->ifs_min_val = IFS_MIN; + hw->ifs_max_val = IFS_MAX; + hw->ifs_step_size = IFS_STEP; + hw->ifs_ratio = IFS_RATIO; + } + hw->in_ifs_mode = false; + ew32(AIT, 0); + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_update_adaptive - update adaptive IFS + * @hw: Struct containing variables accessed by shared code + * + * Called during the callback/watchdog routine to update IFS value based on + * the ratio of transmits to collisions. 
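+ * When the collision rate crosses the threshold set by hw->ifs_ratio
+ * and enough frames have been sent, the IFS value is stepped from
+ * ifs_min_val towards ifs_max_val; when traffic later drops off, the
+ * value is reset to zero and adaptive mode is left again.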
+ */ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { + if (hw->tx_packet_delta > MIN_NUM_XMITS) { + hw->in_ifs_mode = true; + if (hw->current_ifs_val < hw->ifs_max_val) { + if (hw->current_ifs_val == 0) + hw->current_ifs_val = + hw->ifs_min_val; + else + hw->current_ifs_val += + hw->ifs_step_size; + ew32(AIT, hw->current_ifs_val); + } + } + } else { + if (hw->in_ifs_mode && + (hw->tx_packet_delta <= MIN_NUM_XMITS)) { + hw->current_ifs_val = 0; + hw->in_ifs_mode = false; + ew32(AIT, 0); + } + } + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_get_bus_info + * @hw: Struct containing variables accessed by shared code + * + * Gets the current PCI bus type, speed, and width of the hardware + */ +void e1000_get_bus_info(struct e1000_hw *hw) +{ + u32 status; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->bus_type = e1000_bus_type_pci; + hw->bus_speed = e1000_bus_speed_unknown; + hw->bus_width = e1000_bus_width_unknown; + break; + default: + status = er32(STATUS); + hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? + e1000_bus_type_pcix : e1000_bus_type_pci; + + if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { + hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? + e1000_bus_speed_66 : e1000_bus_speed_120; + } else if (hw->bus_type == e1000_bus_type_pci) { + hw->bus_speed = (status & E1000_STATUS_PCI66) ? + e1000_bus_speed_66 : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + hw->bus_speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + hw->bus_speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + hw->bus_speed = e1000_bus_speed_133; + break; + default: + hw->bus_speed = e1000_bus_speed_reserved; + break; + } + } + hw->bus_width = (status & E1000_STATUS_BUS64) ? + e1000_bus_width_64 : e1000_bus_width_32; + break; + } +} + +/** + * e1000_write_reg_io + * @hw: Struct containing variables accessed by shared code + * @offset: offset to write to + * @value: value to write + * + * Writes a value to one of the devices registers using port I/O (as opposed to + * memory mapped I/O). Only 82544 and newer devices support port I/O. + */ +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value) +{ + unsigned long io_addr = hw->io_base; + unsigned long io_data = hw->io_base + 4; + + e1000_io_write(hw, io_addr, offset); + e1000_io_write(hw, io_data, value); +} + +/** + * e1000_get_cable_length - Estimates the cable length. + * @hw: Struct containing variables accessed by shared code + * @min_length: The estimated minimum length + * @max_length: The estimated maximum length + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * This function always returns a ranged length (minimum & maximum). + * So for M88 phy's, this function interprets the one value returned from the + * register to the minimum and maximum range. + * For IGP phy's, the function calculates the range by the AGC registers. 
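+ * For IGP the four per-channel AGC readings are averaged (dropping the
+ * lowest reading for short cables) and the result is looked up in
+ * e1000_igp_cable_length_table, widened by +/- IGP01E1000_AGC_RANGE.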
+ */ +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length) +{ + s32 ret_val; + u16 agc_value = 0; + u16 i, phy_data; + u16 cable_length; + + *min_length = *max_length = 0; + + /* Use old method for Phy older than IGP */ + if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + /* Convert the enum value to ranged values */ + switch (cable_length) { + case e1000_cable_length_50: + *min_length = 0; + *max_length = e1000_igp_cable_length_50; + break; + case e1000_cable_length_50_80: + *min_length = e1000_igp_cable_length_50; + *max_length = e1000_igp_cable_length_80; + break; + case e1000_cable_length_80_110: + *min_length = e1000_igp_cable_length_80; + *max_length = e1000_igp_cable_length_110; + break; + case e1000_cable_length_110_140: + *min_length = e1000_igp_cable_length_110; + *max_length = e1000_igp_cable_length_140; + break; + case e1000_cable_length_140: + *min_length = e1000_igp_cable_length_140; + *max_length = e1000_igp_cable_length_170; + break; + default: + return -E1000_ERR_PHY; + } + } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ + u16 cur_agc_value; + u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_A, + IGP01E1000_PHY_AGC_B, + IGP01E1000_PHY_AGC_C, + IGP01E1000_PHY_AGC_D + }; + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; + + /* Value bound check. */ + if ((cur_agc_value >= + IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || + (cur_agc_value == 0)) + return -E1000_ERR_PHY; + + agc_value += cur_agc_value; + + /* Update minimal AGC value. */ + if (min_agc_value > cur_agc_value) + min_agc_value = cur_agc_value; + } + + /* Remove the minimal AGC result for length < 50m */ + if (agc_value < + IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { + agc_value -= min_agc_value; + + /* Get the average length of the remaining 3 channels */ + agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); + } else { + /* Get the average length of all the 4 channels. */ + agc_value /= IGP01E1000_PHY_CHANNEL_NUM; + } + + /* Set the range of the calculated length. */ + *min_length = ((e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) > 0) ? + (e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) : 0; + *max_length = e1000_igp_cable_length_table[agc_value] + + IGP01E1000_AGC_RANGE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_polarity - Check the cable polarity + * @hw: Struct containing variables accessed by shared code + * @polarity: output parameter : 0 - Polarity is not reversed + * 1 - Polarity is reversed. + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function simply reads the polarity bit in the + * Phy Status register. For IGP phy's, this bit is valid only if link speed is + * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will + * return 0. If the link speed is 1000 Mbps the polarity status is in the + * IGP01E1000_PHY_PCS_INIT_REG. 
+ */ +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_m88) { + /* return the Polarity bit in the Status register. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >> + M88E1000_PSSR_REV_POLARITY_SHIFT) ? + e1000_rev_polarity_reversed : e1000_rev_polarity_normal; + + } else if (hw->phy_type == e1000_phy_igp) { + /* Read the Status register to check the speed */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + /* If speed is 1000 Mbps, must read the + * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status + */ + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Read the GIG initialization PCS register (0x00B4) */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, + &phy_data); + if (ret_val) + return ret_val; + + /* Check the polarity bits */ + *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } else { + /* For 10 Mbps, read the polarity bit in the status + * register. (for 100 Mbps this bit is always 0) + */ + *polarity = + (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_downshift - Check if Downshift occurred + * @hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function reads the Downshift bit in the Phy + * Specific Status register. For IGP phy's, it reads the Downgrade bit in the + * Link Health register. In IGP this bit is latched high, so the driver must + * read it immediately after link is established. + */ +static s32 e1000_check_downshift(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = + (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 
1 : 0; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> + M88E1000_PSSR_DOWNSHIFT_SHIFT; + } + + return E1000_SUCCESS; +} + +static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D +}; + +static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw) +{ + u16 min_length, max_length; + u16 phy_data, i; + s32 ret_val; + + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + if (hw->dsp_config_state != e1000_dsp_config_enabled) + return 0; + + if (min_length >= e1000_igp_cable_length_50) { + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + hw->dsp_config_state = e1000_dsp_config_activated; + } else { + u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; + u32 idle_errs = 0; + + /* clear previous idle error counts */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + udelay(1000); + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + hw->ffe_config_state = e1000_ffe_config_active; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if (ret_val) + return ret_val; + break; + } + + if (idle_errs) + ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } + + return 0; +} + +/** + * e1000_config_dsp_after_link_change + * @hw: Struct containing variables accessed by shared code + * @link_up: was link up at the time this was called + * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + * + * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a + * gigabit link is achieved to improve link quality. + */ + +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) +{ + s32 ret_val; + u16 phy_data, phy_saved_data, speed, duplex, i; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + if (link_up) { + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (speed == SPEED_1000) { + ret_val = e1000_1000Mb_check_cable_length(hw); + if (ret_val) + return ret_val; + } + } else { + if (hw->dsp_config_state == e1000_dsp_config_activated) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. 
+ */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; + + ret_val = + e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->dsp_config_state = e1000_dsp_config_enabled; + } + + if (hw->ffe_config_state == e1000_ffe_config_active) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. + */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_DEFAULT); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->ffe_config_state = e1000_ffe_config_enabled; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_set_phy_mode - Set PHY to class A mode + * @hw: Struct containing variables accessed by shared code + * + * Assumes the following operations will follow to enable the new class mode. + * 1. Do a PHY soft reset + * 2. Restart auto-negotiation or force link. + */ +static s32 e1000_set_phy_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 eeprom_data; + + if ((hw->mac_type == e1000_82545_rev_3) && + (hw->media_type == e1000_media_type_copper)) { + ret_val = + e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, + &eeprom_data); + if (ret_val) + return ret_val; + + if ((eeprom_data != EEPROM_RESERVED_WORD) && + (eeprom_data & EEPROM_PHY_CLASS_A)) { + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, + 0x000B); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, + 0x8104); + if (ret_val) + return ret_val; + + hw->phy_reset_disable = false; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - set d3 link power state + * @hw: Struct containing variables accessed by shared code + * @active: true to enable lplu, false to disable lplu. + * + * This function sets the lplu state according to the active flag. When + * activating lplu this function also disables smart speed and vice versa. + * lplu will not be activated unless the device autonegotiation advertisement + * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + */ +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + /* During driver activity LPLU should not be used or it will attain link + * from the lowest speeds starting from 10Mbps. The capability is used + * for Dx transitions and states + */ + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); + if (ret_val) + return ret_val; + } + + if (!active) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (hw->smart_speed == e1000_smart_speed_on) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } else if (hw->smart_speed == e1000_smart_speed_off) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* When LPLU is enabled we should disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + return E1000_SUCCESS; +} + +/** + * e1000_set_vco_speed + * @hw: Struct containing variables accessed by shared code + * + * Change VCO speed register to improve Bit Error Rate performance of SERDES. 
+ */ +static s32 e1000_set_vco_speed(struct e1000_hw *hw) +{ + s32 ret_val; + u16 default_page = 0; + u16 phy_data; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + /* Set PHY register 30, page 5, bit 8 to 0 */ + + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Set PHY register 30, page 4, bit 11 to 1 */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PHY_VCO_REG_BIT11; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + +/** + * e1000_enable_mng_pass_thru - check for bmc pass through + * @hw: Struct containing variables accessed by shared code + * + * Verifies the hardware needs to allow ARPs to be processed by the host + * returns: - true/false + */ +u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + + if (hw->asf_firmware_present) { + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN) || + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + return false; + if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) + return true; + } + return false; +} + +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 i; + + /* Polarity reversal workaround for forced 10F/10H links. */ + + /* Disable the transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the NO link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be clear. 
+ */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) + break; + msleep(100); + } + + /* Recommended delay time after link has been lost */ + msleep(1000); + + /* Now we will re-enable the transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be set. + */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_get_auto_rd_done + * @hw: Struct containing variables accessed by shared code + * + * Check for EEPROM Auto Read bit done. + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) +{ + msleep(5); + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_cfg_done + * @hw: Struct containing variables accessed by shared code + * + * Checks if the PHY configuration is done + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) +{ + msleep(10); + return E1000_SUCCESS; +} diff --git a/devices/e1000/e1000_hw-5.15-orig.h b/devices/e1000/e1000_hw-5.15-orig.h new file mode 100644 index 00000000..b57a0495 --- /dev/null +++ b/devices/e1000/e1000_hw-5.15-orig.h @@ -0,0 +1,3085 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation.
*/ + +/* e1000_hw.h + * Structures, enums, and macros for the MAC + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep.h" + + +/* Forward declarations of structures used by the shared code */ +struct e1000_hw; +struct e1000_hw_stats; + +/* Enumerated types specific to the e1000 hardware */ +/* Media Access Controllers */ +typedef enum { + e1000_undefined = 0, + e1000_82542_rev2_0, + e1000_82542_rev2_1, + e1000_82543, + e1000_82544, + e1000_82540, + e1000_82545, + e1000_82545_rev_3, + e1000_82546, + e1000_ce4100, + e1000_82546_rev_3, + e1000_82541, + e1000_82541_rev_2, + e1000_82547, + e1000_82547_rev_2, + e1000_num_macs +} e1000_mac_type; + +typedef enum { + e1000_eeprom_uninitialized = 0, + e1000_eeprom_spi, + e1000_eeprom_microwire, + e1000_eeprom_flash, + e1000_eeprom_none, /* No NVM support */ + e1000_num_eeprom_types +} e1000_eeprom_type; + +/* Media Types */ +typedef enum { + e1000_media_type_copper = 0, + e1000_media_type_fiber = 1, + e1000_media_type_internal_serdes = 2, + e1000_num_media_types +} e1000_media_type; + +typedef enum { + e1000_10_half = 0, + e1000_10_full = 1, + e1000_100_half = 2, + e1000_100_full = 3 +} e1000_speed_duplex_type; + +/* Flow Control Settings */ +typedef enum { + E1000_FC_NONE = 0, + E1000_FC_RX_PAUSE = 1, + E1000_FC_TX_PAUSE = 2, + E1000_FC_FULL = 3, + E1000_FC_DEFAULT = 0xFF +} e1000_fc_type; + +struct e1000_shadow_ram { + u16 eeprom_word; + bool modified; +}; + +/* PCI bus types */ +typedef enum { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_reserved +} e1000_bus_type; + +/* PCI bus speeds */ +typedef enum { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_reserved +} e1000_bus_speed; + +/* PCI bus widths */ +typedef enum { + e1000_bus_width_unknown = 0, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +} e1000_bus_width; + +/* PHY status info structure and supporting enums */ +typedef enum { + e1000_cable_length_50 = 0, + e1000_cable_length_50_80, + e1000_cable_length_80_110, + e1000_cable_length_110_140, + e1000_cable_length_140, + e1000_cable_length_undefined = 0xFF +} e1000_cable_length; + +typedef enum { + e1000_gg_cable_length_60 = 0, + e1000_gg_cable_length_60_115 = 1, + e1000_gg_cable_length_115_150 = 2, + e1000_gg_cable_length_150 = 4 +} e1000_gg_cable_length; + +typedef enum { + e1000_igp_cable_length_10 = 10, + e1000_igp_cable_length_20 = 20, + e1000_igp_cable_length_30 = 30, + e1000_igp_cable_length_40 = 40, + e1000_igp_cable_length_50 = 50, + e1000_igp_cable_length_60 = 60, + e1000_igp_cable_length_70 = 70, + e1000_igp_cable_length_80 = 80, + e1000_igp_cable_length_90 = 90, + e1000_igp_cable_length_100 = 100, + e1000_igp_cable_length_110 = 110, + e1000_igp_cable_length_115 = 115, + e1000_igp_cable_length_120 = 120, + e1000_igp_cable_length_130 = 130, + e1000_igp_cable_length_140 = 140, + e1000_igp_cable_length_150 = 150, + e1000_igp_cable_length_160 = 160, + e1000_igp_cable_length_170 = 170, + e1000_igp_cable_length_180 = 180 +} e1000_igp_cable_length; + +typedef enum { + e1000_10bt_ext_dist_enable_normal = 0, + e1000_10bt_ext_dist_enable_lower, + e1000_10bt_ext_dist_enable_undefined = 0xFF +} e1000_10bt_ext_dist_enable; + +typedef enum { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +} e1000_rev_polarity; + +typedef enum { + e1000_downshift_normal = 0, + e1000_downshift_activated, 
+ e1000_downshift_undefined = 0xFF +} e1000_downshift; + +typedef enum { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +} e1000_smart_speed; + +typedef enum { + e1000_polarity_reversal_enabled = 0, + e1000_polarity_reversal_disabled, + e1000_polarity_reversal_undefined = 0xFF +} e1000_polarity_reversal; + +typedef enum { + e1000_auto_x_mode_manual_mdi = 0, + e1000_auto_x_mode_manual_mdix, + e1000_auto_x_mode_auto1, + e1000_auto_x_mode_auto2, + e1000_auto_x_mode_undefined = 0xFF +} e1000_auto_x_mode; + +typedef enum { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +} e1000_1000t_rx_status; + +typedef enum { + e1000_phy_m88 = 0, + e1000_phy_igp, + e1000_phy_8211, + e1000_phy_8201, + e1000_phy_undefined = 0xFF +} e1000_phy_type; + +typedef enum { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +} e1000_ms_type; + +typedef enum { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active, + e1000_ffe_config_blocked +} e1000_ffe_config; + +typedef enum { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled, + e1000_dsp_config_activated, + e1000_dsp_config_undefined = 0xFF +} e1000_dsp_config; + +struct e1000_phy_info { + e1000_cable_length cable_length; + e1000_10bt_ext_dist_enable extended_10bt_distance; + e1000_rev_polarity cable_polarity; + e1000_downshift downshift; + e1000_polarity_reversal polarity_correction; + e1000_auto_x_mode mdix_mode; + e1000_1000t_rx_status local_rx; + e1000_1000t_rx_status remote_rx; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_eeprom_info { + e1000_eeprom_type type; + u16 word_size; + u16 opcode_bits; + u16 address_bits; + u16 delay_usec; + u16 page_size; +}; + +/* Flex ASF Information */ +#define E1000_HOST_IF_MAX_SIZE 2048 + +typedef enum { + e1000_byte_align = 0, + e1000_word_align = 1, + e1000_dword_align = 2 +} e1000_align_type; + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_EEPROM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_TYPE 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 + +#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \ + (((_value) & 0xff00) >> 8)) + +/* Function prototypes */ +/* Initialization */ +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_set_mac_type(struct e1000_hw *hw); +void e1000_set_media_type(struct e1000_hw *hw); + +/* Link Configuration */ +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex); +s32 e1000_force_mac_fc(struct e1000_hw *hw); + +/* PHY */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_reset(struct e1000_hw *hw); +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); + +/* EEPROM Functions */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw); + +/* MNG HOST IF functions */ +u32 
e1000_enable_mng_pass_thru(struct e1000_hw *hw); + +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ + +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_ICH_IAMT_MODE 0x2 +#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ + +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */ +#define E1000_VFTA_ENTRY_SHIFT 0x5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658 */ +}; +#ifdef __BIG_ENDIAN +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u16 vlan_id; + u8 reserved0; + u8 status; + u32 reserved1; + u8 checksum; + u8 reserved3; + u16 reserved2; +}; +#else +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; +#endif + +bool e1000_check_mng_mode(struct e1000_hw *hw); +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_read_mac_addr(struct e1000_hw *hw); + +/* Filters (multicast, vlan, receive) */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr); +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value); +void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); + +/* LED functions */ +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +s32 e1000_blink_led_start(struct e1000_hw *hw); + +/* Adaptive IFS Functions */ + +/* Everything else */ +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +void e1000_get_bus_info(struct e1000_hw *hw); +void e1000_pci_set_mwi(struct e1000_hw *hw); +void e1000_pci_clear_mwi(struct e1000_hw *hw); +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); +int e1000_pcix_get_mmrbc(struct e1000_hw *hw); +/* Port I/O is only supported on 82544 and newer */ +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value); + +#define E1000_READ_REG_IO(a, reg) \ + e1000_read_reg_io((a), E1000_##reg) +#define E1000_WRITE_REG_IO(a, reg, val) \ + e1000_write_reg_io((a), E1000_##reg, val) + +/* PCI Device IDs */ +#define E1000_DEV_ID_82542 0x1000 +#define E1000_DEV_ID_82543GC_FIBER 0x1001 +#define E1000_DEV_ID_82543GC_COPPER 0x1004 +#define E1000_DEV_ID_82544EI_COPPER 0x1008 +#define E1000_DEV_ID_82544EI_FIBER 0x1009 +#define E1000_DEV_ID_82544GC_COPPER 0x100C +#define E1000_DEV_ID_82544GC_LOM 0x100D +#define E1000_DEV_ID_82540EM 0x100E +#define 
E1000_DEV_ID_82540EM_LOM 0x1015 +#define E1000_DEV_ID_82540EP_LOM 0x1016 +#define E1000_DEV_ID_82540EP 0x1017 +#define E1000_DEV_ID_82540EP_LP 0x101E +#define E1000_DEV_ID_82545EM_COPPER 0x100F +#define E1000_DEV_ID_82545EM_FIBER 0x1011 +#define E1000_DEV_ID_82545GM_COPPER 0x1026 +#define E1000_DEV_ID_82545GM_FIBER 0x1027 +#define E1000_DEV_ID_82545GM_SERDES 0x1028 +#define E1000_DEV_ID_82546EB_COPPER 0x1010 +#define E1000_DEV_ID_82546EB_FIBER 0x1012 +#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define E1000_DEV_ID_82541EI 0x1013 +#define E1000_DEV_ID_82541EI_MOBILE 0x1018 +#define E1000_DEV_ID_82541ER_LOM 0x1014 +#define E1000_DEV_ID_82541ER 0x1078 +#define E1000_DEV_ID_82547GI 0x1075 +#define E1000_DEV_ID_82541GI 0x1076 +#define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82541GI_LF 0x107C +#define E1000_DEV_ID_82546GB_COPPER 0x1079 +#define E1000_DEV_ID_82546GB_FIBER 0x107A +#define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82547EI_MOBILE 0x101A +#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E + +#define NODE_ADDRESS_SIZE 6 + +/* MAC decode size is 128K - This is the size of BAR0 */ +#define MAC_DECODE_SIZE (128 * 1024) + +#define E1000_82542_2_0_REV_ID 2 +#define E1000_82542_2_1_REV_ID 3 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* The sizes (in bytes) of a ethernet packet */ +#define ENET_HEADER_SIZE 14 +#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ +#define ETHERNET_FCS_SIZE 4 +#define MINIMUM_ETHERNET_PACKET_SIZE \ + (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE) +#define CRC_LENGTH ETHERNET_FCS_SIZE +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* 802.1q VLAN Packet Sizes */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */ + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ +#define ETHERNET_IP_TYPE 0x0800 /* IP packets */ +#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */ + +/* Packet Header defines */ +#define IP_PROTOCOL_TCP 6 +#define IP_PROTOCOL_UDP 0x11 + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + */ +#define POLL_IMS_ENABLE_MASK ( \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ) + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. We + * reserve one of these spots for our directed address, allowing us room for + * E1000_RAR_ENTRIES - 1 multicast addresses. 
+ */ +#define E1000_RAR_ENTRIES 15 + +#define MIN_NUMBER_OF_DESCRIPTORS 8 +#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 + +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of buffers 1-3 */ + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ 
+ E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; /* */ + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; /* */ + u8 cmd; /* */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; /* */ + } fields; + } upper; +}; + +/* Filters */ +#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ +#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address Register */ +struct e1000_rar { + volatile __le32 low; /* receive address low */ + volatile __le32 high; /* receive address high */ +}; + +/* Number of entries in the 
Multicast Table Array (MTA). */ +#define E1000_NUM_MTA_REGISTERS 128 + +/* IPv4 Address Table Entry */ +struct e1000_ipv4_at_entry { + volatile u32 ipv4_addr; /* IP Address (RW) */ + volatile u32 reserved; +}; + +/* Four wakeup IP addresses are supported */ +#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 +#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX +#define E1000_IP6AT_SIZE 1 + +/* IPv6 Address Table Entry */ +struct e1000_ipv6_at_entry { + volatile u8 ipv6_addr[16]; +}; + +/* Flexible Filter Length Table Entry */ +struct e1000_fflt_entry { + volatile u32 length; /* Flexible Filter Length (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Mask Table Entry */ +struct e1000_ffmt_entry { + volatile u32 mask; /* Flexible Filter Mask (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Value Table Entry */ +struct e1000_ffvt_entry { + volatile u32 value; /* Flexible Filter Value (RW) */ + volatile u32 reserved; +}; + +/* Four Flexible Filters are supported */ +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX + +#define E1000_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Register Set. (82543, 82544) + * + * Registers are defined to be 32 bits and should be accessed as 32 bit values. + * These registers are physically located on the NIC, but are mapped into the + * host memory address space. + * + * RW - register is both readable and writable + * RO - register is read only + * WO - register is write only + * R/clr - register is read only and is cleared when read + * A - register array + */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ + +#define INTEL_CE_GBE_MDIO_RCOMP_BASE (hw->ce4100_gbe_mdio_base_virt) +#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) +#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) +#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) +#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC) +#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20) +#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24) + +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ + +/* Auxiliary Control Register. 
This register is CE4100 specific, + * RMII/RGMII function is switched by this register - RW + * Following are bits definitions of the Auxiliary Control Register + */ +#define E1000_CTL_AUX 0x000E0 +#define E1000_CTL_AUX_END_SEL_SHIFT 10 +#define E1000_CTL_AUX_ENDIANESS_SHIFT 8 +#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0 + +/* descriptor and packet transfer use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use CTL_AUX.ENDIANESS, packet use default */ +#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use default, packet use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT) +/* all use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT) + +#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT) +#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT) + +/* LW little endian, Byte big endian */ +#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT) + +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define FEXTNVM_SW_CONFIG 0x0001 +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_FLASH_UPDATES 1000 +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* RX Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* RX Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* RX 
Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* RX Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* RX Data FIFO Packet Count - RW */ +#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ +#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ +#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ +#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ +#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ +#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ +#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */ +#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ +#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ +#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ +#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ +#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ +#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ +#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ +#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ +#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */ +#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */ +#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */ +#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */ +#define E1000_TDT 0x03818 /* TX Descripotr Tail - RW */ +#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */ +#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ +#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ +#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ +#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ +#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ +#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ +#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ +#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ +#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define 
E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 
0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ + +#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ + +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* Register Set (82542) + * + * Some of the 82542 registers are located at different offsets than they are + * in more current versions of the 8254x. Despite the difference in location, + * the registers function in the same manner. 
+ */ +#define E1000_82542_CTL_AUX E1000_CTL_AUX +#define E1000_82542_CTRL E1000_CTRL +#define E1000_82542_CTRL_DUP E1000_CTRL_DUP +#define E1000_82542_STATUS E1000_STATUS +#define E1000_82542_EECD E1000_EECD +#define E1000_82542_EERD E1000_EERD +#define E1000_82542_CTRL_EXT E1000_CTRL_EXT +#define E1000_82542_FLA E1000_FLA +#define E1000_82542_MDIC E1000_MDIC +#define E1000_82542_SCTL E1000_SCTL +#define E1000_82542_FEXTNVM E1000_FEXTNVM +#define E1000_82542_FCAL E1000_FCAL +#define E1000_82542_FCAH E1000_FCAH +#define E1000_82542_FCT E1000_FCT +#define E1000_82542_VET E1000_VET +#define E1000_82542_RA 0x00040 +#define E1000_82542_ICR E1000_ICR +#define E1000_82542_ITR E1000_ITR +#define E1000_82542_ICS E1000_ICS +#define E1000_82542_IMS E1000_IMS +#define E1000_82542_IMC E1000_IMC +#define E1000_82542_RCTL E1000_RCTL +#define E1000_82542_RDTR 0x00108 +#define E1000_82542_RDFH E1000_RDFH +#define E1000_82542_RDFT E1000_RDFT +#define E1000_82542_RDFHS E1000_RDFHS +#define E1000_82542_RDFTS E1000_RDFTS +#define E1000_82542_RDFPC E1000_RDFPC +#define E1000_82542_RDBAL 0x00110 +#define E1000_82542_RDBAH 0x00114 +#define E1000_82542_RDLEN 0x00118 +#define E1000_82542_RDH 0x00120 +#define E1000_82542_RDT 0x00128 +#define E1000_82542_RDTR0 E1000_82542_RDTR +#define E1000_82542_RDBAL0 E1000_82542_RDBAL +#define E1000_82542_RDBAH0 E1000_82542_RDBAH +#define E1000_82542_RDLEN0 E1000_82542_RDLEN +#define E1000_82542_RDH0 E1000_82542_RDH +#define E1000_82542_RDT0 E1000_82542_RDT +#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication + * RX Control - RW */ +#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) +#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ +#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ +#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ +#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ +#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ +#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ +#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ +#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ +#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ +#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ +#define E1000_82542_RDTR1 0x00130 +#define E1000_82542_RDBAL1 0x00138 +#define E1000_82542_RDBAH1 0x0013C +#define E1000_82542_RDLEN1 0x00140 +#define E1000_82542_RDH1 0x00148 +#define E1000_82542_RDT1 0x00150 +#define E1000_82542_FCRTH 0x00160 +#define E1000_82542_FCRTL 0x00168 +#define E1000_82542_FCTTV E1000_FCTTV +#define E1000_82542_TXCW E1000_TXCW +#define E1000_82542_RXCW E1000_RXCW +#define E1000_82542_MTA 0x00200 +#define E1000_82542_TCTL E1000_TCTL +#define E1000_82542_TCTL_EXT E1000_TCTL_EXT +#define E1000_82542_TIPG E1000_TIPG +#define E1000_82542_TDBAL 0x00420 +#define E1000_82542_TDBAH 0x00424 +#define E1000_82542_TDLEN 0x00428 +#define E1000_82542_TDH 0x00430 +#define E1000_82542_TDT 0x00438 +#define E1000_82542_TIDV 0x00440 +#define E1000_82542_TBT E1000_TBT +#define E1000_82542_AIT E1000_AIT +#define E1000_82542_VFTA 0x00600 +#define E1000_82542_LEDCTL E1000_LEDCTL +#define E1000_82542_PBA E1000_PBA +#define E1000_82542_PBS E1000_PBS +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_EEARBC E1000_EEARBC +#define E1000_82542_FLASHT E1000_FLASHT +#define E1000_82542_EEWR E1000_EEWR +#define E1000_82542_FLSWCTL E1000_FLSWCTL +#define E1000_82542_FLSWDATA 
E1000_FLSWDATA +#define E1000_82542_FLSWCNT E1000_FLSWCNT +#define E1000_82542_FLOP E1000_FLOP +#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL +#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE +#define E1000_82542_PHY_CTRL E1000_PHY_CTRL +#define E1000_82542_ERT E1000_ERT +#define E1000_82542_RXDCTL E1000_RXDCTL +#define E1000_82542_RXDCTL1 E1000_RXDCTL1 +#define E1000_82542_RADV E1000_RADV +#define E1000_82542_RSRPD E1000_RSRPD +#define E1000_82542_TXDMAC E1000_TXDMAC +#define E1000_82542_KABGTXD E1000_KABGTXD +#define E1000_82542_TDFHS E1000_TDFHS +#define E1000_82542_TDFTS E1000_TDFTS +#define E1000_82542_TDFPC E1000_TDFPC +#define E1000_82542_TXDCTL E1000_TXDCTL +#define E1000_82542_TADV E1000_TADV +#define E1000_82542_TSPMT E1000_TSPMT +#define E1000_82542_CRCERRS E1000_CRCERRS +#define E1000_82542_ALGNERRC E1000_ALGNERRC +#define E1000_82542_SYMERRS E1000_SYMERRS +#define E1000_82542_RXERRC E1000_RXERRC +#define E1000_82542_MPC E1000_MPC +#define E1000_82542_SCC E1000_SCC +#define E1000_82542_ECOL E1000_ECOL +#define E1000_82542_MCC E1000_MCC +#define E1000_82542_LATECOL E1000_LATECOL +#define E1000_82542_COLC E1000_COLC +#define E1000_82542_DC E1000_DC +#define E1000_82542_TNCRS E1000_TNCRS +#define E1000_82542_SEC E1000_SEC +#define E1000_82542_CEXTERR E1000_CEXTERR +#define E1000_82542_RLEC E1000_RLEC +#define E1000_82542_XONRXC E1000_XONRXC +#define E1000_82542_XONTXC E1000_XONTXC +#define E1000_82542_XOFFRXC E1000_XOFFRXC +#define E1000_82542_XOFFTXC E1000_XOFFTXC +#define E1000_82542_FCRUC E1000_FCRUC +#define E1000_82542_PRC64 E1000_PRC64 +#define E1000_82542_PRC127 E1000_PRC127 +#define E1000_82542_PRC255 E1000_PRC255 +#define E1000_82542_PRC511 E1000_PRC511 +#define E1000_82542_PRC1023 E1000_PRC1023 +#define E1000_82542_PRC1522 E1000_PRC1522 +#define E1000_82542_GPRC E1000_GPRC +#define E1000_82542_BPRC E1000_BPRC +#define E1000_82542_MPRC E1000_MPRC +#define E1000_82542_GPTC E1000_GPTC +#define E1000_82542_GORCL E1000_GORCL +#define E1000_82542_GORCH E1000_GORCH +#define E1000_82542_GOTCL E1000_GOTCL +#define E1000_82542_GOTCH E1000_GOTCH +#define E1000_82542_RNBC E1000_RNBC +#define E1000_82542_RUC E1000_RUC +#define E1000_82542_RFC E1000_RFC +#define E1000_82542_ROC E1000_ROC +#define E1000_82542_RJC E1000_RJC +#define E1000_82542_MGTPRC E1000_MGTPRC +#define E1000_82542_MGTPDC E1000_MGTPDC +#define E1000_82542_MGTPTC E1000_MGTPTC +#define E1000_82542_TORL E1000_TORL +#define E1000_82542_TORH E1000_TORH +#define E1000_82542_TOTL E1000_TOTL +#define E1000_82542_TOTH E1000_TOTH +#define E1000_82542_TPR E1000_TPR +#define E1000_82542_TPT E1000_TPT +#define E1000_82542_PTC64 E1000_PTC64 +#define E1000_82542_PTC127 E1000_PTC127 +#define E1000_82542_PTC255 E1000_PTC255 +#define E1000_82542_PTC511 E1000_PTC511 +#define E1000_82542_PTC1023 E1000_PTC1023 +#define E1000_82542_PTC1522 E1000_PTC1522 +#define E1000_82542_MPTC E1000_MPTC +#define E1000_82542_BPTC E1000_BPTC +#define E1000_82542_TSCTC E1000_TSCTC +#define E1000_82542_TSCTFC E1000_TSCTFC +#define E1000_82542_RXCSUM E1000_RXCSUM +#define E1000_82542_WUC E1000_WUC +#define E1000_82542_WUFC E1000_WUFC +#define E1000_82542_WUS E1000_WUS +#define E1000_82542_MANC E1000_MANC +#define E1000_82542_IPAV E1000_IPAV +#define E1000_82542_IP4AT E1000_IP4AT +#define E1000_82542_IP6AT E1000_IP6AT +#define E1000_82542_WUPL E1000_WUPL +#define E1000_82542_WUPM E1000_WUPM +#define E1000_82542_FFLT E1000_FFLT +#define E1000_82542_TDFH 0x08010 +#define E1000_82542_TDFT 0x08018 +#define E1000_82542_FFMT E1000_FFMT +#define 
E1000_82542_FFVT E1000_FFVT +#define E1000_82542_HOST_IF E1000_HOST_IF +#define E1000_82542_IAM E1000_IAM +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_PSRCTL E1000_PSRCTL +#define E1000_82542_RAID E1000_RAID +#define E1000_82542_TARC0 E1000_TARC0 +#define E1000_82542_TDBAL1 E1000_TDBAL1 +#define E1000_82542_TDBAH1 E1000_TDBAH1 +#define E1000_82542_TDLEN1 E1000_TDLEN1 +#define E1000_82542_TDH1 E1000_TDH1 +#define E1000_82542_TDT1 E1000_TDT1 +#define E1000_82542_TXDCTL1 E1000_TXDCTL1 +#define E1000_82542_TARC1 E1000_TARC1 +#define E1000_82542_RFCTL E1000_RFCTL +#define E1000_82542_GCR E1000_GCR +#define E1000_82542_GSCL_1 E1000_GSCL_1 +#define E1000_82542_GSCL_2 E1000_GSCL_2 +#define E1000_82542_GSCL_3 E1000_GSCL_3 +#define E1000_82542_GSCL_4 E1000_GSCL_4 +#define E1000_82542_FACTPS E1000_FACTPS +#define E1000_82542_SWSM E1000_SWSM +#define E1000_82542_FWSM E1000_FWSM +#define E1000_82542_FFLT_DBG E1000_FFLT_DBG +#define E1000_82542_IAC E1000_IAC +#define E1000_82542_ICRXPTC E1000_ICRXPTC +#define E1000_82542_ICRXATC E1000_ICRXATC +#define E1000_82542_ICTXPTC E1000_ICTXPTC +#define E1000_82542_ICTXATC E1000_ICTXATC +#define E1000_82542_ICTXQEC E1000_ICTXQEC +#define E1000_82542_ICTXQMTC E1000_ICTXQMTC +#define E1000_82542_ICRXDMTC E1000_ICRXDMTC +#define E1000_82542_ICRXOC E1000_ICRXOC +#define E1000_82542_HICR E1000_HICR + +#define E1000_82542_CPUVEC E1000_CPUVEC +#define E1000_82542_MRQC E1000_MRQC +#define E1000_82542_RETA E1000_RETA +#define E1000_82542_RSSRK E1000_RSSRK +#define E1000_82542_RSSIM E1000_RSSIM +#define E1000_82542_RSSIR E1000_RSSIR +#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA +#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 txerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorcl; + u64 gorch; + u64 gotcl; + u64 gotch; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rlerrc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 torl; + u64 torh; + u64 totl; + u64 toth; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +/* Structure containing variables used by the shared code (e1000_hw.c) */ +struct e1000_hw { + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + void __iomem *ce4100_gbe_mdio_base_virt; + e1000_mac_type mac_type; + e1000_phy_type phy_type; + u32 phy_init_script; + e1000_media_type media_type; + void *back; + struct e1000_shadow_ram *eeprom_shadow_ram; + u32 flash_bank_size; + u32 flash_base_addr; + e1000_fc_type fc; + e1000_bus_speed bus_speed; + e1000_bus_width bus_width; + e1000_bus_type bus_type; + struct e1000_eeprom_info eeprom; + e1000_ms_type master_slave; + e1000_ms_type original_master_slave; + e1000_ffe_config ffe_config_state; + u32 asf_firmware_present; + u32 eeprom_semaphore_present; + unsigned long io_base; + u32 phy_id; + u32 phy_revision; + u32 phy_addr; + u32 original_fc; + u32 txcw; + u32 autoneg_failed; + u32 max_frame_size; + u32 
min_frame_size; + u32 mc_filter_type; + u32 num_mc_addrs; + u32 collision_delta; + u32 tx_packet_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + bool tx_pkt_filtering; + struct e1000_host_mng_dhcp_cookie mng_cookie; + u16 phy_spd_default; + u16 autoneg_advertised; + u16 pci_cmd_word; + u16 fc_high_water; + u16 fc_low_water; + u16 fc_pause_time; + u16 current_ifs_val; + u16 ifs_min_val; + u16 ifs_max_val; + u16 ifs_step_size; + u16 ifs_ratio; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 autoneg; + u8 mdix; + u8 forced_speed_duplex; + u8 wait_autoneg_complete; + u8 dma_fairness; + u8 mac_addr[NODE_ADDRESS_SIZE]; + u8 perm_mac_addr[NODE_ADDRESS_SIZE]; + bool disable_polarity_correction; + bool speed_downgraded; + e1000_smart_speed smart_speed; + e1000_dsp_config dsp_config_state; + bool get_link_status; + bool serdes_has_link; + bool tbi_compatibility_en; + bool tbi_compatibility_on; + bool laa_is_present; + bool phy_reset_disable; + bool initialize_hw_bits_disable; + bool fc_send_xon; + bool fc_strict_ieee; + bool report_tx_early; + bool adaptive_ifs; + bool ifs_params_forced; + bool in_ifs_mode; + bool mng_reg_access_disabled; + bool leave_av_bit_off; + bool bad_tx_carr_stats_fd; + bool has_smbus; +}; + +#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ +#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ +#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ +#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ +/* Register Bit Masks */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion + by EEPROM/Flash */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ + +/* EEPROM/Flash Control */ +#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ +#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ +#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ +#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_EEPROM_GRANT_ATTEMPTS +#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_STM_OPCODE 0xDB00 +#define E1000_HICR_FW_RESET 0xC0 + +#define E1000_SHADOW_RAM_WORDS 2048 +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC0 + +/* EEPROM Read */ +#define E1000_EERD_START 0x00000001 /* Start Read */ +#define E1000_EERD_DONE 0x00000010 /* Read Done */ +#define E1000_EERD_ADDR_SHIFT 8 +#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */ +#define E1000_EERD_DATA_SHIFT 16 +#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */ + +/* SPI EEPROM Status Register */ +#define EEPROM_STATUS_RDY_SPI 0x01 +#define EEPROM_STATUS_WEN_SPI 
0x02 +#define EEPROM_STATUS_BP0_SPI 0x04 +#define EEPROM_STATUS_BP1_SPI 0x08 +#define EEPROM_STATUS_WPEN_SPI 0x80 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 + +#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000 +#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000 +#define INTEL_CE_GBE_MDIC_GO 0x80000000 +#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000 + +#define E1000_KUMCTRLSTA_MASK 0x0000FFFF +#define E1000_KUMCTRLSTA_OFFSET 0x001F0000 +#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KUMCTRLSTA_REN 0x00200000 + +#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 +#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001 +#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 +#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003 +#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 
0x00000004 +#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 +#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 +#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E +#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F + +/* FIFO Control */ +#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008 +#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 + +/* In-Band Control */ +#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500 +#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 + +/* Half-Duplex Control */ +#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 +#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 + +#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E + +#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000 +#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000 + +#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000 +#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000 +#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 +#define E1000_PHY_CTRL_B2B_EN 0x00000080 + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Receive Address */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Clear */ +#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMC_SRPD E1000_ICR_SRPD +#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMC_DSW E1000_ICR_DSW +#define E1000_IMC_PHYINT E1000_ICR_PHYINT +#define E1000_IMC_EPRST E1000_ICR_EPRST + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define 
E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SW_W_SYNC definitions */ +#define E1000_SWFW_EEP_SM 0x0001 +#define E1000_SWFW_PHY0_SM 0x0002 +#define E1000_SWFW_PHY1_SM 0x0004 +#define E1000_SWFW_MAC_CSR_SM 0x0008 + +/* Receive Descriptor */ +#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ +#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ +#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */ +#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */ +#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */ + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Receive Descriptor Control */ +#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ +#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ +#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */ +#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 
0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. + still to be processed. */ +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ +/* Extended Transmit Control */ +#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ + +/* Receive Checksum Control */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Multiple Receive Queue Control */ +#define E1000_MRQC_ENABLE_MASK 0x00000003 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 
/* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */ +#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */ +#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */ +#define E1000_WUS_BC 0x00000010 /* Broadcast Received */ +#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */ +#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */ +#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */ +#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */ +#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */ +#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery + * Filtering */ +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host + * memory */ +#define 
E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address + * filtering */ +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* FW Semaphore Register */ +#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ + +#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ +#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ +#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ +#define E1000_FWSM_SKUEL_SHIFT 29 +#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ +#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ +#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ +#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */ + +/* FFLT Debug Register */ +#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ + +typedef enum { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_interface_only +} e1000_mng_mode; + +/* Host Interface Control Register */ +#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ +#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done + * to put command in RAM */ +#define E1000_HICR_SV 0x00000004 /* Status Validity */ +#define E1000_HICR_FWR 0x00000080 /* FW reset. 
Set by the Host */ + +/* Host Interface Command Interface - Address range 0x8800-0x8EFF */ +#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ + +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; /* I/F bits for command, status for return */ + u8 checksum; +}; +struct e1000_host_command_info { + struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ +}; + +/* Host SMB register #0 */ +#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ +#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ +#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ +#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ + +/* Host SMB register #1 */ +#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN +#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN +#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT +#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT + +/* FW Status Register */ +#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ + +/* Wake Up Packet Length */ +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ + +#define E1000_MDALIGN 4096 + +/* PCI-Ex registers*/ + +/* PCI-Ex Control Register */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +/* Function Active and Power State to MNG */ +#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 +#define E1000_FACTPS_LAN0_VALID 0x00000004 +#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008 +#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0 +#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6 +#define E1000_FACTPS_LAN1_VALID 0x00000100 +#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200 +#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000 +#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12 +#define E1000_FACTPS_IDE_ENABLE 0x00004000 +#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000 +#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000 +#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18 +#define E1000_FACTPS_SP_ENABLE 0x00100000 +#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000 +#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000 +#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24 +#define E1000_FACTPS_IPMI_ENABLE 0x04000000 +#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000 +#define E1000_FACTPS_MNGCG 0x20000000 +#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000 +#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000 + +/* PCI-Ex Config Space */ +#define PCI_EX_LINK_STATUS 0x12 +#define PCI_EX_LINK_WIDTH_MASK 0x3F0 +#define PCI_EX_LINK_WIDTH_SHIFT 4 + +/* EEPROM Commands - Microwire */ +#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode 
*/ +#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM Commands - SPI */ +#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ +#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ +#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ +#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ +#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Size definitions */ +#define EEPROM_WORD_SIZE_SHIFT 6 +#define EEPROM_SIZE_SHIFT 10 +#define EEPROM_SIZE_MASK 0x1C00 + +/* EEPROM Word Offsets */ +#define EEPROM_COMPAT 0x0003 +#define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 +#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ +#define EEPROM_PHY_CLASS_WORD 0x0007 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 +#define EEPROM_INIT_3GIO_3 0x001A +#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 +#define EEPROM_CFG 0x0012 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ +#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */ +#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F + +/* Mask bit for PHY class in Word 7 of the EEPROM */ +#define EEPROM_PHY_CLASS_A 0x8000 + +/* Mask bits for fields in Word 0x0a of the EEPROM */ +#define EEPROM_WORD0A_ILOS 0x0010 +#define EEPROM_WORD0A_SWDPIO 0x01E0 +#define EEPROM_WORD0A_LRST 0x0200 +#define EEPROM_WORD0A_FD 0x0400 +#define EEPROM_WORD0A_66MHZ 0x0800 + +/* Mask bits for fields in Word 0x0f of the EEPROM */ +#define EEPROM_WORD0F_PAUSE_MASK 0x3000 +#define EEPROM_WORD0F_PAUSE 0x1000 +#define EEPROM_WORD0F_ASM_DIR 0x2000 +#define EEPROM_WORD0F_ANE 0x0800 +#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 +#define EEPROM_WORD0F_LPLU 0x0001 + +/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */ +#define EEPROM_WORD1020_GIGA_DISABLE 0x0010 +#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008 + +/* Mask bits for fields in Word 0x1a of the EEPROM */ +#define EEPROM_WORD1A_ASPM_MASK 0x000C + 
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* EEPROM Map defines (WORD OFFSETS)*/ +#define EEPROM_NODE_ADDRESS_BYTE_0 0 +#define EEPROM_PBA_BYTE_1 8 + +#define EEPROM_RESERVED_WORD 0xFFFF + +/* EEPROM Map Sizes (Byte Counts) */ +#define PBA_SIZE 4 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +/* Collision distance is a 0-based value that applies to + * half-duplex-capable hardware only. */ +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLLISION_DISTANCE_82542 64 +#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_COLD_SHIFT 12 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define E1000_TXDMAC_DPP 0x00000001 + +/* Adaptive IFS defines */ +#define TX_THRESHOLD_START 8 +#define TX_THRESHOLD_INCREMENT 10 +#define TX_THRESHOLD_DECREMENT 1 +#define TX_THRESHOLD_STOP 190 +#define TX_THRESHOLD_DISABLE 0 +#define TX_THRESHOLD_TIMER_MS 10000 +#define MIN_NUM_XMITS 1000 +#define IFS_MAX 80 +#define IFS_STEP 10 +#define IFS_MIN 40 +#define IFS_RATIO 4 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 +#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 +#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 +#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000 + +#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF +#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ +#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ +#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ + +#define E1000_PBS_16K E1000_PBA_16K + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* The historical defaults for the flow control values are given below. 
*/ +#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ +#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ +#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ + +/* PCIX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 + +/* Number of bits required to shift right the "pause" bits from the + * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register. + */ +#define PAUSE_SHIFT 5 + +/* Number of bits required to shift left the "SWDPIO" bits from the + * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register. + */ +#define SWDPIO_SHIFT 17 + +/* Number of bits required to shift left the "SWDPIO_EXT" bits from the + * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register. + */ +#define SWDPIO__EXT_SHIFT 4 + +/* Number of bits required to shift left the "ILOS" bit from the EEPROM + * (bit 4) to the "ILOS" (bit 7) field in the CTRL register. + */ +#define ILOS_SHIFT 3 + +#define RECEIVE_BUFFER_ALIGN_SIZE (256) + +/* Number of milliseconds we wait for auto-negotiation to complete */ +#define LINK_UP_TIMEOUT 500 + +/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ +#define AUTO_READ_DONE_TIMEOUT 10 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 + +#define E1000_TX_BUFFER_SIZE ((u32)1514) + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +/* TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the RX descriptor with EOP set + * error = the 8 bit error field of the RX descriptor with EOP set + * length = the sum of all the length fields of the RX descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * max_frame_length = the maximum frame length we want to accept. + * min_frame_length = the minimum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \ + ((adapter)->tbi_compatibility_on && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= ((adapter)->max_frame_size + 1))) : \ + (((length) > (adapter)->min_frame_size) && \ + ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1))))) + +/* Structures, enums, and macros for the PHY */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CTRL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +#define IGP01E1000_IEEE_REGS_PAGE 0x0000 +#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 +#define IGP01E1000_IEEE_FORCE_GIGA 0x0140 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ + +/* IGP01E1000 AGC Registers - stores the cable length values*/ +#define IGP01E1000_PHY_AGC_A 0x1172 +#define IGP01E1000_PHY_AGC_B 0x1272 +#define IGP01E1000_PHY_AGC_C 0x1472 +#define IGP01E1000_PHY_AGC_D 0x1872 + +/* IGP02E1000 AGC Registers for cable length values */ +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +/* IGP01E1000 DSP Reset Register */ +#define IGP01E1000_PHY_DSP_RESET 0x1F33 +#define IGP01E1000_PHY_DSP_SET 0x1F71 +#define IGP01E1000_PHY_DSP_FFE 0x1F35 + +#define IGP01E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_CHANNEL_NUM 4 + +#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 
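/*
 * Illustrative sketch: register addresses such as IGP01E1000_PHY_AGC_A
 * (0x1172) lie above MAX_PHY_MULTI_PAGE_REG, so on IGP PHYs the full
 * address is first written to IGP01E1000_PHY_PAGE_SELECT and only the low
 * five bits (MAX_PHY_REG_ADDRESS) go out on the MDIO bus, roughly the
 * pattern e1000_read_phy_reg() follows for IGP PHYs.  The helper name is
 * hypothetical; e1000_read_phy_reg_ex() and e1000_write_phy_reg_ex() are
 * the low-level accessors declared in e1000_hw-6.1-ethercat.c below.
 */
static s32 example_igp_paged_phy_read(struct e1000_hw *hw, u32 reg_addr,
                                      u16 *data)
{
        s32 ret_val;

        if (reg_addr > MAX_PHY_MULTI_PAGE_REG) {
                /* Select the page by writing the full address first. */
                ret_val = e1000_write_phy_reg_ex(hw,
                                                 IGP01E1000_PHY_PAGE_SELECT,
                                                 (u16)reg_addr);
                if (ret_val)
                        return ret_val;
        }

        /* Then access the 5-bit register offset within that page. */
        return e1000_read_phy_reg_ex(hw, reg_addr & MAX_PHY_REG_ADDRESS, data);
}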
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271 +#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 +#define IGP01E1000_PHY_AGC_PARAM_D 0x1871 + +#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000 +#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000 + +#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890 +#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000 +#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004 +#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069 + +#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A +/* IGP01E1000 PCS Initialization register - stores the polarity status when + * speed = 1000 Mbps. */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5 + +#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */ + +/* Next Page TX Register */ +#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* Link Partner Next Page Register */ +#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */ +#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 
/* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +/* Extended Status Register */ +#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */ +#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */ + +#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */ + /* (0=enable, 1=disable) */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. 
+ */ +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080 + /* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 + /* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5 +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; + * 3=110-140M;4=>140M */ +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_REV_POLARITY_SHIFT 1 +#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5 +#define M88E1000_PSSR_MDIX_SHIFT 6 +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* IGP01E1000 Specific Port Config Register - R/W */ +#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 +#define IGP01E1000_PSCFR_PRE_EN 0x0020 +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 +#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100 +#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400 +#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000 + +/* IGP01E1000 Specific Port Status Register - R/O */ +#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */ +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C +#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200 +#define IGP01E1000_PSSR_LINK_UP 0x0400 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */ +#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000 +#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */ +#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */ + +/* IGP01E1000 Specific Port Control Register - R/W */ +#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010 +#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200 +#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400 +#define IGP01E1000_PSCR_FLIP_CHIP 0x0800 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */ + +/* IGP01E1000 Specific Port Link Health Register */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000 +#define IGP01E1000_PLHR_MASTER_FAULT 0x2000 +#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000 +#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */ +#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_0 0x0100 +#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040 +#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010 +#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008 +#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004 +#define 
IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002 +#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001 + +/* IGP01E1000 Channel Quality Register */ +#define IGP01E1000_MSE_CHANNEL_D 0x000F +#define IGP01E1000_MSE_CHANNEL_C 0x00F0 +#define IGP01E1000_MSE_CHANNEL_B 0x0F00 +#define IGP01E1000_MSE_CHANNEL_A 0xF000 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ + +/* IGP01E1000 DSP reset macros */ +#define DSP_RESET_ENABLE 0x0 +#define DSP_RESET_DISABLE 0x2 +#define E1000_MAX_DSP_RESETS 10 + +/* IGP01E1000 & IGP02E1000 AGC Registers */ + +#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ + +/* IGP02E1000 AGC Register Length 9-bit mask */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F + +/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ +#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 +#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113 + +/* The precision error of the cable length is +/- 10 meters */ +#define IGP01E1000_AGC_RANGE 10 +#define IGP02E1000_AGC_RANGE 15 + +/* IGP01E1000 PCS Initialization register */ +/* bits 3:6 in the PCS registers stores the channels polarity */ +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +/* IGP01E1000 GMII FIFO Register */ +#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed + * on Link-Up */ +#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */ + +/* IGP01E1000 Analog Register */ +#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 +#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 +#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC +#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE + +#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 +#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 +#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 +#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 +#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 + +#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 +#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 +#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 +#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 + +/* Bit definitions for valid PHY IDs. */ +/* I = Integrated + * E = External + */ +#define M88_VENDOR 0x0141 +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID +#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1118_E_PHY_ID 0x01410E40 +#define L1LXT971A_PHY_ID 0x001378E0 + +#define RTL8211B_PHY_ID 0x001CC910 +#define RTL8201N_PHY_ID 0x8200 +#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */ +#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */ + +/* Bits... 
+ * 15-5: page + * 4-0: register offset + */ +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) \ + (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) + +#define IGP3_PHY_PORT_CTRL \ + PHY_REG(769, 17) /* Port General Configuration */ +#define IGP3_PHY_RATE_ADAPT_CTRL \ + PHY_REG(769, 25) /* Rate Adapter Control Register */ + +#define IGP3_KMRN_FIFO_CTRL_STATS \ + PHY_REG(770, 16) /* KMRN FIFO's control/status register */ +#define IGP3_KMRN_POWER_MNG_CTRL \ + PHY_REG(770, 17) /* KMRN Power Management Control Register */ +#define IGP3_KMRN_INBAND_CTRL \ + PHY_REG(770, 18) /* KMRN Inband Control Register */ +#define IGP3_KMRN_DIAG \ + PHY_REG(770, 19) /* KMRN Diagnostic register */ +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ +#define IGP3_KMRN_ACK_TIMEOUT \ + PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ + +#define IGP3_VR_CTRL \ + PHY_REG(776, 18) /* Voltage regulator control register */ +#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ +#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */ + +#define IGP3_CAPABILITY \ + PHY_REG(776, 19) /* IGP3 Capability Register */ + +/* Capabilities for SKU Control */ +#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ +#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ +#define IGP3_CAP_ASF 0x0004 /* Support ASF */ +#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ +#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ +#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ +#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ +#define IGP3_CAP_RSS 0x0080 /* Support RSS */ +#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ +#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ + +#define IGP3_PPC_JORDAN_EN 0x0001 +#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 + +#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001 +#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E +#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 +#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 + +#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. 
Ctrl register */ +#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ + +#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) +#define IGP3_KMRN_EC_DIS_INBAND 0x0080 + +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ +#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ +#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */ +#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ +#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ +#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ +#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ +#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ +#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ +#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ + +#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */ +#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ +#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ +#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ +#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ +#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ +#define IFE_PESC_POLARITY_REVERSED_SHIFT 8 + +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */ +#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ +#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ +#define IFE_PSC_FORCE_POLARITY_SHIFT 5 +#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 + +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ +#define IFE_PMC_MDIX_MODE_SHIFT 6 +#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ + +#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ +#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ +#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ +#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ +#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ +#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ +#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ +#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ +#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ +#define IFE_PSCL_PROBE_LEDS_OFF 
0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ +#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define ICH_CYCLE_READ 0x0 +#define ICH_CYCLE_RESERVED 0x1 +#define ICH_CYCLE_WRITE 0x2 +#define ICH_CYCLE_ERASE 0x3 + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_FRACC 0x0050 +#define ICH_FLASH_FREG0 0x0054 +#define ICH_FLASH_FREG1 0x0058 +#define ICH_FLASH_FREG2 0x005C +#define ICH_FLASH_FREG3 0x0060 +#define ICH_FLASH_FPR0 0x0074 +#define ICH_FLASH_FPR1 0x0078 +#define ICH_FLASH_SSFSTS 0x0090 +#define ICH_FLASH_SSFCTL 0x0092 +#define ICH_FLASH_PREOP 0x0094 +#define ICH_FLASH_OPTYPE 0x0096 +#define ICH_FLASH_OPMENU 0x0098 + +#define ICH_FLASH_REG_MAPSIZE 0x00A0 +#define ICH_FLASH_SECTOR_SIZE 4096 +#define ICH_GFPREG_BASE_MASK 0x1FFF +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF + +/* Miscellaneous PHY bit definitions. */ +#define PHY_PREAMBLE 0xFFFFFFFF +#define PHY_SOF 0x01 +#define PHY_OP_READ 0x02 +#define PHY_OP_WRITE 0x01 +#define PHY_TURNAROUND 0x02 +#define PHY_PREAMBLE_SIZE 32 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 +#define E1000_PHY_ADDRESS 0x01 +#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */ +#define REG4_SPEED_MASK 0x01E0 +#define REG9_SPEED_MASK 0x0300 +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 +#define ADVERTISE_1000_FULL 0x0020 +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ +#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ +#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ + +#endif /* _E1000_HW_H_ */ diff --git a/devices/e1000/e1000_hw-6.1-ethercat.c b/devices/e1000/e1000_hw-6.1-ethercat.c new file mode 100644 index 00000000..f4ab18c9 --- /dev/null +++ b/devices/e1000/e1000_hw-6.1-ethercat.c @@ -0,0 +1,5636 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ + +/* e1000_hw.c + * Shared functions for accessing and configuring the MAC + */ + +#include "e1000-6.1-ethercat.h" + +static s32 e1000_check_downshift(struct e1000_hw *hw); +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity); +static void e1000_clear_hw_cntrs(struct e1000_hw *hw); +static void e1000_clear_vfta(struct e1000_hw *hw); +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, + bool link_up); +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); +static s32 e1000_detect_gig_phy(struct e1000_hw *hw); +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length); +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static s32 e1000_id_led_init(struct e1000_hw *hw); +static void e1000_init_rx_addrs(struct e1000_hw *hw); +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value); +static s32 e1000_set_phy_type(struct e1000_hw *hw); +static void e1000_phy_init_script(struct e1000_hw *hw); +static s32 e1000_setup_copper_link(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw); +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw); +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count); +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count); +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data); +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data); +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); +static s32 e1000_acquire_eeprom(struct e1000_hw *hw); +static void e1000_release_eeprom(struct e1000_hw *hw); +static void e1000_standby_eeprom(struct e1000_hw *hw); +static s32 e1000_set_vco_speed(struct e1000_hw *hw); +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw); +static s32 e1000_set_phy_mode(struct e1000_hw *hw); +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); + +/* IGP cable length table */ +static const +u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = { + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, + 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, + 40, 50, 
50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, + 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, + 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, + 110, 110, + 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, + 120, 120 +}; + +static DEFINE_MUTEX(e1000_eeprom_lock); +static DEFINE_SPINLOCK(e1000_phy_lock); + +/** + * e1000_set_phy_type - Set the phy type member in the hw struct. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_set_phy_type(struct e1000_hw *hw) +{ + if (hw->mac_type == e1000_undefined) + return -E1000_ERR_PHY_TYPE; + + switch (hw->phy_id) { + case M88E1000_E_PHY_ID: + case M88E1000_I_PHY_ID: + case M88E1011_I_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1118_E_PHY_ID: + hw->phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: + if (hw->mac_type == e1000_82541 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_type = e1000_phy_igp; + break; + case RTL8211B_PHY_ID: + hw->phy_type = e1000_phy_8211; + break; + case RTL8201N_PHY_ID: + hw->phy_type = e1000_phy_8201; + break; + default: + /* Should never have loaded on this device */ + hw->phy_type = e1000_phy_undefined; + return -E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_phy_init_script(struct e1000_hw *hw) +{ + u16 phy_saved_data; + + if (hw->phy_init_script) { + msleep(20); + + /* Save off the current value of register 0x2F5B to be restored + * at the end of this routine. 
+ */ + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disabled the PHY transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + msleep(20); + + e1000_write_phy_reg(hw, 0x0000, 0x0140); + msleep(5); + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + e1000_write_phy_reg(hw, 0x1F95, 0x0001); + e1000_write_phy_reg(hw, 0x1F71, 0xBD21); + e1000_write_phy_reg(hw, 0x1F79, 0x0018); + e1000_write_phy_reg(hw, 0x1F30, 0x1600); + e1000_write_phy_reg(hw, 0x1F31, 0x0014); + e1000_write_phy_reg(hw, 0x1F32, 0x161C); + e1000_write_phy_reg(hw, 0x1F94, 0x0003); + e1000_write_phy_reg(hw, 0x1F96, 0x003F); + e1000_write_phy_reg(hw, 0x2010, 0x0008); + break; + + case e1000_82541_rev_2: + case e1000_82547_rev_2: + e1000_write_phy_reg(hw, 0x1F73, 0x0099); + break; + default: + break; + } + + e1000_write_phy_reg(hw, 0x0000, 0x3300); + msleep(20); + + /* Now enable the transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (hw->mac_type == e1000_82547) { + u16 fused, fine, coarse; + + /* Move to analog registers page */ + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_SPARE_FUSE_STATUS, + &fused); + + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_STATUS, + &fused); + + fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; + coarse = + fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; + + if (coarse > + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + coarse -= + IGP01E1000_ANALOG_FUSE_COARSE_10; + fine -= IGP01E1000_ANALOG_FUSE_FINE_1; + } else if (coarse == + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + fine -= IGP01E1000_ANALOG_FUSE_FINE_10; + + fused = + (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | + (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | + (coarse & + IGP01E1000_ANALOG_FUSE_COARSE_MASK); + + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_CONTROL, + fused); + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_BYPASS, + IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); + } + } + } +} + +/** + * e1000_set_mac_type - Set the mac type member in the hw struct. 
+ * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + switch (hw->device_id) { + case E1000_DEV_ID_82542: + switch (hw->revision_id) { + case E1000_82542_2_0_REV_ID: + hw->mac_type = e1000_82542_rev2_0; + break; + case E1000_82542_2_1_REV_ID: + hw->mac_type = e1000_82542_rev2_1; + break; + default: + /* Invalid 82542 revision ID */ + return -E1000_ERR_MAC_TYPE; + } + break; + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + hw->mac_type = e1000_82543; + break; + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + hw->mac_type = e1000_82544; + break; + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + hw->mac_type = e1000_82540; + break; + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + hw->mac_type = e1000_82545; + break; + case E1000_DEV_ID_82545GM_COPPER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82545GM_SERDES: + hw->mac_type = e1000_82545_rev_3; + break; + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + hw->mac_type = e1000_82546; + break; + case E1000_DEV_ID_82546GB_COPPER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + hw->mac_type = e1000_82546_rev_3; + break; + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER_LOM: + hw->mac_type = e1000_82541; + break; + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + hw->mac_type = e1000_82541_rev_2; + break; + case E1000_DEV_ID_82547EI: + case E1000_DEV_ID_82547EI_MOBILE: + hw->mac_type = e1000_82547; + break; + case E1000_DEV_ID_82547GI: + hw->mac_type = e1000_82547_rev_2; + break; + case E1000_DEV_ID_INTEL_CE4100_GBE: + hw->mac_type = e1000_ce4100; + break; + default: + /* Should never have loaded on this device */ + return -E1000_ERR_MAC_TYPE; + } + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->asf_firmware_present = true; + break; + default: + break; + } + + /* The 82543 chip does not count tx_carrier_errors properly in + * FD mode + */ + if (hw->mac_type == e1000_82543) + hw->bad_tx_carr_stats_fd = true; + + if (hw->mac_type > e1000_82544) + hw->has_smbus = true; + + return E1000_SUCCESS; +} + +/** + * e1000_set_media_type - Set media type and TBI compatibility. 
+ * @hw: Struct containing variables accessed by shared code + */ +void e1000_set_media_type(struct e1000_hw *hw) +{ + u32 status; + + if (hw->mac_type != e1000_82543) { + /* tbi_compatibility is only valid on 82543 */ + hw->tbi_compatibility_en = false; + } + + switch (hw->device_id) { + case E1000_DEV_ID_82545GM_SERDES: + case E1000_DEV_ID_82546GB_SERDES: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->media_type = e1000_media_type_fiber; + break; + case e1000_ce4100: + hw->media_type = e1000_media_type_copper; + break; + default: + status = er32(STATUS); + if (status & E1000_STATUS_TBIMODE) { + hw->media_type = e1000_media_type_fiber; + /* tbi_compatibility not valid on fiber */ + hw->tbi_compatibility_en = false; + } else { + hw->media_type = e1000_media_type_copper; + } + break; + } + } +} + +/** + * e1000_reset_hw - reset the hardware completely + * @hw: Struct containing variables accessed by shared code + * + * Reset the transmit and receive units; mask and clear all interrupts. + */ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 ctrl_ext; + u32 manc; + u32 led_ctrl; + s32 ret_val; + + /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC with + * the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(); + + /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ + hw->tbi_compatibility_on = false; + + /* Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msleep(10); + + ctrl = er32(CTRL); + + /* Must reset the PHY before resetting the MAC */ + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Issue a global reset to the MAC. This will reset the chip's + * transmit, receive, DMA, and link units. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + e_dbg("Issuing a global reset to MAC\n"); + + switch (hw->mac_type) { + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82546: + case e1000_82541: + case e1000_82541_rev_2: + /* These controllers can't ack the 64-bit write when issuing the + * reset, so use IO-mapping as a workaround to issue the reset + */ + E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); + break; + case e1000_82545_rev_3: + case e1000_82546_rev_3: + /* Reset is performed on a shadow of the control register */ + ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); + break; + case e1000_ce4100: + default: + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + break; + } + + /* After MAC reset, force reload of EEPROM to restore power-on settings + * to device. Later controllers reload the EEPROM automatically, so + * just wait for reload to complete. 
+ */ + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* Wait for reset to complete */ + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + /* Wait for EEPROM reload */ + msleep(2); + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + /* Wait for EEPROM reload */ + msleep(20); + break; + default: + /* Auto read done will delay 5ms or poll based on mac type */ + ret_val = e1000_get_auto_rd_done(hw); + if (ret_val) + return ret_val; + break; + } + + /* Disable HW ARPs on ASF enabled adapters */ + if (hw->mac_type >= e1000_82540) { + manc = er32(MANC); + manc &= ~(E1000_MANC_ARP_EN); + ew32(MANC, manc); + } + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + e1000_phy_init_script(hw); + + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Clear any pending interrupt events. */ + er32(ICR); + + /* If MWI was previously enabled, reenable it. */ + if (hw->mac_type == e1000_82542_rev2_0) { + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw - Performs basic configuration of the adapter. + * @hw: Struct containing variables accessed by shared code + * + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes the receive address registers, + * multicast table, and VLAN filter table. Calls routines to setup link + * configuration and flow control settings. Clears all on-chip counters. Leaves + * the transmit and receive units disabled and uninitialized. + */ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + s32 ret_val; + u32 mta_size; + u32 ctrl_ext; + + /* Initialize Identification LED */ + ret_val = e1000_id_led_init(hw); + if (ret_val) { + e_dbg("Error Initializing Identification LED\n"); + return ret_val; + } + + /* Set the media type and TBI compatibility */ + e1000_set_media_type(hw); + + /* Disabling VLAN filtering. */ + e_dbg("Initializing the IEEE VLAN\n"); + if (hw->mac_type < e1000_82545_rev_3) + ew32(VET, 0); + e1000_clear_vfta(hw); + + /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + ew32(RCTL, E1000_RCTL_RST); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Setup the receive address. This involves initializing all of the + * Receive Address Registers (RARs 0 - 15). 
+ */ + e1000_init_rx_addrs(hw); + + /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ + if (hw->mac_type == e1000_82542_rev2_0) { + ew32(RCTL, 0); + E1000_WRITE_FLUSH(); + msleep(1); + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + mta_size = E1000_MC_TBL_SIZE; + for (i = 0; i < mta_size; i++) { + E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); + /* use write flush to prevent Memory Write Block (MWB) from + * occurring when accessing our register space + */ + E1000_WRITE_FLUSH(); + } + + /* Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. Valid only on + * 82542 and 82543 silicon. + */ + if (hw->dma_fairness && hw->mac_type <= e1000_82543) { + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PRIOR); + } + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + /* Workaround for PCI-X problem when BIOS sets MMRBC + * incorrectly. + */ + if (hw->bus_type == e1000_bus_type_pcix && + e1000_pcix_get_mmrbc(hw) > 2048) + e1000_pcix_set_mmrbc(hw, 2048); + break; + } + + /* Call a subroutine to configure the link and setup flow control. */ + ret_val = e1000_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + if (hw->mac_type > e1000_82544) { + ctrl = er32(TXDCTL); + ctrl = + (ctrl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + ew32(TXDCTL, ctrl); + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs(hw); + + if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || + hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { + ctrl_ext = er32(CTRL_EXT); + /* Relaxed ordering must be disabled to avoid a parity + * error crash in a PCI slot. + */ + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + } + + return ret_val; +} + +/** + * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting. + * @hw: Struct containing variables accessed by shared code. + */ +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) +{ + u16 eeprom_data; + s32 ret_val; + + if (hw->media_type != e1000_media_type_internal_serdes) + return E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, + &eeprom_data); + if (ret_val) + return ret_val; + + if (eeprom_data != EEPROM_RESERVED_WORD) { + /* Adjust SERDES output amplitude only. */ + eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link - Configures flow control and link settings. + * @hw: Struct containing variables accessed by shared code + * + * Determines which flow control settings to use. Calls the appropriate media- + * specific link configuration function. Configures the flow control settings. + * Assuming the adapter has a valid link partner, a valid link should be + * established. Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. 
+ */ +s32 e1000_setup_link(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 ret_val; + u16 eeprom_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->fc == E1000_FC_DEFAULT) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0) + hw->fc = E1000_FC_NONE; + else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == + EEPROM_WORD0F_ASM_DIR) + hw->fc = E1000_FC_TX_PAUSE; + else + hw->fc = E1000_FC_FULL; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + if (hw->mac_type == e1000_82542_rev2_0) + hw->fc &= (~E1000_FC_TX_PAUSE); + + if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1)) + hw->fc &= (~E1000_FC_RX_PAUSE); + + hw->original_fc = hw->fc; + + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc); + + /* Take the 4 bits from EEPROM word 0x0F that determine the initial + * polarity value for the SW controlled pins, and setup the + * Extended Device Control reg with that info. + * This is needed because one of the SW controlled pins is used for + * signal detection. So this should be done before e1000_setup_pcs_link() + * or e1000_phy_setup() is called. + */ + if (hw->mac_type == e1000_82543) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << + SWDPIO__EXT_SHIFT); + ew32(CTRL_EXT, ctrl_ext); + } + + /* Call the necessary subroutine to configure the link. */ + ret_val = (hw->media_type == e1000_media_type_copper) ? + e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw); + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + e_dbg("Initializing the Flow Control address, type and timer regs\n"); + + ew32(FCT, FLOW_CONTROL_TYPE); + ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); + ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); + + ew32(FCTTV, hw->fc_pause_time); + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames in not enabled, then these + * registers will be set to 0. + */ + if (!(hw->fc & E1000_FC_TX_PAUSE)) { + ew32(FCRTL, 0); + ew32(FCRTH, 0); + } else { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. 
+ */ + if (hw->fc_send_xon) { + ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); + ew32(FCRTH, hw->fc_high_water); + } else { + ew32(FCRTL, hw->fc_low_water); + ew32(FCRTH, hw->fc_high_water); + } + } + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link - prepare fiber or serdes link + * @hw: Struct containing variables accessed by shared code + * + * Manipulates Physical Coding Sublayer functions in order to configure + * link. Assumes the hardware has been previously reset and the transmitter + * and receiver are not enabled. + */ +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) +{ + u32 ctrl; + u32 status; + u32 txcw = 0; + u32 i; + u32 signal = 0; + s32 ret_val; + + /* On adapters with a MAC newer than 82544, SWDP 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + * If we're on serdes media, adjust the output amplitude to value + * set in the EEPROM. + */ + ctrl = er32(CTRL); + if (hw->media_type == e1000_media_type_fiber) + signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; + + ret_val = e1000_adjust_serdes_amplitude(hw); + if (ret_val) + return ret_val; + + /* Take the link out of reset */ + ctrl &= ~(E1000_CTRL_LRST); + + /* Adjust VCO speed to improve BER performance */ + ret_val = e1000_set_vco_speed(hw); + if (ret_val) + return ret_val; + + e1000_config_collision_dist(hw); + + /* Check for a software override of the flow control settings, and setup + * the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Tranmsit Config Word Register (TXCW) and re-start + * auto-negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, but + * not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we do + * not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + */ + switch (hw->fc) { + case E1000_FC_NONE: + /* Flow ctrl is completely disabled by a software over-ride */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case E1000_FC_RX_PAUSE: + /* Rx Flow control is enabled and Tx Flow control is disabled by + * a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case E1000_FC_TX_PAUSE: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case E1000_FC_FULL: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. 
If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + e_dbg("Auto-negotiation enabled\n"); + + ew32(TXCW, txcw); + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + hw->txcw = txcw; + msleep(1); + + /* If we have a signal (the cable is plugged in) then poll for a + * "Link-Up" indication in the Device Status Register. Time-out if a + * link isn't seen in 500 milliseconds seconds (Auto-negotiation should + * complete in less than 500 milliseconds even if the other end is doing + * it in SW). For internal serdes, we just assume a signal is present, + * then poll. + */ + if (hw->media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { + e_dbg("Looking for Link\n"); + for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { + msleep(10); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == (LINK_UP_TIMEOUT / 10)) { + e_dbg("Never got a valid link from auto-neg!!!\n"); + hw->autoneg_failed = 1; + /* AutoNeg failed to achieve a link, so we'll call + * e1000_check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = e1000_check_for_link(hw); + if (ret_val) { + e_dbg("Error while checking for link\n"); + return ret_val; + } + hw->autoneg_failed = 0; + } else { + hw->autoneg_failed = 0; + e_dbg("Valid Link Found\n"); + } + } else { + e_dbg("No Signal Detected\n"); + } + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series. + * @hw: Struct containing variables accessed by shared code + * + * Commits changes to PHY configuration by calling e1000_phy_reset(). + */ +static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw) +{ + s32 ret_val; + + /* SW reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +static s32 gbe_dhg_phy_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u32 ctrl_aux; + + switch (hw->phy_type) { + case e1000_phy_8211: + ret_val = e1000_copper_link_rtl_setup(hw); + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + case e1000_phy_8201: + /* Set RMII mode */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= E1000_CTL_AUX_RMII; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + + /* Disable the J/K bits required for receive */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= 0x4; + ctrl_aux &= ~0x2; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + ret_val = e1000_copper_link_rtl_setup(hw); + + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + default: + e_dbg("Error Resetting the PHY\n"); + return E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_preconfig - early configuration for copper + * @hw: Struct containing variables accessed by shared code + * + * Make sure we have a valid PHY and change PHY mode before link setup. + */ +static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + ctrl = er32(CTRL); + /* With 82543, we need to force speed and duplex on the MAC equal to + * what the PHY speed and duplex configuration is. In addition, we need + * to perform a hardware reset on the PHY to take it out of reset. 
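+	 * As a sketch of the two branches below: on parts newer than the
+	 * 82543 only E1000_CTRL_SLU is set and the FRCSPD/FRCDPX force bits
+	 * are cleared, so the MAC simply tracks whatever the PHY negotiates,
+	 * while the 82543 path sets all three bits and additionally
+	 * hard-resets the PHY before any of its registers are touched.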
+ */ + if (hw->mac_type > e1000_82543) { + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + } else { + ctrl |= + (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); + ew32(CTRL, ctrl); + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + } + + /* Make sure we have a valid PHY */ + ret_val = e1000_detect_gig_phy(hw); + if (ret_val) { + e_dbg("Error, did not detect valid phy.\n"); + return ret_val; + } + e_dbg("Phy ID = %x\n", hw->phy_id); + + /* Set PHY to class A mode (if necessary) */ + ret_val = e1000_set_phy_mode(hw); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82545_rev_3) || + (hw->mac_type == e1000_82546_rev_3)) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + phy_data |= 0x00000008; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + } + + if (hw->mac_type <= e1000_82543 || + hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_reset_disable = false; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) +{ + u32 led_ctrl; + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Wait 15ms for MAC to configure PHY from eeprom settings */ + msleep(15); + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + + /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ + if (hw->phy_type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = e1000_set_d3_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D3\n"); + return ret_val; + } + } + + /* Configure mdi-mdix settings */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + hw->dsp_config_state = e1000_dsp_config_disabled; + /* Force MDI for earlier revs of the IGP PHY */ + phy_data &= + ~(IGP01E1000_PSCR_AUTO_MDIX | + IGP01E1000_PSCR_FORCE_MDI_MDIX); + hw->mdix = 1; + + } else { + hw->dsp_config_state = e1000_dsp_config_enabled; + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (hw->mdix) { + case 1: + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + phy_data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + } + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->autoneg) { + e1000_ms_type phy_ms_setting = hw->master_slave; + + if (hw->ffe_config_state == e1000_ffe_config_active) + hw->ffe_config_state = e1000_ffe_config_enabled; + + if (hw->dsp_config_state == e1000_dsp_config_activated) + hw->dsp_config_state = e1000_dsp_config_enabled; + + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
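+		 * For reference, the 1000BASE-T control bits manipulated below
+		 * work as follows: CR_1000T_MS_ENABLE selects manual
+		 * master/slave resolution and CR_1000T_MS_VALUE then forces
+		 * master (set) or slave (clear), while clearing
+		 * CR_1000T_MS_ENABLE returns the PHY to automatic resolution,
+		 * which is what the e1000_ms_auto case maps to.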
+ */ + if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + /* Set auto Master/Slave resolution process */ + ret_val = + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~CR_1000T_MS_ENABLE; + ret_val = + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (phy_ms_setting) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + break; + default: + break; + } + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (hw->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (hw->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (hw->phy_revision < M88E1011_I_REV_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. 
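+		 * (The PHY falls back to a 2.5MHz TX_CLK whenever it is reset,
+		 * which is also why e1000_phy_force_speed_duplex() repeats this
+		 * fix-up after its own software reset of the PHY.)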
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((hw->phy_revision == E1000_REVISION_2) && + (hw->phy_id == M88E1111_I_PHY_ID)) { + /* Vidalia Phy, set the downshift counter to 5x */ + phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + } + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_autoneg - setup auto-neg + * @hw: Struct containing variables accessed by shared code + * + * Setup auto-negotiation and flow control advertisements, + * and then perform auto-negotiation. + */ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Perform some bounds checking on the hw->autoneg_advertised + * parameter. If this variable is zero, then set it to the default. + */ + hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (hw->autoneg_advertised == 0) + hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* IFE/RTL8201N PHY only supports 10/100 */ + if (hw->phy_type == e1000_phy_8201) + hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; + + e_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + e_dbg("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + e_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (hw->wait_autoneg_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + e_dbg + ("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->get_link_status = true; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_postconfig - post link setup + * @hw: Struct containing variables accessed by shared code + * + * Config the MAC and the PHY after link is up. + * 1) Set up the MAC to the current PHY speed/duplex + * if we are on 82543. If we + * are on newer silicon, we only need to configure + * collision distance in the Transmit Control Register. + * 2) Set up flow control on the MAC to that established with + * the link partner. + * 3) Config DSP to improve Gigabit link quality for some PHY revisions. 
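+ *
+ * This is reached from e1000_setup_copper_link() once polling PHY_STATUS
+ * has reported MII_SR_LINK_STATUS, so a valid link can be assumed when the
+ * negotiated speed and duplex are read back here.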
+ */ +static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) +{ + s32 ret_val; + + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { + e1000_config_collision_dist(hw); + } else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error Configuring Flow Control\n"); + return ret_val; + } + + /* Config DSP to improve Giga link quality */ + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_config_dsp_after_link_change(hw, true); + if (ret_val) { + e_dbg("Error Configuring DSP after link up\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_copper_link - phy/speed/duplex setting + * @hw: Struct containing variables accessed by shared code + * + * Detects which PHY is present and sets up the speed and duplex + */ +static s32 e1000_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + /* Check if it is a valid PHY and set PHY mode if necessary. */ + ret_val = e1000_copper_link_preconfig(hw); + if (ret_val) + return ret_val; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_copper_link_igp_setup(hw); + if (ret_val) + return ret_val; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_copper_link_mgp_setup(hw); + if (ret_val) + return ret_val; + } else { + ret_val = gbe_dhg_phy_setup(hw); + if (ret_val) { + e_dbg("gbe_dhg_phy_setup failed!\n"); + return ret_val; + } + } + + if (hw->autoneg) { + /* Setup autoneg and flow control advertisement + * and perform autonegotiation + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H,or 100F + * depending on value from forced_speed_duplex. + */ + e_dbg("Forcing speed and duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + for (i = 0; i < 10; i++) { + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + /* Config the MAC and PHY after link is up */ + ret_val = e1000_copper_link_postconfig(hw); + if (ret_val) + return ret_val; + + e_dbg("Valid link established!!!\n"); + return E1000_SUCCESS; + } + udelay(10); + } + + e_dbg("Unable to establish link!!!\n"); + return E1000_SUCCESS; +} + +/** + * e1000_phy_setup_autoneg - phy settings + * @hw: Struct containing variables accessed by shared code + * + * Configures PHY autoneg and flow control advertisement settings + */ +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + else if (hw->phy_type == e1000_phy_8201) + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. 
First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { + e_dbg + ("Advertise 1000mb Half duplex requested, request denied!\n"); + } + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start + * auto-negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc) { + case E1000_FC_NONE: /* 0 */ + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_RX_PAUSE: /* 1 */ + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + */ + /* Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_TX_PAUSE: /* 2 */ + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case E1000_FC_FULL: /* 3 */ + /* Flow control (both RX and TX) is enabled by a software + * over-ride. 
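+		 * Taken together, the fc cases in this switch reduce to the
+		 * PAUSE and ASM_DIR advertisement bits: E1000_FC_NONE clears
+		 * both, E1000_FC_RX_PAUSE and E1000_FC_FULL set both
+		 * (NWAY_AR_PAUSE | NWAY_AR_ASM_DIR), and E1000_FC_TX_PAUSE sets
+		 * NWAY_AR_ASM_DIR alone, which is exactly what the resolution
+		 * table in e1000_config_fc_after_link_up() expects from the
+		 * local side.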
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (hw->phy_type == e1000_phy_8201) { + mii_1000t_ctrl_reg = 0; + } else { + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex - force link settings + * @hw: Struct containing variables accessed by shared code + * + * Force PHY speed and duplex settings to hw->forced_speed_duplex + */ +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 mii_ctrl_reg; + u16 mii_status_reg; + u16 phy_data; + u16 i; + + /* Turn off Flow control if we are forcing speed and duplex. */ + hw->fc = E1000_FC_NONE; + + e_dbg("hw->fc = %d\n", hw->fc); + + /* Read the Device Control Register. */ + ctrl = er32(CTRL); + + /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(DEVICE_SPEED_MASK); + + /* Clear the Auto Speed Detect Enable bit. */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Read the MII Control Register. */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg); + if (ret_val) + return ret_val; + + /* We need to disable autoneg in order to force link and duplex. */ + + mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN; + + /* Are we forcing Full or Half Duplex? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_10_full) { + /* We want to force full duplex so we SET the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl |= E1000_CTRL_FD; + mii_ctrl_reg |= MII_CR_FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + /* We want to force half duplex so we CLEAR the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl &= ~E1000_CTRL_FD; + mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; + e_dbg("Half Duplex\n"); + } + + /* Are we forcing 100Mbps??? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_100_half) { + /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */ + ctrl |= E1000_CTRL_SPD_100; + mii_ctrl_reg |= MII_CR_SPEED_100; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + e_dbg("Forcing 100mb "); + } else { + /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + mii_ctrl_reg |= MII_CR_SPEED_10; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + e_dbg("Forcing 10mb "); + } + + e1000_config_collision_dist(hw); + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + + if (hw->phy_type == e1000_phy_m88) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires + * MDI forced whenever speed are duplex are forced. 
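+		 * As a worked example of the forcing path above: a request for
+		 * e1000_100_full leaves PHY_CTRL with MII_CR_AUTO_NEG_EN
+		 * cleared and MII_CR_SPEED_100 | MII_CR_FULL_DUPLEX set, while
+		 * CTRL ends up with FRCSPD, FRCDPX, SPD_100 and FD set and
+		 * ASDE cleared.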
+ */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %x\n", phy_data); + + /* Need to reset the PHY or these changes will be ignored */ + mii_ctrl_reg |= MII_CR_RESET; + + /* Disable MDI-X support for 10/100 */ + } else { + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed or duplex are forced. + */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + /* Write back the modified PHY MII control register. */ + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg); + if (ret_val) + return ret_val; + + udelay(1); + + /* The wait_autoneg_complete flag may be a little misleading here. + * Since we are forcing speed and duplex, Auto-Neg is not enabled. + * But we do want to delay for a period while forcing only so we + * don't generate false No Link messages. So we will wait here + * only if the user has set wait_autoneg_complete to 1, which is + * the default. + */ + if (hw->wait_autoneg_complete) { + /* We will wait for autoneg to complete. */ + e_dbg("Waiting for forced speed/duplex link.\n"); + mii_status_reg = 0; + + /* Wait for autoneg to complete or 4.5 seconds to expire */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { + /* We didn't get link. Reset the DSP and wait again + * for link. + */ + ret_val = e1000_phy_reset_dsp(hw); + if (ret_val) { + e_dbg("Error Resetting PHY DSP\n"); + return ret_val; + } + } + /* This loop will early-out if the link condition has been + * met + */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + } + } + + if (hw->phy_type == e1000_phy_m88) { + /* Because we reset the PHY above, we need to re-force TX_CLK in + * the Extended PHY Specific Control Register to 25MHz clock. + * This value defaults back to a 2.5MHz clock when the PHY is + * reset. + */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + + /* In addition, because of the s/w reset above, we need to + * enable CRS on Tx. This must be set for both full and half + * duplex operation. 
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ret_val = e1000_polarity_reversal_workaround(hw); + if (ret_val) + return ret_val; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_config_collision_dist - set collision distance register + * @hw: Struct containing variables accessed by shared code + * + * Sets the collision distance in the Transmit Control register. + * Link should have been established previously. Reads the speed and duplex + * information from the Device Status register. + */ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl, coll_dist; + + if (hw->mac_type < e1000_82543) + coll_dist = E1000_COLLISION_DISTANCE_82542; + else + coll_dist = E1000_COLLISION_DISTANCE; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= coll_dist << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_config_mac_to_phy - sync phy and mac settings + * @hw: Struct containing variables accessed by shared code + * + * Sets MAC speed and duplex settings to reflect the those in the PHY + * The contents of the PHY register containing the needed information need to + * be passed in. + */ +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + /* 82544 or newer MAC, Auto Speed Detection takes care of + * MAC speed/duplex configuration. + */ + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) + return E1000_SUCCESS; + + /* Read the Device Control Register and set the bits to Force Speed + * and Duplex. + */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); + + switch (hw->phy_type) { + case e1000_phy_8201: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & RTL_PHY_CTRL_FD) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + if (phy_data & RTL_PHY_CTRL_SPD_100) + ctrl |= E1000_CTRL_SPD_100; + else + ctrl |= E1000_CTRL_SPD_10; + + e1000_config_collision_dist(hw); + break; + default: + /* Set up duplex in the Device Control and Transmit Control + * registers depending on negotiated values. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & M88E1000_PSSR_DPLX) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + e1000_config_collision_dist(hw); + + /* Set up speed in the Device Control register depending on + * negotiated values. + */ + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + ctrl |= E1000_CTRL_SPD_1000; + else if ((phy_data & M88E1000_PSSR_SPEED) == + M88E1000_PSSR_100MBS) + ctrl |= E1000_CTRL_SPD_100; + } + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_force_mac_fc - force flow control settings + * @hw: Struct containing variables accessed by shared code + * + * Forces the MAC's flow control settings. + * Sets the TFCE and RFCE bits in the device control register to reflect + * the adapter settings. 
TFCE and RFCE need to be explicitly set by + * software when a Copper PHY is used because autonegotiation is managed + * by the PHY rather than the MAC. Software must also configure these + * bits when link is forced on a fiber connection. + */ +s32 e1000_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + + /* Get the current configuration of the Device Control Register */ + ctrl = er32(CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + + switch (hw->fc) { + case E1000_FC_NONE: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case E1000_FC_RX_PAUSE: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case E1000_FC_TX_PAUSE: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case E1000_FC_FULL: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + /* Disable TX Flow Control for 82542 (rev 2.0) */ + if (hw->mac_type == e1000_82542_rev2_0) + ctrl &= (~E1000_CTRL_TFCE); + + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_config_fc_after_link_up - configure flow control after autoneg + * @hw: Struct containing variables accessed by shared code + * + * Configures flow control settings after link is established + * Should be called immediately after a valid link has been established. + * Forces MAC flow control settings if link was forced. When in MII/GMII mode + * and autonegotiation is enabled, the MAC flow control settings will be set + * based on the flow control negotiated by the PHY. In TBI mode, the TFCE + * and RFCE bits will be automatically set to the negotiated flow control mode. + */ +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 mii_nway_adv_reg; + u16 mii_nway_lp_ability_reg; + u16 speed; + u16 duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (((hw->media_type == e1000_media_type_fiber) && + (hw->autoneg_failed)) || + ((hw->media_type == e1000_media_type_internal_serdes) && + (hw->autoneg_failed)) || + ((hw->media_type == e1000_media_type_copper) && + (!hw->autoneg))) { + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. 
We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement Register + * (Address 4) and the Auto_Negotiation Base Page + * Ability Register (Address 5) to determine how flow + * control was negotiated. + */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement + * Register (Address 4) and two bits in the Auto + * Negotiation Base Page Ability Register (Address 5) + * determine flow control for both the PHY and the link + * partner. The following table, taken out of the IEEE + * 802.3ab/D6.0 dated March 25, 1999, describes these + * PAUSE resolution bits and how flow control is + * determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|------------------ + * 0 | 0 | DC | DC | E1000_FC_NONE + * 0 | 1 | 0 | DC | E1000_FC_NONE + * 0 | 1 | 1 | 0 | E1000_FC_NONE + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * 1 | 0 | 0 | DC | E1000_FC_NONE + * 1 | DC | 1 | DC | E1000_FC_FULL + * 1 | 1 | 0 | 0 | E1000_FC_NONE + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + /* Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | DC | 1 | DC | E1000_FC_FULL + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected Rx + * ONLY of pause frames. In this case, we had + * to advertise FULL flow control because we + * could not advertise Rx ONLY. Hence, we must + * now check to see if we need to turn OFF the + * TRANSMISSION of PAUSE frames. + */ + if (hw->original_fc == E1000_FC_FULL) { + hw->fc = E1000_FC_FULL; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = E1000_FC_TX_PAUSE; + e_dbg + ("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should + * be disabled. However, we want to consider that we + * could be connected to a legacy switch that doesn't + * advertise desired flow control, but can be forced on + * the link partner. So if we advertised no flow + * control, that is what we will resolve to. If we + * advertised some kind of receive capability (Rx Pause + * Only or Full Flow Control) and the link partner + * advertised none, we will configure ourselves to + * enable Rx Flow Control only. We can do this safely + * for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, + * no harm done since we won't be receiving any PAUSE + * frames anyway. If the intent on the link partner was + * to have flow control enabled, then by us enabling Rx + * only, we can at least receive pause frames and + * process them. This is a good idea because in most + * cases, since we are predominantly a server NIC, more + * times than not we will be asked to delay transmission + * of packets than asking our link partner to pause + * transmission of frames. + */ + else if ((hw->original_fc == E1000_FC_NONE || + hw->original_fc == E1000_FC_TX_PAUSE) || + hw->fc_strict_ieee) { + hw->fc = E1000_FC_NONE; + e_dbg("Flow Control = NONE.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc = E1000_FC_NONE; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg + ("Error forcing flow control settings\n"); + return ret_val; + } + } else { + e_dbg + ("Copper PHY and Auto Neg has not completed.\n"); + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + */ +static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. 
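+	 * ("/C/" refers to the 8B/10B configuration ordered sets a 1000BASE-X
+	 * partner transmits while auto-negotiating; E1000_RXCW_C is set while
+	 * such ordered sets are being received, so a cleared link-up bit with
+	 * no /C/ reception is what selects the forced-link path below.)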
+ */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (hw->autoneg_failed == 0) { + hw->autoneg_failed = 1; + goto out; + } + e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, hw->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + hw->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - forced.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - autoneg " + "completed successfully.\n"); + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - invalid" + "codewords detected in autoneg.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - no sync.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - autoneg failed\n"); + } + } + + out: + return ret_val; +} + +/** + * e1000_check_for_link + * @hw: Struct containing variables accessed by shared code + * + * Checks to see if the link status of the hardware has changed. + * Called by any function that needs to check the link status of the adapter. + */ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + u32 status; + u32 rctl; + u32 icr; + s32 ret_val; + u16 phy_data; + + er32(CTRL); + status = er32(STATUS); + + /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + */ + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) { + er32(RXCW); + + if (hw->media_type == e1000_media_type_fiber) { + if (status & E1000_STATUS_LU) + hw->get_link_status = false; + } + } + + /* If we have a copper PHY then we only want to go out to the PHY + * registers to see if Auto-Neg has completed and/or if our link + * status has changed. The get_link_status flag will be set if we + * receive a Link Status Change interrupt or we have Rx Sequence + * Errors. 
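+	 * Note that e1000_copper_link_autoneg() leaves get_link_status set,
+	 * and it is cleared again below once MII_SR_LINK_STATUS is seen, so
+	 * repeated calls while the link is stable reduce to a couple of
+	 * register reads.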
+ */ + if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + * Read the register twice since the link bit is sticky. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + hw->get_link_status = false; + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift(hw); + + /* If we are on 82544 or 82543 silicon and speed/duplex + * are forced to 10H or 10F, then we will implement the + * polarity reversal workaround. We disable interrupts + * first, and upon returning, place the devices + * interrupt state to its previous value except for the + * link status change interrupt which will + * happen due to the execution of this workaround. + */ + + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ew32(IMC, 0xffffffff); + ret_val = + e1000_polarity_reversal_workaround(hw); + icr = er32(ICR); + ew32(ICS, (icr & ~E1000_ICS_LSC)); + ew32(IMS, IMS_ENABLE_MASK); + } + + } else { + /* No link detected */ + e1000_config_dsp_after_link_change(hw, false); + return 0; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!hw->autoneg) + return -E1000_ERR_CONFIG; + + /* optimize the dsp settings for the igp phy */ + e1000_config_dsp_after_link_change(hw, true); + + /* We have a M88E1000 PHY and Auto-Neg is enabled. If we + * have Si on board that is 82544 or newer, Auto + * Speed Detection takes care of MAC speed/duplex + * configuration. So we only need to configure Collision + * Distance in the MAC. Otherwise, we need to force + * speed/duplex on the MAC to the current PHY speed/duplex + * settings. + */ + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_ce4100)) + e1000_config_collision_dist(hw); + else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg + ("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control settings + * because we may have had to re-autoneg with a different link + * partner. + */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + + /* At this point we know that we are on copper and we have + * auto-negotiated link. These are conditions for checking the + * link partner capability register. We use the link speed to + * determine if TBI compatibility needs to be turned on or off. + * If the link is not at gigabit speed, then TBI compatibility + * is not needed. If we are at gigabit speed, we turn on TBI + * compatibility. + */ + if (hw->tbi_compatibility_en) { + u16 speed, duplex; + + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + if (speed != SPEED_1000) { + /* If link speed is not set to gigabit speed, we + * do not need to enable TBI compatibility. 
+ */ + if (hw->tbi_compatibility_on) { + /* If we previously were in the mode, + * turn it off. + */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_SBP; + ew32(RCTL, rctl); + hw->tbi_compatibility_on = false; + } + } else { + /* If TBI compatibility is was previously off, + * turn it on. For compatibility with a TBI link + * partner, we will store bad packets. Some + * frames have an additional byte on the end and + * will look like CRC errors to the hardware. + */ + if (!hw->tbi_compatibility_on) { + hw->tbi_compatibility_on = true; + rctl = er32(RCTL); + rctl |= E1000_RCTL_SBP; + ew32(RCTL, rctl); + } + } + } + } + + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) + e1000_check_for_serdes_link_generic(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex + * @hw: Struct containing variables accessed by shared code + * @speed: Speed of the connection + * @duplex: Duplex setting of the connection + * + * Detects the current speed and duplex settings of the hardware. + */ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + u32 status; + s32 ret_val; + u16 phy_data; + + if (hw->mac_type >= e1000_82543) { + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + e_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + e_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + e_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + e_dbg(" Half Duplex\n"); + } + } else { + e_dbg("1000 Mbs, Full Duplex\n"); + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + } + + /* IGP01 PHY may advertise full duplex operation after speed downgrade + * even if it is operating at half duplex. Here we set the duplex + * settings to match the duplex in the link partner's capabilities. + */ + if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & NWAY_ER_LP_NWAY_CAPS)) + *duplex = HALF_DUPLEX; + else { + ret_val = + e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); + if (ret_val) + return ret_val; + if ((*speed == SPEED_100 && + !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) || + (*speed == SPEED_10 && + !(phy_data & NWAY_LPAR_10T_FD_CAPS))) + *duplex = HALF_DUPLEX; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_wait_autoneg + * @hw: Struct containing variables accessed by shared code + * + * Blocks until autoneg completes or times out (~4.5 seconds) + */ +static s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + e_dbg("Waiting for Auto-Neg to complete.\n"); + + /* We will wait for autoneg to complete or 4.5 seconds to expire. */ + for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. 
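+		 * (With msleep(100) per pass this bounds the wait at roughly
+		 * the 4.5 seconds quoted above; the status register is read
+		 * twice per iteration for the same latched-bit reason noted in
+		 * e1000_config_fc_after_link_up().)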
+ */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + if (phy_data & MII_SR_AUTONEG_COMPLETE) + return E1000_SUCCESS; + + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_raise_mdi_clk - Raises the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Raise the clock input to the Management Data Clock (by setting the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_lower_mdi_clk - Lowers the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Lower the clock input to the Management Data Clock (by clearing the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY + * @hw: Struct containing variables accessed by shared code + * @data: Data to send out to the PHY + * @count: Number of bits to shift out + * + * Bits are shifted out in MSB to LSB order. + */ +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) +{ + u32 ctrl; + u32 mask; + + /* We need to shift "count" number of bits out to the PHY. So, the value + * in the "data" parameter will be shifted out to the PHY one bit at a + * time. In order to do this, "data" must be broken down into bits. + */ + mask = 0x01; + mask <<= (count - 1); + + ctrl = er32(CTRL); + + /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ + ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); + + while (mask) { + /* A "1" is shifted out to the PHY by setting the MDIO bit to + * "1" and then raising and lowering the Management Data Clock. + * A "0" is shifted out to the PHY by setting the MDIO bit to + * "0" and then raising and lowering the clock. + */ + if (data & mask) + ctrl |= E1000_CTRL_MDIO; + else + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + udelay(10); + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + mask = mask >> 1; + } +} + +/** + * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY + * @hw: Struct containing variables accessed by shared code + * + * Bits are shifted in MSB to LSB order. + */ +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) +{ + u32 ctrl; + u16 data = 0; + u8 i; + + /* In order to read a register from the PHY, we need to shift in a total + * of 18 bits from the PHY. The first two bit (turnaround) times are + * used to avoid contention on the MDIO pin when a read operation is + * performed. These two bits are ignored by us and thrown away. Bits are + * "shifted in" by raising the input to the Management Data Clock + * (setting the MDC bit), and then reading the value of the MDIO bit. + */ + ctrl = er32(CTRL); + + /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as + * input. + */ + ctrl &= ~E1000_CTRL_MDIO_DIR; + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + /* Raise and Lower the clock before reading in the data. This accounts + * for the turnaround bits. 
The first clock occurred when we clocked out + * the last bit of the Register Address. + */ + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + for (data = 0, i = 0; i < 16; i++) { + data = data << 1; + e1000_raise_mdi_clk(hw, &ctrl); + ctrl = er32(CTRL); + /* Check to see if we shifted in a "1". */ + if (ctrl & E1000_CTRL_MDIO) + data |= 1; + e1000_lower_mdi_clk(hw, &ctrl); + } + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + return data; +} + +/** + * e1000_read_phy_reg - read a phy register + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to read + * @phy_data: pointer to the value on the PHY register + * + * Reads the value from a PHY register, if the value is on a specific non zero + * page, sets the page first. + */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16) reg_addr); + if (ret_val) + goto out; + } + + ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); +out: + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, and register address in the MDI + * Control register. The MAC will take care of interfacing with + * the PHY to retrieve the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_READ) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + + mdic = readl(E1000_MDIO_STS); + if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) { + e_dbg("MDI Read Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16)mdic; + } else { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16)mdic; + } + } else { + /* We must first send a preamble through the MDIO pin to signal + * the beginning of an MII instruction. This is done by sending + * 32 consecutive "1" bits. + */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the next few fields that are required for a read + * operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine five different times. 
The + * format of a MII read instruction consists of a shift out of + * 14 bits and is defined as follows: + * + * followed by a shift in of 18 bits. This first two bits + * shifted in are TurnAround bits used to avoid contention on + * the MDIO pin when a READ operation is performed. These two + * bits are thrown away followed by a shift in of 16 bits which + * contains the desired data. + */ + mdic = ((reg_addr) | (phy_addr << 5) | + (PHY_OP_READ << 10) | (PHY_SOF << 12)); + + e1000_shift_out_mdi_bits(hw, mdic, 14); + + /* Now that we've shifted out the read command to the MII, we + * need to "shift in" the 16-bit value (18 total bits) of the + * requested PHY register address. + */ + *phy_data = e1000_shift_in_mdi_bits(hw); + } + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg - write a phy register + * + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to write + * @phy_data: data to write to the PHY + * + * Writes a value to a PHY register + */ +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16)reg_addr); + if (ret_val) { + spin_unlock_irqrestore(&e1000_phy_lock, flags); + return ret_val; + } + } + + ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, register address, and data + * intended for the PHY register in the MDI Control register. + * The MAC will take care of interfacing with the PHY to send + * the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = (((u32)phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_WRITE) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 640; i++) { + udelay(5); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } else { + mdic = (((u32)phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 641; i++) { + udelay(5); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } + } else { + /* We'll need to use the SW defined pins to shift the write + * command out to the PHY. We first send a preamble to the PHY + * to signal the beginning of the MII instruction. This is done + * by sending 32 consecutive "1" bits. 
+ */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the remaining required fields that will indicate + * a write operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine for each field in the + * command. The format of a MII write instruction is as follows: + * . + */ + mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | + (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); + mdic <<= 16; + mdic |= (u32)phy_data; + + e1000_shift_out_mdi_bits(hw, mdic, 32); + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - reset the phy, hardware style + * @hw: Struct containing variables accessed by shared code + * + * Returns the PHY to the power-on reset state + */ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext; + u32 led_ctrl; + + e_dbg("Resetting Phy...\n"); + + if (hw->mac_type > e1000_82543) { + /* Read the device control register and assert the + * E1000_CTRL_PHY_RST bit. Then, take it out of reset. + * For e1000 hardware, we delay for 10ms between the assert + * and de-assert. + */ + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); + E1000_WRITE_FLUSH(); + + msleep(10); + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + } else { + /* Read the Extended Device Control Register, assert the + * PHY_RESET_DIR bit to put the PHY into reset. Then, take it + * out of reset. + */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; + ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + msleep(10); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + } + udelay(150); + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Wait for FW to finish PHY configuration. */ + return e1000_get_phy_cfg_done(hw); +} + +/** + * e1000_phy_reset - reset the phy to commit settings + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY + * Sets bit 15 of the MII Control register + */ +s32 e1000_phy_reset(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + switch (hw->phy_type) { + case e1000_phy_igp: + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + break; + default: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= MII_CR_RESET; + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + break; + } + + if (hw->phy_type == e1000_phy_igp) + e1000_phy_init_script(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_detect_gig_phy - check the phy type + * @hw: Struct containing variables accessed by shared code + * + * Probes the expected PHY address for known PHY IDs + */ +static s32 e1000_detect_gig_phy(struct e1000_hw *hw) +{ + s32 phy_init_status, ret_val; + u16 phy_id_high, phy_id_low; + bool match = false; + + if (hw->phy_id != 0) + return E1000_SUCCESS; + + /* Read the PHY ID Registers to identify which PHY is onboard. 
*/ + ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); + if (ret_val) + return ret_val; + + hw->phy_id = (u32)(phy_id_high << 16); + udelay(20); + ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); + if (ret_val) + return ret_val; + + hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK); + hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->phy_id == M88E1000_E_PHY_ID) + match = true; + break; + case e1000_82544: + if (hw->phy_id == M88E1000_I_PHY_ID) + match = true; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + if (hw->phy_id == M88E1011_I_PHY_ID) + match = true; + break; + case e1000_ce4100: + if ((hw->phy_id == RTL8211B_PHY_ID) || + (hw->phy_id == RTL8201N_PHY_ID) || + (hw->phy_id == M88E1118_E_PHY_ID)) + match = true; + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (hw->phy_id == IGP01E1000_I_PHY_ID) + match = true; + break; + default: + e_dbg("Invalid MAC type %d\n", hw->mac_type); + return -E1000_ERR_CONFIG; + } + phy_init_status = e1000_set_phy_type(hw); + + if ((match) && (phy_init_status == E1000_SUCCESS)) { + e_dbg("PHY ID 0x%X detected\n", hw->phy_id); + return E1000_SUCCESS; + } + e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id); + return -E1000_ERR_PHY; +} + +/** + * e1000_phy_reset_dsp - reset DSP + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY's DSP + */ +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + do { + ret_val = e1000_write_phy_reg(hw, 29, 0x001d); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x0000); + if (ret_val) + break; + ret_val = E1000_SUCCESS; + } while (0); + + return ret_val; +} + +/** + * e1000_phy_igp_get_info - get igp specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for igp PHY only. + */ +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data, min_length, max_length, average; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + /* IGP01E1000 does not need to support it. */ + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; + + /* IGP01E1000 always correct polarity reversal */ + phy_info->polarity_correction = e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >> + IGP01E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Local/Remote Receiver Information are only valid @ 1000 + * Mbps + */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + /* Get cable length */ + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + /* Translate to old method */ + average = (max_length + min_length) / 2; + + if (average <= e1000_igp_cable_length_50) + phy_info->cable_length = e1000_cable_length_50; + else if (average <= e1000_igp_cable_length_80) + phy_info->cable_length = e1000_cable_length_50_80; + else if (average <= e1000_igp_cable_length_110) + phy_info->cable_length = e1000_cable_length_80_110; + else if (average <= e1000_igp_cable_length_140) + phy_info->cable_length = e1000_cable_length_110_140; + else + phy_info->cable_length = e1000_cable_length_140; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_m88_get_info - get m88 specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for m88 PHY only. + */ +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_info->extended_10bt_distance = + ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> + M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ? + e1000_10bt_ext_dist_enable_lower : + e1000_10bt_ext_dist_enable_normal; + + phy_info->polarity_correction = + ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> + M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ? + e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >> + M88E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + /* Cable Length Estimation and Local/Remote Receiver Information + * are only valid at 1000 Mbps. + */ + phy_info->cable_length = + (e1000_cable_length) ((phy_data & + M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_get_info - request phy info + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers + */ +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + + phy_info->cable_length = e1000_cable_length_undefined; + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; + phy_info->cable_polarity = e1000_rev_polarity_undefined; + phy_info->downshift = e1000_downshift_undefined; + phy_info->polarity_correction = e1000_polarity_reversal_undefined; + phy_info->mdix_mode = e1000_auto_x_mode_undefined; + phy_info->local_rx = e1000_1000t_rx_status_undefined; + phy_info->remote_rx = e1000_1000t_rx_status_undefined; + + if (hw->media_type != e1000_media_type_copper) { + e_dbg("PHY info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { + e_dbg("PHY info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + if (hw->phy_type == e1000_phy_igp) + return e1000_phy_igp_get_info(hw, phy_info); + else if ((hw->phy_type == e1000_phy_8211) || + (hw->phy_type == e1000_phy_8201)) + return E1000_SUCCESS; + else + return e1000_phy_m88_get_info(hw, phy_info); +} + +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { + e_dbg("Invalid MDI setting detected\n"); + hw->mdix = 1; + return -E1000_ERR_CONFIG; + } + return E1000_SUCCESS; +} + +/** + * e1000_init_eeprom_params - initialize sw eeprom vars + * @hw: Struct containing variables accessed by shared code + * + * Sets up eeprom variables in the hw struct. Must be called after mac_type + * is configured. 
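+ * Note on the per-MAC defaults set up below: the 82542/82543/82544 parts are
+ * assumed to carry a fixed 64-word Microwire EEPROM, the 82540/82545/82546
+ * parts take their word size from the EECD size bit, and the 82541/82547
+ * parts may carry either a Microwire or an SPI device, selected by the EECD
+ * type bit.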
+ */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd = er32(EECD); + s32 ret_val = E1000_SUCCESS; + u16 eeprom_size; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + eeprom->type = e1000_eeprom_microwire; + eeprom->word_size = 64; + eeprom->opcode_bits = 3; + eeprom->address_bits = 6; + eeprom->delay_usec = 50; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_SIZE) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (eecd & E1000_EECD_TYPE) { + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + } else { + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + } + break; + default: + break; + } + + if (eeprom->type == e1000_eeprom_spi) { + /* eeprom_size will be an enum [0..8] that maps to eeprom sizes + * 128B to 32KB (incremented by powers of 2). + */ + /* Set to default value for initial eeprom read. */ + eeprom->word_size = 64; + ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); + if (ret_val) + return ret_val; + eeprom_size = + (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; + /* 256B eeprom size was not supported in earlier hardware, so we + * bump eeprom_size up one to ensure that "1" (which maps to + * 256B) is never the result used in the shifting logic below. + */ + if (eeprom_size) + eeprom_size++; + + eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT); + } + return ret_val; +} + +/** + * e1000_raise_ee_clk - Raises the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Raise the clock input to the EEPROM (by setting the SK bit), and then + * wait microseconds. + */ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_lower_ee_clk - Lowers the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Lower the clock input to the EEPROM (by clearing the SK bit), and + * then wait 50 microseconds. + */ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM. 
+ * @hw: Struct containing variables accessed by shared code + * @data: data to send to the EEPROM + * @count: number of bits to shift out + */ +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u32 mask; + + /* We need to shift "count" bits out to the EEPROM. So, value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + */ + mask = 0x01 << (count - 1); + eecd = er32(EECD); + if (eeprom->type == e1000_eeprom_microwire) + eecd &= ~E1000_EECD_DO; + else if (eeprom->type == e1000_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + /* A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK bit + * controls the clock input to the EEPROM). A "0" is shifted + * out to the EEPROM by setting "DI" to "0" and then raising and + * then lowering the clock. + */ + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(eeprom->delay_usec); + + e1000_raise_ee_clk(hw, &eecd); + e1000_lower_ee_clk(hw, &eecd); + + mask = mask >> 1; + + } while (mask); + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM + * @hw: Struct containing variables accessed by shared code + * @count: number of bits to shift in + */ +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + /* In order to read a register from the EEPROM, we need to shift 'count' + * bits in from the EEPROM. Bits are "shifted in" by raising the clock + * input to the EEPROM (setting the SK bit), and then reading the value + * of the "DO" bit. During this "shifting in" process the "DI" bit + * should always be clear. + */ + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data = data << 1; + e1000_raise_ee_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DI); + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_ee_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_acquire_eeprom - Prepares EEPROM for access + * @hw: Struct containing variables accessed by shared code + * + * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This + * function should be called before issuing a command to the EEPROM. 
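+ * On MACs newer than the 82544 this also performs the EECD request/grant
+ * handshake first, so that software does not touch the EEPROM while firmware
+ * or the other port owns it.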
+ */ +static s32 e1000_acquire_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd, i = 0; + + eecd = er32(EECD); + + /* Request EEPROM Access */ + if (hw->mac_type > e1000_82544) { + eecd |= E1000_EECD_REQ; + ew32(EECD, eecd); + eecd = er32(EECD); + while ((!(eecd & E1000_EECD_GNT)) && + (i < E1000_EEPROM_GRANT_ATTEMPTS)) { + i++; + udelay(5); + eecd = er32(EECD); + } + if (!(eecd & E1000_EECD_GNT)) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire EEPROM grant\n"); + return -E1000_ERR_EEPROM; + } + } + + /* Setup EEPROM for Read/Write */ + + if (eeprom->type == e1000_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); + ew32(EECD, eecd); + + /* Set CS */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(1); + } + + return E1000_SUCCESS; +} + +/** + * e1000_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_standby_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + + eecd = er32(EECD); + + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock high */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock low */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } +} + +/** + * e1000_release_eeprom - drop chip select + * @hw: Struct containing variables accessed by shared code + * + * Terminates a command by inverting the EEPROM's chip select pin + */ +static void e1000_release_eeprom(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + + if (hw->eeprom.type == e1000_eeprom_spi) { + eecd |= E1000_EECD_CS; /* Pull CS high */ + eecd &= ~E1000_EECD_SK; /* Lower SCK */ + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(hw->eeprom.delay_usec); + } else if (hw->eeprom.type == e1000_eeprom_microwire) { + /* cleanup eeprom */ + + /* CS on Microwire is active-high */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); + + ew32(EECD, eecd); + + /* Rising edge of clock */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + + /* Falling edge of clock */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + } + + /* Stop requesting EEPROM access */ + if (hw->mac_type > e1000_82544) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + } +} + +/** + * e1000_spi_eeprom_ready - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u8 spi_stat_reg; + + /* Read "Status Register" repeatedly until the LSB is cleared. 
The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + retry_count = 0; + do { + e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, + hw->eeprom.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8); + if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) + break; + + udelay(5); + retry_count += 5; + + e1000_standby_eeprom(hw); + } while (retry_count < EEPROM_MAX_RETRY_SPI); + + /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and + * only 0-5mSec on 5V devices) + */ + if (retry_count >= EEPROM_MAX_RETRY_SPI) { + e_dbg("SPI EEPROM Status error\n"); + return -E1000_ERR_EEPROM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_eeprom - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * @words: number of words to read + */ +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + + mutex_lock(&e1000_eeprom_lock); + ret = e1000_do_read_eeprom(hw, offset, words, data); + mutex_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 i = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. + */ + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { + e_dbg("\"words\" parameter out of bounds. Words = %d," + "size = %d\n", offset, eeprom->word_size); + return -E1000_ERR_EEPROM; + } + + /* EEPROM's that don't use EERD to read require us to bit-bang the SPI + * directly. In this case, we need to acquire the EEPROM so that + * FW or other port software does not interrupt. + */ + /* Prepare the EEPROM for bit-bang reading */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have + * acquired the EEPROM at this point, so any returns should release it + */ + if (eeprom->type == e1000_eeprom_spi) { + u16 word_in; + u8 read_opcode = EEPROM_READ_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) { + e1000_release_eeprom(hw); + return -E1000_ERR_EEPROM; + } + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + read_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16)(offset * 2), + eeprom->address_bits); + + /* Read the data. The address of the eeprom internally + * increments with each byte (spi) being read, saving on the + * overhead of eeprom setup and tear-down. The address counter + * will roll over if reading beyond the size of the eeprom, thus + * allowing the entire memory to be read starting from any + * offset. 
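+ * The byte shifted in first lands in the upper half of the raw 16-bit value,
+ * so the two bytes are swapped below before the word is stored in data[i].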
+ */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_ee_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + } else if (eeprom->type == e1000_eeprom_microwire) { + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, + EEPROM_READ_OPCODE_MICROWIRE, + eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16)(offset + i), + eeprom->address_bits); + + /* Read the data. For microwire, each word requires the + * overhead of eeprom setup and tear-down. + */ + data[i] = e1000_shift_in_ee_bits(hw, 16); + e1000_standby_eeprom(hw); + cond_resched(); + } + } + + /* End this read operation */ + e1000_release_eeprom(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum + * @hw: Struct containing variables accessed by shared code + * + * Reads the first 64 16 bit words of the EEPROM and sums the values read. + * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is + * valid. + */ +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + +#ifdef CONFIG_PARISC + /* This is a signature and not a checksum on HP c8000 */ + if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) + return E1000_SUCCESS; + +#endif + if (checksum == (u16)EEPROM_SUM) + return E1000_SUCCESS; + else { + e_dbg("EEPROM Checksum Invalid\n"); + return -E1000_ERR_EEPROM; + } +} + +/** + * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum + * @hw: Struct containing variables accessed by shared code + * + * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. + * Writes the difference to word offset 63 of the EEPROM. + */ +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + checksum = (u16)EEPROM_SUM - checksum; + if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { + e_dbg("EEPROM Write Error\n"); + return -E1000_ERR_EEPROM; + } + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom - write words to the different EEPROM types. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word to be written to the EEPROM + * + * If e1000_update_eeprom_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + */ +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + + mutex_lock(&e1000_eeprom_lock); + ret = e1000_do_write_eeprom(hw, offset, words, data); + mutex_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + s32 status = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. 
+ */ + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { + e_dbg("\"words\" parameter out of bounds\n"); + return -E1000_ERR_EEPROM; + } + + /* Prepare the EEPROM for writing */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + if (eeprom->type == e1000_eeprom_microwire) { + status = e1000_write_eeprom_microwire(hw, offset, words, data); + } else { + status = e1000_write_eeprom_spi(hw, offset, words, data); + msleep(10); + } + + /* Done with writing */ + e1000_release_eeprom(hw); + + return status; +} + +/** + * e1000_write_eeprom_spi - Writes a 16 bit word to a given offset in an SPI EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u16 widx = 0; + + while (widx < words) { + u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) + return -E1000_ERR_EEPROM; + + e1000_standby_eeprom(hw); + cond_resched(); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, + eeprom->opcode_bits); + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + write_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16)((offset + widx) * 2), + eeprom->address_bits); + + /* Send the data */ + + /* Loop to allow for up to whole page write (32 bytes) of + * eeprom + */ + while (widx < words) { + u16 word_out = data[widx]; + + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_ee_bits(hw, word_out, 16); + widx++; + + /* Some larger eeprom sizes are capable of a 32-byte + * PAGE WRITE operation, while the smaller eeproms are + * capable of an 8-byte PAGE WRITE operation. Break the + * inner loop to pass new address + */ + if ((((offset + widx) * 2) % eeprom->page_size) == 0) { + e1000_standby_eeprom(hw); + break; + } + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom_microwire - Writes a 16 bit word to a given offset in a Microwire EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u16 words_written = 0; + u16 i = 0; + + /* Send the write enable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 11). It's less work to include + * the 11 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This puts the + * EEPROM into write/erase mode. 
+ */ + e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, + (u16)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); + + /* Prepare the EEPROM */ + e1000_standby_eeprom(hw); + + while (words_written < words) { + /* Send the Write command (3-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, + eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16)(offset + words_written), + eeprom->address_bits); + + /* Send the data */ + e1000_shift_out_ee_bits(hw, data[words_written], 16); + + /* Toggle the CS line. This in effect tells the EEPROM to + * execute the previous command. + */ + e1000_standby_eeprom(hw); + + /* Read DO repeatedly until it is high (equal to '1'). The + * EEPROM will signal that the command has been completed by + * raising the DO signal. If DO does not go high in 10 + * milliseconds, then error out. + */ + for (i = 0; i < 200; i++) { + eecd = er32(EECD); + if (eecd & E1000_EECD_DO) + break; + udelay(50); + } + if (i == 200) { + e_dbg("EEPROM Write did not complete\n"); + return -E1000_ERR_EEPROM; + } + + /* Recover from write */ + e1000_standby_eeprom(hw); + cond_resched(); + + words_written++; + } + + /* Send the write disable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 10). It's less work to include + * the 10 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This takes the + * EEPROM out of write/erase mode. + */ + e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, + (u16)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - read the adapters MAC from eeprom + * @hw: Struct containing variables accessed by shared code + * + * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the + * second function of dual function devices + */ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + u16 offset; + u16 eeprom_data, i; + + for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { + offset = i >> 1; + if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF); + hw->perm_mac_addr[i + 1] = (u8)(eeprom_data >> 8); + } + + switch (hw->mac_type) { + default: + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) + hw->perm_mac_addr[5] ^= 0x01; + break; + } + + for (i = 0; i < NODE_ADDRESS_SIZE; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return E1000_SUCCESS; +} + +/** + * e1000_init_rx_addrs - Initializes receive address filters. + * @hw: Struct containing variables accessed by shared code + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + */ +static void e1000_init_rx_addrs(struct e1000_hw *hw) +{ + u32 i; + u32 rar_num; + + /* Setup the receive address. */ + e_dbg("Programming MAC Address into RAR[0]\n"); + + e1000_rar_set(hw, hw->mac_addr, 0); + + rar_num = E1000_RAR_ENTRIES; + + /* Zero out the following 14 receive addresses. 
RAR[15] is for + * manageability + */ + e_dbg("Clearing RAR[1-14]\n"); + for (i = 1; i < rar_num; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table + * @hw: Struct containing variables accessed by shared code + * @mc_addr: the multicast address to hash + */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value = 0; + + /* The portion of the address that is used for the hash table is + * determined by the mc_filter_type setting. + */ + switch (hw->mc_filter_type) { + /* [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + */ + case 0: + /* [47:36] i.e. 0x563 for above example address */ + hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: + /* [46:35] i.e. 0xAC6 for above example address */ + hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: + /* [45:34] i.e. 0x5D8 for above example address */ + hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: + /* [43:32] i.e. 0x634 for above example address */ + hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + } + + hash_value &= 0xFFF; + return hash_value; +} + +/** + * e1000_rar_set - Puts an ethernet address into a receive address register. + * @hw: Struct containing variables accessed by shared code + * @addr: Address to put into receive address register + * @index: Receive address register to write + */ +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx + * unit hang. + * + * Description: + * If there are any Rx frames queued up or otherwise present in the HW + * before RSS is enabled, and then we enable RSS, the HW Rx unit will + * hang. To work around this issue, we have to disable receives and + * flush out all Rx frames before we enable RSS. To do so, we modify we + * redirect all Rx traffic to manageability and then reset the HW. + * This flushes away Rx frames, and (since the redirections to + * manageability persists across resets) keeps new ones from coming in + * while we work. Then, we clear the Address Valid AV bit for all MAC + * addresses and undo the re-direction to manageability. + * Now, frames are coming in again, but the MAC won't accept them, so + * far so good. We now proceed to initialize RSS (if necessary) and + * configure the Rx unit. Last, we re-enable the AV bits and continue + * on our merry way. + */ + switch (hw->mac_type) { + default: + /* Indicate to hardware the Address is Valid. */ + rar_high |= E1000_RAH_AV; + break; + } + + E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table. 
+ * @hw: Struct containing variables accessed by shared code + * @offset: Offset in VLAN filter table to write + * @value: Value to write into VLAN filter table + */ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + u32 temp; + + if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { + temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); + E1000_WRITE_FLUSH(); + } else { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_clear_vfta - Clears the VLAN filter table + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0); + E1000_WRITE_FLUSH(); + } +} + +static s32 e1000_id_led_init(struct e1000_hw *hw) +{ + u32 ledctl; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 eeprom_data, i, temp; + const u16 led_mask = 0x0F; + + if (hw->mac_type < e1000_82540) { + /* Nothing to do */ + return E1000_SUCCESS; + } + + ledctl = er32(LEDCTL); + hw->ledctl_default = ledctl; + hw->ledctl_mode1 = hw->ledctl_default; + hw->ledctl_mode2 = hw->ledctl_default; + + if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + + if ((eeprom_data == ID_LED_RESERVED_0000) || + (eeprom_data == ID_LED_RESERVED_FFFF)) { + eeprom_data = ID_LED_DEFAULT; + } + + for (i = 0; i < 4; i++) { + temp = (eeprom_data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_setup_led + * @hw: Struct containing variables accessed by shared code + * + * Prepares SW controlable LED for use and saves the current state of the LED. 
+ */ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + u32 ledctl; + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No setup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn off PHY Smart Power Down (if enabled) */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, + &hw->phy_spd_default); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + (u16)(hw->phy_spd_default & + ~IGP01E1000_GMII_SPD)); + if (ret_val) + return ret_val; + fallthrough; + default: + if (hw->media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + /* Save current LEDCTL settings */ + hw->ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->media_type == e1000_media_type_copper) + ew32(LEDCTL, hw->ledctl_mode1); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores the saved state of the SW controlable LED. + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No cleanup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn on PHY Smart Power Down (if previously enabled) */ + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + hw->phy_spd_default); + if (ret_val) + return ret_val; + fallthrough; + default: + /* Restore LEDCTL settings */ + ew32(LEDCTL, hw->ledctl_default); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turns on the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_on(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode2); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turns off the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_off(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == 
e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode1); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_hw_cntrs - Clears all hardware statistics counters. + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_hw_cntrs(struct e1000_hw *hw) +{ + er32(CRCERRS); + er32(SYMERRS); + er32(MPC); + er32(SCC); + er32(ECOL); + er32(MCC); + er32(LATECOL); + er32(COLC); + er32(DC); + er32(SEC); + er32(RLEC); + er32(XONRXC); + er32(XONTXC); + er32(XOFFRXC); + er32(XOFFTXC); + er32(FCRUC); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + + er32(GPRC); + er32(BPRC); + er32(MPRC); + er32(GPTC); + er32(GORCL); + er32(GORCH); + er32(GOTCL); + er32(GOTCH); + er32(RNBC); + er32(RUC); + er32(RFC); + er32(ROC); + er32(RJC); + er32(TORL); + er32(TORH); + er32(TOTL); + er32(TOTH); + er32(TPR); + er32(TPT); + + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(MPTC); + er32(BPTC); + + if (hw->mac_type < e1000_82543) + return; + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + if (hw->mac_type <= e1000_82544) + return; + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); +} + +/** + * e1000_reset_adaptive - Resets Adaptive IFS to its default state. + * @hw: Struct containing variables accessed by shared code + * + * Call this after e1000_init_hw. You may override the IFS defaults by setting + * hw->ifs_params_forced to true. However, you must initialize hw-> + * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio + * before calling this function. + */ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if (!hw->ifs_params_forced) { + hw->current_ifs_val = 0; + hw->ifs_min_val = IFS_MIN; + hw->ifs_max_val = IFS_MAX; + hw->ifs_step_size = IFS_STEP; + hw->ifs_ratio = IFS_RATIO; + } + hw->in_ifs_mode = false; + ew32(AIT, 0); + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_update_adaptive - update adaptive IFS + * @hw: Struct containing variables accessed by shared code + * + * Called during the callback/watchdog routine to update IFS value based on + * the ratio of transmits to collisions. 
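+ * Roughly: while collision_delta * ifs_ratio exceeds tx_packet_delta (and
+ * more than MIN_NUM_XMITS frames were sent), the extra inter-frame spacing
+ * in the AIT register is stepped from ifs_min_val towards ifs_max_val; once
+ * the ratio falls back and traffic drops to MIN_NUM_XMITS frames or fewer,
+ * AIT is cleared again.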
+ */ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { + if (hw->tx_packet_delta > MIN_NUM_XMITS) { + hw->in_ifs_mode = true; + if (hw->current_ifs_val < hw->ifs_max_val) { + if (hw->current_ifs_val == 0) + hw->current_ifs_val = + hw->ifs_min_val; + else + hw->current_ifs_val += + hw->ifs_step_size; + ew32(AIT, hw->current_ifs_val); + } + } + } else { + if (hw->in_ifs_mode && + (hw->tx_packet_delta <= MIN_NUM_XMITS)) { + hw->current_ifs_val = 0; + hw->in_ifs_mode = false; + ew32(AIT, 0); + } + } + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_get_bus_info + * @hw: Struct containing variables accessed by shared code + * + * Gets the current PCI bus type, speed, and width of the hardware + */ +void e1000_get_bus_info(struct e1000_hw *hw) +{ + u32 status; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->bus_type = e1000_bus_type_pci; + hw->bus_speed = e1000_bus_speed_unknown; + hw->bus_width = e1000_bus_width_unknown; + break; + default: + status = er32(STATUS); + hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? + e1000_bus_type_pcix : e1000_bus_type_pci; + + if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { + hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? + e1000_bus_speed_66 : e1000_bus_speed_120; + } else if (hw->bus_type == e1000_bus_type_pci) { + hw->bus_speed = (status & E1000_STATUS_PCI66) ? + e1000_bus_speed_66 : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + hw->bus_speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + hw->bus_speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + hw->bus_speed = e1000_bus_speed_133; + break; + default: + hw->bus_speed = e1000_bus_speed_reserved; + break; + } + } + hw->bus_width = (status & E1000_STATUS_BUS64) ? + e1000_bus_width_64 : e1000_bus_width_32; + break; + } +} + +/** + * e1000_write_reg_io + * @hw: Struct containing variables accessed by shared code + * @offset: offset to write to + * @value: value to write + * + * Writes a value to one of the devices registers using port I/O (as opposed to + * memory mapped I/O). Only 82544 and newer devices support port I/O. + */ +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value) +{ + unsigned long io_addr = hw->io_base; + unsigned long io_data = hw->io_base + 4; + + e1000_io_write(hw, io_addr, offset); + e1000_io_write(hw, io_data, value); +} + +/** + * e1000_get_cable_length - Estimates the cable length. + * @hw: Struct containing variables accessed by shared code + * @min_length: The estimated minimum length + * @max_length: The estimated maximum length + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * This function always returns a ranged length (minimum & maximum). + * So for M88 phy's, this function interprets the one value returned from the + * register to the minimum and maximum range. + * For IGP phy's, the function calculates the range by the AGC registers. 
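+ * For example, an M88 reading of e1000_cable_length_80_110 is translated
+ * below to *min_length = e1000_igp_cable_length_80 and *max_length =
+ * e1000_igp_cable_length_110 (roughly an 80..110 m range); for IGP PHYs the
+ * averaged per-channel AGC value indexes e1000_igp_cable_length_table[] and
+ * the range is +/- IGP01E1000_AGC_RANGE.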
+ */ +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length) +{ + s32 ret_val; + u16 agc_value = 0; + u16 i, phy_data; + u16 cable_length; + + *min_length = *max_length = 0; + + /* Use old method for Phy older than IGP */ + if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + /* Convert the enum value to ranged values */ + switch (cable_length) { + case e1000_cable_length_50: + *min_length = 0; + *max_length = e1000_igp_cable_length_50; + break; + case e1000_cable_length_50_80: + *min_length = e1000_igp_cable_length_50; + *max_length = e1000_igp_cable_length_80; + break; + case e1000_cable_length_80_110: + *min_length = e1000_igp_cable_length_80; + *max_length = e1000_igp_cable_length_110; + break; + case e1000_cable_length_110_140: + *min_length = e1000_igp_cable_length_110; + *max_length = e1000_igp_cable_length_140; + break; + case e1000_cable_length_140: + *min_length = e1000_igp_cable_length_140; + *max_length = e1000_igp_cable_length_170; + break; + default: + return -E1000_ERR_PHY; + } + } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ + u16 cur_agc_value; + u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_A, + IGP01E1000_PHY_AGC_B, + IGP01E1000_PHY_AGC_C, + IGP01E1000_PHY_AGC_D + }; + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; + + /* Value bound check. */ + if ((cur_agc_value >= + IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || + (cur_agc_value == 0)) + return -E1000_ERR_PHY; + + agc_value += cur_agc_value; + + /* Update minimal AGC value. */ + if (min_agc_value > cur_agc_value) + min_agc_value = cur_agc_value; + } + + /* Remove the minimal AGC result for length < 50m */ + if (agc_value < + IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { + agc_value -= min_agc_value; + + /* Get the average length of the remaining 3 channels */ + agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); + } else { + /* Get the average length of all the 4 channels. */ + agc_value /= IGP01E1000_PHY_CHANNEL_NUM; + } + + /* Set the range of the calculated length. */ + *min_length = ((e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) > 0) ? + (e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) : 0; + *max_length = e1000_igp_cable_length_table[agc_value] + + IGP01E1000_AGC_RANGE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_polarity - Check the cable polarity + * @hw: Struct containing variables accessed by shared code + * @polarity: output parameter : 0 - Polarity is not reversed + * 1 - Polarity is reversed. + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function simply reads the polarity bit in the + * Phy Status register. For IGP phy's, this bit is valid only if link speed is + * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will + * return 0. If the link speed is 1000 Mbps the polarity status is in the + * IGP01E1000_PHY_PCS_INIT_REG. 
+ */ +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_m88) { + /* return the Polarity bit in the Status register. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >> + M88E1000_PSSR_REV_POLARITY_SHIFT) ? + e1000_rev_polarity_reversed : e1000_rev_polarity_normal; + + } else if (hw->phy_type == e1000_phy_igp) { + /* Read the Status register to check the speed */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + /* If speed is 1000 Mbps, must read the + * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status + */ + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Read the GIG initialization PCS register (0x00B4) */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, + &phy_data); + if (ret_val) + return ret_val; + + /* Check the polarity bits */ + *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } else { + /* For 10 Mbps, read the polarity bit in the status + * register. (for 100 Mbps this bit is always 0) + */ + *polarity = + (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_downshift - Check if Downshift occurred + * @hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function reads the Downshift bit in the Phy + * Specific Status register. For IGP phy's, it reads the Downgrade bit in the + * Link Health register. In IGP this bit is latched high, so the driver must + * read it immediately after link is established. + */ +static s32 e1000_check_downshift(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = + (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 
1 : 0; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> + M88E1000_PSSR_DOWNSHIFT_SHIFT; + } + + return E1000_SUCCESS; +} + +static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D +}; + +static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw) +{ + u16 min_length, max_length; + u16 phy_data, i; + s32 ret_val; + + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + if (hw->dsp_config_state != e1000_dsp_config_enabled) + return 0; + + if (min_length >= e1000_igp_cable_length_50) { + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + hw->dsp_config_state = e1000_dsp_config_activated; + } else { + u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; + u32 idle_errs = 0; + + /* clear previous idle error counts */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + udelay(1000); + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + hw->ffe_config_state = e1000_ffe_config_active; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if (ret_val) + return ret_val; + break; + } + + if (idle_errs) + ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } + + return 0; +} + +/** + * e1000_config_dsp_after_link_change + * @hw: Struct containing variables accessed by shared code + * @link_up: was link up at the time this was called + * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + * + * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a + * gigabit link is achieved to improve link quality. + */ + +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) +{ + s32 ret_val; + u16 phy_data, phy_saved_data, speed, duplex, i; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + if (link_up) { + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (speed == SPEED_1000) { + ret_val = e1000_1000Mb_check_cable_length(hw); + if (ret_val) + return ret_val; + } + } else { + if (hw->dsp_config_state == e1000_dsp_config_activated) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. 
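+ * (The code below writes 0x0003 to this register to disable the PHY
+ * transmitter, rewrites the DSP/EDAC coefficients, and finally writes the
+ * saved value back to re-enable transmission.)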
+ */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; + + ret_val = + e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->dsp_config_state = e1000_dsp_config_enabled; + } + + if (hw->ffe_config_state == e1000_ffe_config_active) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. + */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_DEFAULT); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->ffe_config_state = e1000_ffe_config_enabled; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_set_phy_mode - Set PHY to class A mode + * @hw: Struct containing variables accessed by shared code + * + * Assumes the following operations will follow to enable the new class mode. + * 1. Do a PHY soft reset + * 2. Restart auto-negotiation or force link. + */ +static s32 e1000_set_phy_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 eeprom_data; + + if ((hw->mac_type == e1000_82545_rev_3) && + (hw->media_type == e1000_media_type_copper)) { + ret_val = + e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, + &eeprom_data); + if (ret_val) + return ret_val; + + if ((eeprom_data != EEPROM_RESERVED_WORD) && + (eeprom_data & EEPROM_PHY_CLASS_A)) { + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, + 0x000B); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, + 0x8104); + if (ret_val) + return ret_val; + + hw->phy_reset_disable = false; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - set d3 link power state + * @hw: Struct containing variables accessed by shared code + * @active: true to enable lplu false to disable lplu. + * + * This function sets the lplu state according to the active flag. When + * activating lplu this function also disables smart speed and vise versa. + * lplu will not be activated unless the device autonegotiation advertisement + * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes. 
+ * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + */ +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + /* During driver activity LPLU should not be used or it will attain link + * from the lowest speeds starting from 10Mbps. The capability is used + * for Dx transitions and states + */ + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); + if (ret_val) + return ret_val; + } + + if (!active) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (hw->smart_speed == e1000_smart_speed_on) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } else if (hw->smart_speed == e1000_smart_speed_off) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* When LPLU is enabled we should disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + return E1000_SUCCESS; +} + +/** + * e1000_set_vco_speed + * @hw: Struct containing variables accessed by shared code + * + * Change VCO speed register to improve Bit Error Rate performance of SERDES. 
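+ *
+ * (The sequence below uses M88E1000_PHY_PAGE_SELECT to clear bit 8 of PHY
+ * register 30 on page 5 and set bit 11 of the same register on page 4,
+ * then restores the originally selected page.)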
+ */ +static s32 e1000_set_vco_speed(struct e1000_hw *hw) +{ + s32 ret_val; + u16 default_page = 0; + u16 phy_data; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + /* Set PHY register 30, page 5, bit 8 to 0 */ + + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Set PHY register 30, page 4, bit 11 to 1 */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PHY_VCO_REG_BIT11; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + +/** + * e1000_enable_mng_pass_thru - check for bmc pass through + * @hw: Struct containing variables accessed by shared code + * + * Verifies the hardware needs to allow ARPs to be processed by the host + * returns: - true/false + */ +u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + + if (hw->asf_firmware_present) { + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN) || + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + return false; + if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) + return true; + } + return false; +} + +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 i; + + /* Polarity reversal workaround for forced 10F/10H links. */ + + /* Disable the transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the NO link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be clear. 
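+ * (The register is read twice because the link-status bit is latched;
+ * the second read reflects the current link state.)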
+ */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) + break; + msleep(100); + } + + /* Recommended delay time after link has been lost */ + msleep(1000); + + /* Now we will re-enable th transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be set. + */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_get_auto_rd_done + * @hw: Struct containing variables accessed by shared code + * + * Check for EEPROM Auto Read bit done. + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) +{ + msleep(5); + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_cfg_done + * @hw: Struct containing variables accessed by shared code + * + * Checks if the PHY configuration is done + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) +{ + msleep(10); + return E1000_SUCCESS; +} diff --git a/devices/e1000/e1000_hw-6.1-ethercat.h b/devices/e1000/e1000_hw-6.1-ethercat.h new file mode 100644 index 00000000..fd17e4b7 --- /dev/null +++ b/devices/e1000/e1000_hw-6.1-ethercat.h @@ -0,0 +1,3085 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ + +/* e1000_hw.h + * Structures, enums, and macros for the MAC + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep-6.1-ethercat.h" + + +/* Forward declarations of structures used by the shared code */ +struct e1000_hw; +struct e1000_hw_stats; + +/* Enumerated types specific to the e1000 hardware */ +/* Media Access Controllers */ +typedef enum { + e1000_undefined = 0, + e1000_82542_rev2_0, + e1000_82542_rev2_1, + e1000_82543, + e1000_82544, + e1000_82540, + e1000_82545, + e1000_82545_rev_3, + e1000_82546, + e1000_ce4100, + e1000_82546_rev_3, + e1000_82541, + e1000_82541_rev_2, + e1000_82547, + e1000_82547_rev_2, + e1000_num_macs +} e1000_mac_type; + +typedef enum { + e1000_eeprom_uninitialized = 0, + e1000_eeprom_spi, + e1000_eeprom_microwire, + e1000_eeprom_flash, + e1000_eeprom_none, /* No NVM support */ + e1000_num_eeprom_types +} e1000_eeprom_type; + +/* Media Types */ +typedef enum { + e1000_media_type_copper = 0, + e1000_media_type_fiber = 1, + e1000_media_type_internal_serdes = 2, + e1000_num_media_types +} e1000_media_type; + +typedef enum { + e1000_10_half = 0, + e1000_10_full = 1, + e1000_100_half = 2, + e1000_100_full = 3 +} e1000_speed_duplex_type; + +/* Flow Control Settings */ +typedef enum { + E1000_FC_NONE = 0, + E1000_FC_RX_PAUSE = 1, + E1000_FC_TX_PAUSE = 2, + E1000_FC_FULL = 3, + E1000_FC_DEFAULT = 0xFF +} e1000_fc_type; + +struct e1000_shadow_ram { + u16 eeprom_word; + bool modified; +}; + +/* PCI bus types */ +typedef enum { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_reserved +} e1000_bus_type; + +/* PCI bus speeds */ +typedef enum { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_reserved +} e1000_bus_speed; + +/* PCI bus widths */ +typedef enum { + e1000_bus_width_unknown = 0, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +} e1000_bus_width; + +/* PHY status info structure and supporting enums */ +typedef enum { + e1000_cable_length_50 = 0, + e1000_cable_length_50_80, + e1000_cable_length_80_110, + e1000_cable_length_110_140, + e1000_cable_length_140, + e1000_cable_length_undefined = 0xFF +} e1000_cable_length; + +typedef enum { + e1000_gg_cable_length_60 = 0, + e1000_gg_cable_length_60_115 = 1, + e1000_gg_cable_length_115_150 = 2, + e1000_gg_cable_length_150 = 4 +} e1000_gg_cable_length; + +typedef enum { + e1000_igp_cable_length_10 = 10, + e1000_igp_cable_length_20 = 20, + e1000_igp_cable_length_30 = 30, + e1000_igp_cable_length_40 = 40, + e1000_igp_cable_length_50 = 50, + e1000_igp_cable_length_60 = 60, + e1000_igp_cable_length_70 = 70, + e1000_igp_cable_length_80 = 80, + e1000_igp_cable_length_90 = 90, + e1000_igp_cable_length_100 = 100, + e1000_igp_cable_length_110 = 110, + e1000_igp_cable_length_115 = 115, + e1000_igp_cable_length_120 = 120, + e1000_igp_cable_length_130 = 130, + e1000_igp_cable_length_140 = 140, + e1000_igp_cable_length_150 = 150, + e1000_igp_cable_length_160 = 160, + e1000_igp_cable_length_170 = 170, + e1000_igp_cable_length_180 = 180 +} e1000_igp_cable_length; + +typedef enum { + e1000_10bt_ext_dist_enable_normal = 0, + e1000_10bt_ext_dist_enable_lower, + e1000_10bt_ext_dist_enable_undefined = 0xFF +} e1000_10bt_ext_dist_enable; + +typedef enum { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +} e1000_rev_polarity; + +typedef enum { + e1000_downshift_normal = 0, + 
e1000_downshift_activated, + e1000_downshift_undefined = 0xFF +} e1000_downshift; + +typedef enum { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +} e1000_smart_speed; + +typedef enum { + e1000_polarity_reversal_enabled = 0, + e1000_polarity_reversal_disabled, + e1000_polarity_reversal_undefined = 0xFF +} e1000_polarity_reversal; + +typedef enum { + e1000_auto_x_mode_manual_mdi = 0, + e1000_auto_x_mode_manual_mdix, + e1000_auto_x_mode_auto1, + e1000_auto_x_mode_auto2, + e1000_auto_x_mode_undefined = 0xFF +} e1000_auto_x_mode; + +typedef enum { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +} e1000_1000t_rx_status; + +typedef enum { + e1000_phy_m88 = 0, + e1000_phy_igp, + e1000_phy_8211, + e1000_phy_8201, + e1000_phy_undefined = 0xFF +} e1000_phy_type; + +typedef enum { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +} e1000_ms_type; + +typedef enum { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active, + e1000_ffe_config_blocked +} e1000_ffe_config; + +typedef enum { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled, + e1000_dsp_config_activated, + e1000_dsp_config_undefined = 0xFF +} e1000_dsp_config; + +struct e1000_phy_info { + e1000_cable_length cable_length; + e1000_10bt_ext_dist_enable extended_10bt_distance; + e1000_rev_polarity cable_polarity; + e1000_downshift downshift; + e1000_polarity_reversal polarity_correction; + e1000_auto_x_mode mdix_mode; + e1000_1000t_rx_status local_rx; + e1000_1000t_rx_status remote_rx; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_eeprom_info { + e1000_eeprom_type type; + u16 word_size; + u16 opcode_bits; + u16 address_bits; + u16 delay_usec; + u16 page_size; +}; + +/* Flex ASF Information */ +#define E1000_HOST_IF_MAX_SIZE 2048 + +typedef enum { + e1000_byte_align = 0, + e1000_word_align = 1, + e1000_dword_align = 2 +} e1000_align_type; + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_EEPROM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_TYPE 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 + +#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \ + (((_value) & 0xff00) >> 8)) + +/* Function prototypes */ +/* Initialization */ +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_set_mac_type(struct e1000_hw *hw); +void e1000_set_media_type(struct e1000_hw *hw); + +/* Link Configuration */ +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex); +s32 e1000_force_mac_fc(struct e1000_hw *hw); + +/* PHY */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_reset(struct e1000_hw *hw); +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); + +/* EEPROM Functions */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw); + +/* MNG HOST IF functions */ +u32 
e1000_enable_mng_pass_thru(struct e1000_hw *hw); + +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ + +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_ICH_IAMT_MODE 0x2 +#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ + +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */ +#define E1000_VFTA_ENTRY_SHIFT 0x5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658 */ +}; +#ifdef __BIG_ENDIAN +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u16 vlan_id; + u8 reserved0; + u8 status; + u32 reserved1; + u8 checksum; + u8 reserved3; + u16 reserved2; +}; +#else +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; +#endif + +bool e1000_check_mng_mode(struct e1000_hw *hw); +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_read_mac_addr(struct e1000_hw *hw); + +/* Filters (multicast, vlan, receive) */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr); +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value); +void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); + +/* LED functions */ +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +s32 e1000_blink_led_start(struct e1000_hw *hw); + +/* Adaptive IFS Functions */ + +/* Everything else */ +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +void e1000_get_bus_info(struct e1000_hw *hw); +void e1000_pci_set_mwi(struct e1000_hw *hw); +void e1000_pci_clear_mwi(struct e1000_hw *hw); +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); +int e1000_pcix_get_mmrbc(struct e1000_hw *hw); +/* Port I/O is only supported on 82544 and newer */ +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value); + +#define E1000_READ_REG_IO(a, reg) \ + e1000_read_reg_io((a), E1000_##reg) +#define E1000_WRITE_REG_IO(a, reg, val) \ + e1000_write_reg_io((a), E1000_##reg, val) + +/* PCI Device IDs */ +#define E1000_DEV_ID_82542 0x1000 +#define E1000_DEV_ID_82543GC_FIBER 0x1001 +#define E1000_DEV_ID_82543GC_COPPER 0x1004 +#define E1000_DEV_ID_82544EI_COPPER 0x1008 +#define E1000_DEV_ID_82544EI_FIBER 0x1009 +#define E1000_DEV_ID_82544GC_COPPER 0x100C +#define E1000_DEV_ID_82544GC_LOM 0x100D +#define E1000_DEV_ID_82540EM 0x100E +#define 
E1000_DEV_ID_82540EM_LOM 0x1015 +#define E1000_DEV_ID_82540EP_LOM 0x1016 +#define E1000_DEV_ID_82540EP 0x1017 +#define E1000_DEV_ID_82540EP_LP 0x101E +#define E1000_DEV_ID_82545EM_COPPER 0x100F +#define E1000_DEV_ID_82545EM_FIBER 0x1011 +#define E1000_DEV_ID_82545GM_COPPER 0x1026 +#define E1000_DEV_ID_82545GM_FIBER 0x1027 +#define E1000_DEV_ID_82545GM_SERDES 0x1028 +#define E1000_DEV_ID_82546EB_COPPER 0x1010 +#define E1000_DEV_ID_82546EB_FIBER 0x1012 +#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define E1000_DEV_ID_82541EI 0x1013 +#define E1000_DEV_ID_82541EI_MOBILE 0x1018 +#define E1000_DEV_ID_82541ER_LOM 0x1014 +#define E1000_DEV_ID_82541ER 0x1078 +#define E1000_DEV_ID_82547GI 0x1075 +#define E1000_DEV_ID_82541GI 0x1076 +#define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82541GI_LF 0x107C +#define E1000_DEV_ID_82546GB_COPPER 0x1079 +#define E1000_DEV_ID_82546GB_FIBER 0x107A +#define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82547EI_MOBILE 0x101A +#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E + +#define NODE_ADDRESS_SIZE 6 + +/* MAC decode size is 128K - This is the size of BAR0 */ +#define MAC_DECODE_SIZE (128 * 1024) + +#define E1000_82542_2_0_REV_ID 2 +#define E1000_82542_2_1_REV_ID 3 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* The sizes (in bytes) of a ethernet packet */ +#define ENET_HEADER_SIZE 14 +#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ +#define ETHERNET_FCS_SIZE 4 +#define MINIMUM_ETHERNET_PACKET_SIZE \ + (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE) +#define CRC_LENGTH ETHERNET_FCS_SIZE +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* 802.1q VLAN Packet Sizes */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */ + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ +#define ETHERNET_IP_TYPE 0x0800 /* IP packets */ +#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */ + +/* Packet Header defines */ +#define IP_PROTOCOL_TCP 6 +#define IP_PROTOCOL_UDP 0x11 + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + */ +#define POLL_IMS_ENABLE_MASK ( \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ) + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. We + * reserve one of these spots for our directed address, allowing us room for + * E1000_RAR_ENTRIES - 1 multicast addresses. 
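+ * With E1000_RAR_ENTRIES defined as 15 below, that leaves 14 exact-match
+ * slots; further multicast addresses are hashed into the Multicast Table
+ * Array (MTA) via e1000_hash_mc_addr()/e1000_mta_set().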
+ */ +#define E1000_RAR_ENTRIES 15 + +#define MIN_NUMBER_OF_DESCRIPTORS 8 +#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 + +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of buffers 1-3 */ + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ 
+ E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; /* */ + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; /* */ + u8 cmd; /* */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; /* */ + } fields; + } upper; +}; + +/* Filters */ +#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ +#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address Register */ +struct e1000_rar { + volatile __le32 low; /* receive address low */ + volatile __le32 high; /* receive address high */ +}; + +/* Number of entries in the 
Multicast Table Array (MTA). */ +#define E1000_NUM_MTA_REGISTERS 128 + +/* IPv4 Address Table Entry */ +struct e1000_ipv4_at_entry { + volatile u32 ipv4_addr; /* IP Address (RW) */ + volatile u32 reserved; +}; + +/* Four wakeup IP addresses are supported */ +#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 +#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX +#define E1000_IP6AT_SIZE 1 + +/* IPv6 Address Table Entry */ +struct e1000_ipv6_at_entry { + volatile u8 ipv6_addr[16]; +}; + +/* Flexible Filter Length Table Entry */ +struct e1000_fflt_entry { + volatile u32 length; /* Flexible Filter Length (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Mask Table Entry */ +struct e1000_ffmt_entry { + volatile u32 mask; /* Flexible Filter Mask (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Value Table Entry */ +struct e1000_ffvt_entry { + volatile u32 value; /* Flexible Filter Value (RW) */ + volatile u32 reserved; +}; + +/* Four Flexible Filters are supported */ +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX + +#define E1000_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Register Set. (82543, 82544) + * + * Registers are defined to be 32 bits and should be accessed as 32 bit values. + * These registers are physically located on the NIC, but are mapped into the + * host memory address space. + * + * RW - register is both readable and writable + * RO - register is read only + * WO - register is write only + * R/clr - register is read only and is cleared when read + * A - register array + */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ + +#define INTEL_CE_GBE_MDIO_RCOMP_BASE (hw->ce4100_gbe_mdio_base_virt) +#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) +#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) +#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) +#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC) +#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20) +#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24) + +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ + +/* Auxiliary Control Register. 
This register is CE4100 specific, + * RMII/RGMII function is switched by this register - RW + * Following are bits definitions of the Auxiliary Control Register + */ +#define E1000_CTL_AUX 0x000E0 +#define E1000_CTL_AUX_END_SEL_SHIFT 10 +#define E1000_CTL_AUX_ENDIANESS_SHIFT 8 +#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0 + +/* descriptor and packet transfer use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use CTL_AUX.ENDIANESS, packet use default */ +#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use default, packet use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT) +/* all use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT) + +#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT) +#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT) + +/* LW little endian, Byte big endian */ +#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT) + +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define FEXTNVM_SW_CONFIG 0x0001 +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_FLASH_UPDATES 1000 +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* RX Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* RX Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* RX 
Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* RX Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* RX Data FIFO Packet Count - RW */ +#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ +#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ +#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ +#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ +#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ +#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ +#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */ +#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ +#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ +#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ +#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ +#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ +#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ +#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ +#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ +#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */ +#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */ +#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */ +#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */ +#define E1000_TDT 0x03818 /* TX Descripotr Tail - RW */ +#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */ +#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ +#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ +#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ +#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ +#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ +#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ +#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ +#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ +#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define 
E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 
0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ + +#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ + +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* Register Set (82542) + * + * Some of the 82542 registers are located at different offsets than they are + * in more current versions of the 8254x. Despite the difference in location, + * the registers function in the same manner. 
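+ * For example, the Receive Address array sits at 0x05400 (E1000_RA) on later
+ * parts but at 0x00040 (E1000_82542_RA) on the 82542; most of the remaining
+ * definitions below simply alias the common register offsets.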
+ */ +#define E1000_82542_CTL_AUX E1000_CTL_AUX +#define E1000_82542_CTRL E1000_CTRL +#define E1000_82542_CTRL_DUP E1000_CTRL_DUP +#define E1000_82542_STATUS E1000_STATUS +#define E1000_82542_EECD E1000_EECD +#define E1000_82542_EERD E1000_EERD +#define E1000_82542_CTRL_EXT E1000_CTRL_EXT +#define E1000_82542_FLA E1000_FLA +#define E1000_82542_MDIC E1000_MDIC +#define E1000_82542_SCTL E1000_SCTL +#define E1000_82542_FEXTNVM E1000_FEXTNVM +#define E1000_82542_FCAL E1000_FCAL +#define E1000_82542_FCAH E1000_FCAH +#define E1000_82542_FCT E1000_FCT +#define E1000_82542_VET E1000_VET +#define E1000_82542_RA 0x00040 +#define E1000_82542_ICR E1000_ICR +#define E1000_82542_ITR E1000_ITR +#define E1000_82542_ICS E1000_ICS +#define E1000_82542_IMS E1000_IMS +#define E1000_82542_IMC E1000_IMC +#define E1000_82542_RCTL E1000_RCTL +#define E1000_82542_RDTR 0x00108 +#define E1000_82542_RDFH E1000_RDFH +#define E1000_82542_RDFT E1000_RDFT +#define E1000_82542_RDFHS E1000_RDFHS +#define E1000_82542_RDFTS E1000_RDFTS +#define E1000_82542_RDFPC E1000_RDFPC +#define E1000_82542_RDBAL 0x00110 +#define E1000_82542_RDBAH 0x00114 +#define E1000_82542_RDLEN 0x00118 +#define E1000_82542_RDH 0x00120 +#define E1000_82542_RDT 0x00128 +#define E1000_82542_RDTR0 E1000_82542_RDTR +#define E1000_82542_RDBAL0 E1000_82542_RDBAL +#define E1000_82542_RDBAH0 E1000_82542_RDBAH +#define E1000_82542_RDLEN0 E1000_82542_RDLEN +#define E1000_82542_RDH0 E1000_82542_RDH +#define E1000_82542_RDT0 E1000_82542_RDT +#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication + * RX Control - RW */ +#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) +#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ +#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ +#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ +#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ +#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ +#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ +#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ +#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ +#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ +#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ +#define E1000_82542_RDTR1 0x00130 +#define E1000_82542_RDBAL1 0x00138 +#define E1000_82542_RDBAH1 0x0013C +#define E1000_82542_RDLEN1 0x00140 +#define E1000_82542_RDH1 0x00148 +#define E1000_82542_RDT1 0x00150 +#define E1000_82542_FCRTH 0x00160 +#define E1000_82542_FCRTL 0x00168 +#define E1000_82542_FCTTV E1000_FCTTV +#define E1000_82542_TXCW E1000_TXCW +#define E1000_82542_RXCW E1000_RXCW +#define E1000_82542_MTA 0x00200 +#define E1000_82542_TCTL E1000_TCTL +#define E1000_82542_TCTL_EXT E1000_TCTL_EXT +#define E1000_82542_TIPG E1000_TIPG +#define E1000_82542_TDBAL 0x00420 +#define E1000_82542_TDBAH 0x00424 +#define E1000_82542_TDLEN 0x00428 +#define E1000_82542_TDH 0x00430 +#define E1000_82542_TDT 0x00438 +#define E1000_82542_TIDV 0x00440 +#define E1000_82542_TBT E1000_TBT +#define E1000_82542_AIT E1000_AIT +#define E1000_82542_VFTA 0x00600 +#define E1000_82542_LEDCTL E1000_LEDCTL +#define E1000_82542_PBA E1000_PBA +#define E1000_82542_PBS E1000_PBS +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_EEARBC E1000_EEARBC +#define E1000_82542_FLASHT E1000_FLASHT +#define E1000_82542_EEWR E1000_EEWR +#define E1000_82542_FLSWCTL E1000_FLSWCTL +#define E1000_82542_FLSWDATA 
E1000_FLSWDATA +#define E1000_82542_FLSWCNT E1000_FLSWCNT +#define E1000_82542_FLOP E1000_FLOP +#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL +#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE +#define E1000_82542_PHY_CTRL E1000_PHY_CTRL +#define E1000_82542_ERT E1000_ERT +#define E1000_82542_RXDCTL E1000_RXDCTL +#define E1000_82542_RXDCTL1 E1000_RXDCTL1 +#define E1000_82542_RADV E1000_RADV +#define E1000_82542_RSRPD E1000_RSRPD +#define E1000_82542_TXDMAC E1000_TXDMAC +#define E1000_82542_KABGTXD E1000_KABGTXD +#define E1000_82542_TDFHS E1000_TDFHS +#define E1000_82542_TDFTS E1000_TDFTS +#define E1000_82542_TDFPC E1000_TDFPC +#define E1000_82542_TXDCTL E1000_TXDCTL +#define E1000_82542_TADV E1000_TADV +#define E1000_82542_TSPMT E1000_TSPMT +#define E1000_82542_CRCERRS E1000_CRCERRS +#define E1000_82542_ALGNERRC E1000_ALGNERRC +#define E1000_82542_SYMERRS E1000_SYMERRS +#define E1000_82542_RXERRC E1000_RXERRC +#define E1000_82542_MPC E1000_MPC +#define E1000_82542_SCC E1000_SCC +#define E1000_82542_ECOL E1000_ECOL +#define E1000_82542_MCC E1000_MCC +#define E1000_82542_LATECOL E1000_LATECOL +#define E1000_82542_COLC E1000_COLC +#define E1000_82542_DC E1000_DC +#define E1000_82542_TNCRS E1000_TNCRS +#define E1000_82542_SEC E1000_SEC +#define E1000_82542_CEXTERR E1000_CEXTERR +#define E1000_82542_RLEC E1000_RLEC +#define E1000_82542_XONRXC E1000_XONRXC +#define E1000_82542_XONTXC E1000_XONTXC +#define E1000_82542_XOFFRXC E1000_XOFFRXC +#define E1000_82542_XOFFTXC E1000_XOFFTXC +#define E1000_82542_FCRUC E1000_FCRUC +#define E1000_82542_PRC64 E1000_PRC64 +#define E1000_82542_PRC127 E1000_PRC127 +#define E1000_82542_PRC255 E1000_PRC255 +#define E1000_82542_PRC511 E1000_PRC511 +#define E1000_82542_PRC1023 E1000_PRC1023 +#define E1000_82542_PRC1522 E1000_PRC1522 +#define E1000_82542_GPRC E1000_GPRC +#define E1000_82542_BPRC E1000_BPRC +#define E1000_82542_MPRC E1000_MPRC +#define E1000_82542_GPTC E1000_GPTC +#define E1000_82542_GORCL E1000_GORCL +#define E1000_82542_GORCH E1000_GORCH +#define E1000_82542_GOTCL E1000_GOTCL +#define E1000_82542_GOTCH E1000_GOTCH +#define E1000_82542_RNBC E1000_RNBC +#define E1000_82542_RUC E1000_RUC +#define E1000_82542_RFC E1000_RFC +#define E1000_82542_ROC E1000_ROC +#define E1000_82542_RJC E1000_RJC +#define E1000_82542_MGTPRC E1000_MGTPRC +#define E1000_82542_MGTPDC E1000_MGTPDC +#define E1000_82542_MGTPTC E1000_MGTPTC +#define E1000_82542_TORL E1000_TORL +#define E1000_82542_TORH E1000_TORH +#define E1000_82542_TOTL E1000_TOTL +#define E1000_82542_TOTH E1000_TOTH +#define E1000_82542_TPR E1000_TPR +#define E1000_82542_TPT E1000_TPT +#define E1000_82542_PTC64 E1000_PTC64 +#define E1000_82542_PTC127 E1000_PTC127 +#define E1000_82542_PTC255 E1000_PTC255 +#define E1000_82542_PTC511 E1000_PTC511 +#define E1000_82542_PTC1023 E1000_PTC1023 +#define E1000_82542_PTC1522 E1000_PTC1522 +#define E1000_82542_MPTC E1000_MPTC +#define E1000_82542_BPTC E1000_BPTC +#define E1000_82542_TSCTC E1000_TSCTC +#define E1000_82542_TSCTFC E1000_TSCTFC +#define E1000_82542_RXCSUM E1000_RXCSUM +#define E1000_82542_WUC E1000_WUC +#define E1000_82542_WUFC E1000_WUFC +#define E1000_82542_WUS E1000_WUS +#define E1000_82542_MANC E1000_MANC +#define E1000_82542_IPAV E1000_IPAV +#define E1000_82542_IP4AT E1000_IP4AT +#define E1000_82542_IP6AT E1000_IP6AT +#define E1000_82542_WUPL E1000_WUPL +#define E1000_82542_WUPM E1000_WUPM +#define E1000_82542_FFLT E1000_FFLT +#define E1000_82542_TDFH 0x08010 +#define E1000_82542_TDFT 0x08018 +#define E1000_82542_FFMT E1000_FFMT +#define 
E1000_82542_FFVT E1000_FFVT +#define E1000_82542_HOST_IF E1000_HOST_IF +#define E1000_82542_IAM E1000_IAM +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_PSRCTL E1000_PSRCTL +#define E1000_82542_RAID E1000_RAID +#define E1000_82542_TARC0 E1000_TARC0 +#define E1000_82542_TDBAL1 E1000_TDBAL1 +#define E1000_82542_TDBAH1 E1000_TDBAH1 +#define E1000_82542_TDLEN1 E1000_TDLEN1 +#define E1000_82542_TDH1 E1000_TDH1 +#define E1000_82542_TDT1 E1000_TDT1 +#define E1000_82542_TXDCTL1 E1000_TXDCTL1 +#define E1000_82542_TARC1 E1000_TARC1 +#define E1000_82542_RFCTL E1000_RFCTL +#define E1000_82542_GCR E1000_GCR +#define E1000_82542_GSCL_1 E1000_GSCL_1 +#define E1000_82542_GSCL_2 E1000_GSCL_2 +#define E1000_82542_GSCL_3 E1000_GSCL_3 +#define E1000_82542_GSCL_4 E1000_GSCL_4 +#define E1000_82542_FACTPS E1000_FACTPS +#define E1000_82542_SWSM E1000_SWSM +#define E1000_82542_FWSM E1000_FWSM +#define E1000_82542_FFLT_DBG E1000_FFLT_DBG +#define E1000_82542_IAC E1000_IAC +#define E1000_82542_ICRXPTC E1000_ICRXPTC +#define E1000_82542_ICRXATC E1000_ICRXATC +#define E1000_82542_ICTXPTC E1000_ICTXPTC +#define E1000_82542_ICTXATC E1000_ICTXATC +#define E1000_82542_ICTXQEC E1000_ICTXQEC +#define E1000_82542_ICTXQMTC E1000_ICTXQMTC +#define E1000_82542_ICRXDMTC E1000_ICRXDMTC +#define E1000_82542_ICRXOC E1000_ICRXOC +#define E1000_82542_HICR E1000_HICR + +#define E1000_82542_CPUVEC E1000_CPUVEC +#define E1000_82542_MRQC E1000_MRQC +#define E1000_82542_RETA E1000_RETA +#define E1000_82542_RSSRK E1000_RSSRK +#define E1000_82542_RSSIM E1000_RSSIM +#define E1000_82542_RSSIR E1000_RSSIR +#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA +#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 txerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorcl; + u64 gorch; + u64 gotcl; + u64 gotch; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rlerrc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 torl; + u64 torh; + u64 totl; + u64 toth; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +/* Structure containing variables used by the shared code (e1000_hw.c) */ +struct e1000_hw { + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + void __iomem *ce4100_gbe_mdio_base_virt; + e1000_mac_type mac_type; + e1000_phy_type phy_type; + u32 phy_init_script; + e1000_media_type media_type; + void *back; + struct e1000_shadow_ram *eeprom_shadow_ram; + u32 flash_bank_size; + u32 flash_base_addr; + e1000_fc_type fc; + e1000_bus_speed bus_speed; + e1000_bus_width bus_width; + e1000_bus_type bus_type; + struct e1000_eeprom_info eeprom; + e1000_ms_type master_slave; + e1000_ms_type original_master_slave; + e1000_ffe_config ffe_config_state; + u32 asf_firmware_present; + u32 eeprom_semaphore_present; + unsigned long io_base; + u32 phy_id; + u32 phy_revision; + u32 phy_addr; + u32 original_fc; + u32 txcw; + u32 autoneg_failed; + u32 max_frame_size; + u32 
min_frame_size; + u32 mc_filter_type; + u32 num_mc_addrs; + u32 collision_delta; + u32 tx_packet_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + bool tx_pkt_filtering; + struct e1000_host_mng_dhcp_cookie mng_cookie; + u16 phy_spd_default; + u16 autoneg_advertised; + u16 pci_cmd_word; + u16 fc_high_water; + u16 fc_low_water; + u16 fc_pause_time; + u16 current_ifs_val; + u16 ifs_min_val; + u16 ifs_max_val; + u16 ifs_step_size; + u16 ifs_ratio; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 autoneg; + u8 mdix; + u8 forced_speed_duplex; + u8 wait_autoneg_complete; + u8 dma_fairness; + u8 mac_addr[NODE_ADDRESS_SIZE]; + u8 perm_mac_addr[NODE_ADDRESS_SIZE]; + bool disable_polarity_correction; + bool speed_downgraded; + e1000_smart_speed smart_speed; + e1000_dsp_config dsp_config_state; + bool get_link_status; + bool serdes_has_link; + bool tbi_compatibility_en; + bool tbi_compatibility_on; + bool laa_is_present; + bool phy_reset_disable; + bool initialize_hw_bits_disable; + bool fc_send_xon; + bool fc_strict_ieee; + bool report_tx_early; + bool adaptive_ifs; + bool ifs_params_forced; + bool in_ifs_mode; + bool mng_reg_access_disabled; + bool leave_av_bit_off; + bool bad_tx_carr_stats_fd; + bool has_smbus; +}; + +#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ +#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ +#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ +#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ +/* Register Bit Masks */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion + by EEPROM/Flash */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ + +/* EEPROM/Flash Control */ +#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ +#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ +#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ +#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_EEPROM_GRANT_ATTEMPTS +#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_STM_OPCODE 0xDB00 +#define E1000_HICR_FW_RESET 0xC0 + +#define E1000_SHADOW_RAM_WORDS 2048 +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC0 + +/* EEPROM Read */ +#define E1000_EERD_START 0x00000001 /* Start Read */ +#define E1000_EERD_DONE 0x00000010 /* Read Done */ +#define E1000_EERD_ADDR_SHIFT 8 +#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */ +#define E1000_EERD_DATA_SHIFT 16 +#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */ + +/* SPI EEPROM Status Register */ +#define EEPROM_STATUS_RDY_SPI 0x01 +#define EEPROM_STATUS_WEN_SPI 
0x02 +#define EEPROM_STATUS_BP0_SPI 0x04 +#define EEPROM_STATUS_BP1_SPI 0x08 +#define EEPROM_STATUS_WPEN_SPI 0x80 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 + +#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000 +#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000 +#define INTEL_CE_GBE_MDIC_GO 0x80000000 +#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000 + +#define E1000_KUMCTRLSTA_MASK 0x0000FFFF +#define E1000_KUMCTRLSTA_OFFSET 0x001F0000 +#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KUMCTRLSTA_REN 0x00200000 + +#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 +#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001 +#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 +#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003 +#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 
0x00000004 +#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 +#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 +#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E +#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F + +/* FIFO Control */ +#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008 +#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 + +/* In-Band Control */ +#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500 +#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 + +/* Half-Duplex Control */ +#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 +#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 + +#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E + +#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000 +#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000 + +#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000 +#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000 +#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 +#define E1000_PHY_CTRL_B2B_EN 0x00000080 + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Receive Address */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Clear */ +#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMC_SRPD E1000_ICR_SRPD +#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMC_DSW E1000_ICR_DSW +#define E1000_IMC_PHYINT E1000_ICR_PHYINT +#define E1000_IMC_EPRST E1000_ICR_EPRST + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define 
E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SW_W_SYNC definitions */ +#define E1000_SWFW_EEP_SM 0x0001 +#define E1000_SWFW_PHY0_SM 0x0002 +#define E1000_SWFW_PHY1_SM 0x0004 +#define E1000_SWFW_MAC_CSR_SM 0x0008 + +/* Receive Descriptor */ +#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ +#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ +#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */ +#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */ +#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */ + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Receive Descriptor Control */ +#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ +#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ +#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */ +#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 
0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. + still to be processed. */ +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ +/* Extended Transmit Control */ +#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ + +/* Receive Checksum Control */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Multiple Receive Queue Control */ +#define E1000_MRQC_ENABLE_MASK 0x00000003 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 
/* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */ +#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */ +#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */ +#define E1000_WUS_BC 0x00000010 /* Broadcast Received */ +#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */ +#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */ +#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */ +#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */ +#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */ +#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery + * Filtering */ +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host + * memory */ +#define 
E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address + * filtering */ +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* FW Semaphore Register */ +#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ + +#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ +#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ +#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ +#define E1000_FWSM_SKUEL_SHIFT 29 +#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ +#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ +#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ +#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */ + +/* FFLT Debug Register */ +#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ + +typedef enum { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_interface_only +} e1000_mng_mode; + +/* Host Interface Control Register */ +#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ +#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done + * to put command in RAM */ +#define E1000_HICR_SV 0x00000004 /* Status Validity */ +#define E1000_HICR_FWR 0x00000080 /* FW reset. 
Set by the Host */ + +/* Host Interface Command Interface - Address range 0x8800-0x8EFF */ +#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ + +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; /* I/F bits for command, status for return */ + u8 checksum; +}; +struct e1000_host_command_info { + struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ +}; + +/* Host SMB register #0 */ +#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ +#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ +#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ +#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ + +/* Host SMB register #1 */ +#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN +#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN +#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT +#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT + +/* FW Status Register */ +#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ + +/* Wake Up Packet Length */ +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ + +#define E1000_MDALIGN 4096 + +/* PCI-Ex registers*/ + +/* PCI-Ex Control Register */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +/* Function Active and Power State to MNG */ +#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 +#define E1000_FACTPS_LAN0_VALID 0x00000004 +#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008 +#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0 +#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6 +#define E1000_FACTPS_LAN1_VALID 0x00000100 +#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200 +#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000 +#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12 +#define E1000_FACTPS_IDE_ENABLE 0x00004000 +#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000 +#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000 +#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18 +#define E1000_FACTPS_SP_ENABLE 0x00100000 +#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000 +#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000 +#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24 +#define E1000_FACTPS_IPMI_ENABLE 0x04000000 +#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000 +#define E1000_FACTPS_MNGCG 0x20000000 +#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000 +#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000 + +/* PCI-Ex Config Space */ +#define PCI_EX_LINK_STATUS 0x12 +#define PCI_EX_LINK_WIDTH_MASK 0x3F0 +#define PCI_EX_LINK_WIDTH_SHIFT 4 + +/* EEPROM Commands - Microwire */ +#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode 
*/ +#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM Commands - SPI */ +#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ +#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ +#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ +#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ +#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Size definitions */ +#define EEPROM_WORD_SIZE_SHIFT 6 +#define EEPROM_SIZE_SHIFT 10 +#define EEPROM_SIZE_MASK 0x1C00 + +/* EEPROM Word Offsets */ +#define EEPROM_COMPAT 0x0003 +#define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 +#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ +#define EEPROM_PHY_CLASS_WORD 0x0007 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 +#define EEPROM_INIT_3GIO_3 0x001A +#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 +#define EEPROM_CFG 0x0012 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ +#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */ +#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F + +/* Mask bit for PHY class in Word 7 of the EEPROM */ +#define EEPROM_PHY_CLASS_A 0x8000 + +/* Mask bits for fields in Word 0x0a of the EEPROM */ +#define EEPROM_WORD0A_ILOS 0x0010 +#define EEPROM_WORD0A_SWDPIO 0x01E0 +#define EEPROM_WORD0A_LRST 0x0200 +#define EEPROM_WORD0A_FD 0x0400 +#define EEPROM_WORD0A_66MHZ 0x0800 + +/* Mask bits for fields in Word 0x0f of the EEPROM */ +#define EEPROM_WORD0F_PAUSE_MASK 0x3000 +#define EEPROM_WORD0F_PAUSE 0x1000 +#define EEPROM_WORD0F_ASM_DIR 0x2000 +#define EEPROM_WORD0F_ANE 0x0800 +#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 +#define EEPROM_WORD0F_LPLU 0x0001 + +/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */ +#define EEPROM_WORD1020_GIGA_DISABLE 0x0010 +#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008 + +/* Mask bits for fields in Word 0x1a of the EEPROM */ +#define EEPROM_WORD1A_ASPM_MASK 0x000C + 
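/*
 * Illustrative sketch (editorial aside, not part of the patched header): the
 * ID LED settings word read from EEPROM_ID_LED_SETTINGS packs one 4-bit mode
 * per LED, which is why ID_LED_DEFAULT above evaluates to
 *
 *     (ID_LED_OFF1_ON2  << 12) | (ID_LED_OFF1_OFF2 << 8) |
 *     (ID_LED_DEF1_DEF2 <<  4) |  ID_LED_DEF1_DEF2
 *   = 0x8000 | 0x0900 | 0x0010 | 0x0001 = 0x8911
 *
 * A hypothetical helper (name not taken from the driver) extracting one such
 * nibble might look like:
 */
static inline u16 e1000_id_led_field(u16 id_led_word, unsigned int led)
{
	return (id_led_word >> (led * 4)) & 0xF;	/* 4-bit mode per LED */
}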
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* EEPROM Map defines (WORD OFFSETS)*/ +#define EEPROM_NODE_ADDRESS_BYTE_0 0 +#define EEPROM_PBA_BYTE_1 8 + +#define EEPROM_RESERVED_WORD 0xFFFF + +/* EEPROM Map Sizes (Byte Counts) */ +#define PBA_SIZE 4 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +/* Collision distance is a 0-based value that applies to + * half-duplex-capable hardware only. */ +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLLISION_DISTANCE_82542 64 +#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_COLD_SHIFT 12 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define E1000_TXDMAC_DPP 0x00000001 + +/* Adaptive IFS defines */ +#define TX_THRESHOLD_START 8 +#define TX_THRESHOLD_INCREMENT 10 +#define TX_THRESHOLD_DECREMENT 1 +#define TX_THRESHOLD_STOP 190 +#define TX_THRESHOLD_DISABLE 0 +#define TX_THRESHOLD_TIMER_MS 10000 +#define MIN_NUM_XMITS 1000 +#define IFS_MAX 80 +#define IFS_STEP 10 +#define IFS_MIN 40 +#define IFS_RATIO 4 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 +#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 +#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 +#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000 + +#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF +#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ +#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ +#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ + +#define E1000_PBS_16K E1000_PBA_16K + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* The historical defaults for the flow control values are given below. 
*/ +#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ +#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ +#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ + +/* PCIX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 + +/* Number of bits required to shift right the "pause" bits from the + * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register. + */ +#define PAUSE_SHIFT 5 + +/* Number of bits required to shift left the "SWDPIO" bits from the + * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register. + */ +#define SWDPIO_SHIFT 17 + +/* Number of bits required to shift left the "SWDPIO_EXT" bits from the + * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register. + */ +#define SWDPIO__EXT_SHIFT 4 + +/* Number of bits required to shift left the "ILOS" bit from the EEPROM + * (bit 4) to the "ILOS" (bit 7) field in the CTRL register. + */ +#define ILOS_SHIFT 3 + +#define RECEIVE_BUFFER_ALIGN_SIZE (256) + +/* Number of milliseconds we wait for auto-negotiation to complete */ +#define LINK_UP_TIMEOUT 500 + +/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ +#define AUTO_READ_DONE_TIMEOUT 10 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 + +#define E1000_TX_BUFFER_SIZE ((u32)1514) + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +/* TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the RX descriptor with EOP set + * error = the 8 bit error field of the RX descriptor with EOP set + * length = the sum of all the length fields of the RX descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * max_frame_length = the maximum frame length we want to accept. + * min_frame_length = the minimum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \ + ((adapter)->tbi_compatibility_on && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= ((adapter)->max_frame_size + 1))) : \ + (((length) > (adapter)->min_frame_size) && \ + ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1))))) + +/* Structures, enums, and macros for the PHY */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CTRL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +#define IGP01E1000_IEEE_REGS_PAGE 0x0000 +#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 +#define IGP01E1000_IEEE_FORCE_GIGA 0x0140 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ + +/* IGP01E1000 AGC Registers - stores the cable length values*/ +#define IGP01E1000_PHY_AGC_A 0x1172 +#define IGP01E1000_PHY_AGC_B 0x1272 +#define IGP01E1000_PHY_AGC_C 0x1472 +#define IGP01E1000_PHY_AGC_D 0x1872 + +/* IGP02E1000 AGC Registers for cable length values */ +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +/* IGP01E1000 DSP Reset Register */ +#define IGP01E1000_PHY_DSP_RESET 0x1F33 +#define IGP01E1000_PHY_DSP_SET 0x1F71 +#define IGP01E1000_PHY_DSP_FFE 0x1F35 + +#define IGP01E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_CHANNEL_NUM 4 + +#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271
+#define IGP01E1000_PHY_AGC_PARAM_C 0x1471
+#define IGP01E1000_PHY_AGC_PARAM_D 0x1871
+
+#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000
+#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000
+
+#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890
+#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000
+#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004
+#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069
+
+#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A
+/* IGP01E1000 PCS Initialization register - stores the polarity status when
+ * speed = 1000 Mbps. */
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5
+
+#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext.
status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */ + +/* Next Page TX Register */ +#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* Link Partner Next Page Register */ +#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */ +#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 
/* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +/* Extended Status Register */ +#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */ +#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */ + +#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */ + /* (0=enable, 1=disable) */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. 
+ */ +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080 + /* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 + /* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5 +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; + * 3=110-140M;4=>140M */ +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_REV_POLARITY_SHIFT 1 +#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5 +#define M88E1000_PSSR_MDIX_SHIFT 6 +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* IGP01E1000 Specific Port Config Register - R/W */ +#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 +#define IGP01E1000_PSCFR_PRE_EN 0x0020 +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 +#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100 +#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400 +#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000 + +/* IGP01E1000 Specific Port Status Register - R/O */ +#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */ +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C +#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200 +#define IGP01E1000_PSSR_LINK_UP 0x0400 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */ +#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000 +#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */ +#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */ + +/* IGP01E1000 Specific Port Control Register - R/W */ +#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010 +#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200 +#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400 +#define IGP01E1000_PSCR_FLIP_CHIP 0x0800 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */ + +/* IGP01E1000 Specific Port Link Health Register */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000 +#define IGP01E1000_PLHR_MASTER_FAULT 0x2000 +#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000 +#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */ +#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_0 0x0100 +#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040 +#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010 +#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008 +#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004 +#define 
IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002 +#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001 + +/* IGP01E1000 Channel Quality Register */ +#define IGP01E1000_MSE_CHANNEL_D 0x000F +#define IGP01E1000_MSE_CHANNEL_C 0x00F0 +#define IGP01E1000_MSE_CHANNEL_B 0x0F00 +#define IGP01E1000_MSE_CHANNEL_A 0xF000 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ + +/* IGP01E1000 DSP reset macros */ +#define DSP_RESET_ENABLE 0x0 +#define DSP_RESET_DISABLE 0x2 +#define E1000_MAX_DSP_RESETS 10 + +/* IGP01E1000 & IGP02E1000 AGC Registers */ + +#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ + +/* IGP02E1000 AGC Register Length 9-bit mask */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F + +/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ +#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 +#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113 + +/* The precision error of the cable length is +/- 10 meters */ +#define IGP01E1000_AGC_RANGE 10 +#define IGP02E1000_AGC_RANGE 15 + +/* IGP01E1000 PCS Initialization register */ +/* bits 3:6 in the PCS registers stores the channels polarity */ +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +/* IGP01E1000 GMII FIFO Register */ +#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed + * on Link-Up */ +#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */ + +/* IGP01E1000 Analog Register */ +#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 +#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 +#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC +#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE + +#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 +#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 +#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 +#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 +#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 + +#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 +#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 +#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 +#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 + +/* Bit definitions for valid PHY IDs. */ +/* I = Integrated + * E = External + */ +#define M88_VENDOR 0x0141 +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID +#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1118_E_PHY_ID 0x01410E40 +#define L1LXT971A_PHY_ID 0x001378E0 + +#define RTL8211B_PHY_ID 0x001CC910 +#define RTL8201N_PHY_ID 0x8200 +#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */ +#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */ + +/* Bits... 
+ * 15-5: page + * 4-0: register offset + */ +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) \ + (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) + +#define IGP3_PHY_PORT_CTRL \ + PHY_REG(769, 17) /* Port General Configuration */ +#define IGP3_PHY_RATE_ADAPT_CTRL \ + PHY_REG(769, 25) /* Rate Adapter Control Register */ + +#define IGP3_KMRN_FIFO_CTRL_STATS \ + PHY_REG(770, 16) /* KMRN FIFO's control/status register */ +#define IGP3_KMRN_POWER_MNG_CTRL \ + PHY_REG(770, 17) /* KMRN Power Management Control Register */ +#define IGP3_KMRN_INBAND_CTRL \ + PHY_REG(770, 18) /* KMRN Inband Control Register */ +#define IGP3_KMRN_DIAG \ + PHY_REG(770, 19) /* KMRN Diagnostic register */ +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ +#define IGP3_KMRN_ACK_TIMEOUT \ + PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ + +#define IGP3_VR_CTRL \ + PHY_REG(776, 18) /* Voltage regulator control register */ +#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ +#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */ + +#define IGP3_CAPABILITY \ + PHY_REG(776, 19) /* IGP3 Capability Register */ + +/* Capabilities for SKU Control */ +#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ +#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ +#define IGP3_CAP_ASF 0x0004 /* Support ASF */ +#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ +#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ +#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ +#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ +#define IGP3_CAP_RSS 0x0080 /* Support RSS */ +#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ +#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ + +#define IGP3_PPC_JORDAN_EN 0x0001 +#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 + +#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001 +#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E +#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 +#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 + +#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. 
Ctrl register */ +#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ + +#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) +#define IGP3_KMRN_EC_DIS_INBAND 0x0080 + +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ +#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ +#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */ +#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ +#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ +#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ +#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ +#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ +#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ +#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ + +#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */ +#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ +#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ +#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ +#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ +#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ +#define IFE_PESC_POLARITY_REVERSED_SHIFT 8 + +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */ +#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ +#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ +#define IFE_PSC_FORCE_POLARITY_SHIFT 5 +#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 + +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ +#define IFE_PMC_MDIX_MODE_SHIFT 6 +#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ + +#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ +#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ +#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ +#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ +#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ +#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ +#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ +#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ +#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ +#define IFE_PSCL_PROBE_LEDS_OFF 
0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ +#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define ICH_CYCLE_READ 0x0 +#define ICH_CYCLE_RESERVED 0x1 +#define ICH_CYCLE_WRITE 0x2 +#define ICH_CYCLE_ERASE 0x3 + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_FRACC 0x0050 +#define ICH_FLASH_FREG0 0x0054 +#define ICH_FLASH_FREG1 0x0058 +#define ICH_FLASH_FREG2 0x005C +#define ICH_FLASH_FREG3 0x0060 +#define ICH_FLASH_FPR0 0x0074 +#define ICH_FLASH_FPR1 0x0078 +#define ICH_FLASH_SSFSTS 0x0090 +#define ICH_FLASH_SSFCTL 0x0092 +#define ICH_FLASH_PREOP 0x0094 +#define ICH_FLASH_OPTYPE 0x0096 +#define ICH_FLASH_OPMENU 0x0098 + +#define ICH_FLASH_REG_MAPSIZE 0x00A0 +#define ICH_FLASH_SECTOR_SIZE 4096 +#define ICH_GFPREG_BASE_MASK 0x1FFF +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF + +/* Miscellaneous PHY bit definitions. */ +#define PHY_PREAMBLE 0xFFFFFFFF +#define PHY_SOF 0x01 +#define PHY_OP_READ 0x02 +#define PHY_OP_WRITE 0x01 +#define PHY_TURNAROUND 0x02 +#define PHY_PREAMBLE_SIZE 32 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 +#define E1000_PHY_ADDRESS 0x01 +#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */ +#define REG4_SPEED_MASK 0x01E0 +#define REG9_SPEED_MASK 0x0300 +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 +#define ADVERTISE_1000_FULL 0x0020 +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ +#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ +#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ + +#endif /* _E1000_HW_H_ */ diff --git a/devices/e1000/e1000_hw-6.1-orig.c b/devices/e1000/e1000_hw-6.1-orig.c new file mode 100644 index 00000000..4542e2bc --- /dev/null +++ b/devices/e1000/e1000_hw-6.1-orig.c @@ -0,0 +1,5636 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ + +/* e1000_hw.c + * Shared functions for accessing and configuring the MAC + */ + +#include "e1000.h" + +static s32 e1000_check_downshift(struct e1000_hw *hw); +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity); +static void e1000_clear_hw_cntrs(struct e1000_hw *hw); +static void e1000_clear_vfta(struct e1000_hw *hw); +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, + bool link_up); +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); +static s32 e1000_detect_gig_phy(struct e1000_hw *hw); +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length); +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static s32 e1000_id_led_init(struct e1000_hw *hw); +static void e1000_init_rx_addrs(struct e1000_hw *hw); +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value); +static s32 e1000_set_phy_type(struct e1000_hw *hw); +static void e1000_phy_init_script(struct e1000_hw *hw); +static s32 e1000_setup_copper_link(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw); +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw); +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count); +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count); +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data); +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data); +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); +static s32 e1000_acquire_eeprom(struct e1000_hw *hw); +static void e1000_release_eeprom(struct e1000_hw *hw); +static void e1000_standby_eeprom(struct e1000_hw *hw); +static s32 e1000_set_vco_speed(struct e1000_hw *hw); +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw); +static s32 e1000_set_phy_mode(struct e1000_hw *hw); +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); + +/* IGP cable length table */ +static const +u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = { + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, + 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, + 40, 50, 50, 50, 50, 
50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, + 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, + 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, + 110, 110, + 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, + 120, 120 +}; + +static DEFINE_MUTEX(e1000_eeprom_lock); +static DEFINE_SPINLOCK(e1000_phy_lock); + +/** + * e1000_set_phy_type - Set the phy type member in the hw struct. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_set_phy_type(struct e1000_hw *hw) +{ + if (hw->mac_type == e1000_undefined) + return -E1000_ERR_PHY_TYPE; + + switch (hw->phy_id) { + case M88E1000_E_PHY_ID: + case M88E1000_I_PHY_ID: + case M88E1011_I_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1118_E_PHY_ID: + hw->phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: + if (hw->mac_type == e1000_82541 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_type = e1000_phy_igp; + break; + case RTL8211B_PHY_ID: + hw->phy_type = e1000_phy_8211; + break; + case RTL8201N_PHY_ID: + hw->phy_type = e1000_phy_8201; + break; + default: + /* Should never have loaded on this device */ + hw->phy_type = e1000_phy_undefined; + return -E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_phy_init_script(struct e1000_hw *hw) +{ + u16 phy_saved_data; + + if (hw->phy_init_script) { + msleep(20); + + /* Save off the current value of register 0x2F5B to be restored + * at the end of this routine. 
+ */ + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disabled the PHY transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + msleep(20); + + e1000_write_phy_reg(hw, 0x0000, 0x0140); + msleep(5); + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + e1000_write_phy_reg(hw, 0x1F95, 0x0001); + e1000_write_phy_reg(hw, 0x1F71, 0xBD21); + e1000_write_phy_reg(hw, 0x1F79, 0x0018); + e1000_write_phy_reg(hw, 0x1F30, 0x1600); + e1000_write_phy_reg(hw, 0x1F31, 0x0014); + e1000_write_phy_reg(hw, 0x1F32, 0x161C); + e1000_write_phy_reg(hw, 0x1F94, 0x0003); + e1000_write_phy_reg(hw, 0x1F96, 0x003F); + e1000_write_phy_reg(hw, 0x2010, 0x0008); + break; + + case e1000_82541_rev_2: + case e1000_82547_rev_2: + e1000_write_phy_reg(hw, 0x1F73, 0x0099); + break; + default: + break; + } + + e1000_write_phy_reg(hw, 0x0000, 0x3300); + msleep(20); + + /* Now enable the transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (hw->mac_type == e1000_82547) { + u16 fused, fine, coarse; + + /* Move to analog registers page */ + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_SPARE_FUSE_STATUS, + &fused); + + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_STATUS, + &fused); + + fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; + coarse = + fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; + + if (coarse > + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + coarse -= + IGP01E1000_ANALOG_FUSE_COARSE_10; + fine -= IGP01E1000_ANALOG_FUSE_FINE_1; + } else if (coarse == + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + fine -= IGP01E1000_ANALOG_FUSE_FINE_10; + + fused = + (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | + (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | + (coarse & + IGP01E1000_ANALOG_FUSE_COARSE_MASK); + + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_CONTROL, + fused); + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_BYPASS, + IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); + } + } + } +} + +/** + * e1000_set_mac_type - Set the mac type member in the hw struct. 
+ * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + switch (hw->device_id) { + case E1000_DEV_ID_82542: + switch (hw->revision_id) { + case E1000_82542_2_0_REV_ID: + hw->mac_type = e1000_82542_rev2_0; + break; + case E1000_82542_2_1_REV_ID: + hw->mac_type = e1000_82542_rev2_1; + break; + default: + /* Invalid 82542 revision ID */ + return -E1000_ERR_MAC_TYPE; + } + break; + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + hw->mac_type = e1000_82543; + break; + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + hw->mac_type = e1000_82544; + break; + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + hw->mac_type = e1000_82540; + break; + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + hw->mac_type = e1000_82545; + break; + case E1000_DEV_ID_82545GM_COPPER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82545GM_SERDES: + hw->mac_type = e1000_82545_rev_3; + break; + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + hw->mac_type = e1000_82546; + break; + case E1000_DEV_ID_82546GB_COPPER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + hw->mac_type = e1000_82546_rev_3; + break; + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER_LOM: + hw->mac_type = e1000_82541; + break; + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + hw->mac_type = e1000_82541_rev_2; + break; + case E1000_DEV_ID_82547EI: + case E1000_DEV_ID_82547EI_MOBILE: + hw->mac_type = e1000_82547; + break; + case E1000_DEV_ID_82547GI: + hw->mac_type = e1000_82547_rev_2; + break; + case E1000_DEV_ID_INTEL_CE4100_GBE: + hw->mac_type = e1000_ce4100; + break; + default: + /* Should never have loaded on this device */ + return -E1000_ERR_MAC_TYPE; + } + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->asf_firmware_present = true; + break; + default: + break; + } + + /* The 82543 chip does not count tx_carrier_errors properly in + * FD mode + */ + if (hw->mac_type == e1000_82543) + hw->bad_tx_carr_stats_fd = true; + + if (hw->mac_type > e1000_82544) + hw->has_smbus = true; + + return E1000_SUCCESS; +} + +/** + * e1000_set_media_type - Set media type and TBI compatibility. 
+ * @hw: Struct containing variables accessed by shared code + */ +void e1000_set_media_type(struct e1000_hw *hw) +{ + u32 status; + + if (hw->mac_type != e1000_82543) { + /* tbi_compatibility is only valid on 82543 */ + hw->tbi_compatibility_en = false; + } + + switch (hw->device_id) { + case E1000_DEV_ID_82545GM_SERDES: + case E1000_DEV_ID_82546GB_SERDES: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->media_type = e1000_media_type_fiber; + break; + case e1000_ce4100: + hw->media_type = e1000_media_type_copper; + break; + default: + status = er32(STATUS); + if (status & E1000_STATUS_TBIMODE) { + hw->media_type = e1000_media_type_fiber; + /* tbi_compatibility not valid on fiber */ + hw->tbi_compatibility_en = false; + } else { + hw->media_type = e1000_media_type_copper; + } + break; + } + } +} + +/** + * e1000_reset_hw - reset the hardware completely + * @hw: Struct containing variables accessed by shared code + * + * Reset the transmit and receive units; mask and clear all interrupts. + */ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 ctrl_ext; + u32 manc; + u32 led_ctrl; + s32 ret_val; + + /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC with + * the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(); + + /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ + hw->tbi_compatibility_on = false; + + /* Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msleep(10); + + ctrl = er32(CTRL); + + /* Must reset the PHY before resetting the MAC */ + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Issue a global reset to the MAC. This will reset the chip's + * transmit, receive, DMA, and link units. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + e_dbg("Issuing a global reset to MAC\n"); + + switch (hw->mac_type) { + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82546: + case e1000_82541: + case e1000_82541_rev_2: + /* These controllers can't ack the 64-bit write when issuing the + * reset, so use IO-mapping as a workaround to issue the reset + */ + E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); + break; + case e1000_82545_rev_3: + case e1000_82546_rev_3: + /* Reset is performed on a shadow of the control register */ + ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); + break; + case e1000_ce4100: + default: + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + break; + } + + /* After MAC reset, force reload of EEPROM to restore power-on settings + * to device. Later controllers reload the EEPROM automatically, so + * just wait for reload to complete. 
+ */ + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* Wait for reset to complete */ + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + /* Wait for EEPROM reload */ + msleep(2); + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + /* Wait for EEPROM reload */ + msleep(20); + break; + default: + /* Auto read done will delay 5ms or poll based on mac type */ + ret_val = e1000_get_auto_rd_done(hw); + if (ret_val) + return ret_val; + break; + } + + /* Disable HW ARPs on ASF enabled adapters */ + if (hw->mac_type >= e1000_82540) { + manc = er32(MANC); + manc &= ~(E1000_MANC_ARP_EN); + ew32(MANC, manc); + } + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + e1000_phy_init_script(hw); + + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Clear any pending interrupt events. */ + er32(ICR); + + /* If MWI was previously enabled, reenable it. */ + if (hw->mac_type == e1000_82542_rev2_0) { + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw - Performs basic configuration of the adapter. + * @hw: Struct containing variables accessed by shared code + * + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes the receive address registers, + * multicast table, and VLAN filter table. Calls routines to setup link + * configuration and flow control settings. Clears all on-chip counters. Leaves + * the transmit and receive units disabled and uninitialized. + */ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + s32 ret_val; + u32 mta_size; + u32 ctrl_ext; + + /* Initialize Identification LED */ + ret_val = e1000_id_led_init(hw); + if (ret_val) { + e_dbg("Error Initializing Identification LED\n"); + return ret_val; + } + + /* Set the media type and TBI compatibility */ + e1000_set_media_type(hw); + + /* Disabling VLAN filtering. */ + e_dbg("Initializing the IEEE VLAN\n"); + if (hw->mac_type < e1000_82545_rev_3) + ew32(VET, 0); + e1000_clear_vfta(hw); + + /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + ew32(RCTL, E1000_RCTL_RST); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Setup the receive address. This involves initializing all of the + * Receive Address Registers (RARs 0 - 15). 
+ */ + e1000_init_rx_addrs(hw); + + /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ + if (hw->mac_type == e1000_82542_rev2_0) { + ew32(RCTL, 0); + E1000_WRITE_FLUSH(); + msleep(1); + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + mta_size = E1000_MC_TBL_SIZE; + for (i = 0; i < mta_size; i++) { + E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); + /* use write flush to prevent Memory Write Block (MWB) from + * occurring when accessing our register space + */ + E1000_WRITE_FLUSH(); + } + + /* Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. Valid only on + * 82542 and 82543 silicon. + */ + if (hw->dma_fairness && hw->mac_type <= e1000_82543) { + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PRIOR); + } + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + /* Workaround for PCI-X problem when BIOS sets MMRBC + * incorrectly. + */ + if (hw->bus_type == e1000_bus_type_pcix && + e1000_pcix_get_mmrbc(hw) > 2048) + e1000_pcix_set_mmrbc(hw, 2048); + break; + } + + /* Call a subroutine to configure the link and setup flow control. */ + ret_val = e1000_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + if (hw->mac_type > e1000_82544) { + ctrl = er32(TXDCTL); + ctrl = + (ctrl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + ew32(TXDCTL, ctrl); + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs(hw); + + if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || + hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { + ctrl_ext = er32(CTRL_EXT); + /* Relaxed ordering must be disabled to avoid a parity + * error crash in a PCI slot. + */ + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + } + + return ret_val; +} + +/** + * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting. + * @hw: Struct containing variables accessed by shared code. + */ +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) +{ + u16 eeprom_data; + s32 ret_val; + + if (hw->media_type != e1000_media_type_internal_serdes) + return E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, + &eeprom_data); + if (ret_val) + return ret_val; + + if (eeprom_data != EEPROM_RESERVED_WORD) { + /* Adjust SERDES output amplitude only. */ + eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link - Configures flow control and link settings. + * @hw: Struct containing variables accessed by shared code + * + * Determines which flow control settings to use. Calls the appropriate media- + * specific link configuration function. Configures the flow control settings. + * Assuming the adapter has a valid link partner, a valid link should be + * established. Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. 
+ */ +s32 e1000_setup_link(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 ret_val; + u16 eeprom_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->fc == E1000_FC_DEFAULT) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0) + hw->fc = E1000_FC_NONE; + else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == + EEPROM_WORD0F_ASM_DIR) + hw->fc = E1000_FC_TX_PAUSE; + else + hw->fc = E1000_FC_FULL; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + if (hw->mac_type == e1000_82542_rev2_0) + hw->fc &= (~E1000_FC_TX_PAUSE); + + if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1)) + hw->fc &= (~E1000_FC_RX_PAUSE); + + hw->original_fc = hw->fc; + + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc); + + /* Take the 4 bits from EEPROM word 0x0F that determine the initial + * polarity value for the SW controlled pins, and setup the + * Extended Device Control reg with that info. + * This is needed because one of the SW controlled pins is used for + * signal detection. So this should be done before e1000_setup_pcs_link() + * or e1000_phy_setup() is called. + */ + if (hw->mac_type == e1000_82543) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << + SWDPIO__EXT_SHIFT); + ew32(CTRL_EXT, ctrl_ext); + } + + /* Call the necessary subroutine to configure the link. */ + ret_val = (hw->media_type == e1000_media_type_copper) ? + e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw); + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + e_dbg("Initializing the Flow Control address, type and timer regs\n"); + + ew32(FCT, FLOW_CONTROL_TYPE); + ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); + ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); + + ew32(FCTTV, hw->fc_pause_time); + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames in not enabled, then these + * registers will be set to 0. + */ + if (!(hw->fc & E1000_FC_TX_PAUSE)) { + ew32(FCRTL, 0); + ew32(FCRTH, 0); + } else { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. 
+ */ + if (hw->fc_send_xon) { + ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); + ew32(FCRTH, hw->fc_high_water); + } else { + ew32(FCRTL, hw->fc_low_water); + ew32(FCRTH, hw->fc_high_water); + } + } + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link - prepare fiber or serdes link + * @hw: Struct containing variables accessed by shared code + * + * Manipulates Physical Coding Sublayer functions in order to configure + * link. Assumes the hardware has been previously reset and the transmitter + * and receiver are not enabled. + */ +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) +{ + u32 ctrl; + u32 status; + u32 txcw = 0; + u32 i; + u32 signal = 0; + s32 ret_val; + + /* On adapters with a MAC newer than 82544, SWDP 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + * If we're on serdes media, adjust the output amplitude to value + * set in the EEPROM. + */ + ctrl = er32(CTRL); + if (hw->media_type == e1000_media_type_fiber) + signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; + + ret_val = e1000_adjust_serdes_amplitude(hw); + if (ret_val) + return ret_val; + + /* Take the link out of reset */ + ctrl &= ~(E1000_CTRL_LRST); + + /* Adjust VCO speed to improve BER performance */ + ret_val = e1000_set_vco_speed(hw); + if (ret_val) + return ret_val; + + e1000_config_collision_dist(hw); + + /* Check for a software override of the flow control settings, and setup + * the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Tranmsit Config Word Register (TXCW) and re-start + * auto-negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, but + * not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we do + * not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + */ + switch (hw->fc) { + case E1000_FC_NONE: + /* Flow ctrl is completely disabled by a software over-ride */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case E1000_FC_RX_PAUSE: + /* Rx Flow control is enabled and Tx Flow control is disabled by + * a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case E1000_FC_TX_PAUSE: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case E1000_FC_FULL: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. 
If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + e_dbg("Auto-negotiation enabled\n"); + + ew32(TXCW, txcw); + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + hw->txcw = txcw; + msleep(1); + + /* If we have a signal (the cable is plugged in) then poll for a + * "Link-Up" indication in the Device Status Register. Time-out if a + * link isn't seen in 500 milliseconds seconds (Auto-negotiation should + * complete in less than 500 milliseconds even if the other end is doing + * it in SW). For internal serdes, we just assume a signal is present, + * then poll. + */ + if (hw->media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { + e_dbg("Looking for Link\n"); + for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { + msleep(10); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == (LINK_UP_TIMEOUT / 10)) { + e_dbg("Never got a valid link from auto-neg!!!\n"); + hw->autoneg_failed = 1; + /* AutoNeg failed to achieve a link, so we'll call + * e1000_check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = e1000_check_for_link(hw); + if (ret_val) { + e_dbg("Error while checking for link\n"); + return ret_val; + } + hw->autoneg_failed = 0; + } else { + hw->autoneg_failed = 0; + e_dbg("Valid Link Found\n"); + } + } else { + e_dbg("No Signal Detected\n"); + } + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series. + * @hw: Struct containing variables accessed by shared code + * + * Commits changes to PHY configuration by calling e1000_phy_reset(). + */ +static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw) +{ + s32 ret_val; + + /* SW reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +static s32 gbe_dhg_phy_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u32 ctrl_aux; + + switch (hw->phy_type) { + case e1000_phy_8211: + ret_val = e1000_copper_link_rtl_setup(hw); + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + case e1000_phy_8201: + /* Set RMII mode */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= E1000_CTL_AUX_RMII; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + + /* Disable the J/K bits required for receive */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= 0x4; + ctrl_aux &= ~0x2; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + ret_val = e1000_copper_link_rtl_setup(hw); + + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + default: + e_dbg("Error Resetting the PHY\n"); + return E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_preconfig - early configuration for copper + * @hw: Struct containing variables accessed by shared code + * + * Make sure we have a valid PHY and change PHY mode before link setup. + */ +static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + ctrl = er32(CTRL); + /* With 82543, we need to force speed and duplex on the MAC equal to + * what the PHY speed and duplex configuration is. In addition, we need + * to perform a hardware reset on the PHY to take it out of reset. 
+ */ + if (hw->mac_type > e1000_82543) { + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + } else { + ctrl |= + (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); + ew32(CTRL, ctrl); + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + } + + /* Make sure we have a valid PHY */ + ret_val = e1000_detect_gig_phy(hw); + if (ret_val) { + e_dbg("Error, did not detect valid phy.\n"); + return ret_val; + } + e_dbg("Phy ID = %x\n", hw->phy_id); + + /* Set PHY to class A mode (if necessary) */ + ret_val = e1000_set_phy_mode(hw); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82545_rev_3) || + (hw->mac_type == e1000_82546_rev_3)) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + phy_data |= 0x00000008; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + } + + if (hw->mac_type <= e1000_82543 || + hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_reset_disable = false; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) +{ + u32 led_ctrl; + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Wait 15ms for MAC to configure PHY from eeprom settings */ + msleep(15); + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + + /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ + if (hw->phy_type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = e1000_set_d3_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D3\n"); + return ret_val; + } + } + + /* Configure mdi-mdix settings */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + hw->dsp_config_state = e1000_dsp_config_disabled; + /* Force MDI for earlier revs of the IGP PHY */ + phy_data &= + ~(IGP01E1000_PSCR_AUTO_MDIX | + IGP01E1000_PSCR_FORCE_MDI_MDIX); + hw->mdix = 1; + + } else { + hw->dsp_config_state = e1000_dsp_config_enabled; + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (hw->mdix) { + case 1: + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + phy_data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + } + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->autoneg) { + e1000_ms_type phy_ms_setting = hw->master_slave; + + if (hw->ffe_config_state == e1000_ffe_config_active) + hw->ffe_config_state = e1000_ffe_config_enabled; + + if (hw->dsp_config_state == e1000_dsp_config_activated) + hw->dsp_config_state = e1000_dsp_config_enabled; + + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
+ */ + if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + /* Set auto Master/Slave resolution process */ + ret_val = + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~CR_1000T_MS_ENABLE; + ret_val = + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (phy_ms_setting) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + break; + default: + break; + } + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (hw->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (hw->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (hw->phy_revision < M88E1011_I_REV_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. 
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((hw->phy_revision == E1000_REVISION_2) && + (hw->phy_id == M88E1111_I_PHY_ID)) { + /* Vidalia Phy, set the downshift counter to 5x */ + phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + } + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_autoneg - setup auto-neg + * @hw: Struct containing variables accessed by shared code + * + * Setup auto-negotiation and flow control advertisements, + * and then perform auto-negotiation. + */ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Perform some bounds checking on the hw->autoneg_advertised + * parameter. If this variable is zero, then set it to the default. + */ + hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (hw->autoneg_advertised == 0) + hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* IFE/RTL8201N PHY only supports 10/100 */ + if (hw->phy_type == e1000_phy_8201) + hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; + + e_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + e_dbg("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + e_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (hw->wait_autoneg_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + e_dbg + ("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->get_link_status = true; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_postconfig - post link setup + * @hw: Struct containing variables accessed by shared code + * + * Config the MAC and the PHY after link is up. + * 1) Set up the MAC to the current PHY speed/duplex + * if we are on 82543. If we + * are on newer silicon, we only need to configure + * collision distance in the Transmit Control Register. + * 2) Set up flow control on the MAC to that established with + * the link partner. + * 3) Config DSP to improve Gigabit link quality for some PHY revisions. 
+ */ +static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) +{ + s32 ret_val; + + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { + e1000_config_collision_dist(hw); + } else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error Configuring Flow Control\n"); + return ret_val; + } + + /* Config DSP to improve Giga link quality */ + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_config_dsp_after_link_change(hw, true); + if (ret_val) { + e_dbg("Error Configuring DSP after link up\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_copper_link - phy/speed/duplex setting + * @hw: Struct containing variables accessed by shared code + * + * Detects which PHY is present and sets up the speed and duplex + */ +static s32 e1000_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + /* Check if it is a valid PHY and set PHY mode if necessary. */ + ret_val = e1000_copper_link_preconfig(hw); + if (ret_val) + return ret_val; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_copper_link_igp_setup(hw); + if (ret_val) + return ret_val; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_copper_link_mgp_setup(hw); + if (ret_val) + return ret_val; + } else { + ret_val = gbe_dhg_phy_setup(hw); + if (ret_val) { + e_dbg("gbe_dhg_phy_setup failed!\n"); + return ret_val; + } + } + + if (hw->autoneg) { + /* Setup autoneg and flow control advertisement + * and perform autonegotiation + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H,or 100F + * depending on value from forced_speed_duplex. + */ + e_dbg("Forcing speed and duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + for (i = 0; i < 10; i++) { + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + /* Config the MAC and PHY after link is up */ + ret_val = e1000_copper_link_postconfig(hw); + if (ret_val) + return ret_val; + + e_dbg("Valid link established!!!\n"); + return E1000_SUCCESS; + } + udelay(10); + } + + e_dbg("Unable to establish link!!!\n"); + return E1000_SUCCESS; +} + +/** + * e1000_phy_setup_autoneg - phy settings + * @hw: Struct containing variables accessed by shared code + * + * Configures PHY autoneg and flow control advertisement settings + */ +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + else if (hw->phy_type == e1000_phy_8201) + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. 
First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { + e_dbg + ("Advertise 1000mb Half duplex requested, request denied!\n"); + } + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start + * auto-negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc) { + case E1000_FC_NONE: /* 0 */ + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_RX_PAUSE: /* 1 */ + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + */ + /* Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_TX_PAUSE: /* 2 */ + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case E1000_FC_FULL: /* 3 */ + /* Flow control (both RX and TX) is enabled by a software + * over-ride. 
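+ * (Note: symmetric pause is advertised by setting both the PAUSE and
+ * ASM_DIR bits in the auto-negotiation advertisement register, which is
+ * what the statement below does.)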
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (hw->phy_type == e1000_phy_8201) { + mii_1000t_ctrl_reg = 0; + } else { + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex - force link settings + * @hw: Struct containing variables accessed by shared code + * + * Force PHY speed and duplex settings to hw->forced_speed_duplex + */ +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 mii_ctrl_reg; + u16 mii_status_reg; + u16 phy_data; + u16 i; + + /* Turn off Flow control if we are forcing speed and duplex. */ + hw->fc = E1000_FC_NONE; + + e_dbg("hw->fc = %d\n", hw->fc); + + /* Read the Device Control Register. */ + ctrl = er32(CTRL); + + /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(DEVICE_SPEED_MASK); + + /* Clear the Auto Speed Detect Enable bit. */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Read the MII Control Register. */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg); + if (ret_val) + return ret_val; + + /* We need to disable autoneg in order to force link and duplex. */ + + mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN; + + /* Are we forcing Full or Half Duplex? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_10_full) { + /* We want to force full duplex so we SET the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl |= E1000_CTRL_FD; + mii_ctrl_reg |= MII_CR_FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + /* We want to force half duplex so we CLEAR the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl &= ~E1000_CTRL_FD; + mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; + e_dbg("Half Duplex\n"); + } + + /* Are we forcing 100Mbps??? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_100_half) { + /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */ + ctrl |= E1000_CTRL_SPD_100; + mii_ctrl_reg |= MII_CR_SPEED_100; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + e_dbg("Forcing 100mb "); + } else { + /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + mii_ctrl_reg |= MII_CR_SPEED_10; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + e_dbg("Forcing 10mb "); + } + + e1000_config_collision_dist(hw); + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + + if (hw->phy_type == e1000_phy_m88) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires + * MDI forced whenever speed are duplex are forced. 
+ */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %x\n", phy_data); + + /* Need to reset the PHY or these changes will be ignored */ + mii_ctrl_reg |= MII_CR_RESET; + + /* Disable MDI-X support for 10/100 */ + } else { + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed or duplex are forced. + */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + /* Write back the modified PHY MII control register. */ + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg); + if (ret_val) + return ret_val; + + udelay(1); + + /* The wait_autoneg_complete flag may be a little misleading here. + * Since we are forcing speed and duplex, Auto-Neg is not enabled. + * But we do want to delay for a period while forcing only so we + * don't generate false No Link messages. So we will wait here + * only if the user has set wait_autoneg_complete to 1, which is + * the default. + */ + if (hw->wait_autoneg_complete) { + /* We will wait for autoneg to complete. */ + e_dbg("Waiting for forced speed/duplex link.\n"); + mii_status_reg = 0; + + /* Wait for autoneg to complete or 4.5 seconds to expire */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { + /* We didn't get link. Reset the DSP and wait again + * for link. + */ + ret_val = e1000_phy_reset_dsp(hw); + if (ret_val) { + e_dbg("Error Resetting PHY DSP\n"); + return ret_val; + } + } + /* This loop will early-out if the link condition has been + * met + */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + } + } + + if (hw->phy_type == e1000_phy_m88) { + /* Because we reset the PHY above, we need to re-force TX_CLK in + * the Extended PHY Specific Control Register to 25MHz clock. + * This value defaults back to a 2.5MHz clock when the PHY is + * reset. + */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + + /* In addition, because of the s/w reset above, we need to + * enable CRS on Tx. This must be set for both full and half + * duplex operation. 
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ret_val = e1000_polarity_reversal_workaround(hw); + if (ret_val) + return ret_val; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_config_collision_dist - set collision distance register + * @hw: Struct containing variables accessed by shared code + * + * Sets the collision distance in the Transmit Control register. + * Link should have been established previously. Reads the speed and duplex + * information from the Device Status register. + */ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl, coll_dist; + + if (hw->mac_type < e1000_82543) + coll_dist = E1000_COLLISION_DISTANCE_82542; + else + coll_dist = E1000_COLLISION_DISTANCE; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= coll_dist << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_config_mac_to_phy - sync phy and mac settings + * @hw: Struct containing variables accessed by shared code + * + * Sets MAC speed and duplex settings to reflect the those in the PHY + * The contents of the PHY register containing the needed information need to + * be passed in. + */ +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + /* 82544 or newer MAC, Auto Speed Detection takes care of + * MAC speed/duplex configuration. + */ + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) + return E1000_SUCCESS; + + /* Read the Device Control Register and set the bits to Force Speed + * and Duplex. + */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); + + switch (hw->phy_type) { + case e1000_phy_8201: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & RTL_PHY_CTRL_FD) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + if (phy_data & RTL_PHY_CTRL_SPD_100) + ctrl |= E1000_CTRL_SPD_100; + else + ctrl |= E1000_CTRL_SPD_10; + + e1000_config_collision_dist(hw); + break; + default: + /* Set up duplex in the Device Control and Transmit Control + * registers depending on negotiated values. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & M88E1000_PSSR_DPLX) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + e1000_config_collision_dist(hw); + + /* Set up speed in the Device Control register depending on + * negotiated values. + */ + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + ctrl |= E1000_CTRL_SPD_1000; + else if ((phy_data & M88E1000_PSSR_SPEED) == + M88E1000_PSSR_100MBS) + ctrl |= E1000_CTRL_SPD_100; + } + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_force_mac_fc - force flow control settings + * @hw: Struct containing variables accessed by shared code + * + * Forces the MAC's flow control settings. + * Sets the TFCE and RFCE bits in the device control register to reflect + * the adapter settings. 
TFCE and RFCE need to be explicitly set by + * software when a Copper PHY is used because autonegotiation is managed + * by the PHY rather than the MAC. Software must also configure these + * bits when link is forced on a fiber connection. + */ +s32 e1000_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + + /* Get the current configuration of the Device Control Register */ + ctrl = er32(CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + + switch (hw->fc) { + case E1000_FC_NONE: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case E1000_FC_RX_PAUSE: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case E1000_FC_TX_PAUSE: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case E1000_FC_FULL: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + /* Disable TX Flow Control for 82542 (rev 2.0) */ + if (hw->mac_type == e1000_82542_rev2_0) + ctrl &= (~E1000_CTRL_TFCE); + + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_config_fc_after_link_up - configure flow control after autoneg + * @hw: Struct containing variables accessed by shared code + * + * Configures flow control settings after link is established + * Should be called immediately after a valid link has been established. + * Forces MAC flow control settings if link was forced. When in MII/GMII mode + * and autonegotiation is enabled, the MAC flow control settings will be set + * based on the flow control negotiated by the PHY. In TBI mode, the TFCE + * and RFCE bits will be automatically set to the negotiated flow control mode. + */ +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 mii_nway_adv_reg; + u16 mii_nway_lp_ability_reg; + u16 speed; + u16 duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (((hw->media_type == e1000_media_type_fiber) && + (hw->autoneg_failed)) || + ((hw->media_type == e1000_media_type_internal_serdes) && + (hw->autoneg_failed)) || + ((hw->media_type == e1000_media_type_copper) && + (!hw->autoneg))) { + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. 
We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement Register + * (Address 4) and the Auto_Negotiation Base Page + * Ability Register (Address 5) to determine how flow + * control was negotiated. + */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement + * Register (Address 4) and two bits in the Auto + * Negotiation Base Page Ability Register (Address 5) + * determine flow control for both the PHY and the link + * partner. The following table, taken out of the IEEE + * 802.3ab/D6.0 dated March 25, 1999, describes these + * PAUSE resolution bits and how flow control is + * determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|------------------ + * 0 | 0 | DC | DC | E1000_FC_NONE + * 0 | 1 | 0 | DC | E1000_FC_NONE + * 0 | 1 | 1 | 0 | E1000_FC_NONE + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * 1 | 0 | 0 | DC | E1000_FC_NONE + * 1 | DC | 1 | DC | E1000_FC_FULL + * 1 | 1 | 0 | 0 | E1000_FC_NONE + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + /* Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | DC | 1 | DC | E1000_FC_FULL + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected Rx + * ONLY of pause frames. In this case, we had + * to advertise FULL flow control because we + * could not advertise Rx ONLY. Hence, we must + * now check to see if we need to turn OFF the + * TRANSMISSION of PAUSE frames. + */ + if (hw->original_fc == E1000_FC_FULL) { + hw->fc = E1000_FC_FULL; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = E1000_FC_TX_PAUSE; + e_dbg + ("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should + * be disabled. However, we want to consider that we + * could be connected to a legacy switch that doesn't + * advertise desired flow control, but can be forced on + * the link partner. So if we advertised no flow + * control, that is what we will resolve to. If we + * advertised some kind of receive capability (Rx Pause + * Only or Full Flow Control) and the link partner + * advertised none, we will configure ourselves to + * enable Rx Flow Control only. We can do this safely + * for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, + * no harm done since we won't be receiving any PAUSE + * frames anyway. If the intent on the link partner was + * to have flow control enabled, then by us enabling Rx + * only, we can at least receive pause frames and + * process them. This is a good idea because in most + * cases, since we are predominantly a server NIC, more + * times than not we will be asked to delay transmission + * of packets than asking our link partner to pause + * transmission of frames. + */ + else if ((hw->original_fc == E1000_FC_NONE || + hw->original_fc == E1000_FC_TX_PAUSE) || + hw->fc_strict_ieee) { + hw->fc = E1000_FC_NONE; + e_dbg("Flow Control = NONE.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc = E1000_FC_NONE; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg + ("Error forcing flow control settings\n"); + return ret_val; + } + } else { + e_dbg + ("Copper PHY and Auto Neg has not completed.\n"); + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + */ +static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. 
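+ * (Note: the autoneg_failed flag handled below provides that grace
+ * period; the first failed check only sets the flag, and the link is
+ * actually forced on the following call.)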
+ */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (hw->autoneg_failed == 0) { + hw->autoneg_failed = 1; + goto out; + } + e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, hw->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + hw->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - forced.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - autoneg " + "completed successfully.\n"); + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - invalid" + "codewords detected in autoneg.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - no sync.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - autoneg failed\n"); + } + } + + out: + return ret_val; +} + +/** + * e1000_check_for_link + * @hw: Struct containing variables accessed by shared code + * + * Checks to see if the link status of the hardware has changed. + * Called by any function that needs to check the link status of the adapter. + */ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + u32 status; + u32 rctl; + u32 icr; + s32 ret_val; + u16 phy_data; + + er32(CTRL); + status = er32(STATUS); + + /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + */ + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) { + er32(RXCW); + + if (hw->media_type == e1000_media_type_fiber) { + if (status & E1000_STATUS_LU) + hw->get_link_status = false; + } + } + + /* If we have a copper PHY then we only want to go out to the PHY + * registers to see if Auto-Neg has completed and/or if our link + * status has changed. The get_link_status flag will be set if we + * receive a Link Status Change interrupt or we have Rx Sequence + * Errors. 
+ */ + if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + * Read the register twice since the link bit is sticky. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + hw->get_link_status = false; + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift(hw); + + /* If we are on 82544 or 82543 silicon and speed/duplex + * are forced to 10H or 10F, then we will implement the + * polarity reversal workaround. We disable interrupts + * first, and upon returning, place the devices + * interrupt state to its previous value except for the + * link status change interrupt which will + * happen due to the execution of this workaround. + */ + + if ((hw->mac_type == e1000_82544 || + hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ew32(IMC, 0xffffffff); + ret_val = + e1000_polarity_reversal_workaround(hw); + icr = er32(ICR); + ew32(ICS, (icr & ~E1000_ICS_LSC)); + ew32(IMS, IMS_ENABLE_MASK); + } + + } else { + /* No link detected */ + e1000_config_dsp_after_link_change(hw, false); + return 0; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!hw->autoneg) + return -E1000_ERR_CONFIG; + + /* optimize the dsp settings for the igp phy */ + e1000_config_dsp_after_link_change(hw, true); + + /* We have a M88E1000 PHY and Auto-Neg is enabled. If we + * have Si on board that is 82544 or newer, Auto + * Speed Detection takes care of MAC speed/duplex + * configuration. So we only need to configure Collision + * Distance in the MAC. Otherwise, we need to force + * speed/duplex on the MAC to the current PHY speed/duplex + * settings. + */ + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_ce4100)) + e1000_config_collision_dist(hw); + else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg + ("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control settings + * because we may have had to re-autoneg with a different link + * partner. + */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + + /* At this point we know that we are on copper and we have + * auto-negotiated link. These are conditions for checking the + * link partner capability register. We use the link speed to + * determine if TBI compatibility needs to be turned on or off. + * If the link is not at gigabit speed, then TBI compatibility + * is not needed. If we are at gigabit speed, we turn on TBI + * compatibility. + */ + if (hw->tbi_compatibility_en) { + u16 speed, duplex; + + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + if (speed != SPEED_1000) { + /* If link speed is not set to gigabit speed, we + * do not need to enable TBI compatibility. 
+ */ + if (hw->tbi_compatibility_on) { + /* If we previously were in the mode, + * turn it off. + */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_SBP; + ew32(RCTL, rctl); + hw->tbi_compatibility_on = false; + } + } else { + /* If TBI compatibility is was previously off, + * turn it on. For compatibility with a TBI link + * partner, we will store bad packets. Some + * frames have an additional byte on the end and + * will look like CRC errors to the hardware. + */ + if (!hw->tbi_compatibility_on) { + hw->tbi_compatibility_on = true; + rctl = er32(RCTL); + rctl |= E1000_RCTL_SBP; + ew32(RCTL, rctl); + } + } + } + } + + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) + e1000_check_for_serdes_link_generic(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex + * @hw: Struct containing variables accessed by shared code + * @speed: Speed of the connection + * @duplex: Duplex setting of the connection + * + * Detects the current speed and duplex settings of the hardware. + */ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + u32 status; + s32 ret_val; + u16 phy_data; + + if (hw->mac_type >= e1000_82543) { + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + e_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + e_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + e_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + e_dbg(" Half Duplex\n"); + } + } else { + e_dbg("1000 Mbs, Full Duplex\n"); + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + } + + /* IGP01 PHY may advertise full duplex operation after speed downgrade + * even if it is operating at half duplex. Here we set the duplex + * settings to match the duplex in the link partner's capabilities. + */ + if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & NWAY_ER_LP_NWAY_CAPS)) + *duplex = HALF_DUPLEX; + else { + ret_val = + e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); + if (ret_val) + return ret_val; + if ((*speed == SPEED_100 && + !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) || + (*speed == SPEED_10 && + !(phy_data & NWAY_LPAR_10T_FD_CAPS))) + *duplex = HALF_DUPLEX; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_wait_autoneg + * @hw: Struct containing variables accessed by shared code + * + * Blocks until autoneg completes or times out (~4.5 seconds) + */ +static s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + e_dbg("Waiting for Auto-Neg to complete.\n"); + + /* We will wait for autoneg to complete or 4.5 seconds to expire. */ + for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. 
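+ * (Note: the status register is read twice below because it contains
+ * latched ("sticky") bits; only the second read reflects the current
+ * state.)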
+ */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + if (phy_data & MII_SR_AUTONEG_COMPLETE) + return E1000_SUCCESS; + + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_raise_mdi_clk - Raises the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Raise the clock input to the Management Data Clock (by setting the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_lower_mdi_clk - Lowers the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Lower the clock input to the Management Data Clock (by clearing the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY + * @hw: Struct containing variables accessed by shared code + * @data: Data to send out to the PHY + * @count: Number of bits to shift out + * + * Bits are shifted out in MSB to LSB order. + */ +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) +{ + u32 ctrl; + u32 mask; + + /* We need to shift "count" number of bits out to the PHY. So, the value + * in the "data" parameter will be shifted out to the PHY one bit at a + * time. In order to do this, "data" must be broken down into bits. + */ + mask = 0x01; + mask <<= (count - 1); + + ctrl = er32(CTRL); + + /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ + ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); + + while (mask) { + /* A "1" is shifted out to the PHY by setting the MDIO bit to + * "1" and then raising and lowering the Management Data Clock. + * A "0" is shifted out to the PHY by setting the MDIO bit to + * "0" and then raising and lowering the clock. + */ + if (data & mask) + ctrl |= E1000_CTRL_MDIO; + else + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + udelay(10); + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + mask = mask >> 1; + } +} + +/** + * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY + * @hw: Struct containing variables accessed by shared code + * + * Bits are shifted in MSB to LSB order. + */ +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) +{ + u32 ctrl; + u16 data = 0; + u8 i; + + /* In order to read a register from the PHY, we need to shift in a total + * of 18 bits from the PHY. The first two bit (turnaround) times are + * used to avoid contention on the MDIO pin when a read operation is + * performed. These two bits are ignored by us and thrown away. Bits are + * "shifted in" by raising the input to the Management Data Clock + * (setting the MDC bit), and then reading the value of the MDIO bit. + */ + ctrl = er32(CTRL); + + /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as + * input. + */ + ctrl &= ~E1000_CTRL_MDIO_DIR; + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + /* Raise and Lower the clock before reading in the data. This accounts + * for the turnaround bits. 
The first clock occurred when we clocked out + * the last bit of the Register Address. + */ + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + for (data = 0, i = 0; i < 16; i++) { + data = data << 1; + e1000_raise_mdi_clk(hw, &ctrl); + ctrl = er32(CTRL); + /* Check to see if we shifted in a "1". */ + if (ctrl & E1000_CTRL_MDIO) + data |= 1; + e1000_lower_mdi_clk(hw, &ctrl); + } + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + return data; +} + +/** + * e1000_read_phy_reg - read a phy register + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to read + * @phy_data: pointer to the value on the PHY register + * + * Reads the value from a PHY register, if the value is on a specific non zero + * page, sets the page first. + */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16) reg_addr); + if (ret_val) + goto out; + } + + ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); +out: + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, and register address in the MDI + * Control register. The MAC will take care of interfacing with + * the PHY to retrieve the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_READ) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + + mdic = readl(E1000_MDIO_STS); + if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) { + e_dbg("MDI Read Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16)mdic; + } else { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16)mdic; + } + } else { + /* We must first send a preamble through the MDIO pin to signal + * the beginning of an MII instruction. This is done by sending + * 32 consecutive "1" bits. + */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the next few fields that are required for a read + * operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine five different times. 
The + * format of a MII read instruction consists of a shift out of + * 14 bits and is defined as follows: + * + * followed by a shift in of 18 bits. This first two bits + * shifted in are TurnAround bits used to avoid contention on + * the MDIO pin when a READ operation is performed. These two + * bits are thrown away followed by a shift in of 16 bits which + * contains the desired data. + */ + mdic = ((reg_addr) | (phy_addr << 5) | + (PHY_OP_READ << 10) | (PHY_SOF << 12)); + + e1000_shift_out_mdi_bits(hw, mdic, 14); + + /* Now that we've shifted out the read command to the MII, we + * need to "shift in" the 16-bit value (18 total bits) of the + * requested PHY register address. + */ + *phy_data = e1000_shift_in_mdi_bits(hw); + } + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg - write a phy register + * + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to write + * @phy_data: data to write to the PHY + * + * Writes a value to a PHY register + */ +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16)reg_addr); + if (ret_val) { + spin_unlock_irqrestore(&e1000_phy_lock, flags); + return ret_val; + } + } + + ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, register address, and data + * intended for the PHY register in the MDI Control register. + * The MAC will take care of interfacing with the PHY to send + * the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = (((u32)phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_WRITE) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 640; i++) { + udelay(5); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } else { + mdic = (((u32)phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 641; i++) { + udelay(5); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } + } else { + /* We'll need to use the SW defined pins to shift the write + * command out to the PHY. We first send a preamble to the PHY + * to signal the beginning of the MII instruction. This is done + * by sending 32 consecutive "1" bits. 
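+ * (Note: the 32-bit frame assembled below is start-of-frame, opcode,
+ * PHY address, register address, turnaround and data, and is shifted
+ * out MSB first.)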
+ */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the remaining required fields that will indicate + * a write operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine for each field in the + * command. The format of a MII write instruction is as follows: + * . + */ + mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | + (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); + mdic <<= 16; + mdic |= (u32)phy_data; + + e1000_shift_out_mdi_bits(hw, mdic, 32); + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - reset the phy, hardware style + * @hw: Struct containing variables accessed by shared code + * + * Returns the PHY to the power-on reset state + */ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext; + u32 led_ctrl; + + e_dbg("Resetting Phy...\n"); + + if (hw->mac_type > e1000_82543) { + /* Read the device control register and assert the + * E1000_CTRL_PHY_RST bit. Then, take it out of reset. + * For e1000 hardware, we delay for 10ms between the assert + * and de-assert. + */ + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); + E1000_WRITE_FLUSH(); + + msleep(10); + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + } else { + /* Read the Extended Device Control Register, assert the + * PHY_RESET_DIR bit to put the PHY into reset. Then, take it + * out of reset. + */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; + ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + msleep(10); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + } + udelay(150); + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Wait for FW to finish PHY configuration. */ + return e1000_get_phy_cfg_done(hw); +} + +/** + * e1000_phy_reset - reset the phy to commit settings + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY + * Sets bit 15 of the MII Control register + */ +s32 e1000_phy_reset(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + switch (hw->phy_type) { + case e1000_phy_igp: + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + break; + default: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= MII_CR_RESET; + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + break; + } + + if (hw->phy_type == e1000_phy_igp) + e1000_phy_init_script(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_detect_gig_phy - check the phy type + * @hw: Struct containing variables accessed by shared code + * + * Probes the expected PHY address for known PHY IDs + */ +static s32 e1000_detect_gig_phy(struct e1000_hw *hw) +{ + s32 phy_init_status, ret_val; + u16 phy_id_high, phy_id_low; + bool match = false; + + if (hw->phy_id != 0) + return E1000_SUCCESS; + + /* Read the PHY ID Registers to identify which PHY is onboard. 
*/ + ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); + if (ret_val) + return ret_val; + + hw->phy_id = (u32)(phy_id_high << 16); + udelay(20); + ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); + if (ret_val) + return ret_val; + + hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK); + hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->phy_id == M88E1000_E_PHY_ID) + match = true; + break; + case e1000_82544: + if (hw->phy_id == M88E1000_I_PHY_ID) + match = true; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + if (hw->phy_id == M88E1011_I_PHY_ID) + match = true; + break; + case e1000_ce4100: + if ((hw->phy_id == RTL8211B_PHY_ID) || + (hw->phy_id == RTL8201N_PHY_ID) || + (hw->phy_id == M88E1118_E_PHY_ID)) + match = true; + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (hw->phy_id == IGP01E1000_I_PHY_ID) + match = true; + break; + default: + e_dbg("Invalid MAC type %d\n", hw->mac_type); + return -E1000_ERR_CONFIG; + } + phy_init_status = e1000_set_phy_type(hw); + + if ((match) && (phy_init_status == E1000_SUCCESS)) { + e_dbg("PHY ID 0x%X detected\n", hw->phy_id); + return E1000_SUCCESS; + } + e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id); + return -E1000_ERR_PHY; +} + +/** + * e1000_phy_reset_dsp - reset DSP + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY's DSP + */ +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + do { + ret_val = e1000_write_phy_reg(hw, 29, 0x001d); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x0000); + if (ret_val) + break; + ret_val = E1000_SUCCESS; + } while (0); + + return ret_val; +} + +/** + * e1000_phy_igp_get_info - get igp specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for igp PHY only. + */ +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data, min_length, max_length, average; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + /* IGP01E1000 does not need to support it. */ + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; + + /* IGP01E1000 always correct polarity reversal */ + phy_info->polarity_correction = e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >> + IGP01E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Local/Remote Receiver Information are only valid @ 1000 + * Mbps + */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + /* Get cable length */ + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + /* Translate to old method */ + average = (max_length + min_length) / 2; + + if (average <= e1000_igp_cable_length_50) + phy_info->cable_length = e1000_cable_length_50; + else if (average <= e1000_igp_cable_length_80) + phy_info->cable_length = e1000_cable_length_50_80; + else if (average <= e1000_igp_cable_length_110) + phy_info->cable_length = e1000_cable_length_80_110; + else if (average <= e1000_igp_cable_length_140) + phy_info->cable_length = e1000_cable_length_110_140; + else + phy_info->cable_length = e1000_cable_length_140; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_m88_get_info - get m88 specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for m88 PHY only. + */ +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_info->extended_10bt_distance = + ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> + M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ? + e1000_10bt_ext_dist_enable_lower : + e1000_10bt_ext_dist_enable_normal; + + phy_info->polarity_correction = + ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> + M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ? + e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >> + M88E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + /* Cable Length Estimation and Local/Remote Receiver Information + * are only valid at 1000 Mbps. + */ + phy_info->cable_length = + (e1000_cable_length) ((phy_data & + M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_get_info - request phy info + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers + */ +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + + phy_info->cable_length = e1000_cable_length_undefined; + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; + phy_info->cable_polarity = e1000_rev_polarity_undefined; + phy_info->downshift = e1000_downshift_undefined; + phy_info->polarity_correction = e1000_polarity_reversal_undefined; + phy_info->mdix_mode = e1000_auto_x_mode_undefined; + phy_info->local_rx = e1000_1000t_rx_status_undefined; + phy_info->remote_rx = e1000_1000t_rx_status_undefined; + + if (hw->media_type != e1000_media_type_copper) { + e_dbg("PHY info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { + e_dbg("PHY info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + if (hw->phy_type == e1000_phy_igp) + return e1000_phy_igp_get_info(hw, phy_info); + else if ((hw->phy_type == e1000_phy_8211) || + (hw->phy_type == e1000_phy_8201)) + return E1000_SUCCESS; + else + return e1000_phy_m88_get_info(hw, phy_info); +} + +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { + e_dbg("Invalid MDI setting detected\n"); + hw->mdix = 1; + return -E1000_ERR_CONFIG; + } + return E1000_SUCCESS; +} + +/** + * e1000_init_eeprom_params - initialize sw eeprom vars + * @hw: Struct containing variables accessed by shared code + * + * Sets up eeprom variables in the hw struct. Must be called after mac_type + * is configured. 
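+ *
+ * word_size is counted in 16-bit words, so the smallest layout (64 words)
+ * corresponds to a 128-byte part. For SPI parts the size is not known up
+ * front: word_size is provisionally set to 64, the size field is read back
+ * from word EEPROM_CFG of the EEPROM itself, and word_size is then
+ * recomputed as 1 << (size_field + EEPROM_WORD_SIZE_SHIFT).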
+ */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd = er32(EECD); + s32 ret_val = E1000_SUCCESS; + u16 eeprom_size; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + eeprom->type = e1000_eeprom_microwire; + eeprom->word_size = 64; + eeprom->opcode_bits = 3; + eeprom->address_bits = 6; + eeprom->delay_usec = 50; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_SIZE) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (eecd & E1000_EECD_TYPE) { + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + } else { + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + } + break; + default: + break; + } + + if (eeprom->type == e1000_eeprom_spi) { + /* eeprom_size will be an enum [0..8] that maps to eeprom sizes + * 128B to 32KB (incremented by powers of 2). + */ + /* Set to default value for initial eeprom read. */ + eeprom->word_size = 64; + ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); + if (ret_val) + return ret_val; + eeprom_size = + (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; + /* 256B eeprom size was not supported in earlier hardware, so we + * bump eeprom_size up one to ensure that "1" (which maps to + * 256B) is never the result used in the shifting logic below. + */ + if (eeprom_size) + eeprom_size++; + + eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT); + } + return ret_val; +} + +/** + * e1000_raise_ee_clk - Raises the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Raise the clock input to the EEPROM (by setting the SK bit), and then + * wait microseconds. + */ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_lower_ee_clk - Lowers the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Lower the clock input to the EEPROM (by clearing the SK bit), and + * then wait 50 microseconds. + */ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM. 
+ * @hw: Struct containing variables accessed by shared code + * @data: data to send to the EEPROM + * @count: number of bits to shift out + */ +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u32 mask; + + /* We need to shift "count" bits out to the EEPROM. So, value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + */ + mask = 0x01 << (count - 1); + eecd = er32(EECD); + if (eeprom->type == e1000_eeprom_microwire) + eecd &= ~E1000_EECD_DO; + else if (eeprom->type == e1000_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + /* A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK bit + * controls the clock input to the EEPROM). A "0" is shifted + * out to the EEPROM by setting "DI" to "0" and then raising and + * then lowering the clock. + */ + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(eeprom->delay_usec); + + e1000_raise_ee_clk(hw, &eecd); + e1000_lower_ee_clk(hw, &eecd); + + mask = mask >> 1; + + } while (mask); + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM + * @hw: Struct containing variables accessed by shared code + * @count: number of bits to shift in + */ +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + /* In order to read a register from the EEPROM, we need to shift 'count' + * bits in from the EEPROM. Bits are "shifted in" by raising the clock + * input to the EEPROM (setting the SK bit), and then reading the value + * of the "DO" bit. During this "shifting in" process the "DI" bit + * should always be clear. + */ + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data = data << 1; + e1000_raise_ee_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DI); + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_ee_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_acquire_eeprom - Prepares EEPROM for access + * @hw: Struct containing variables accessed by shared code + * + * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This + * function should be called before issuing a command to the EEPROM. 
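+ *
+ * On MACs newer than the 82544 this also performs the EECD request/grant
+ * handshake (set E1000_EECD_REQ, poll for E1000_EECD_GNT) before the
+ * bit-banged pins are driven, so that firmware or other port software is
+ * not accessing the EEPROM at the same time.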
+ */ +static s32 e1000_acquire_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd, i = 0; + + eecd = er32(EECD); + + /* Request EEPROM Access */ + if (hw->mac_type > e1000_82544) { + eecd |= E1000_EECD_REQ; + ew32(EECD, eecd); + eecd = er32(EECD); + while ((!(eecd & E1000_EECD_GNT)) && + (i < E1000_EEPROM_GRANT_ATTEMPTS)) { + i++; + udelay(5); + eecd = er32(EECD); + } + if (!(eecd & E1000_EECD_GNT)) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire EEPROM grant\n"); + return -E1000_ERR_EEPROM; + } + } + + /* Setup EEPROM for Read/Write */ + + if (eeprom->type == e1000_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); + ew32(EECD, eecd); + + /* Set CS */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(1); + } + + return E1000_SUCCESS; +} + +/** + * e1000_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_standby_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + + eecd = er32(EECD); + + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock high */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock low */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } +} + +/** + * e1000_release_eeprom - drop chip select + * @hw: Struct containing variables accessed by shared code + * + * Terminates a command by inverting the EEPROM's chip select pin + */ +static void e1000_release_eeprom(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + + if (hw->eeprom.type == e1000_eeprom_spi) { + eecd |= E1000_EECD_CS; /* Pull CS high */ + eecd &= ~E1000_EECD_SK; /* Lower SCK */ + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(hw->eeprom.delay_usec); + } else if (hw->eeprom.type == e1000_eeprom_microwire) { + /* cleanup eeprom */ + + /* CS on Microwire is active-high */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); + + ew32(EECD, eecd); + + /* Rising edge of clock */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + + /* Falling edge of clock */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + } + + /* Stop requesting EEPROM access */ + if (hw->mac_type > e1000_82544) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + } +} + +/** + * e1000_spi_eeprom_ready - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u8 spi_stat_reg; + + /* Read "Status Register" repeatedly until the LSB is cleared. 
The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + retry_count = 0; + do { + e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, + hw->eeprom.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8); + if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) + break; + + udelay(5); + retry_count += 5; + + e1000_standby_eeprom(hw); + } while (retry_count < EEPROM_MAX_RETRY_SPI); + + /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and + * only 0-5mSec on 5V devices) + */ + if (retry_count >= EEPROM_MAX_RETRY_SPI) { + e_dbg("SPI EEPROM Status error\n"); + return -E1000_ERR_EEPROM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_eeprom - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * @words: number of words to read + */ +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + + mutex_lock(&e1000_eeprom_lock); + ret = e1000_do_read_eeprom(hw, offset, words, data); + mutex_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 i = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. + */ + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { + e_dbg("\"words\" parameter out of bounds. Words = %d," + "size = %d\n", offset, eeprom->word_size); + return -E1000_ERR_EEPROM; + } + + /* EEPROM's that don't use EERD to read require us to bit-bang the SPI + * directly. In this case, we need to acquire the EEPROM so that + * FW or other port software does not interrupt. + */ + /* Prepare the EEPROM for bit-bang reading */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have + * acquired the EEPROM at this point, so any returns should release it + */ + if (eeprom->type == e1000_eeprom_spi) { + u16 word_in; + u8 read_opcode = EEPROM_READ_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) { + e1000_release_eeprom(hw); + return -E1000_ERR_EEPROM; + } + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + read_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16)(offset * 2), + eeprom->address_bits); + + /* Read the data. The address of the eeprom internally + * increments with each byte (spi) being read, saving on the + * overhead of eeprom setup and tear-down. The address counter + * will roll over if reading beyond the size of the eeprom, thus + * allowing the entire memory to be read starting from any + * offset. 
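+		 *
+		 * Note that the EEPROM streams the bytes of each word in
+		 * address order (low byte first) while e1000_shift_in_ee_bits()
+		 * assembles bits MSB first, so every 16-bit word read below is
+		 * byte-swapped before it is stored in data[].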
+ */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_ee_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + } else if (eeprom->type == e1000_eeprom_microwire) { + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, + EEPROM_READ_OPCODE_MICROWIRE, + eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16)(offset + i), + eeprom->address_bits); + + /* Read the data. For microwire, each word requires the + * overhead of eeprom setup and tear-down. + */ + data[i] = e1000_shift_in_ee_bits(hw, 16); + e1000_standby_eeprom(hw); + cond_resched(); + } + } + + /* End this read operation */ + e1000_release_eeprom(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum + * @hw: Struct containing variables accessed by shared code + * + * Reads the first 64 16 bit words of the EEPROM and sums the values read. + * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is + * valid. + */ +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + +#ifdef CONFIG_PARISC + /* This is a signature and not a checksum on HP c8000 */ + if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) + return E1000_SUCCESS; + +#endif + if (checksum == (u16)EEPROM_SUM) + return E1000_SUCCESS; + else { + e_dbg("EEPROM Checksum Invalid\n"); + return -E1000_ERR_EEPROM; + } +} + +/** + * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum + * @hw: Struct containing variables accessed by shared code + * + * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. + * Writes the difference to word offset 63 of the EEPROM. + */ +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + checksum = (u16)EEPROM_SUM - checksum; + if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { + e_dbg("EEPROM Write Error\n"); + return -E1000_ERR_EEPROM; + } + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom - write words to the different EEPROM types. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word to be written to the EEPROM + * + * If e1000_update_eeprom_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + */ +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + + mutex_lock(&e1000_eeprom_lock); + ret = e1000_do_write_eeprom(hw, offset, words, data); + mutex_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + s32 status = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. 
+ */ + if ((offset >= eeprom->word_size) || + (words > eeprom->word_size - offset) || + (words == 0)) { + e_dbg("\"words\" parameter out of bounds\n"); + return -E1000_ERR_EEPROM; + } + + /* Prepare the EEPROM for writing */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + if (eeprom->type == e1000_eeprom_microwire) { + status = e1000_write_eeprom_microwire(hw, offset, words, data); + } else { + status = e1000_write_eeprom_spi(hw, offset, words, data); + msleep(10); + } + + /* Done with writing */ + e1000_release_eeprom(hw); + + return status; +} + +/** + * e1000_write_eeprom_spi - Writes a 16 bit word to a given offset in an SPI EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u16 widx = 0; + + while (widx < words) { + u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) + return -E1000_ERR_EEPROM; + + e1000_standby_eeprom(hw); + cond_resched(); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, + eeprom->opcode_bits); + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + write_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16)((offset + widx) * 2), + eeprom->address_bits); + + /* Send the data */ + + /* Loop to allow for up to whole page write (32 bytes) of + * eeprom + */ + while (widx < words) { + u16 word_out = data[widx]; + + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_ee_bits(hw, word_out, 16); + widx++; + + /* Some larger eeprom sizes are capable of a 32-byte + * PAGE WRITE operation, while the smaller eeproms are + * capable of an 8-byte PAGE WRITE operation. Break the + * inner loop to pass new address + */ + if ((((offset + widx) * 2) % eeprom->page_size) == 0) { + e1000_standby_eeprom(hw); + break; + } + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom_microwire - Writes a 16 bit word to a given offset in a Microwire EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u16 words_written = 0; + u16 i = 0; + + /* Send the write enable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 11). It's less work to include + * the 11 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This puts the + * EEPROM into write/erase mode. 
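+	 *
+	 * With a 64-word (6 address-bit) part this amounts to 9 bits on the
+	 * wire: the 5 bits taken from the opcode constant (start bit, opcode
+	 * 00 and the leading "11" of the dummy address) followed by 4 zero
+	 * bits for the rest of the dummy address.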
+ */ + e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, + (u16)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); + + /* Prepare the EEPROM */ + e1000_standby_eeprom(hw); + + while (words_written < words) { + /* Send the Write command (3-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, + eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16)(offset + words_written), + eeprom->address_bits); + + /* Send the data */ + e1000_shift_out_ee_bits(hw, data[words_written], 16); + + /* Toggle the CS line. This in effect tells the EEPROM to + * execute the previous command. + */ + e1000_standby_eeprom(hw); + + /* Read DO repeatedly until it is high (equal to '1'). The + * EEPROM will signal that the command has been completed by + * raising the DO signal. If DO does not go high in 10 + * milliseconds, then error out. + */ + for (i = 0; i < 200; i++) { + eecd = er32(EECD); + if (eecd & E1000_EECD_DO) + break; + udelay(50); + } + if (i == 200) { + e_dbg("EEPROM Write did not complete\n"); + return -E1000_ERR_EEPROM; + } + + /* Recover from write */ + e1000_standby_eeprom(hw); + cond_resched(); + + words_written++; + } + + /* Send the write disable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 10). It's less work to include + * the 10 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This takes the + * EEPROM out of write/erase mode. + */ + e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, + (u16)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2)); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - read the adapters MAC from eeprom + * @hw: Struct containing variables accessed by shared code + * + * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the + * second function of dual function devices + */ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + u16 offset; + u16 eeprom_data, i; + + for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { + offset = i >> 1; + if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF); + hw->perm_mac_addr[i + 1] = (u8)(eeprom_data >> 8); + } + + switch (hw->mac_type) { + default: + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) + hw->perm_mac_addr[5] ^= 0x01; + break; + } + + for (i = 0; i < NODE_ADDRESS_SIZE; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return E1000_SUCCESS; +} + +/** + * e1000_init_rx_addrs - Initializes receive address filters. + * @hw: Struct containing variables accessed by shared code + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + */ +static void e1000_init_rx_addrs(struct e1000_hw *hw) +{ + u32 i; + u32 rar_num; + + /* Setup the receive address. */ + e_dbg("Programming MAC Address into RAR[0]\n"); + + e1000_rar_set(hw, hw->mac_addr, 0); + + rar_num = E1000_RAR_ENTRIES; + + /* Zero out the following 14 receive addresses. 
RAR[15] is for + * manageability + */ + e_dbg("Clearing RAR[1-14]\n"); + for (i = 1; i < rar_num; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table + * @hw: Struct containing variables accessed by shared code + * @mc_addr: the multicast address to hash + */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value = 0; + + /* The portion of the address that is used for the hash table is + * determined by the mc_filter_type setting. + */ + switch (hw->mc_filter_type) { + /* [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + */ + case 0: + /* [47:36] i.e. 0x563 for above example address */ + hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: + /* [46:35] i.e. 0xAC6 for above example address */ + hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: + /* [45:34] i.e. 0x5D8 for above example address */ + hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: + /* [43:32] i.e. 0x634 for above example address */ + hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + } + + hash_value &= 0xFFF; + return hash_value; +} + +/** + * e1000_rar_set - Puts an ethernet address into a receive address register. + * @hw: Struct containing variables accessed by shared code + * @addr: Address to put into receive address register + * @index: Receive address register to write + */ +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx + * unit hang. + * + * Description: + * If there are any Rx frames queued up or otherwise present in the HW + * before RSS is enabled, and then we enable RSS, the HW Rx unit will + * hang. To work around this issue, we have to disable receives and + * flush out all Rx frames before we enable RSS. To do so, we modify we + * redirect all Rx traffic to manageability and then reset the HW. + * This flushes away Rx frames, and (since the redirections to + * manageability persists across resets) keeps new ones from coming in + * while we work. Then, we clear the Address Valid AV bit for all MAC + * addresses and undo the re-direction to manageability. + * Now, frames are coming in again, but the MAC won't accept them, so + * far so good. We now proceed to initialize RSS (if necessary) and + * configure the Rx unit. Last, we re-enable the AV bits and continue + * on our merry way. + */ + switch (hw->mac_type) { + default: + /* Indicate to hardware the Address is Valid. */ + rar_high |= E1000_RAH_AV; + break; + } + + E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table. 
+ * @hw: Struct containing variables accessed by shared code + * @offset: Offset in VLAN filter table to write + * @value: Value to write into VLAN filter table + */ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + u32 temp; + + if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { + temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); + E1000_WRITE_FLUSH(); + } else { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_clear_vfta - Clears the VLAN filter table + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0); + E1000_WRITE_FLUSH(); + } +} + +static s32 e1000_id_led_init(struct e1000_hw *hw) +{ + u32 ledctl; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 eeprom_data, i, temp; + const u16 led_mask = 0x0F; + + if (hw->mac_type < e1000_82540) { + /* Nothing to do */ + return E1000_SUCCESS; + } + + ledctl = er32(LEDCTL); + hw->ledctl_default = ledctl; + hw->ledctl_mode1 = hw->ledctl_default; + hw->ledctl_mode2 = hw->ledctl_default; + + if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + + if ((eeprom_data == ID_LED_RESERVED_0000) || + (eeprom_data == ID_LED_RESERVED_FFFF)) { + eeprom_data = ID_LED_DEFAULT; + } + + for (i = 0; i < 4; i++) { + temp = (eeprom_data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_setup_led + * @hw: Struct containing variables accessed by shared code + * + * Prepares SW controlable LED for use and saves the current state of the LED. 
+ */ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + u32 ledctl; + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No setup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn off PHY Smart Power Down (if enabled) */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, + &hw->phy_spd_default); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + (u16)(hw->phy_spd_default & + ~IGP01E1000_GMII_SPD)); + if (ret_val) + return ret_val; + fallthrough; + default: + if (hw->media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + /* Save current LEDCTL settings */ + hw->ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->media_type == e1000_media_type_copper) + ew32(LEDCTL, hw->ledctl_mode1); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores the saved state of the SW controlable LED. + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No cleanup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn on PHY Smart Power Down (if previously enabled) */ + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + hw->phy_spd_default); + if (ret_val) + return ret_val; + fallthrough; + default: + /* Restore LEDCTL settings */ + ew32(LEDCTL, hw->ledctl_default); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turns on the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_on(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode2); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turns off the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_off(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == 
e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode1); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_hw_cntrs - Clears all hardware statistics counters. + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_hw_cntrs(struct e1000_hw *hw) +{ + er32(CRCERRS); + er32(SYMERRS); + er32(MPC); + er32(SCC); + er32(ECOL); + er32(MCC); + er32(LATECOL); + er32(COLC); + er32(DC); + er32(SEC); + er32(RLEC); + er32(XONRXC); + er32(XONTXC); + er32(XOFFRXC); + er32(XOFFTXC); + er32(FCRUC); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + + er32(GPRC); + er32(BPRC); + er32(MPRC); + er32(GPTC); + er32(GORCL); + er32(GORCH); + er32(GOTCL); + er32(GOTCH); + er32(RNBC); + er32(RUC); + er32(RFC); + er32(ROC); + er32(RJC); + er32(TORL); + er32(TORH); + er32(TOTL); + er32(TOTH); + er32(TPR); + er32(TPT); + + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(MPTC); + er32(BPTC); + + if (hw->mac_type < e1000_82543) + return; + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + if (hw->mac_type <= e1000_82544) + return; + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); +} + +/** + * e1000_reset_adaptive - Resets Adaptive IFS to its default state. + * @hw: Struct containing variables accessed by shared code + * + * Call this after e1000_init_hw. You may override the IFS defaults by setting + * hw->ifs_params_forced to true. However, you must initialize hw-> + * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio + * before calling this function. + */ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if (!hw->ifs_params_forced) { + hw->current_ifs_val = 0; + hw->ifs_min_val = IFS_MIN; + hw->ifs_max_val = IFS_MAX; + hw->ifs_step_size = IFS_STEP; + hw->ifs_ratio = IFS_RATIO; + } + hw->in_ifs_mode = false; + ew32(AIT, 0); + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_update_adaptive - update adaptive IFS + * @hw: Struct containing variables accessed by shared code + * + * Called during the callback/watchdog routine to update IFS value based on + * the ratio of transmits to collisions. 
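+ *
+ * In short: while tx_packet_delta is above MIN_NUM_XMITS and
+ * collision_delta * ifs_ratio exceeds it, the AIT register is raised
+ * (starting at ifs_min_val, then in ifs_step_size increments up to
+ * ifs_max_val) to stretch the inter-frame spacing; once the collision
+ * ratio falls back and traffic drops to MIN_NUM_XMITS frames or fewer,
+ * AIT is reset to 0 and adaptive IFS mode is left.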
+ */ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { + if (hw->tx_packet_delta > MIN_NUM_XMITS) { + hw->in_ifs_mode = true; + if (hw->current_ifs_val < hw->ifs_max_val) { + if (hw->current_ifs_val == 0) + hw->current_ifs_val = + hw->ifs_min_val; + else + hw->current_ifs_val += + hw->ifs_step_size; + ew32(AIT, hw->current_ifs_val); + } + } + } else { + if (hw->in_ifs_mode && + (hw->tx_packet_delta <= MIN_NUM_XMITS)) { + hw->current_ifs_val = 0; + hw->in_ifs_mode = false; + ew32(AIT, 0); + } + } + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_get_bus_info + * @hw: Struct containing variables accessed by shared code + * + * Gets the current PCI bus type, speed, and width of the hardware + */ +void e1000_get_bus_info(struct e1000_hw *hw) +{ + u32 status; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->bus_type = e1000_bus_type_pci; + hw->bus_speed = e1000_bus_speed_unknown; + hw->bus_width = e1000_bus_width_unknown; + break; + default: + status = er32(STATUS); + hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? + e1000_bus_type_pcix : e1000_bus_type_pci; + + if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { + hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? + e1000_bus_speed_66 : e1000_bus_speed_120; + } else if (hw->bus_type == e1000_bus_type_pci) { + hw->bus_speed = (status & E1000_STATUS_PCI66) ? + e1000_bus_speed_66 : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + hw->bus_speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + hw->bus_speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + hw->bus_speed = e1000_bus_speed_133; + break; + default: + hw->bus_speed = e1000_bus_speed_reserved; + break; + } + } + hw->bus_width = (status & E1000_STATUS_BUS64) ? + e1000_bus_width_64 : e1000_bus_width_32; + break; + } +} + +/** + * e1000_write_reg_io + * @hw: Struct containing variables accessed by shared code + * @offset: offset to write to + * @value: value to write + * + * Writes a value to one of the devices registers using port I/O (as opposed to + * memory mapped I/O). Only 82544 and newer devices support port I/O. + */ +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value) +{ + unsigned long io_addr = hw->io_base; + unsigned long io_data = hw->io_base + 4; + + e1000_io_write(hw, io_addr, offset); + e1000_io_write(hw, io_data, value); +} + +/** + * e1000_get_cable_length - Estimates the cable length. + * @hw: Struct containing variables accessed by shared code + * @min_length: The estimated minimum length + * @max_length: The estimated maximum length + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * This function always returns a ranged length (minimum & maximum). + * So for M88 phy's, this function interprets the one value returned from the + * register to the minimum and maximum range. + * For IGP phy's, the function calculates the range by the AGC registers. 
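+ *
+ * For the IGP case the four AGC channel readings are averaged (the
+ * shortest channel is dropped when the sum indicates a length under
+ * 50 m) and the average indexes e1000_igp_cable_length_table[]; that
+ * table value +/- IGP01E1000_AGC_RANGE is what is returned as the
+ * minimum/maximum length.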
+ */ +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length) +{ + s32 ret_val; + u16 agc_value = 0; + u16 i, phy_data; + u16 cable_length; + + *min_length = *max_length = 0; + + /* Use old method for Phy older than IGP */ + if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + /* Convert the enum value to ranged values */ + switch (cable_length) { + case e1000_cable_length_50: + *min_length = 0; + *max_length = e1000_igp_cable_length_50; + break; + case e1000_cable_length_50_80: + *min_length = e1000_igp_cable_length_50; + *max_length = e1000_igp_cable_length_80; + break; + case e1000_cable_length_80_110: + *min_length = e1000_igp_cable_length_80; + *max_length = e1000_igp_cable_length_110; + break; + case e1000_cable_length_110_140: + *min_length = e1000_igp_cable_length_110; + *max_length = e1000_igp_cable_length_140; + break; + case e1000_cable_length_140: + *min_length = e1000_igp_cable_length_140; + *max_length = e1000_igp_cable_length_170; + break; + default: + return -E1000_ERR_PHY; + } + } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ + u16 cur_agc_value; + u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_A, + IGP01E1000_PHY_AGC_B, + IGP01E1000_PHY_AGC_C, + IGP01E1000_PHY_AGC_D + }; + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; + + /* Value bound check. */ + if ((cur_agc_value >= + IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || + (cur_agc_value == 0)) + return -E1000_ERR_PHY; + + agc_value += cur_agc_value; + + /* Update minimal AGC value. */ + if (min_agc_value > cur_agc_value) + min_agc_value = cur_agc_value; + } + + /* Remove the minimal AGC result for length < 50m */ + if (agc_value < + IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { + agc_value -= min_agc_value; + + /* Get the average length of the remaining 3 channels */ + agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); + } else { + /* Get the average length of all the 4 channels. */ + agc_value /= IGP01E1000_PHY_CHANNEL_NUM; + } + + /* Set the range of the calculated length. */ + *min_length = ((e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) > 0) ? + (e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) : 0; + *max_length = e1000_igp_cable_length_table[agc_value] + + IGP01E1000_AGC_RANGE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_polarity - Check the cable polarity + * @hw: Struct containing variables accessed by shared code + * @polarity: output parameter : 0 - Polarity is not reversed + * 1 - Polarity is reversed. + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function simply reads the polarity bit in the + * Phy Status register. For IGP phy's, this bit is valid only if link speed is + * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will + * return 0. If the link speed is 1000 Mbps the polarity status is in the + * IGP01E1000_PHY_PCS_INIT_REG. 
+ */ +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_m88) { + /* return the Polarity bit in the Status register. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >> + M88E1000_PSSR_REV_POLARITY_SHIFT) ? + e1000_rev_polarity_reversed : e1000_rev_polarity_normal; + + } else if (hw->phy_type == e1000_phy_igp) { + /* Read the Status register to check the speed */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + /* If speed is 1000 Mbps, must read the + * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status + */ + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Read the GIG initialization PCS register (0x00B4) */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, + &phy_data); + if (ret_val) + return ret_val; + + /* Check the polarity bits */ + *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } else { + /* For 10 Mbps, read the polarity bit in the status + * register. (for 100 Mbps this bit is always 0) + */ + *polarity = + (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_downshift - Check if Downshift occurred + * @hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function reads the Downshift bit in the Phy + * Specific Status register. For IGP phy's, it reads the Downgrade bit in the + * Link Health register. In IGP this bit is latched high, so the driver must + * read it immediately after link is established. + */ +static s32 e1000_check_downshift(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = + (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 
1 : 0; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> + M88E1000_PSSR_DOWNSHIFT_SHIFT; + } + + return E1000_SUCCESS; +} + +static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D +}; + +static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw) +{ + u16 min_length, max_length; + u16 phy_data, i; + s32 ret_val; + + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + if (hw->dsp_config_state != e1000_dsp_config_enabled) + return 0; + + if (min_length >= e1000_igp_cable_length_50) { + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + hw->dsp_config_state = e1000_dsp_config_activated; + } else { + u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; + u32 idle_errs = 0; + + /* clear previous idle error counts */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + udelay(1000); + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + hw->ffe_config_state = e1000_ffe_config_active; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if (ret_val) + return ret_val; + break; + } + + if (idle_errs) + ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } + + return 0; +} + +/** + * e1000_config_dsp_after_link_change + * @hw: Struct containing variables accessed by shared code + * @link_up: was link up at the time this was called + * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + * + * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a + * gigabit link is achieved to improve link quality. + */ + +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) +{ + s32 ret_val; + u16 phy_data, phy_saved_data, speed, duplex, i; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + if (link_up) { + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (speed == SPEED_1000) { + ret_val = e1000_1000Mb_check_cable_length(hw); + if (ret_val) + return ret_val; + } + } else { + if (hw->dsp_config_state == e1000_dsp_config_activated) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. 
+ */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; + + ret_val = + e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->dsp_config_state = e1000_dsp_config_enabled; + } + + if (hw->ffe_config_state == e1000_ffe_config_active) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. + */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_DEFAULT); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->ffe_config_state = e1000_ffe_config_enabled; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_set_phy_mode - Set PHY to class A mode + * @hw: Struct containing variables accessed by shared code + * + * Assumes the following operations will follow to enable the new class mode. + * 1. Do a PHY soft reset + * 2. Restart auto-negotiation or force link. + */ +static s32 e1000_set_phy_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 eeprom_data; + + if ((hw->mac_type == e1000_82545_rev_3) && + (hw->media_type == e1000_media_type_copper)) { + ret_val = + e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, + &eeprom_data); + if (ret_val) + return ret_val; + + if ((eeprom_data != EEPROM_RESERVED_WORD) && + (eeprom_data & EEPROM_PHY_CLASS_A)) { + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, + 0x000B); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, + 0x8104); + if (ret_val) + return ret_val; + + hw->phy_reset_disable = false; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - set d3 link power state + * @hw: Struct containing variables accessed by shared code + * @active: true to enable lplu false to disable lplu. + * + * This function sets the lplu state according to the active flag. When + * activating lplu this function also disables smart speed and vise versa. + * lplu will not be activated unless the device autonegotiation advertisement + * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes. 
+ * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + */ +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + /* During driver activity LPLU should not be used or it will attain link + * from the lowest speeds starting from 10Mbps. The capability is used + * for Dx transitions and states + */ + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); + if (ret_val) + return ret_val; + } + + if (!active) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (hw->smart_speed == e1000_smart_speed_on) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } else if (hw->smart_speed == e1000_smart_speed_off) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* When LPLU is enabled we should disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + return E1000_SUCCESS; +} + +/** + * e1000_set_vco_speed + * @hw: Struct containing variables accessed by shared code + * + * Change VCO speed register to improve Bit Error Rate performance of SERDES. 
+ */ +static s32 e1000_set_vco_speed(struct e1000_hw *hw) +{ + s32 ret_val; + u16 default_page = 0; + u16 phy_data; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + /* Set PHY register 30, page 5, bit 8 to 0 */ + + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Set PHY register 30, page 4, bit 11 to 1 */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PHY_VCO_REG_BIT11; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + +/** + * e1000_enable_mng_pass_thru - check for bmc pass through + * @hw: Struct containing variables accessed by shared code + * + * Verifies the hardware needs to allow ARPs to be processed by the host + * returns: - true/false + */ +u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + + if (hw->asf_firmware_present) { + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN) || + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + return false; + if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) + return true; + } + return false; +} + +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 i; + + /* Polarity reversal workaround for forced 10F/10H links. */ + + /* Disable the transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the NO link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be clear. 
+ */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) + break; + msleep(100); + } + + /* Recommended delay time after link has been lost */ + msleep(1000); + + /* Now we will re-enable th transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be set. + */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_get_auto_rd_done + * @hw: Struct containing variables accessed by shared code + * + * Check for EEPROM Auto Read bit done. + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) +{ + msleep(5); + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_cfg_done + * @hw: Struct containing variables accessed by shared code + * + * Checks if the PHY configuration is done + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) +{ + msleep(10); + return E1000_SUCCESS; +} diff --git a/devices/e1000/e1000_hw-6.1-orig.h b/devices/e1000/e1000_hw-6.1-orig.h new file mode 100644 index 00000000..b57a0495 --- /dev/null +++ b/devices/e1000/e1000_hw-6.1-orig.h @@ -0,0 +1,3085 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ + +/* e1000_hw.h + * Structures, enums, and macros for the MAC + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep.h" + + +/* Forward declarations of structures used by the shared code */ +struct e1000_hw; +struct e1000_hw_stats; + +/* Enumerated types specific to the e1000 hardware */ +/* Media Access Controllers */ +typedef enum { + e1000_undefined = 0, + e1000_82542_rev2_0, + e1000_82542_rev2_1, + e1000_82543, + e1000_82544, + e1000_82540, + e1000_82545, + e1000_82545_rev_3, + e1000_82546, + e1000_ce4100, + e1000_82546_rev_3, + e1000_82541, + e1000_82541_rev_2, + e1000_82547, + e1000_82547_rev_2, + e1000_num_macs +} e1000_mac_type; + +typedef enum { + e1000_eeprom_uninitialized = 0, + e1000_eeprom_spi, + e1000_eeprom_microwire, + e1000_eeprom_flash, + e1000_eeprom_none, /* No NVM support */ + e1000_num_eeprom_types +} e1000_eeprom_type; + +/* Media Types */ +typedef enum { + e1000_media_type_copper = 0, + e1000_media_type_fiber = 1, + e1000_media_type_internal_serdes = 2, + e1000_num_media_types +} e1000_media_type; + +typedef enum { + e1000_10_half = 0, + e1000_10_full = 1, + e1000_100_half = 2, + e1000_100_full = 3 +} e1000_speed_duplex_type; + +/* Flow Control Settings */ +typedef enum { + E1000_FC_NONE = 0, + E1000_FC_RX_PAUSE = 1, + E1000_FC_TX_PAUSE = 2, + E1000_FC_FULL = 3, + E1000_FC_DEFAULT = 0xFF +} e1000_fc_type; + +struct e1000_shadow_ram { + u16 eeprom_word; + bool modified; +}; + +/* PCI bus types */ +typedef enum { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_reserved +} e1000_bus_type; + +/* PCI bus speeds */ +typedef enum { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_reserved +} e1000_bus_speed; + +/* PCI bus widths */ +typedef enum { + e1000_bus_width_unknown = 0, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +} e1000_bus_width; + +/* PHY status info structure and supporting enums */ +typedef enum { + e1000_cable_length_50 = 0, + e1000_cable_length_50_80, + e1000_cable_length_80_110, + e1000_cable_length_110_140, + e1000_cable_length_140, + e1000_cable_length_undefined = 0xFF +} e1000_cable_length; + +typedef enum { + e1000_gg_cable_length_60 = 0, + e1000_gg_cable_length_60_115 = 1, + e1000_gg_cable_length_115_150 = 2, + e1000_gg_cable_length_150 = 4 +} e1000_gg_cable_length; + +typedef enum { + e1000_igp_cable_length_10 = 10, + e1000_igp_cable_length_20 = 20, + e1000_igp_cable_length_30 = 30, + e1000_igp_cable_length_40 = 40, + e1000_igp_cable_length_50 = 50, + e1000_igp_cable_length_60 = 60, + e1000_igp_cable_length_70 = 70, + e1000_igp_cable_length_80 = 80, + e1000_igp_cable_length_90 = 90, + e1000_igp_cable_length_100 = 100, + e1000_igp_cable_length_110 = 110, + e1000_igp_cable_length_115 = 115, + e1000_igp_cable_length_120 = 120, + e1000_igp_cable_length_130 = 130, + e1000_igp_cable_length_140 = 140, + e1000_igp_cable_length_150 = 150, + e1000_igp_cable_length_160 = 160, + e1000_igp_cable_length_170 = 170, + e1000_igp_cable_length_180 = 180 +} e1000_igp_cable_length; + +typedef enum { + e1000_10bt_ext_dist_enable_normal = 0, + e1000_10bt_ext_dist_enable_lower, + e1000_10bt_ext_dist_enable_undefined = 0xFF +} e1000_10bt_ext_dist_enable; + +typedef enum { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +} e1000_rev_polarity; + +typedef enum { + e1000_downshift_normal = 0, + e1000_downshift_activated, 
+ e1000_downshift_undefined = 0xFF +} e1000_downshift; + +typedef enum { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +} e1000_smart_speed; + +typedef enum { + e1000_polarity_reversal_enabled = 0, + e1000_polarity_reversal_disabled, + e1000_polarity_reversal_undefined = 0xFF +} e1000_polarity_reversal; + +typedef enum { + e1000_auto_x_mode_manual_mdi = 0, + e1000_auto_x_mode_manual_mdix, + e1000_auto_x_mode_auto1, + e1000_auto_x_mode_auto2, + e1000_auto_x_mode_undefined = 0xFF +} e1000_auto_x_mode; + +typedef enum { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +} e1000_1000t_rx_status; + +typedef enum { + e1000_phy_m88 = 0, + e1000_phy_igp, + e1000_phy_8211, + e1000_phy_8201, + e1000_phy_undefined = 0xFF +} e1000_phy_type; + +typedef enum { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +} e1000_ms_type; + +typedef enum { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active, + e1000_ffe_config_blocked +} e1000_ffe_config; + +typedef enum { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled, + e1000_dsp_config_activated, + e1000_dsp_config_undefined = 0xFF +} e1000_dsp_config; + +struct e1000_phy_info { + e1000_cable_length cable_length; + e1000_10bt_ext_dist_enable extended_10bt_distance; + e1000_rev_polarity cable_polarity; + e1000_downshift downshift; + e1000_polarity_reversal polarity_correction; + e1000_auto_x_mode mdix_mode; + e1000_1000t_rx_status local_rx; + e1000_1000t_rx_status remote_rx; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_eeprom_info { + e1000_eeprom_type type; + u16 word_size; + u16 opcode_bits; + u16 address_bits; + u16 delay_usec; + u16 page_size; +}; + +/* Flex ASF Information */ +#define E1000_HOST_IF_MAX_SIZE 2048 + +typedef enum { + e1000_byte_align = 0, + e1000_word_align = 1, + e1000_dword_align = 2 +} e1000_align_type; + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_EEPROM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_TYPE 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 + +#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \ + (((_value) & 0xff00) >> 8)) + +/* Function prototypes */ +/* Initialization */ +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_set_mac_type(struct e1000_hw *hw); +void e1000_set_media_type(struct e1000_hw *hw); + +/* Link Configuration */ +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex); +s32 e1000_force_mac_fc(struct e1000_hw *hw); + +/* PHY */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_reset(struct e1000_hw *hw); +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); + +/* EEPROM Functions */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw); + +/* MNG HOST IF functions */ +u32 
e1000_enable_mng_pass_thru(struct e1000_hw *hw); + +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ + +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_ICH_IAMT_MODE 0x2 +#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ + +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */ +#define E1000_VFTA_ENTRY_SHIFT 0x5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658 */ +}; +#ifdef __BIG_ENDIAN +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u16 vlan_id; + u8 reserved0; + u8 status; + u32 reserved1; + u8 checksum; + u8 reserved3; + u16 reserved2; +}; +#else +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; +#endif + +bool e1000_check_mng_mode(struct e1000_hw *hw); +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_read_mac_addr(struct e1000_hw *hw); + +/* Filters (multicast, vlan, receive) */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr); +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value); +void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); + +/* LED functions */ +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +s32 e1000_blink_led_start(struct e1000_hw *hw); + +/* Adaptive IFS Functions */ + +/* Everything else */ +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +void e1000_get_bus_info(struct e1000_hw *hw); +void e1000_pci_set_mwi(struct e1000_hw *hw); +void e1000_pci_clear_mwi(struct e1000_hw *hw); +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); +int e1000_pcix_get_mmrbc(struct e1000_hw *hw); +/* Port I/O is only supported on 82544 and newer */ +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value); + +#define E1000_READ_REG_IO(a, reg) \ + e1000_read_reg_io((a), E1000_##reg) +#define E1000_WRITE_REG_IO(a, reg, val) \ + e1000_write_reg_io((a), E1000_##reg, val) + +/* PCI Device IDs */ +#define E1000_DEV_ID_82542 0x1000 +#define E1000_DEV_ID_82543GC_FIBER 0x1001 +#define E1000_DEV_ID_82543GC_COPPER 0x1004 +#define E1000_DEV_ID_82544EI_COPPER 0x1008 +#define E1000_DEV_ID_82544EI_FIBER 0x1009 +#define E1000_DEV_ID_82544GC_COPPER 0x100C +#define E1000_DEV_ID_82544GC_LOM 0x100D +#define E1000_DEV_ID_82540EM 0x100E +#define 
E1000_DEV_ID_82540EM_LOM 0x1015 +#define E1000_DEV_ID_82540EP_LOM 0x1016 +#define E1000_DEV_ID_82540EP 0x1017 +#define E1000_DEV_ID_82540EP_LP 0x101E +#define E1000_DEV_ID_82545EM_COPPER 0x100F +#define E1000_DEV_ID_82545EM_FIBER 0x1011 +#define E1000_DEV_ID_82545GM_COPPER 0x1026 +#define E1000_DEV_ID_82545GM_FIBER 0x1027 +#define E1000_DEV_ID_82545GM_SERDES 0x1028 +#define E1000_DEV_ID_82546EB_COPPER 0x1010 +#define E1000_DEV_ID_82546EB_FIBER 0x1012 +#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define E1000_DEV_ID_82541EI 0x1013 +#define E1000_DEV_ID_82541EI_MOBILE 0x1018 +#define E1000_DEV_ID_82541ER_LOM 0x1014 +#define E1000_DEV_ID_82541ER 0x1078 +#define E1000_DEV_ID_82547GI 0x1075 +#define E1000_DEV_ID_82541GI 0x1076 +#define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82541GI_LF 0x107C +#define E1000_DEV_ID_82546GB_COPPER 0x1079 +#define E1000_DEV_ID_82546GB_FIBER 0x107A +#define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82547EI_MOBILE 0x101A +#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E + +#define NODE_ADDRESS_SIZE 6 + +/* MAC decode size is 128K - This is the size of BAR0 */ +#define MAC_DECODE_SIZE (128 * 1024) + +#define E1000_82542_2_0_REV_ID 2 +#define E1000_82542_2_1_REV_ID 3 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* The sizes (in bytes) of a ethernet packet */ +#define ENET_HEADER_SIZE 14 +#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ +#define ETHERNET_FCS_SIZE 4 +#define MINIMUM_ETHERNET_PACKET_SIZE \ + (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE) +#define CRC_LENGTH ETHERNET_FCS_SIZE +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* 802.1q VLAN Packet Sizes */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */ + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ +#define ETHERNET_IP_TYPE 0x0800 /* IP packets */ +#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */ + +/* Packet Header defines */ +#define IP_PROTOCOL_TCP 6 +#define IP_PROTOCOL_UDP 0x11 + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + */ +#define POLL_IMS_ENABLE_MASK ( \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ) + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. We + * reserve one of these spots for our directed address, allowing us room for + * E1000_RAR_ENTRIES - 1 multicast addresses. 
+ */ +#define E1000_RAR_ENTRIES 15 + +#define MIN_NUMBER_OF_DESCRIPTORS 8 +#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 + +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of buffers 1-3 */ + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ 
+ E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; /* */ + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; /* */ + u8 cmd; /* */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; /* */ + } fields; + } upper; +}; + +/* Filters */ +#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ +#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address Register */ +struct e1000_rar { + volatile __le32 low; /* receive address low */ + volatile __le32 high; /* receive address high */ +}; + +/* Number of entries in the 
Multicast Table Array (MTA). */ +#define E1000_NUM_MTA_REGISTERS 128 + +/* IPv4 Address Table Entry */ +struct e1000_ipv4_at_entry { + volatile u32 ipv4_addr; /* IP Address (RW) */ + volatile u32 reserved; +}; + +/* Four wakeup IP addresses are supported */ +#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 +#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX +#define E1000_IP6AT_SIZE 1 + +/* IPv6 Address Table Entry */ +struct e1000_ipv6_at_entry { + volatile u8 ipv6_addr[16]; +}; + +/* Flexible Filter Length Table Entry */ +struct e1000_fflt_entry { + volatile u32 length; /* Flexible Filter Length (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Mask Table Entry */ +struct e1000_ffmt_entry { + volatile u32 mask; /* Flexible Filter Mask (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Value Table Entry */ +struct e1000_ffvt_entry { + volatile u32 value; /* Flexible Filter Value (RW) */ + volatile u32 reserved; +}; + +/* Four Flexible Filters are supported */ +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX + +#define E1000_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Register Set. (82543, 82544) + * + * Registers are defined to be 32 bits and should be accessed as 32 bit values. + * These registers are physically located on the NIC, but are mapped into the + * host memory address space. + * + * RW - register is both readable and writable + * RO - register is read only + * WO - register is write only + * R/clr - register is read only and is cleared when read + * A - register array + */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ + +#define INTEL_CE_GBE_MDIO_RCOMP_BASE (hw->ce4100_gbe_mdio_base_virt) +#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) +#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) +#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) +#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC) +#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20) +#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24) + +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ + +/* Auxiliary Control Register. 
This register is CE4100 specific, + * RMII/RGMII function is switched by this register - RW + * Following are bits definitions of the Auxiliary Control Register + */ +#define E1000_CTL_AUX 0x000E0 +#define E1000_CTL_AUX_END_SEL_SHIFT 10 +#define E1000_CTL_AUX_ENDIANESS_SHIFT 8 +#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0 + +/* descriptor and packet transfer use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use CTL_AUX.ENDIANESS, packet use default */ +#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use default, packet use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT) +/* all use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT) + +#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT) +#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT) + +/* LW little endian, Byte big endian */ +#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT) + +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define FEXTNVM_SW_CONFIG 0x0001 +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_FLASH_UPDATES 1000 +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* RX Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* RX Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* RX 
Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* RX Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* RX Data FIFO Packet Count - RW */ +#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ +#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ +#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ +#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ +#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ +#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ +#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */ +#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ +#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ +#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ +#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ +#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ +#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ +#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ +#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ +#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */ +#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */ +#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */ +#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */ +#define E1000_TDT 0x03818 /* TX Descripotr Tail - RW */ +#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */ +#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ +#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ +#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ +#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ +#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ +#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ +#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ +#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ +#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define 
E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 
0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ + +#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ + +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* Register Set (82542) + * + * Some of the 82542 registers are located at different offsets than they are + * in more current versions of the 8254x. Despite the difference in location, + * the registers function in the same manner. 
+ */ +#define E1000_82542_CTL_AUX E1000_CTL_AUX +#define E1000_82542_CTRL E1000_CTRL +#define E1000_82542_CTRL_DUP E1000_CTRL_DUP +#define E1000_82542_STATUS E1000_STATUS +#define E1000_82542_EECD E1000_EECD +#define E1000_82542_EERD E1000_EERD +#define E1000_82542_CTRL_EXT E1000_CTRL_EXT +#define E1000_82542_FLA E1000_FLA +#define E1000_82542_MDIC E1000_MDIC +#define E1000_82542_SCTL E1000_SCTL +#define E1000_82542_FEXTNVM E1000_FEXTNVM +#define E1000_82542_FCAL E1000_FCAL +#define E1000_82542_FCAH E1000_FCAH +#define E1000_82542_FCT E1000_FCT +#define E1000_82542_VET E1000_VET +#define E1000_82542_RA 0x00040 +#define E1000_82542_ICR E1000_ICR +#define E1000_82542_ITR E1000_ITR +#define E1000_82542_ICS E1000_ICS +#define E1000_82542_IMS E1000_IMS +#define E1000_82542_IMC E1000_IMC +#define E1000_82542_RCTL E1000_RCTL +#define E1000_82542_RDTR 0x00108 +#define E1000_82542_RDFH E1000_RDFH +#define E1000_82542_RDFT E1000_RDFT +#define E1000_82542_RDFHS E1000_RDFHS +#define E1000_82542_RDFTS E1000_RDFTS +#define E1000_82542_RDFPC E1000_RDFPC +#define E1000_82542_RDBAL 0x00110 +#define E1000_82542_RDBAH 0x00114 +#define E1000_82542_RDLEN 0x00118 +#define E1000_82542_RDH 0x00120 +#define E1000_82542_RDT 0x00128 +#define E1000_82542_RDTR0 E1000_82542_RDTR +#define E1000_82542_RDBAL0 E1000_82542_RDBAL +#define E1000_82542_RDBAH0 E1000_82542_RDBAH +#define E1000_82542_RDLEN0 E1000_82542_RDLEN +#define E1000_82542_RDH0 E1000_82542_RDH +#define E1000_82542_RDT0 E1000_82542_RDT +#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication + * RX Control - RW */ +#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) +#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ +#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ +#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ +#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ +#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ +#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ +#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ +#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ +#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ +#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ +#define E1000_82542_RDTR1 0x00130 +#define E1000_82542_RDBAL1 0x00138 +#define E1000_82542_RDBAH1 0x0013C +#define E1000_82542_RDLEN1 0x00140 +#define E1000_82542_RDH1 0x00148 +#define E1000_82542_RDT1 0x00150 +#define E1000_82542_FCRTH 0x00160 +#define E1000_82542_FCRTL 0x00168 +#define E1000_82542_FCTTV E1000_FCTTV +#define E1000_82542_TXCW E1000_TXCW +#define E1000_82542_RXCW E1000_RXCW +#define E1000_82542_MTA 0x00200 +#define E1000_82542_TCTL E1000_TCTL +#define E1000_82542_TCTL_EXT E1000_TCTL_EXT +#define E1000_82542_TIPG E1000_TIPG +#define E1000_82542_TDBAL 0x00420 +#define E1000_82542_TDBAH 0x00424 +#define E1000_82542_TDLEN 0x00428 +#define E1000_82542_TDH 0x00430 +#define E1000_82542_TDT 0x00438 +#define E1000_82542_TIDV 0x00440 +#define E1000_82542_TBT E1000_TBT +#define E1000_82542_AIT E1000_AIT +#define E1000_82542_VFTA 0x00600 +#define E1000_82542_LEDCTL E1000_LEDCTL +#define E1000_82542_PBA E1000_PBA +#define E1000_82542_PBS E1000_PBS +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_EEARBC E1000_EEARBC +#define E1000_82542_FLASHT E1000_FLASHT +#define E1000_82542_EEWR E1000_EEWR +#define E1000_82542_FLSWCTL E1000_FLSWCTL +#define E1000_82542_FLSWDATA 
E1000_FLSWDATA +#define E1000_82542_FLSWCNT E1000_FLSWCNT +#define E1000_82542_FLOP E1000_FLOP +#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL +#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE +#define E1000_82542_PHY_CTRL E1000_PHY_CTRL +#define E1000_82542_ERT E1000_ERT +#define E1000_82542_RXDCTL E1000_RXDCTL +#define E1000_82542_RXDCTL1 E1000_RXDCTL1 +#define E1000_82542_RADV E1000_RADV +#define E1000_82542_RSRPD E1000_RSRPD +#define E1000_82542_TXDMAC E1000_TXDMAC +#define E1000_82542_KABGTXD E1000_KABGTXD +#define E1000_82542_TDFHS E1000_TDFHS +#define E1000_82542_TDFTS E1000_TDFTS +#define E1000_82542_TDFPC E1000_TDFPC +#define E1000_82542_TXDCTL E1000_TXDCTL +#define E1000_82542_TADV E1000_TADV +#define E1000_82542_TSPMT E1000_TSPMT +#define E1000_82542_CRCERRS E1000_CRCERRS +#define E1000_82542_ALGNERRC E1000_ALGNERRC +#define E1000_82542_SYMERRS E1000_SYMERRS +#define E1000_82542_RXERRC E1000_RXERRC +#define E1000_82542_MPC E1000_MPC +#define E1000_82542_SCC E1000_SCC +#define E1000_82542_ECOL E1000_ECOL +#define E1000_82542_MCC E1000_MCC +#define E1000_82542_LATECOL E1000_LATECOL +#define E1000_82542_COLC E1000_COLC +#define E1000_82542_DC E1000_DC +#define E1000_82542_TNCRS E1000_TNCRS +#define E1000_82542_SEC E1000_SEC +#define E1000_82542_CEXTERR E1000_CEXTERR +#define E1000_82542_RLEC E1000_RLEC +#define E1000_82542_XONRXC E1000_XONRXC +#define E1000_82542_XONTXC E1000_XONTXC +#define E1000_82542_XOFFRXC E1000_XOFFRXC +#define E1000_82542_XOFFTXC E1000_XOFFTXC +#define E1000_82542_FCRUC E1000_FCRUC +#define E1000_82542_PRC64 E1000_PRC64 +#define E1000_82542_PRC127 E1000_PRC127 +#define E1000_82542_PRC255 E1000_PRC255 +#define E1000_82542_PRC511 E1000_PRC511 +#define E1000_82542_PRC1023 E1000_PRC1023 +#define E1000_82542_PRC1522 E1000_PRC1522 +#define E1000_82542_GPRC E1000_GPRC +#define E1000_82542_BPRC E1000_BPRC +#define E1000_82542_MPRC E1000_MPRC +#define E1000_82542_GPTC E1000_GPTC +#define E1000_82542_GORCL E1000_GORCL +#define E1000_82542_GORCH E1000_GORCH +#define E1000_82542_GOTCL E1000_GOTCL +#define E1000_82542_GOTCH E1000_GOTCH +#define E1000_82542_RNBC E1000_RNBC +#define E1000_82542_RUC E1000_RUC +#define E1000_82542_RFC E1000_RFC +#define E1000_82542_ROC E1000_ROC +#define E1000_82542_RJC E1000_RJC +#define E1000_82542_MGTPRC E1000_MGTPRC +#define E1000_82542_MGTPDC E1000_MGTPDC +#define E1000_82542_MGTPTC E1000_MGTPTC +#define E1000_82542_TORL E1000_TORL +#define E1000_82542_TORH E1000_TORH +#define E1000_82542_TOTL E1000_TOTL +#define E1000_82542_TOTH E1000_TOTH +#define E1000_82542_TPR E1000_TPR +#define E1000_82542_TPT E1000_TPT +#define E1000_82542_PTC64 E1000_PTC64 +#define E1000_82542_PTC127 E1000_PTC127 +#define E1000_82542_PTC255 E1000_PTC255 +#define E1000_82542_PTC511 E1000_PTC511 +#define E1000_82542_PTC1023 E1000_PTC1023 +#define E1000_82542_PTC1522 E1000_PTC1522 +#define E1000_82542_MPTC E1000_MPTC +#define E1000_82542_BPTC E1000_BPTC +#define E1000_82542_TSCTC E1000_TSCTC +#define E1000_82542_TSCTFC E1000_TSCTFC +#define E1000_82542_RXCSUM E1000_RXCSUM +#define E1000_82542_WUC E1000_WUC +#define E1000_82542_WUFC E1000_WUFC +#define E1000_82542_WUS E1000_WUS +#define E1000_82542_MANC E1000_MANC +#define E1000_82542_IPAV E1000_IPAV +#define E1000_82542_IP4AT E1000_IP4AT +#define E1000_82542_IP6AT E1000_IP6AT +#define E1000_82542_WUPL E1000_WUPL +#define E1000_82542_WUPM E1000_WUPM +#define E1000_82542_FFLT E1000_FFLT +#define E1000_82542_TDFH 0x08010 +#define E1000_82542_TDFT 0x08018 +#define E1000_82542_FFMT E1000_FFMT +#define 
E1000_82542_FFVT E1000_FFVT +#define E1000_82542_HOST_IF E1000_HOST_IF +#define E1000_82542_IAM E1000_IAM +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_PSRCTL E1000_PSRCTL +#define E1000_82542_RAID E1000_RAID +#define E1000_82542_TARC0 E1000_TARC0 +#define E1000_82542_TDBAL1 E1000_TDBAL1 +#define E1000_82542_TDBAH1 E1000_TDBAH1 +#define E1000_82542_TDLEN1 E1000_TDLEN1 +#define E1000_82542_TDH1 E1000_TDH1 +#define E1000_82542_TDT1 E1000_TDT1 +#define E1000_82542_TXDCTL1 E1000_TXDCTL1 +#define E1000_82542_TARC1 E1000_TARC1 +#define E1000_82542_RFCTL E1000_RFCTL +#define E1000_82542_GCR E1000_GCR +#define E1000_82542_GSCL_1 E1000_GSCL_1 +#define E1000_82542_GSCL_2 E1000_GSCL_2 +#define E1000_82542_GSCL_3 E1000_GSCL_3 +#define E1000_82542_GSCL_4 E1000_GSCL_4 +#define E1000_82542_FACTPS E1000_FACTPS +#define E1000_82542_SWSM E1000_SWSM +#define E1000_82542_FWSM E1000_FWSM +#define E1000_82542_FFLT_DBG E1000_FFLT_DBG +#define E1000_82542_IAC E1000_IAC +#define E1000_82542_ICRXPTC E1000_ICRXPTC +#define E1000_82542_ICRXATC E1000_ICRXATC +#define E1000_82542_ICTXPTC E1000_ICTXPTC +#define E1000_82542_ICTXATC E1000_ICTXATC +#define E1000_82542_ICTXQEC E1000_ICTXQEC +#define E1000_82542_ICTXQMTC E1000_ICTXQMTC +#define E1000_82542_ICRXDMTC E1000_ICRXDMTC +#define E1000_82542_ICRXOC E1000_ICRXOC +#define E1000_82542_HICR E1000_HICR + +#define E1000_82542_CPUVEC E1000_CPUVEC +#define E1000_82542_MRQC E1000_MRQC +#define E1000_82542_RETA E1000_RETA +#define E1000_82542_RSSRK E1000_RSSRK +#define E1000_82542_RSSIM E1000_RSSIM +#define E1000_82542_RSSIR E1000_RSSIR +#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA +#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 txerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorcl; + u64 gorch; + u64 gotcl; + u64 gotch; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rlerrc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 torl; + u64 torh; + u64 totl; + u64 toth; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +/* Structure containing variables used by the shared code (e1000_hw.c) */ +struct e1000_hw { + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + void __iomem *ce4100_gbe_mdio_base_virt; + e1000_mac_type mac_type; + e1000_phy_type phy_type; + u32 phy_init_script; + e1000_media_type media_type; + void *back; + struct e1000_shadow_ram *eeprom_shadow_ram; + u32 flash_bank_size; + u32 flash_base_addr; + e1000_fc_type fc; + e1000_bus_speed bus_speed; + e1000_bus_width bus_width; + e1000_bus_type bus_type; + struct e1000_eeprom_info eeprom; + e1000_ms_type master_slave; + e1000_ms_type original_master_slave; + e1000_ffe_config ffe_config_state; + u32 asf_firmware_present; + u32 eeprom_semaphore_present; + unsigned long io_base; + u32 phy_id; + u32 phy_revision; + u32 phy_addr; + u32 original_fc; + u32 txcw; + u32 autoneg_failed; + u32 max_frame_size; + u32 
min_frame_size; + u32 mc_filter_type; + u32 num_mc_addrs; + u32 collision_delta; + u32 tx_packet_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + bool tx_pkt_filtering; + struct e1000_host_mng_dhcp_cookie mng_cookie; + u16 phy_spd_default; + u16 autoneg_advertised; + u16 pci_cmd_word; + u16 fc_high_water; + u16 fc_low_water; + u16 fc_pause_time; + u16 current_ifs_val; + u16 ifs_min_val; + u16 ifs_max_val; + u16 ifs_step_size; + u16 ifs_ratio; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 autoneg; + u8 mdix; + u8 forced_speed_duplex; + u8 wait_autoneg_complete; + u8 dma_fairness; + u8 mac_addr[NODE_ADDRESS_SIZE]; + u8 perm_mac_addr[NODE_ADDRESS_SIZE]; + bool disable_polarity_correction; + bool speed_downgraded; + e1000_smart_speed smart_speed; + e1000_dsp_config dsp_config_state; + bool get_link_status; + bool serdes_has_link; + bool tbi_compatibility_en; + bool tbi_compatibility_on; + bool laa_is_present; + bool phy_reset_disable; + bool initialize_hw_bits_disable; + bool fc_send_xon; + bool fc_strict_ieee; + bool report_tx_early; + bool adaptive_ifs; + bool ifs_params_forced; + bool in_ifs_mode; + bool mng_reg_access_disabled; + bool leave_av_bit_off; + bool bad_tx_carr_stats_fd; + bool has_smbus; +}; + +#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ +#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ +#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ +#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ +/* Register Bit Masks */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion + by EEPROM/Flash */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ + +/* EEPROM/Flash Control */ +#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ +#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ +#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ +#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_EEPROM_GRANT_ATTEMPTS +#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_STM_OPCODE 0xDB00 +#define E1000_HICR_FW_RESET 0xC0 + +#define E1000_SHADOW_RAM_WORDS 2048 +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC0 + +/* EEPROM Read */ +#define E1000_EERD_START 0x00000001 /* Start Read */ +#define E1000_EERD_DONE 0x00000010 /* Read Done */ +#define E1000_EERD_ADDR_SHIFT 8 +#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */ +#define E1000_EERD_DATA_SHIFT 16 +#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */ + +/* SPI EEPROM Status Register */ +#define EEPROM_STATUS_RDY_SPI 0x01 +#define EEPROM_STATUS_WEN_SPI 
0x02 +#define EEPROM_STATUS_BP0_SPI 0x04 +#define EEPROM_STATUS_BP1_SPI 0x08 +#define EEPROM_STATUS_WPEN_SPI 0x80 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 + +#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000 +#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000 +#define INTEL_CE_GBE_MDIC_GO 0x80000000 +#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000 + +#define E1000_KUMCTRLSTA_MASK 0x0000FFFF +#define E1000_KUMCTRLSTA_OFFSET 0x001F0000 +#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KUMCTRLSTA_REN 0x00200000 + +#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 +#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001 +#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 +#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003 +#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 
0x00000004 +#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 +#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 +#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E +#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F + +/* FIFO Control */ +#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008 +#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 + +/* In-Band Control */ +#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500 +#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 + +/* Half-Duplex Control */ +#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 +#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 + +#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E + +#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000 +#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000 + +#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000 +#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000 +#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 +#define E1000_PHY_CTRL_B2B_EN 0x00000080 + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Receive Address */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Clear */ +#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMC_SRPD E1000_ICR_SRPD +#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMC_DSW E1000_ICR_DSW +#define E1000_IMC_PHYINT E1000_ICR_PHYINT +#define E1000_IMC_EPRST E1000_ICR_EPRST + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define 
E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SW_W_SYNC definitions */ +#define E1000_SWFW_EEP_SM 0x0001 +#define E1000_SWFW_PHY0_SM 0x0002 +#define E1000_SWFW_PHY1_SM 0x0004 +#define E1000_SWFW_MAC_CSR_SM 0x0008 + +/* Receive Descriptor */ +#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ +#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ +#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */ +#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */ +#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */ + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Receive Descriptor Control */ +#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ +#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ +#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */ +#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 
0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. + still to be processed. */ +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ +/* Extended Transmit Control */ +#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ + +/* Receive Checksum Control */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Multiple Receive Queue Control */ +#define E1000_MRQC_ENABLE_MASK 0x00000003 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 
/* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */ +#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */ +#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */ +#define E1000_WUS_BC 0x00000010 /* Broadcast Received */ +#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */ +#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */ +#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */ +#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */ +#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */ +#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery + * Filtering */ +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host + * memory */ +#define 
E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address + * filtering */ +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* FW Semaphore Register */ +#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ + +#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ +#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ +#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ +#define E1000_FWSM_SKUEL_SHIFT 29 +#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ +#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ +#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ +#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */ + +/* FFLT Debug Register */ +#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ + +typedef enum { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_interface_only +} e1000_mng_mode; + +/* Host Interface Control Register */ +#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ +#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done + * to put command in RAM */ +#define E1000_HICR_SV 0x00000004 /* Status Validity */ +#define E1000_HICR_FWR 0x00000080 /* FW reset. 
Set by the Host */ + +/* Host Interface Command Interface - Address range 0x8800-0x8EFF */ +#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ + +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; /* I/F bits for command, status for return */ + u8 checksum; +}; +struct e1000_host_command_info { + struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ +}; + +/* Host SMB register #0 */ +#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ +#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ +#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ +#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ + +/* Host SMB register #1 */ +#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN +#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN +#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT +#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT + +/* FW Status Register */ +#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ + +/* Wake Up Packet Length */ +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ + +#define E1000_MDALIGN 4096 + +/* PCI-Ex registers*/ + +/* PCI-Ex Control Register */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +/* Function Active and Power State to MNG */ +#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 +#define E1000_FACTPS_LAN0_VALID 0x00000004 +#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008 +#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0 +#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6 +#define E1000_FACTPS_LAN1_VALID 0x00000100 +#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200 +#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000 +#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12 +#define E1000_FACTPS_IDE_ENABLE 0x00004000 +#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000 +#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000 +#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18 +#define E1000_FACTPS_SP_ENABLE 0x00100000 +#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000 +#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000 +#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24 +#define E1000_FACTPS_IPMI_ENABLE 0x04000000 +#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000 +#define E1000_FACTPS_MNGCG 0x20000000 +#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000 +#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000 + +/* PCI-Ex Config Space */ +#define PCI_EX_LINK_STATUS 0x12 +#define PCI_EX_LINK_WIDTH_MASK 0x3F0 +#define PCI_EX_LINK_WIDTH_SHIFT 4 + +/* EEPROM Commands - Microwire */ +#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode 
*/ +#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM Commands - SPI */ +#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ +#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ +#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ +#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ +#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Size definitions */ +#define EEPROM_WORD_SIZE_SHIFT 6 +#define EEPROM_SIZE_SHIFT 10 +#define EEPROM_SIZE_MASK 0x1C00 + +/* EEPROM Word Offsets */ +#define EEPROM_COMPAT 0x0003 +#define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 +#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ +#define EEPROM_PHY_CLASS_WORD 0x0007 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 +#define EEPROM_INIT_3GIO_3 0x001A +#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 +#define EEPROM_CFG 0x0012 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ +#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */ +#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F + +/* Mask bit for PHY class in Word 7 of the EEPROM */ +#define EEPROM_PHY_CLASS_A 0x8000 + +/* Mask bits for fields in Word 0x0a of the EEPROM */ +#define EEPROM_WORD0A_ILOS 0x0010 +#define EEPROM_WORD0A_SWDPIO 0x01E0 +#define EEPROM_WORD0A_LRST 0x0200 +#define EEPROM_WORD0A_FD 0x0400 +#define EEPROM_WORD0A_66MHZ 0x0800 + +/* Mask bits for fields in Word 0x0f of the EEPROM */ +#define EEPROM_WORD0F_PAUSE_MASK 0x3000 +#define EEPROM_WORD0F_PAUSE 0x1000 +#define EEPROM_WORD0F_ASM_DIR 0x2000 +#define EEPROM_WORD0F_ANE 0x0800 +#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 +#define EEPROM_WORD0F_LPLU 0x0001 + +/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */ +#define EEPROM_WORD1020_GIGA_DISABLE 0x0010 +#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008 + +/* Mask bits for fields in Word 0x1a of the EEPROM */ +#define EEPROM_WORD1A_ASPM_MASK 0x000C + 
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* EEPROM Map defines (WORD OFFSETS)*/ +#define EEPROM_NODE_ADDRESS_BYTE_0 0 +#define EEPROM_PBA_BYTE_1 8 + +#define EEPROM_RESERVED_WORD 0xFFFF + +/* EEPROM Map Sizes (Byte Counts) */ +#define PBA_SIZE 4 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +/* Collision distance is a 0-based value that applies to + * half-duplex-capable hardware only. */ +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLLISION_DISTANCE_82542 64 +#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_COLD_SHIFT 12 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define E1000_TXDMAC_DPP 0x00000001 + +/* Adaptive IFS defines */ +#define TX_THRESHOLD_START 8 +#define TX_THRESHOLD_INCREMENT 10 +#define TX_THRESHOLD_DECREMENT 1 +#define TX_THRESHOLD_STOP 190 +#define TX_THRESHOLD_DISABLE 0 +#define TX_THRESHOLD_TIMER_MS 10000 +#define MIN_NUM_XMITS 1000 +#define IFS_MAX 80 +#define IFS_STEP 10 +#define IFS_MIN 40 +#define IFS_RATIO 4 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 +#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 +#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 +#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000 + +#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF +#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ +#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ +#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ + +#define E1000_PBS_16K E1000_PBA_16K + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* The historical defaults for the flow control values are given below. 
*/ +#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ +#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ +#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ + +/* PCIX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 + +/* Number of bits required to shift right the "pause" bits from the + * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register. + */ +#define PAUSE_SHIFT 5 + +/* Number of bits required to shift left the "SWDPIO" bits from the + * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register. + */ +#define SWDPIO_SHIFT 17 + +/* Number of bits required to shift left the "SWDPIO_EXT" bits from the + * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register. + */ +#define SWDPIO__EXT_SHIFT 4 + +/* Number of bits required to shift left the "ILOS" bit from the EEPROM + * (bit 4) to the "ILOS" (bit 7) field in the CTRL register. + */ +#define ILOS_SHIFT 3 + +#define RECEIVE_BUFFER_ALIGN_SIZE (256) + +/* Number of milliseconds we wait for auto-negotiation to complete */ +#define LINK_UP_TIMEOUT 500 + +/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ +#define AUTO_READ_DONE_TIMEOUT 10 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 + +#define E1000_TX_BUFFER_SIZE ((u32)1514) + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +/* TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the RX descriptor with EOP set + * error = the 8 bit error field of the RX descriptor with EOP set + * length = the sum of all the length fields of the RX descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * max_frame_length = the maximum frame length we want to accept. + * min_frame_length = the minimum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \ + ((adapter)->tbi_compatibility_on && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= ((adapter)->max_frame_size + 1))) : \ + (((length) > (adapter)->min_frame_size) && \ + ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1))))) + +/* Structures, enums, and macros for the PHY */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CTRL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +#define IGP01E1000_IEEE_REGS_PAGE 0x0000 +#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 +#define IGP01E1000_IEEE_FORCE_GIGA 0x0140 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ + +/* IGP01E1000 AGC Registers - stores the cable length values*/ +#define IGP01E1000_PHY_AGC_A 0x1172 +#define IGP01E1000_PHY_AGC_B 0x1272 +#define IGP01E1000_PHY_AGC_C 0x1472 +#define IGP01E1000_PHY_AGC_D 0x1872 + +/* IGP02E1000 AGC Registers for cable length values */ +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +/* IGP01E1000 DSP Reset Register */ +#define IGP01E1000_PHY_DSP_RESET 0x1F33 +#define IGP01E1000_PHY_DSP_SET 0x1F71 +#define IGP01E1000_PHY_DSP_FFE 0x1F35 + +#define IGP01E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_CHANNEL_NUM 4 + +#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271 +#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 +#define IGP01E1000_PHY_AGC_PARAM_D 0x1871 + +#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000 +#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000 + +#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890 +#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000 +#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004 +#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069 + +#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A +/* IGP01E1000 PCS Initialization register - stores the polarity status when + * speed = 1000 Mbps. */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5 + +#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */ + +/* Next Page TX Register */ +#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* Link Partner Next Page Register */ +#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */ +#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 
/* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +/* Extended Status Register */ +#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */ +#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */ + +#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */ + /* (0=enable, 1=disable) */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. 
+ */ +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080 + /* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 + /* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5 +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; + * 3=110-140M;4=>140M */ +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_REV_POLARITY_SHIFT 1 +#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5 +#define M88E1000_PSSR_MDIX_SHIFT 6 +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* IGP01E1000 Specific Port Config Register - R/W */ +#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 +#define IGP01E1000_PSCFR_PRE_EN 0x0020 +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 +#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100 +#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400 +#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000 + +/* IGP01E1000 Specific Port Status Register - R/O */ +#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */ +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C +#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200 +#define IGP01E1000_PSSR_LINK_UP 0x0400 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */ +#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000 +#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */ +#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */ + +/* IGP01E1000 Specific Port Control Register - R/W */ +#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010 +#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200 +#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400 +#define IGP01E1000_PSCR_FLIP_CHIP 0x0800 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */ + +/* IGP01E1000 Specific Port Link Health Register */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000 +#define IGP01E1000_PLHR_MASTER_FAULT 0x2000 +#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000 +#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */ +#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_0 0x0100 +#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040 +#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010 +#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008 +#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004 +#define 
IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002 +#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001 + +/* IGP01E1000 Channel Quality Register */ +#define IGP01E1000_MSE_CHANNEL_D 0x000F +#define IGP01E1000_MSE_CHANNEL_C 0x00F0 +#define IGP01E1000_MSE_CHANNEL_B 0x0F00 +#define IGP01E1000_MSE_CHANNEL_A 0xF000 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ + +/* IGP01E1000 DSP reset macros */ +#define DSP_RESET_ENABLE 0x0 +#define DSP_RESET_DISABLE 0x2 +#define E1000_MAX_DSP_RESETS 10 + +/* IGP01E1000 & IGP02E1000 AGC Registers */ + +#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ + +/* IGP02E1000 AGC Register Length 9-bit mask */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F + +/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ +#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 +#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113 + +/* The precision error of the cable length is +/- 10 meters */ +#define IGP01E1000_AGC_RANGE 10 +#define IGP02E1000_AGC_RANGE 15 + +/* IGP01E1000 PCS Initialization register */ +/* bits 3:6 in the PCS registers stores the channels polarity */ +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +/* IGP01E1000 GMII FIFO Register */ +#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed + * on Link-Up */ +#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */ + +/* IGP01E1000 Analog Register */ +#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 +#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 +#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC +#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE + +#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 +#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 +#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 +#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 +#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 + +#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 +#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 +#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 +#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 + +/* Bit definitions for valid PHY IDs. */ +/* I = Integrated + * E = External + */ +#define M88_VENDOR 0x0141 +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID +#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1118_E_PHY_ID 0x01410E40 +#define L1LXT971A_PHY_ID 0x001378E0 + +#define RTL8211B_PHY_ID 0x001CC910 +#define RTL8201N_PHY_ID 0x8200 +#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */ +#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */ + +/* Bits... 
+ * 15-5: page + * 4-0: register offset + */ +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) \ + (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) + +#define IGP3_PHY_PORT_CTRL \ + PHY_REG(769, 17) /* Port General Configuration */ +#define IGP3_PHY_RATE_ADAPT_CTRL \ + PHY_REG(769, 25) /* Rate Adapter Control Register */ + +#define IGP3_KMRN_FIFO_CTRL_STATS \ + PHY_REG(770, 16) /* KMRN FIFO's control/status register */ +#define IGP3_KMRN_POWER_MNG_CTRL \ + PHY_REG(770, 17) /* KMRN Power Management Control Register */ +#define IGP3_KMRN_INBAND_CTRL \ + PHY_REG(770, 18) /* KMRN Inband Control Register */ +#define IGP3_KMRN_DIAG \ + PHY_REG(770, 19) /* KMRN Diagnostic register */ +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ +#define IGP3_KMRN_ACK_TIMEOUT \ + PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ + +#define IGP3_VR_CTRL \ + PHY_REG(776, 18) /* Voltage regulator control register */ +#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ +#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */ + +#define IGP3_CAPABILITY \ + PHY_REG(776, 19) /* IGP3 Capability Register */ + +/* Capabilities for SKU Control */ +#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ +#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ +#define IGP3_CAP_ASF 0x0004 /* Support ASF */ +#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ +#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ +#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ +#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ +#define IGP3_CAP_RSS 0x0080 /* Support RSS */ +#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ +#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ + +#define IGP3_PPC_JORDAN_EN 0x0001 +#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 + +#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001 +#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E +#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 +#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 + +#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. 
Ctrl register */ +#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ + +#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) +#define IGP3_KMRN_EC_DIS_INBAND 0x0080 + +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ +#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ +#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */ +#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ +#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ +#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ +#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ +#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ +#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ +#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ + +#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */ +#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ +#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ +#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ +#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ +#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ +#define IFE_PESC_POLARITY_REVERSED_SHIFT 8 + +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */ +#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ +#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ +#define IFE_PSC_FORCE_POLARITY_SHIFT 5 +#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 + +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ +#define IFE_PMC_MDIX_MODE_SHIFT 6 +#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ + +#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ +#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ +#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ +#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ +#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ +#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ +#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ +#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ +#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ +#define IFE_PSCL_PROBE_LEDS_OFF 
0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */
+#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */
+#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */
+#define ICH_FLASH_SEG_SIZE_256 256
+#define ICH_FLASH_SEG_SIZE_4K 4096
+#define ICH_FLASH_SEG_SIZE_64K 65536
+
+#define ICH_CYCLE_READ 0x0
+#define ICH_CYCLE_RESERVED 0x1
+#define ICH_CYCLE_WRITE 0x2
+#define ICH_CYCLE_ERASE 0x3
+
+#define ICH_FLASH_GFPREG 0x0000
+#define ICH_FLASH_HSFSTS 0x0004
+#define ICH_FLASH_HSFCTL 0x0006
+#define ICH_FLASH_FADDR 0x0008
+#define ICH_FLASH_FDATA0 0x0010
+#define ICH_FLASH_FRACC 0x0050
+#define ICH_FLASH_FREG0 0x0054
+#define ICH_FLASH_FREG1 0x0058
+#define ICH_FLASH_FREG2 0x005C
+#define ICH_FLASH_FREG3 0x0060
+#define ICH_FLASH_FPR0 0x0074
+#define ICH_FLASH_FPR1 0x0078
+#define ICH_FLASH_SSFSTS 0x0090
+#define ICH_FLASH_SSFCTL 0x0092
+#define ICH_FLASH_PREOP 0x0094
+#define ICH_FLASH_OPTYPE 0x0096
+#define ICH_FLASH_OPMENU 0x0098
+
+#define ICH_FLASH_REG_MAPSIZE 0x00A0
+#define ICH_FLASH_SECTOR_SIZE 4096
+#define ICH_GFPREG_BASE_MASK 0x1FFF
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+
+/* Miscellaneous PHY bit definitions. */
+#define PHY_PREAMBLE 0xFFFFFFFF
+#define PHY_SOF 0x01
+#define PHY_OP_READ 0x02
+#define PHY_OP_WRITE 0x01
+#define PHY_TURNAROUND 0x02
+#define PHY_PREAMBLE_SIZE 32
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+#define E1000_PHY_ADDRESS 0x01
+#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+#define PHY_FORCE_TIME 20 /* 2.0 Seconds */
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */
+#define REG4_SPEED_MASK 0x01E0
+#define REG9_SPEED_MASK 0x0300
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010
+#define ADVERTISE_1000_FULL 0x0020
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */
+#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */
+#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */
+
+#endif /* _E1000_HW_H_ */
diff --git a/devices/e1000/e1000_main-5.10-ethercat.c b/devices/e1000/e1000_main-5.10-ethercat.c
new file mode 100644
index 00000000..03c4bc6a
--- /dev/null
+++ b/devices/e1000/e1000_main-5.10-ethercat.c
@@ -0,0 +1,5474 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
+
+#include "e1000-5.10-ethercat.h"
+#include <net/ip6_checksum.h>
+#include <linux/io.h>
+#include <linux/prefetch.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+char e1000_driver_name[] = "ec_e1000";
+static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
+static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
+
+/* e1000_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * Macro expands to...
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + */ +static const struct pci_device_id e1000_pci_tbl[] = { + INTEL_E1000_ETHERNET_DEVICE(0x1000), + INTEL_E1000_ETHERNET_DEVICE(0x1001), + INTEL_E1000_ETHERNET_DEVICE(0x1004), + INTEL_E1000_ETHERNET_DEVICE(0x1008), + INTEL_E1000_ETHERNET_DEVICE(0x1009), + INTEL_E1000_ETHERNET_DEVICE(0x100C), + INTEL_E1000_ETHERNET_DEVICE(0x100D), + INTEL_E1000_ETHERNET_DEVICE(0x100E), + INTEL_E1000_ETHERNET_DEVICE(0x100F), + INTEL_E1000_ETHERNET_DEVICE(0x1010), + INTEL_E1000_ETHERNET_DEVICE(0x1011), + INTEL_E1000_ETHERNET_DEVICE(0x1012), + INTEL_E1000_ETHERNET_DEVICE(0x1013), + INTEL_E1000_ETHERNET_DEVICE(0x1014), + INTEL_E1000_ETHERNET_DEVICE(0x1015), + INTEL_E1000_ETHERNET_DEVICE(0x1016), + INTEL_E1000_ETHERNET_DEVICE(0x1017), + INTEL_E1000_ETHERNET_DEVICE(0x1018), + INTEL_E1000_ETHERNET_DEVICE(0x1019), + INTEL_E1000_ETHERNET_DEVICE(0x101A), + INTEL_E1000_ETHERNET_DEVICE(0x101D), + INTEL_E1000_ETHERNET_DEVICE(0x101E), + INTEL_E1000_ETHERNET_DEVICE(0x1026), + INTEL_E1000_ETHERNET_DEVICE(0x1027), + INTEL_E1000_ETHERNET_DEVICE(0x1028), + INTEL_E1000_ETHERNET_DEVICE(0x1075), + INTEL_E1000_ETHERNET_DEVICE(0x1076), + INTEL_E1000_ETHERNET_DEVICE(0x1077), + INTEL_E1000_ETHERNET_DEVICE(0x1078), + INTEL_E1000_ETHERNET_DEVICE(0x1079), + INTEL_E1000_ETHERNET_DEVICE(0x107A), + INTEL_E1000_ETHERNET_DEVICE(0x107B), + INTEL_E1000_ETHERNET_DEVICE(0x107C), + INTEL_E1000_ETHERNET_DEVICE(0x108A), + INTEL_E1000_ETHERNET_DEVICE(0x1099), + INTEL_E1000_ETHERNET_DEVICE(0x10B5), + INTEL_E1000_ETHERNET_DEVICE(0x2E6E), + /* required last entry */ + {0,} +}; + +// do not auto-load driver +// MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr); +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr); +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +void e1000_update_stats(struct e1000_adapter *adapter); + +static int e1000_init_module(void); +static void e1000_exit_module(void); +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void e1000_remove(struct pci_dev *pdev); +static int e1000_alloc_queues(struct e1000_adapter *adapter); +static int e1000_sw_init(struct e1000_adapter *adapter); +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); +static void e1000_configure_tx(struct e1000_adapter *adapter); +static void e1000_configure_rx(struct e1000_adapter *adapter); +static void e1000_setup_rctl(struct e1000_adapter *adapter); +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static void 
e1000_set_rx_mode(struct net_device *netdev); +static void e1000_update_phy_info_task(struct work_struct *work); +static void e1000_watchdog(struct work_struct *work); +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +static int e1000_change_mtu(struct net_device *netdev, int new_mtu); +static int e1000_set_mac(struct net_device *netdev, void *p); +void ec_poll(struct net_device *); +static irqreturn_t e1000_intr(int irq, void *data); +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static int e1000_clean(struct napi_struct *napi, int budget); +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ +} +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); +static void e1000_enter_82542_rst(struct e1000_adapter *adapter); +static void e1000_leave_82542_rst(struct e1000_adapter *adapter); +static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue); +static void e1000_reset_task(struct work_struct *work); +static void e1000_smartspeed(struct e1000_adapter *adapter); +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb); + +static bool e1000_vlan_used(struct e1000_adapter *adapter); +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on); +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static void e1000_restore_vlan(struct e1000_adapter *adapter); + +static int __maybe_unused e1000_suspend(struct device *dev); +static int __maybe_unused e1000_resume(struct device *dev); +static void e1000_shutdown(struct pci_dev *pdev); + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void e1000_netpoll (struct net_device *netdev); +#endif + +#define COPYBREAK_DEFAULT 256 +static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state); +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); +static void e1000_io_resume(struct pci_dev *pdev); + +static const struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume); + +static struct pci_driver e1000_driver = { + .name = e1000_driver_name, + .id_table = e1000_pci_tbl, + 
.probe = e1000_probe, + .remove = e1000_remove, + .driver = { + .pm = &e1000_pm_ops, + }, + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver, EtherCAT enabled"); +MODULE_LICENSE("GPL v2"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +/** + * e1000_get_hw_dev - helper function for getting netdev + * @hw: pointer to HW struct + * + * return device used by hardware layer to print debugging information + * + **/ +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init e1000_init_module(void) +{ + int ret; + pr_info("%s\n", e1000_driver_string); + + pr_info("%s\n", e1000_copyright); + + ret = pci_register_driver(&e1000_driver); + if (copybreak != COPYBREAK_DEFAULT) { + if (copybreak == 0) + pr_info("copybreak disabled\n"); + else + pr_info("copybreak enabled for " + "packets <= %u bytes\n", copybreak); + } + return ret; +} + +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} + +module_exit(e1000_exit_module); + +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + irq_handler_t handler = e1000_intr; + int irq_flags = IRQF_SHARED; + int err; + + if (adapter->ecdev) { + return 0; + } + + err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (err) { + e_err(probe, "Unable to allocate interrupt Error: %d\n", err); + } + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->ecdev) { + return; + } + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->ecdev) { + return; + } + + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + synchronize_irq(adapter->pdev->irq); +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->ecdev) { + return; + } + + ew32(IMS, IMS_ENABLE_MASK); + E1000_WRITE_FLUSH(); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u16 vid = hw->mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (!e1000_vlan_used(adapter)) + return; + + if (!test_bit(vid, adapter->active_vlans)) { + if (hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + if ((old_vid != 
(u16)E1000_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + old_vid); + } else { + adapter->mng_vlan_id = vid; + } +} + +static void e1000_init_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + ew32(MANC, manc); + } +} + +static void e1000_release_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + + ew32(MANC, manc); + } +} + +/** + * e1000_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + e1000_set_rx_mode(netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + /* call E1000_DESC_UNUSED which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct e1000_rx_ring *ring = &adapter->rx_ring[i]; + if (adapter->ecdev) { + /* fill rx ring completely! */ + adapter->alloc_rx_buf(adapter, ring, ring->count); + } else { + /* this one leaves the last ring element unallocated! */ + adapter->alloc_rx_buf(adapter, ring, + E1000_DESC_UNUSED(ring)); + } + } +} + +int e1000_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->flags); + + if (!adapter->ecdev) { + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_wake_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + } + return 0; +} + +/** + * e1000_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000_reset *** + **/ +void e1000_power_up_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 mii_reg = 0; + + /* Just clear the power down bit to wake the phy back up */ + if (hw->media_type == e1000_media_type_copper) { + /* according to the manual, the phy will retain its + * settings across a power-down/up cycle + */ + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + } +} + +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is true * + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active + */ + if (!adapter->wol && hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + u16 mii_reg = 0; + + switch (hw->mac_type) { + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case 
e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (er32(MANC) & E1000_MANC_SMBUS_EN) + goto out; + break; + default: + goto out; + } + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + msleep(1); + } +out: + return; +} + +static void e1000_down_and_stop(struct e1000_adapter *adapter) +{ + set_bit(__E1000_DOWN, &adapter->flags); + + if (!adapter->ecdev) { + cancel_delayed_work_sync(&adapter->watchdog_task); + } + + /* + * Since the watchdog task can reschedule other tasks, we should cancel + * it first, otherwise we can run into the situation when a work is + * still running after the adapter has been turned down. + */ + + if (!adapter->ecdev) { + cancel_delayed_work_sync(&adapter->phy_info_task); + cancel_delayed_work_sync(&adapter->fifo_stall_task); + } + + /* Only kill reset task if adapter is not resetting */ + if (!adapter->ecdev && !test_bit(__E1000_RESETTING, &adapter->flags)) + cancel_work_sync(&adapter->reset_task); +} + +void e1000_down(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl, tctl; + + /* disable receives in the hardware */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + if (!adapter->ecdev) { + netif_tx_disable(netdev); + } + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + /* flush both disables and wait for them to finish */ + E1000_WRITE_FLUSH(); + msleep(10); + + /* Set the carrier off after transmits have been disabled in the + * hardware, to avoid race conditions with e1000_watchdog() (which + * may be running concurrently to us, checking for the carrier + * bit to decide whether it should enable transmits again). Such + * a race condition would result into transmission being disabled + * in the hardware until the next IFF_DOWN+IFF_UP cycle. + */ + + if (!adapter->ecdev) { + netif_carrier_off(netdev); + napi_disable(&adapter->napi); + + e1000_irq_disable(adapter); + } + + /* Setting DOWN must be after irq_disable to prevent + * a screaming interrupt. Setting DOWN also prevents + * tasks from rescheduling. + */ + e1000_down_and_stop(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + e1000_reset(adapter); + e1000_clean_all_tx_rings(adapter); + e1000_clean_all_rx_rings(adapter); +} + +void e1000_reinit_locked(struct e1000_adapter *adapter) +{ + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + /* only run the task if not already down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) { + e1000_down(adapter); + e1000_up(adapter); + } + + clear_bit(__E1000_RESETTING, &adapter->flags); +} + +void e1000_reset(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pba = 0, tx_space, min_tx_space, min_rx_space; + bool legacy_pba_adjust = false; + u16 hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. 
+ */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + case e1000_82540: + case e1000_82541: + case e1000_82541_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_48K; + break; + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + pba = E1000_PBA_48K; + break; + case e1000_82547: + case e1000_82547_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_30K; + break; + case e1000_undefined: + case e1000_num_macs: + break; + } + + if (legacy_pba_adjust) { + if (hw->max_frame_size > E1000_RXBUFFER_8192) + pba -= 8; /* allocate more FIFO for Tx */ + + if (hw->mac_type == e1000_82547) { + adapter->tx_fifo_head = 0; + adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; + adapter->tx_fifo_size = + (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; + atomic_set(&adapter->tx_fifo_stall, 0); + } + } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* adjust PBA for jumbo frames */ + ew32(PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (hw->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = hw->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + + /* PCI/PCIx hardware has PBA alignment constraints */ + switch (hw->mac_type) { + case e1000_82545 ... e1000_82546_rev_3: + pba &= ~(E1000_PBA_8K - 1); + break; + default: + break; + } + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment or use Early Receive if available + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + } + + ew32(PBA, pba); + + /* flow control settings: + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. 
+ * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame + */ + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - hw->max_frame_size)); + + hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ + hw->fc_low_water = hw->fc_high_water - 8; + hw->fc_pause_time = E1000_FC_PAUSE_TIME; + hw->fc_send_xon = 1; + hw->fc = hw->original_fc; + + /* Allow time for pending master requests to run */ + e1000_reset_hw(hw); + if (hw->mac_type >= e1000_82544) + ew32(WUC, 0); + + if (e1000_init_hw(hw)) + e_dev_err("Hardware Error\n"); + e1000_update_mng_vlan(adapter); + + /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ + if (hw->mac_type >= e1000_82544 && + hw->autoneg == 1 && + hw->autoneg_advertised == ADVERTISE_1000_FULL) { + u32 ctrl = er32(CTRL); + /* clear phy power management bit if we are in gig only mode, + * which if enabled will attempt negotiation to 100Mb, which + * can cause a loss of link at power off or driver unload + */ + ctrl &= ~E1000_CTRL_SWDPIN3; + ew32(CTRL, ctrl); + } + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETHERNET_IEEE_VLAN_TYPE); + + e1000_reset_adaptive(hw); + e1000_phy_get_info(hw, &adapter->phy_info); + + e1000_release_manageability(adapter); +} + +/* Dump the eeprom for users having checksum issues */ +static void e1000_dump_eeprom(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_eeprom eeprom; + const struct ethtool_ops *ops = netdev->ethtool_ops; + u8 *data; + int i; + u16 csum_old, csum_new = 0; + + eeprom.len = ops->get_eeprom_len(netdev); + eeprom.offset = 0; + + data = kmalloc(eeprom.len, GFP_KERNEL); + if (!data) + return; + + ops->get_eeprom(netdev, &eeprom, data); + + csum_old = (data[EEPROM_CHECKSUM_REG * 2]) + + (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8); + for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2) + csum_new += data[i] + (data[i + 1] << 8); + csum_new = EEPROM_SUM - csum_new; + + pr_err("/*********************/\n"); + pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old); + pr_err("Calculated : 0x%04x\n", csum_new); + + pr_err("Offset Values\n"); + pr_err("======== ======\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); + + pr_err("Include this output when contacting your support provider.\n"); + pr_err("This is not a software error! Something bad happened to\n"); + pr_err("your hardware or EEPROM image. Ignoring this problem could\n"); + pr_err("result in further problems, possibly loss of data,\n"); + pr_err("corruption or system hangs!\n"); + pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n"); + pr_err("which is invalid and requires you to set the proper MAC\n"); + pr_err("address manually before continuing to enable this network\n"); + pr_err("device. 
Please inspect the EEPROM dump and report the\n"); + pr_err("issue to your hardware vendor or Intel Customer Support.\n"); + pr_err("/*********************/\n"); + + kfree(data); +} + +/** + * e1000_is_need_ioport - determine if an adapter needs ioport resources or not + * @pdev: PCI device information struct + * + * Return true if an adapter needs ioport resources + **/ +static int e1000_is_need_ioport(struct pci_dev *pdev) +{ + switch (pdev->device) { + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541ER_LOM: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + return true; + default: + return false; + } +} + +static netdev_features_t e1000_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int e1000_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + e1000_vlan_mode(netdev, features); + + if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + adapter->rx_csum = !!(features & NETIF_F_RXCSUM); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + + return 1; +} + +static const struct net_device_ops e1000_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_set_rx_mode = e1000_set_rx_mode, + .ndo_set_mac_address = e1000_set_mac, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_change_mtu = e1000_change_mtu, + .ndo_do_ioctl = e1000_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif + .ndo_fix_features = e1000_fix_features, + .ndo_set_features = e1000_set_features, +}; + +/** + * e1000_init_hw_struct - initialize members of hw struct + * @adapter: board private struct + * @hw: structure used by e1000_hw.c + * + * Factors out initialization of the e1000_hw struct to its own function + * that can be called very early at init (just after struct allocation). + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + * Returns negative error codes if MAC type setup fails. 
+ */ +static int e1000_init_hw_struct(struct e1000_adapter *adapter, + struct e1000_hw *hw) +{ + struct pci_dev *pdev = adapter->pdev; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + hw->max_frame_size = adapter->netdev->mtu + + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; + + /* identify the MAC */ + if (e1000_set_mac_type(hw)) { + e_err(probe, "Unknown MAC Type\n"); + return -EIO; + } + + switch (hw->mac_type) { + default: + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->phy_init_script = 1; + break; + } + + e1000_set_media_type(hw); + e1000_get_bus_info(hw); + + hw->wait_autoneg_complete = false; + hw->tbi_compatibility_en = true; + hw->adaptive_ifs = true; + + /* Copper options */ + + if (hw->media_type == e1000_media_type_copper) { + hw->mdix = AUTO_ALL_MODES; + hw->disable_polarity_correction = false; + hw->master_slave = E1000_MASTER_SLAVE; + } + + return 0; +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter = NULL; + struct e1000_hw *hw; + + static int cards_found; + static int global_quad_port_a; /* global ksp3 port a indication */ + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 tmp = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + int bars, need_ioport; + bool disable_dev = false; + + /* do not allocate ioport bars when not needed */ + need_ioport = e1000_is_need_ioport(pdev); + if (need_ioport) { + bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); + err = pci_enable_device(pdev); + } else { + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_enable_device_mem(pdev); + } + if (err) + return err; + + err = pci_request_selected_regions(pdev, bars, e1000_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + adapter->bars = bars; + adapter->need_ioport = need_ioport; + + hw = &adapter->hw; + hw->back = adapter; + + err = -EIO; + hw->hw_addr = pci_ioremap_bar(pdev, BAR_0); + if (!hw->hw_addr) + goto err_ioremap; + + if (adapter->need_ioport) { + for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + hw->io_base = pci_resource_start(pdev, i); + break; + } + } + } + + /* make ready for any if (hw->...) 
below */ + err = e1000_init_hw_struct(adapter, hw); + if (err) + goto err_sw_init; + + /* there is a workaround being applied below that limits + * 64-bit DMA addresses to 64-bit hardware. There are some + * 32-bit adapters that Tx hang when given 64-bit DMA addresses + */ + pci_using_dac = 0; + if ((hw->bus_type == e1000_bus_type_pcix) && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + pr_err("No usable DMA config, aborting\n"); + goto err_dma; + } + } + + netdev->netdev_ops = &e1000_netdev_ops; + e1000_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + + /* setup the private structure */ + + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + err = -EIO; + if (hw->mac_type == e1000_ce4100) { + hw->ce4100_gbe_mdio_base_virt = + ioremap(pci_resource_start(pdev, BAR_1), + pci_resource_len(pdev, BAR_1)); + + if (!hw->ce4100_gbe_mdio_base_virt) + goto err_mdio_ioremap; + } + + if (hw->mac_type >= e1000_82543) { + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER; + } + + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_82547)) + netdev->hw_features |= NETIF_F_TSO; + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->features |= netdev->hw_features; + netdev->hw_features |= (NETIF_F_RXCSUM | + NETIF_F_RXALL | + NETIF_F_RXFCS); + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + netdev->vlan_features |= (NETIF_F_TSO | + NETIF_F_HW_CSUM | + NETIF_F_SG); + + /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */ + if (hw->device_id != E1000_DEV_ID_82545EM_COPPER || + hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE) + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* MTU range: 46 - 16110 */ + netdev->min_mtu = ETH_ZLEN - ETH_HLEN; + netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); + + adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); + + /* initialize eeprom parameters */ + if (e1000_init_eeprom_params(hw)) { + e_err(probe, "EEPROM initialization failed\n"); + goto err_eeprom; + } + + /* before reading the EEPROM, reset the controller to + * put the device in a known good starting state + */ + + e1000_reset_hw(hw); + + /* make sure the EEPROM is good */ + if (e1000_validate_eeprom_checksum(hw) < 0) { + e_err(probe, "The EEPROM Checksum Is Not Valid\n"); + e1000_dump_eeprom(adapter); + /* set MAC address to all zeroes to invalidate and temporary + * disable this device for the user. 
This blocks regular + * traffic while still permitting ethtool ioctls from reaching + * the hardware as well as allowing the user to run the + * interface after manually setting a hw addr using + * `ip set address` + */ + memset(hw->mac_addr, 0, netdev->addr_len); + } else { + /* copy the MAC address out of the EEPROM */ + if (e1000_read_mac_addr(hw)) + e_err(probe, "EEPROM Read Error\n"); + } + /* don't block initialization here due to bad MAC address */ + memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) + e_err(probe, "Invalid MAC Address\n"); + + + INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog); + INIT_DELAYED_WORK(&adapter->fifo_stall_task, + e1000_82547_tx_fifo_stall_task); + INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); + INIT_WORK(&adapter->reset_task, e1000_reset_task); + + e1000_check_options(adapter); + + /* Initial Wake on LAN setting + * If APM wake is enabled in the EEPROM, + * enable the ACPI Magic Packet filter + */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + break; + case e1000_82544: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_82544_APM; + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + break; + } + fallthrough; + default: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + break; + } + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82546GB_PCIE: + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->eeprom_wol = 0; + else + adapter->quad_port_a = true; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + } + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* Auto detect PHY address */ + if (hw->mac_type == e1000_ce4100) { + for (i = 0; i < 32; i++) { + hw->phy_addr = i; + e1000_read_phy_reg(hw, PHY_ID2, &tmp); + + if (tmp != 0 && tmp != 0xFF) + break; + } + + if (i >= 32) + goto err_eeprom; + } + + /* reset the hardware with the new settings */ + e1000_reset(adapter); + + // offer device to EtherCAT master module + adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); + if (adapter->ecdev) { + err = ecdev_open(adapter->ecdev); + if (err) { + ecdev_withdraw(adapter->ecdev); + goto err_register; + } + } else { + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + } + + e1000_vlan_filter_on_off(adapter, false); + + /* print bus type/speed/width info */ + e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", + ((hw->bus_type == e1000_bus_type_pcix) ? 
"-X" : ""), + ((hw->bus_speed == e1000_bus_speed_133) ? 133 : + (hw->bus_speed == e1000_bus_speed_120) ? 120 : + (hw->bus_speed == e1000_bus_speed_100) ? 100 : + (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33), + ((hw->bus_width == e1000_bus_width_64) ? 64 : 32), + netdev->dev_addr); + + if (!adapter->ecdev) { + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + } + + e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); + + cards_found++; + return 0; + +err_register: +err_eeprom: + e1000_phy_hw_reset(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_dma: +err_sw_init: +err_mdio_ioremap: + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); +err_ioremap: + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, bars); +err_pci_reg: + if (!adapter || disable_dev) + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. That could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool disable_dev; + + e1000_down_and_stop(adapter); + e1000_release_manageability(adapter); + + if (adapter->ecdev) { + ecdev_close(adapter->ecdev); + ecdev_withdraw(adapter->ecdev); + } else { + unregister_netdev(netdev); + } + + e1000_phy_hw_reset(hw); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + if (hw->mac_type == e1000_ce4100) + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_selected_regions(pdev, adapter->bars); + + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); + + if (disable_dev) + pci_disable_device(pdev); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * e1000_init_hw_struct MUST be called before this function + **/ +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + + if (e1000_alloc_queues(adapter)) { + e_err(probe, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + e1000_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__E1000_DOWN, &adapter->flags); + + return 0; +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. 
+ **/ +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct e1000_tx_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct e1000_rx_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog task is started, + * and the stack is notified that the interface is ready. + **/ +int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->flags)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + e1000_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { + e1000_update_mng_vlan(adapter); + } + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as e1000_up() */ + clear_bit(__E1000_DOWN, &adapter->flags); + + if (!adapter->ecdev) { + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_start_queue(netdev); + } + + /* fire a link status change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + + return E1000_SUCCESS; + +err_req_irq: + e1000_power_down_phy(adapter); + e1000_free_all_rx_resources(adapter); +err_setup_rx: + e1000_free_all_tx_resources(adapter); +err_setup_tx: + e1000_reset(adapter); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
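+ *
+ * Note: when the device has been claimed by the EtherCAT master
+ * (adapter->ecdev is set), the net device is never registered with the
+ * network stack, so the stack does not invoke this handler; teardown
+ * happens in e1000_remove() via ecdev_close()/ecdev_withdraw().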
+ **/ +int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int count = E1000_CHECK_RESET_COUNT; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(count < 0); + + /* signal that we're down so that the reset task will no longer run */ + set_bit(__E1000_DOWN, &adapter->flags); + clear_bit(__E1000_RESETTING, &adapter->flags); + + e1000_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); + + e1000_free_all_tx_resources(adapter); + e1000_free_all_rx_resources(adapter); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + } + + return 0; +} + +/** + * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary + * @adapter: address of board private structure + * @start: address of beginning of memory + * @len: length of memory + **/ +static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, + unsigned long len) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long begin = (unsigned long)start; + unsigned long end = begin + len; + + /* First rev 82545 and 82546 need to not allow any memory + * write location to cross 64k boundary due to errata 23 + */ + if (hw->mac_type == e1000_82545 || + hw->mac_type == e1000_ce4100 || + hw->mac_type == e1000_82546) { + return ((begin ^ (end - 1)) >> 16) == 0; + } + + return true; +} + +/** + * e1000_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @txdr: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct e1000_tx_buffer) * txdr->count; + txdr->buffer_info = vzalloc(size); + if (!txdr->buffer_info) + return -ENOMEM; + + /* round up to nearest 4K */ + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { +setup_tx_desc_die: + vfree(txdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + void *olddesc = txdr->desc; + dma_addr_t olddma = txdr->dma; + e_err(tx_err, "txdr align check failed: %u bytes at %p\n", + txdr->size, txdr->desc); + /* Try again, without freeing the previous */ + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, + &txdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + goto setup_tx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory " + "for the transmit descriptor ring\n"); + vfree(txdr->buffer_info); + return -ENOMEM; + } else { + /* Free old allocation, new allocation 
was successful */ + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + } + } + memset(txdr->desc, 0, txdr->size); + + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + + return 0; +} + +/** + * e1000_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_tx_resources(adapter, + &adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + u64 tdba; + struct e1000_hw *hw = &adapter->hw; + u32 tdlen, tctl, tipg; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + + switch (adapter->num_tx_queues) { + case 1: + default: + tdba = adapter->tx_ring[0].dma; + tdlen = adapter->tx_ring[0].count * + sizeof(struct e1000_tx_desc); + ew32(TDLEN, tdlen); + ew32(TDBAH, (tdba >> 32)); + ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); + ew32(TDT, 0); + ew32(TDH, 0); + adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? + E1000_TDH : E1000_82542_TDH); + adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? + E1000_TDT : E1000_82542_TDT); + break; + } + + /* Set the default values for the Tx Inter Packet Gap timer */ + if ((hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes)) + tipg = DEFAULT_82543_TIPG_IPGT_FIBER; + else + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + tipg = DEFAULT_82542_TIPG_IPGT; + ipgr1 = DEFAULT_82542_TIPG_IPGR1; + ipgr2 = DEFAULT_82542_TIPG_IPGR2; + break; + default: + ipgr1 = DEFAULT_82543_TIPG_IPGR1; + ipgr2 = DEFAULT_82543_TIPG_IPGR2; + break; + } + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + + ew32(TIDV, adapter->tx_int_delay); + if (hw->mac_type >= e1000_82540) + ew32(TADV, adapter->tx_abs_int_delay); + + /* Program the Transmit Control Register */ + + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + e1000_config_collision_dist(hw); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + if (hw->mac_type < e1000_82543) + adapter->txd_cmd |= E1000_TXD_CMD_RPS; + else + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + /* Cache if we're 82544 running in PCI-X because we'll + * need this to apply a workaround later in the send path. 
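+ * (pcix_82544 is checked in e1000_tx_map() to avoid buffers that end on
+ * an evenly-aligned dword, and in e1000_xmit_frame() to reserve extra
+ * descriptors for that workaround.)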
+ */ + if (hw->mac_type == e1000_82544 && + hw->bus_type == e1000_bus_type_pcix) + adapter->pcix_82544 = true; + + ew32(TCTL, tctl); + +} + +/** + * e1000_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rxdr: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct e1000_rx_buffer) * rxdr->count; + rxdr->buffer_info = vzalloc(size); + if (!rxdr->buffer_info) + return -ENOMEM; + + desc_len = sizeof(struct e1000_rx_desc); + + /* Round up to nearest 4K */ + + rxdr->size = rxdr->count * desc_len; + rxdr->size = ALIGN(rxdr->size, 4096); + + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { +setup_rx_desc_die: + vfree(rxdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + void *olddesc = rxdr->desc; + dma_addr_t olddma = rxdr->dma; + e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", + rxdr->size, rxdr->desc); + /* Try again, without freeing the previous */ + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, + &rxdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + goto setup_rx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory for " + "the Rx descriptor ring\n"); + goto setup_rx_desc_die; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + } + } + memset(rxdr->desc, 0, rxdr->size); + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + rxdr->rx_skb_top = NULL; + + return 0; +} + +/** + * e1000_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_rx_resources(adapter, + &adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = er32(RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + + rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + + if (hw->tbi_compatibility_on == 1) + rctl |= E1000_RCTL_SBP; + else + rctl &= ~E1000_RCTL_SBP; + + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case E1000_RXBUFFER_2048: + default: + 
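+ /* 2048-byte buffers use the base size encoding, so BSEX is cleared
+ * again just below; the larger sizes keep the buffer size extension
+ * bit set.
+ */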
rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + ew32(RCTL, rctl); +} + +/** + * e1000_configure_rx - Configure 8254x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + u64 rdba; + struct e1000_hw *hw = &adapter->hw; + u32 rdlen, rctl, rxcsum; + + if (!adapter->ecdev && adapter->netdev->mtu > ETH_DATA_LEN) { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + if (hw->mac_type >= e1000_82540) { + ew32(RADV, adapter->rx_abs_int_delay); + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + } + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + switch (adapter->num_rx_queues) { + case 1: + default: + rdba = adapter->rx_ring[0].dma; + ew32(RDLEN, rdlen); + ew32(RDBAH, (rdba >> 32)); + ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); + ew32(RDT, 0); + ew32(RDH, 0); + adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? + E1000_RDH : E1000_82542_RDH); + adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 
+ E1000_RDT : E1000_82542_RDT); + break; + } + + /* Enable 82543 Receive Checksum Offload for TCP and UDP */ + if (hw->mac_type >= e1000_82543) { + rxcsum = er32(RXCSUM); + if (adapter->rx_csum) + rxcsum |= E1000_RXCSUM_TUOFL; + else + /* don't need to clear IPPCSE as it defaults to 0 */ + rxcsum &= ~E1000_RXCSUM_TUOFL; + ew32(RXCSUM, rxcsum); + } + + /* Enable Receives */ + ew32(RCTL, rctl | E1000_RCTL_EN); +} + +/** + * e1000_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * e1000_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void e1000_free_all_tx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); +} + +static void +e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_tx_buffer *buffer_info) +{ + if (adapter->ecdev) { + return; + } + + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; + /* buffer_info must be completely set up in the transmit path */ +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + netdev_reset_queue(adapter->netdev); + size = sizeof(struct e1000_tx_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->last_tx_tso = false; + + writel(0, hw->hw_addr + tx_ring->tdh); + writel(0, hw->hw_addr + tx_ring->tdt); +} + +/** + * e1000_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * e1000_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + 
e1000_clean_rx_ring(adapter, rx_ring); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * e1000_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +void e1000_free_all_rx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) +static unsigned int e1000_frag_len(const struct e1000_adapter *a) +{ + return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static void *e1000_alloc_frag(const struct e1000_adapter *a) +{ + unsigned int len = e1000_frag_len(a); + u8 *data = netdev_alloc_frag(len); + + if (likely(data)) + data += E1000_HEADROOM; + return data; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_rx_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx netfrags */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (adapter->clean_rx == e1000_clean_rx_irq) { + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.data) { + skb_free_frag(buffer_info->rxbuf.data); + buffer_info->rxbuf.data = NULL; + } + } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { + if (buffer_info->dma) + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.page) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + } + } + + buffer_info->dma = 0; + } + + /* there also may be some cached data from a chained receive */ + napi_free_frags(&adapter->napi); + rx_ring->rx_skb_top = NULL; + + size = sizeof(struct e1000_rx_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, hw->hw_addr + rx_ring->rdh); + writel(0, hw->hw_addr + rx_ring->rdt); +} + +/** + * e1000_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +/* The 82542 2.0 (revision 2) needs to have the receive unit in reset + * and memory write and invalidate disabled for certain operations + */ +static void e1000_enter_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + e1000_pci_clear_mwi(hw); + + rctl = er32(RCTL); + rctl |= E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (!adapter->ecdev && netif_running(netdev)) + e1000_clean_all_rx_rings(adapter); +} + +static void e1000_leave_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct 
net_device *netdev = adapter->netdev; + u32 rctl; + + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + + if (!adapter->netdev && netif_running(netdev)) { + /* No need to loop, because 82542 supports only 1 queue */ + struct e1000_rx_ring *ring = &adapter->rx_ring[0]; + e1000_configure_rx(adapter); + if (adapter->ecdev) { + /* fill rx ring completely! */ + adapter->alloc_rx_buf(adapter, ring, ring->count); + } else { + /* this one leaves the last ring element unallocated! */ + adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); + } + + } +} + +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); + + e1000_rar_set(hw, hw->mac_addr, 0); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + return 0; +} + +/** + * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void e1000_set_rx_mode(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + bool use_uc = false; + u32 rctl; + u32 hash_value; + int i, rar_entries = E1000_RAR_ENTRIES; + int mta_reg_count = E1000_NUM_MTA_REGISTERS; + u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); + + if (!mcarray) + return; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = er32(RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + rctl &= ~E1000_RCTL_VFE; + } else { + if (netdev->flags & IFF_ALLMULTI) + rctl |= E1000_RCTL_MPE; + else + rctl &= ~E1000_RCTL_MPE; + /* Enable VLAN filter if there is a VLAN */ + if (e1000_vlan_used(adapter)) + rctl |= E1000_RCTL_VFE; + } + + if (netdev_uc_count(netdev) > rar_entries - 1) { + rctl |= E1000_RCTL_UPE; + } else if (!(netdev->flags & IFF_PROMISC)) { + rctl &= ~E1000_RCTL_UPE; + use_uc = true; + } + + ew32(RCTL, rctl); + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + /* load the first 14 addresses into the exact filters 1-14. Unicast + * addresses take precedence to avoid disabling unicast filtering + * when possible. 
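+ * (The RARs are the controller's exact-match Receive Address Registers;
+ * multicast addresses that do not fit are hashed into the MTA via the
+ * mcarray[] table built below.)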
+ * + * RAR 0 is used for the station MAC address + * if there are not 14 addresses, go ahead and clear the filters + */ + i = 1; + if (use_uc) + netdev_for_each_uc_addr(ha, netdev) { + if (i == rar_entries) + break; + e1000_rar_set(hw, ha->addr, i++); + } + + netdev_for_each_mc_addr(ha, netdev) { + if (i == rar_entries) { + /* load any remaining addresses into the hash table */ + u32 hash_reg, hash_bit, mta; + hash_value = e1000_hash_mc_addr(hw, ha->addr); + hash_reg = (hash_value >> 5) & 0x7F; + hash_bit = hash_value & 0x1F; + mta = (1 << hash_bit); + mcarray[hash_reg] |= mta; + } else { + e1000_rar_set(hw, ha->addr, i++); + } + } + + for (; i < rar_entries; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); + E1000_WRITE_FLUSH(); + } + + /* write the hash table completely, write from bottom to avoid + * both stupid write combining chipsets, and flushing each write + */ + for (i = mta_reg_count - 1; i >= 0 ; i--) { + /* If we are on an 82544 has an errata where writing odd + * offsets overwrites the previous even offset, but writing + * backwards over the range solves the issue by always + * writing the odd offset first + */ + E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); + } + E1000_WRITE_FLUSH(); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + kfree(mcarray); +} + +/** + * e1000_update_phy_info_task - get phy info + * @work: work struct contained inside adapter struct + * + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void e1000_update_phy_info_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + phy_info_task.work); + + e1000_phy_get_info(&adapter->hw, &adapter->phy_info); +} + +/** + * e1000_82547_tx_fifo_stall_task - task to complete work + * @work: work struct contained inside adapter struct + **/ +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + fifo_stall_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 tctl; + + if (atomic_read(&adapter->tx_fifo_stall)) { + if ((er32(TDT) == er32(TDH)) && + (er32(TDFT) == er32(TDFH)) && + (er32(TDFTS) == er32(TDFHS))) { + tctl = er32(TCTL); + ew32(TCTL, tctl & ~E1000_TCTL_EN); + ew32(TDFT, adapter->tx_head_addr); + ew32(TDFH, adapter->tx_head_addr); + ew32(TDFTS, adapter->tx_head_addr); + ew32(TDFHS, adapter->tx_head_addr); + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); + + adapter->tx_fifo_head = 0; + atomic_set(&adapter->tx_fifo_stall, 0); + netif_wake_queue(netdev); + } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } + } +} + +bool e1000_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or rx + * sequence error interrupt (except on intel ce4100). 
+ * get_link_status will stay false until the + * e1000_check_for_link establishes link for copper adapters + * ONLY + */ + switch (hw->media_type) { + case e1000_media_type_copper: + if (hw->mac_type == e1000_ce4100) + hw->get_link_status = 1; + if (hw->get_link_status) { + e1000_check_for_link(hw); + link_active = !hw->get_link_status; + } else { + link_active = true; + } + break; + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_active = hw->serdes_has_link; + break; + default: + break; + } + + return link_active; +} + +/** + * e1000_watchdog - work function + * @work: work struct contained inside adapter struct + **/ +static void e1000_watchdog(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + watchdog_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_ring *txdr = adapter->tx_ring; + u32 link, tctl; + + link = e1000_has_link(adapter); + if (!adapter->ecdev && (netif_carrier_ok(netdev)) && link) + goto link_up; + + if (link) { + if ((adapter->ecdev && !ecdev_get_link(adapter->ecdev)) || + (!adapter->ecdev && !netif_carrier_ok(netdev))) { + u32 ctrl; + /* update snapshot of PHY registers on LSC */ + e1000_get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = er32(CTRL); + pr_info("%s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & + E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & + E1000_CTRL_RFCE) ? "RX" : ((ctrl & + E1000_CTRL_TFCE) ? "TX" : "None"))); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + /* maybe add some timeout factor ? 
*/ + break; + } + + /* enable transmits in the hardware */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + if (adapter->ecdev) { + ecdev_set_link(adapter->ecdev, 1); + } + else { + netif_carrier_on(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + } + adapter->smartspeed = 0; + } + } else { + if ((adapter->ecdev && ecdev_get_link(adapter->ecdev)) + || (!adapter->ecdev && netif_carrier_ok(netdev))) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + pr_info("%s NIC Link is Down\n", + netdev->name); + + if (adapter->ecdev) { + ecdev_set_link(adapter->ecdev, 0); + } else { + netif_carrier_off(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + } + } + + e1000_smartspeed(adapter); + } + +link_up: + e1000_update_stats(adapter); + + hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + hw->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; + adapter->gorcl_old = adapter->stats.gorcl; + adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; + adapter->gotcl_old = adapter->stats.gotcl; + + e1000_update_adaptive(hw); + + if (!adapter->ecdev && !netif_carrier_ok(netdev)) { + if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* exit immediately since reset is imminent */ + return; + } + } + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { + /* Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; + u32 dif = (adapter->gotcl > adapter->gorcl ? + adapter->gotcl - adapter->gorcl : + adapter->gorcl - adapter->gotcl) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + ew32(ITR, 1000000000 / (itr * 256)); + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + ew32(ICS, E1000_ICS_RXDMT0); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = true; + + /* Reschedule the task */ + if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
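+ *
+ * For reference, e1000_set_itr() below maps the three latency classes to
+ * roughly 70000 (lowest_latency), 20000 (low_latency) and 4000
+ * (bulk_latency) interrupts per second and programs the register as
+ * ew32(ITR, 1000000000 / (new_itr * 256)), i.e. the inter-interrupt
+ * interval expressed in 256 ns units.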
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see e1000_param.c) + **/ +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, int bytes) +{ + unsigned int retval = itr_setting; + struct e1000_hw *hw = &adapter->hw; + + if (unlikely(hw->mac_type < e1000_82540)) + goto update_itr_done; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* jumbo frames get bulk treatment*/ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* jumbo frames need bulk latency setting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) + retval = bulk_latency; + else if (packets <= 2 && bytes < 512) + retval = lowest_latency; + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + if (unlikely(hw->mac_type < e1000_82540)) + return; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (unlikely(adapter->link_speed != SPEED_1000)) { + current_itr = 0; + new_itr = 4000; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + ew32(ITR, 1000000000 / (new_itr * 256)); + } +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_NO_FCS 0x00000010 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + + if (skb_is_gso(skb)) { + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_is_gso_v6(skb)) { + tcp_v6_gso_csum_prep(skb); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (++i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn(drv, "checksum_partial proto=%x!\n", + skb->protocol); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + 
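+ /* tucss (set above) is where hardware checksumming starts and tucso is
+ * where the result is inserted; tucse = 0 below means checksum to the
+ * end of the packet.
+ */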
+    context_desc->upper_setup.tcp_fields.tucse = 0;
+    context_desc->tcp_seg_setup.data = 0;
+    context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+    buffer_info->time_stamp = jiffies;
+    buffer_info->next_to_watch = i;
+
+    if (unlikely(++i == tx_ring->count))
+        i = 0;
+
+    tx_ring->next_to_use = i;
+
+    return true;
+}
+
+#define E1000_MAX_TXD_PWR    12
+#define E1000_MAX_DATA_PER_TXD    (1<<E1000_MAX_TXD_PWR)
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+                        struct e1000_tx_ring *tx_ring,
+                        struct sk_buff *skb, unsigned int first,
+                        unsigned int max_per_txd, unsigned int nr_frags,
+                        unsigned int mss)
+{
+    struct e1000_hw *hw = &adapter->hw;
+    struct pci_dev *pdev = adapter->pdev;
+    struct e1000_tx_buffer *buffer_info;
+    unsigned int len = skb_headlen(skb);
+    unsigned int offset = 0, size, count = 0, i;
+    unsigned int f, bytecount, segs;
+
+    i = tx_ring->next_to_use;
+
+    while (len) {
+        buffer_info = &tx_ring->buffer_info[i];
+        size = min(len, max_per_txd);
+        /* Workaround for Controller erratum --
+         * descriptor for non-tso packet in a linear SKB that follows a
+         * tso gets written back prematurely before the data is fully
+         * DMA'd to the controller
+         */
+        if (!skb->data_len && tx_ring->last_tx_tso &&
+            !skb_is_gso(skb)) {
+            tx_ring->last_tx_tso = false;
+            size -= 4;
+        }
+
+        /* Workaround for premature desc write-backs
+         * in TSO mode. Append 4-byte sentinel desc
+         */
+        if (unlikely(mss && !nr_frags && size == len && size > 8))
+            size -= 4;
+        /* work-around for errata 10 and it applies
+         * to all controllers in PCI-X mode
+         * The fix is to make sure that the first descriptor of a
+         * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
+         */
+        if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+                     (size > 2015) && count == 0))
+            size = 2015;
+
+        /* Workaround for potential 82544 hang in PCI-X. Avoid
+         * terminating buffers within evenly-aligned dwords.
+         */
+        if (unlikely(adapter->pcix_82544 &&
+                     !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+                     size > 4))
+            size -= 4;
+
+        buffer_info->length = size;
+        /* set time_stamp *before* dma to help avoid a possible race */
+        buffer_info->time_stamp = jiffies;
+        buffer_info->mapped_as_page = false;
+        buffer_info->dma = dma_map_single(&pdev->dev,
+                                          skb->data + offset,
+                                          size, DMA_TO_DEVICE);
+        if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+            goto dma_error;
+        buffer_info->next_to_watch = i;
+
+        len -= size;
+        offset += size;
+        count++;
+        if (len) {
+            i++;
+            if (unlikely(i == tx_ring->count))
+                i = 0;
+        }
+    }
+
+    for (f = 0; f < nr_frags; f++) {
+        const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+        len = skb_frag_size(frag);
+        offset = 0;
+
+        while (len) {
+            unsigned long bufend;
+            i++;
+            if (unlikely(i == tx_ring->count))
+                i = 0;
+
+            buffer_info = &tx_ring->buffer_info[i];
+            size = min(len, max_per_txd);
+            /* Workaround for premature desc write-backs
+             * in TSO mode. Append 4-byte sentinel desc
+             */
+            if (unlikely(mss && f == (nr_frags-1) &&
+                         size == len && size > 8))
+                size -= 4;
+            /* Workaround for potential 82544 hang in PCI-X.
+             * Avoid terminating buffers within evenly-aligned
+             * dwords.
+ */ + bufend = (unsigned long) + page_to_phys(skb_frag_page(frag)); + bufend += offset + size - 1; + if (unlikely(adapter->pcix_82544 && + !(bufend & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = true; + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, + offset, size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + } + } + + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i == 0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + return 0; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, int tx_flags, + int count) +{ + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_tx_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + txd_lower &= ~(E1000_TXD_CMD_IFCS); + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + if (unlikely(++i == tx_ring->count)) + i = 0; + } + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + + tx_ring->next_to_use = i; +} + +/* 82547 workaround to avoid controller hang in half-duplex environment. + * The workaround is to avoid queuing a large packet that would span + * the internal Tx FIFO ring boundary by notifying the stack to resend + * the packet at a later time. This gives the Tx FIFO an opportunity to + * flush all packets. When that occurs, we reset the Tx FIFO pointers + * to the beginning of the Tx FIFO. 
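+ * The deferred FIFO reset itself is carried out by
+ * e1000_82547_tx_fifo_stall_task() once the hardware indicates that the
+ * FIFO has drained.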
+ */ + +#define E1000_FIFO_HDR 0x10 +#define E1000_82547_PAD_LEN 0x3E0 + +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; + u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; + + skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); + + if (adapter->link_duplex != HALF_DUPLEX) + goto no_fifo_stall_required; + + if (atomic_read(&adapter->tx_fifo_stall)) + return 1; + + if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { + atomic_set(&adapter->tx_fifo_stall, 1); + return 1; + } + +no_fifo_stall_required: + adapter->tx_fifo_head += skb_fifo_len; + if (adapter->tx_fifo_head >= adapter->tx_fifo_size) + adapter->tx_fifo_head -= adapter->tx_fifo_size; + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + + if (adapter->ecdev) { + return -EBUSY; + } + + netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(E1000_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, + struct e1000_tx_ring *tx_ring, int size) +{ + if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __e1000_maybe_stop_tx(netdev, size); +} + +#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X)) +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *tx_ring; + unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + __be16 protocol = vlan_get_protocol(skb); + + /* This goes back to the question of how to logically map a Tx queue + * to a flow. Right now, performance is impacted slightly negatively + * if using multiple Tx queues. If the stack breaks away from a + * single qdisc implementation, we can look at this again. + */ + tx_ring = adapter->tx_ring; + + /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, + * packets may get corrupted during padding by HW. + * To WA this issue, pad all small packets manually. + */ + if (eth_skb_pad(skb)) + return NETDEV_TX_OK; + + mss = skb_shinfo(skb)->gso_size; + /* The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. 
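+ * (That is why max_per_txd is clamped to min(mss << 2, max_per_txd) just
+ * below, keeping ceil(buffer len / mss) at four or less for any single
+ * buffer.)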
+ */ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && hdr_len == len) { + switch (hw->mac_type) { + case e1000_82544: { + unsigned int pull_size; + + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements + * NOTE: this is a TSO only workaround + * if end byte alignment not correct move us + * into the next dword + */ + if ((unsigned long)(skb_tail_pointer(skb) - 1) + & 4) + break; + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err(drv, "__pskb_pull_tail " + "failed.\n"); + if (!adapter->ecdev) { + dev_kfree_skb_any(skb); + } + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + break; + } + default: + /* do nothing */ + break; + } + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + /* Controller Erratum workaround */ + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) + count++; + + count += TXD_USE_COUNT(len, max_txd_pwr); + + if (adapter->pcix_82544) + count++; + + /* work-around for errata 10 and it applies to all controllers + * in PCI-X mode, so add one more descriptor to the count + */ + if (unlikely((hw->bus_type == e1000_bus_type_pcix) && + (len > 2015))) + count++; + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), + max_txd_pwr); + if (adapter->pcix_82544) + count += nr_frags; + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time + */ + if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) + return NETDEV_TX_BUSY; + + if (unlikely((hw->mac_type == e1000_82547) && + (e1000_82547_fifo_workaround(adapter, skb)))) { + if (!adapter->ecdev) { + netif_stop_queue(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } + return NETDEV_TX_BUSY; + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << + E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, tx_ring, skb, protocol); + if (tso < 0) { + if (!adapter->ecdev) { + dev_kfree_skb_any(skb); + } + return NETDEV_TX_OK; + } + + if (likely(tso)) { + if (likely(hw->mac_type != e1000_82544)) + tx_ring->last_tx_tso = true; + tx_flags |= E1000_TX_FLAGS_TSO; + } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) + tx_flags |= E1000_TX_FLAGS_CSUM; + + if (protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + if (unlikely(skb->no_fcs)) + tx_flags |= E1000_TX_FLAGS_NO_FCS; + + count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, + nr_frags, mss); + + if (count) { + /* The descriptors needed is higher than other Intel drivers + * due to a number of workarounds. 
The breakdown is below: + * Data descriptors: MAX_SKB_FRAGS + 1 + * Context Descriptor: 1 + * Keep head from touching tail: 2 + * Workarounds: 3 + */ + int desc_needed = MAX_SKB_FRAGS + 7; + + if (!adapter->ecdev) { + netdev_sent_queue(netdev, skb->len); + skb_tx_timestamp(skb); + } + + e1000_tx_queue(adapter, tx_ring, tx_flags, count); + + /* 82544 potentially requires twice as many data descriptors + * in order to guarantee buffers don't end on evenly-aligned + * dwords + */ + if (adapter->pcix_82544) + desc_needed += MAX_SKB_FRAGS + 1; + + /* Make sure there is space in the ring for the next send. */ + e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); + + if (!netdev_xmit_more() || + netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { + writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); + } + } else { + if (!adapter->ecdev) { + dev_kfree_skb_any(skb); + } + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +#define NUM_REGS 38 /* 1 based count */ +static void e1000_regdump(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 regs[NUM_REGS]; + u32 *regs_buff = regs; + int i = 0; + + static const char * const reg_name[] = { + "CTRL", "STATUS", + "RCTL", "RDLEN", "RDH", "RDT", "RDTR", + "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", + "TIDV", "TXDCTL", "TADV", "TARC0", + "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", + "TXDCTL1", "TARC1", + "CTRL_EXT", "ERT", "RDBAL", "RDBAH", + "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", + "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" + }; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDBAL); + regs_buff[9] = er32(TDBAH); + regs_buff[10] = er32(TDLEN); + regs_buff[11] = er32(TDH); + regs_buff[12] = er32(TDT); + regs_buff[13] = er32(TIDV); + regs_buff[14] = er32(TXDCTL); + regs_buff[15] = er32(TADV); + regs_buff[16] = er32(TARC0); + + regs_buff[17] = er32(TDBAL1); + regs_buff[18] = er32(TDBAH1); + regs_buff[19] = er32(TDLEN1); + regs_buff[20] = er32(TDH1); + regs_buff[21] = er32(TDT1); + regs_buff[22] = er32(TXDCTL1); + regs_buff[23] = er32(TARC1); + regs_buff[24] = er32(CTRL_EXT); + regs_buff[25] = er32(ERT); + regs_buff[26] = er32(RDBAL0); + regs_buff[27] = er32(RDBAH0); + regs_buff[28] = er32(TDFH); + regs_buff[29] = er32(TDFT); + regs_buff[30] = er32(TDFHS); + regs_buff[31] = er32(TDFTS); + regs_buff[32] = er32(TDFPC); + regs_buff[33] = er32(RDFH); + regs_buff[34] = er32(RDFT); + regs_buff[35] = er32(RDFHS); + regs_buff[36] = er32(RDFTS); + regs_buff[37] = er32(RDFPC); + + pr_info("Register dump\n"); + for (i = 0; i < NUM_REGS; i++) + pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); +} + +/* + * e1000_dump: Print registers, tx ring and rx ring + */ +static void e1000_dump(struct e1000_adapter *adapter) +{ + /* this code doesn't handle multiple rings */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + + if (!netif_msg_hw(adapter)) + return; + + /* Print Registers */ + e1000_regdump(adapter); + + /* transmit dump */ + pr_info("TX Desc ring0 dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on Write Back) | + * 
+--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * +----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); + pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); + + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)tx_desc; + const char *type; + + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + type = "NTC/U"; + else if (i == tx_ring->next_to_use) + type = "NTU"; + else if (i == tx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", + ((le64_to_cpu(u->b) & (1<<20)) ? 
'd' : 'c'), i, + le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->length, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, buffer_info->skb, type); + } + +rx_ring_summary: + /* receive dump */ + pr_info("\nRX Desc ring dump\n"); + + /* Legacy Receive Descriptor Format + * + * +-----------------------------------------------------+ + * | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * | VLAN Tag | Errors | Status 0 | Packet csum | Length | + * +-----------------------------------------------------+ + * 63 48 47 40 39 32 31 16 15 0 + */ + pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); + + if (!netif_msg_rx_status(adapter)) + goto exit; + + for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)rx_desc; + const char *type; + + if (i == rx_ring->next_to_use) + type = "NTU"; + else if (i == rx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", + i, le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->rxbuf.data, type); + } /* for */ + + /* dump the descriptor caches */ + /* rx */ + pr_info("Rx descriptor cache in 64bit format\n"); + for (i = 0x6000; i <= 0x63FF ; i += 0x10) { + pr_info("R%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } + /* tx */ + pr_info("Tx descriptor cache in 64bit format\n"); + for (i = 0x7000; i <= 0x73FF ; i += 0x10) { + pr_info("T%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } +exit: + return; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: number of the Tx queue that hung (unused) + **/ +static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = + container_of(work, struct e1000_adapter, reset_task); + + e_err(drv, "Reset adapter\n"); + e1000_reinit_locked(adapter); +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + if (adapter->ecdev) { + return -EBUSY; + } + + /* Adapter-specific max frame size limits. */ + switch (hw->mac_type) { + case e1000_undefined ... e1000_82542_rev2_1: + if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { + e_err(probe, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + break; + default: + /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. 
*/ + break; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + /* e1000_down has a dependency on max_frame_size */ + hw->max_frame_size = max_frame; + if (netif_running(netdev)) { + /* prevent buffers from being reallocated */ + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; + e1000_down(adapter); + } + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. RXBUFFER_2048 --> size-4096 slab + * however with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= E1000_RXBUFFER_2048) + adapter->rx_buffer_len = E1000_RXBUFFER_2048; + else +#if (PAGE_SIZE >= E1000_RXBUFFER_16384) + adapter->rx_buffer_len = E1000_RXBUFFER_16384; +#elif (PAGE_SIZE >= E1000_RXBUFFER_4096) + adapter->rx_buffer_len = PAGE_SIZE; +#endif + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if (!hw->tbi_compatibility_on && + ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || + (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + e1000_up(adapter); + else + e1000_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->flags); + + return 0; +} + +/** + * e1000_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void e1000_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned long flags = 0; + u16 phy_tmp; + +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
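+	 * Note: in EtherCAT mode (adapter->ecdev set) the stats_lock is not
+	 * taken; see the !adapter->ecdev checks around the lock/unlock calls
+	 * below.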
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + if (!adapter->ecdev) { + spin_lock_irqsave(&adapter->stats_lock, flags); + } + + /* these counters are modified from e1000_tbi_adjust_stats, + * called from the interrupt context, so they must only + * be written while holding adapter->stats_lock + */ + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorcl += er32(GORCL); + adapter->stats.gorch += er32(GORCH); + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.prc64 += er32(PRC64); + adapter->stats.prc127 += er32(PRC127); + adapter->stats.prc255 += er32(PRC255); + adapter->stats.prc511 += er32(PRC511); + adapter->stats.prc1023 += er32(PRC1023); + adapter->stats.prc1522 += er32(PRC1522); + + adapter->stats.symerrs += er32(SYMERRS); + adapter->stats.mpc += er32(MPC); + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + adapter->stats.sec += er32(SEC); + adapter->stats.rlec += er32(RLEC); + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.fcruc += er32(FCRUC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotcl += er32(GOTCL); + adapter->stats.gotch += er32(GOTCH); + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + adapter->stats.rfc += er32(RFC); + adapter->stats.rjc += er32(RJC); + adapter->stats.torl += er32(TORL); + adapter->stats.torh += er32(TORH); + adapter->stats.totl += er32(TOTL); + adapter->stats.toth += er32(TOTH); + adapter->stats.tpr += er32(TPR); + + adapter->stats.ptc64 += er32(PTC64); + adapter->stats.ptc127 += er32(PTC127); + adapter->stats.ptc255 += er32(PTC255); + adapter->stats.ptc511 += er32(PTC511); + adapter->stats.ptc1023 += er32(PTC1023); + adapter->stats.ptc1522 += er32(PTC1522); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* used for adaptive IFS */ + + hw->tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->tx_packet_delta; + hw->collision_delta = er32(COLC); + adapter->stats.colc += hw->collision_delta; + + if (hw->mac_type >= e1000_82543) { + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.tncrs += er32(TNCRS); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + } + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; + netdev->stats.rx_length_errors = adapter->stats.rlerrc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; + netdev->stats.tx_errors = adapter->stats.txerrc; + 
netdev->stats.tx_aborted_errors = adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + if (hw->bad_tx_carr_stats_fd && + adapter->link_duplex == FULL_DUPLEX) { + netdev->stats.tx_carrier_errors = 0; + adapter->stats.tncrs = 0; + } + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Phy Stats */ + if (hw->media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && + (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + + if ((hw->mac_type <= e1000_82546) && + (hw->phy_type == e1000_phy_m88) && + !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) + adapter->phy_stats.receive_errors += phy_tmp; + } + + /* Management Stats */ + if (hw->has_smbus) { + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + } + + if (!adapter->ecdev) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + } +} + +void ec_poll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) { + e1000_watchdog(&adapter->watchdog_task.work); + adapter->ec_watchdog_jiffies = jiffies; + } + + e1000_intr(0, netdev); +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (unlikely((!icr))) + return IRQ_NONE; /* Not our interrupt */ + + /* we might have caused the interrupt, but the above + * read cleared it, and just in case the driver is + * down there is nothing to do so return handled + */ + if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) + return IRQ_HANDLED; + + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { + hw->get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 1); + } + + if (adapter->ecdev) { + int i, ec_work_done = 0; + for (i = 0; i < E1000_MAX_INTR; i++) { + if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring, + &ec_work_done, 100) && + !e1000_clean_tx_irq(adapter, adapter->tx_ring))) { + break; + } + } + } else { + /* disable interrupts, without the synchronize_irq bit */ + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + + if (likely(napi_schedule_prep(&adapter->napi))) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } else { + /* this really should not happen! 
if it does it is basically a + * bug, but not a hard error, so enable ints and continue + */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + } + + return IRQ_HANDLED; +} + +/** + * e1000_clean - NAPI Rx polling callback + * @napi: napi struct containing references to driver info + * @budget: budget given to driver for receive packets + **/ +static int e1000_clean(struct napi_struct *napi, int budget) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); + int tx_clean_complete = 0, work_done = 0; + + tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); + + adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); + + if (!tx_clean_complete || work_done == budget) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) { + if (likely(adapter->itr_setting & 3)) + e1000_set_itr(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return work_done; +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * @tx_ring: ring to clean + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; + unsigned int bytes_compl = 0, pkts_compl = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + dma_rmb(); /* read buffer_info after eop_desc */ + for ( ; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + + } + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + tx_desc->upper.data = 0; + + if (unlikely(++i == tx_ring->count)) + i = 0; + } + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, + * which will reuse the cleaned buffers. + */ + smp_store_release(&tx_ring->next_to_clean, i); + + if (!adapter->ecdev) { + netdev_completed_queue(netdev, pkts_compl, bytes_compl); + } + +#define TX_WAKE_THRESHOLD 32 + if (!adapter->ecdev && unlikely(count && netif_carrier_ok(netdev) && + E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
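+		 * (This barrier pairs with the smp_mb() issued after
+		 * netif_stop_queue() in __e1000_maybe_stop_tx() above.)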
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->flags))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (!adapter->ecdev && adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[eop].time_stamp && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%lu>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + (unsigned long)(tx_ring - adapter->tx_ring), + readl(hw->hw_addr + tx_ring->tdh), + readl(hw->hw_addr + tx_ring->tdt), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); + e1000_dump(adapter); + netif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + netdev->stats.tx_bytes += total_tx_bytes; + netdev->stats.tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_rx_checksum - Receive Checksum Offload for 82543 + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @skb: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + skb_checksum_none_assert(skb); + + /* 82543 or newer only */ + if (unlikely(hw->mac_type < e1000_82543)) + return; + /* Ignore Checksum bit is set */ + if (unlikely(status & E1000_RXD_STAT_IXSM)) + return; + /* TCP/UDP checksum error bit is set */ + if (unlikely(errors & E1000_RXD_ERR_TCPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + /* TCP/UDP Checksum has not been calculated */ + if (!(status & E1000_RXD_STAT_TCPCS)) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + if (likely(status & E1000_RXD_STAT_TCPCS)) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + adapter->hw_csum_good++; +} + +/** + * e1000_consume_page - helper function for jumbo Rx path + * @bi: software descriptor shadow data + * @skb: skb being modified + * @length: length of data being added + **/ +static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->rxbuf.page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; +} + +/** + * e1000_receive_skb - helper function to handle rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + */ +static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, + __le16 vlan, struct sk_buff *skb) +{ + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (status & E1000_RXD_STAT_VP) { + u16 vid = le16_to_cpu(vlan) & 
E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + napi_gro_receive(&adapter->napi, skb); +} + +/** + * e1000_tbi_adjust_stats + * @hw: Struct containing variables accessed by shared code + * @stats: point to stats struct + * @frame_len: The length of the frame in question + * @mac_addr: The Ethernet destination address of the frame in question + * + * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT + */ +static void e1000_tbi_adjust_stats(struct e1000_hw *hw, + struct e1000_hw_stats *stats, + u32 frame_len, const u8 *mac_addr) +{ + u64 carry_bit; + + /* First adjust the frame length. */ + frame_len--; + /* We need to adjust the statistics counters, since the hardware + * counters overcount this packet as a CRC error and undercount + * the packet as a good packet + */ + /* This packet should not be counted as a CRC error. */ + stats->crcerrs--; + /* This packet does count as a Good Packet Received. */ + stats->gprc++; + + /* Adjust the Good Octets received counters */ + carry_bit = 0x80000000 & stats->gorcl; + stats->gorcl += frame_len; + /* If the high bit of Gorcl (the low 32 bits of the Good Octets + * Received Count) was one before the addition, + * AND it is zero after, then we lost the carry out, + * need to add one to Gorch (Good Octets Received Count High). + * This could be simplified if all environments supported + * 64-bit integers. + */ + if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) + stats->gorch++; + /* Is this a broadcast or multicast? Check broadcast first, + * since the test for a multicast frame will test positive on + * a broadcast frame. + */ + if (is_broadcast_ether_addr(mac_addr)) + stats->bprc++; + else if (is_multicast_ether_addr(mac_addr)) + stats->mprc++; + + if (frame_len == hw->max_frame_size) { + /* In this case, the hardware has overcounted the number of + * oversize frames. + */ + if (stats->roc > 0) + stats->roc--; + } + + /* Adjust the bin counters when the extra byte put the frame in the + * wrong bin. Remember that the frame_len was adjusted above. 
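+	 * For example, an adjusted frame_len of 64 means the hardware counted
+	 * the frame in the 65..127 octet bin, so prc127 is decremented and
+	 * prc64 incremented.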
+ */ + if (frame_len == 64) { + stats->prc64++; + stats->prc127--; + } else if (frame_len == 127) { + stats->prc127++; + stats->prc255--; + } else if (frame_len == 255) { + stats->prc255++; + stats->prc511--; + } else if (frame_len == 511) { + stats->prc511++; + stats->prc1023--; + } else if (frame_len == 1023) { + stats->prc1023++; + stats->prc1522--; + } else if (frame_len == 1522) { + stats->prc1522++; + } +} + +static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, + u8 status, u8 errors, + u32 length, const u8 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u8 last_byte = *(data + length - 1); + + if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { + unsigned long irq_flags; + + spin_lock_irqsave(&adapter->stats_lock, irq_flags); + e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); + + return true; + } + + return false; +} + +static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, + unsigned int bufsz) +{ + struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); + + if (unlikely(!skb)) + adapter->alloc_rx_buff_failed++; + return skb; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + * @work_to_do: max amount of work allowed for this call to do + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + */ +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + + if (++i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + u8 *mapped = page_address(buffer_info->rxbuf.page); + + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, mapped)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + /* an error means any chain goes out the window + * too + */ + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + } + +#define rxtop rx_ring->rx_skb_top +process_skb: + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = napi_get_frags(&adapter->napi); + if (!rxtop) + 
break; + + skb_fill_page_desc(rxtop, 0, + buffer_info->rxbuf.page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + struct page *p; + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page + */ + p = buffer_info->rxbuf.page; + if (length <= copybreak) { + u8 *vaddr; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + length -= 4; + skb = e1000_alloc_rx_skb(adapter, + length); + if (!skb) + break; + + vaddr = kmap_atomic(p); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr); + /* re-use the page, so don't erase + * buffer_info->rxbuf.page + */ + skb_put(skb, length); + e1000_rx_checksum(adapter, + status | rx_desc->errors << 24, + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_receive_skb(adapter, status, + rx_desc->special, skb); + goto next_desc; + } else { + skb = napi_get_frags(&adapter->napi); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + skb_fill_page_desc(skb, 0, p, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += (skb->len - 4); /* don't count FCS */ + if (likely(!(netdev->features & NETIF_F_RXFCS))) + pskb_trim(skb, skb->len - 4); + total_rx_packets++; + + if (status & E1000_RXD_STAT_VP) { + __le16 vlan = rx_desc->special; + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + napi_gro_frags(&adapter->napi); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/* this should improve performance for small packets with large amounts + * of reassembly being done in the stack + */ +static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, + struct e1000_rx_buffer *buffer_info, + u32 length, const void *data) +{ + struct sk_buff *skb; + + if (length > copybreak) + return NULL; + + skb = e1000_alloc_rx_skb(adapter, length); + if (!skb) + return NULL; + + dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, + length, DMA_FROM_DEVICE); + + skb_put_data(skb, data, length); + + return skb; +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + 
* @work_to_do: max amount of work allowed for this call to do + */ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 *data; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + length = le16_to_cpu(rx_desc->length); + + data = buffer_info->rxbuf.data; + if (!adapter->ecdev) { + prefetch(data); + skb = e1000_copybreak(adapter, buffer_info, length, data); + if (!skb) { + unsigned int frag_len = e1000_frag_len(adapter); + + skb = build_skb(data - E1000_HEADROOM, frag_len); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + skb_reserve(skb, E1000_HEADROOM); + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + buffer_info->rxbuf.data = NULL; + } + } + + if (++i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + + /* !EOP means multiple descriptors were used to store a single + * packet, if thats the case we need to toss it. In fact, we + * to toss every packet with the EOP bit clear and the next + * frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(status & E1000_RXD_STAT_EOP))) + adapter->discarding = true; + + if (adapter->discarding) { + /* All receives must fit into a single buffer */ + netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); + if (!adapter->ecdev) { + dev_kfree_skb(skb); + } + if (status & E1000_RXD_STAT_EOP) + adapter->discarding = false; + goto next_desc; + } + + if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, data)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + if (!adapter->ecdev) { + dev_kfree_skb(skb); + } + goto next_desc; + } + } + +process_skb: + total_rx_bytes += (length - 4); /* don't count FCS */ + total_rx_packets++; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + /* adjust length to remove Ethernet CRC, this must be + * done after the TBI_ACCEPT workaround above + */ + length -= 4; + + if (!adapter->ecdev) { + if (buffer_info->rxbuf.data == NULL) + skb_put(skb, length); + else /* copybreak skb */ + skb_trim(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + } + + if (adapter->ecdev) { + dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + ecdev_receive(adapter->ecdev, data, length); + + // No need to detect link status as + // long as frames are received: Reset watchdog. 
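+			// (ec_poll() only re-runs e1000_watchdog() when this
+			// timestamp has not been refreshed for 2 * HZ jiffies,
+			// i.e. two seconds.)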
+ adapter->ec_watchdog_jiffies = jiffies; + + dma_sync_single_for_device(&adapter->pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + } else { + e1000_receive_skb(adapter, status, rx_desc->special, skb); + } + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @adapter: address of board private structure + * @rx_ring: pointer to receive ring structure + * @cleaned_count: number of buffers to allocate this pass + **/ +static void +e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, int cleaned_count) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + /* allocate a new page if necessary */ + if (!buffer_info->rxbuf.page) { + buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->rxbuf.page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) { + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->rxbuf.page, 0, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + dma_wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + * @rx_ring: pointer to ring struct + * @cleaned_count: number of new Rx buffers to try to allocate + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + void *data; + + if (buffer_info->rxbuf.data) + goto skip; + + data = e1000_alloc_frag(adapter); + if (!data) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + void *olddata = data; + e_err(rx_err, "skb align check failed: %u bytes at " + "%p\n", bufsz, data); + /* Try again, without freeing the previous */ + data = e1000_alloc_frag(adapter); + /* Failed allocation, critical failure */ + if (!data) { + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + /* give up */ + skb_free_frag(data); + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + /* Use new allocation */ + skb_free_frag(olddata); + } + buffer_info->dma = dma_map_single(&pdev->dev, + data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + skb_free_frag(data); + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + + /* XXX if it was allocated cleanly it will never map to a + * boundary crossing + */ + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, + (void *)(unsigned long)buffer_info->dma, + adapter->rx_buffer_len)) { + e_err(rx_err, "dma align check failed: %u bytes at " + "%p\n", adapter->rx_buffer_len, + (void *)(unsigned long)buffer_info->dma); + + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + + skb_free_frag(data); + buffer_info->rxbuf.data = NULL; + buffer_info->dma = 0; + + adapter->alloc_rx_buff_failed++; + break; + } + buffer_info->rxbuf.data = data; + skip: + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + writel(i, hw->hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 
+ * @adapter: address of board private structure + **/ +static void e1000_smartspeed(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_status; + u16 phy_ctrl; + + if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || + !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) + return; + + if (adapter->smartspeed == 0) { + /* If Master/Slave config fault is asserted twice, + * we assume back-to-back + */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + if (phy_ctrl & CR_1000T_MS_ENABLE) { + phy_ctrl &= ~CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, + phy_ctrl); + adapter->smartspeed++; + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, + &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, + phy_ctrl); + } + } + return; + } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { + /* If still no link, perhaps using 2/3 pair cable */ + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + phy_ctrl |= CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); + } + } + /* Restart process after E1000_SMARTSPEED_MAX iterations */ + if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) + adapter->smartspeed = 0; +} + +/** + * e1000_ioctl - handle ioctl calls + * @netdev: pointer to our netdev + * @ifr: pointer to interface request structure + * @cmd: ioctl data + **/ +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * e1000_mii_ioctl - + * @netdev: pointer to our netdev + * @ifr: pointer to interface request structure + * @cmd: ioctl data + **/ +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct mii_ioctl_data *data = if_mii(ifr); + int retval; + u16 mii_reg; + unsigned long flags; + + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + break; + case SIOCGMIIREG: + if (adapter->ecdev) { + return -EPERM; + } + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, + &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + break; + case SIOCSMIIREG: + if (adapter->ecdev) { + return -EPERM; + } + if (data->reg_num & ~(0x1F)) + return -EFAULT; + mii_reg = data->val_in; + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_write_phy_reg(hw, data->reg_num, + mii_reg)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + if (hw->media_type == e1000_media_type_copper) { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (mii_reg & MII_CR_AUTO_NEG_EN) { + 
hw->autoneg = 1; + hw->autoneg_advertised = 0x2F; + } else { + u32 speed; + if (mii_reg & 0x40) + speed = SPEED_1000; + else if (mii_reg & 0x2000) + speed = SPEED_100; + else + speed = SPEED_10; + retval = e1000_set_spd_dplx( + adapter, speed, + ((mii_reg & 0x100) + ? DUPLEX_FULL : + DUPLEX_HALF)); + if (retval) + return retval; + } + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + case M88E1000_PHY_SPEC_CTRL: + case M88E1000_EXT_PHY_SPEC_CTRL: + if (e1000_phy_reset(hw)) + return -EIO; + break; + } + } else { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + } + } + break; + default: + return -EOPNOTSUPP; + } + return E1000_SUCCESS; +} + +void e1000_pci_set_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + int ret_val = pci_set_mwi(adapter->pdev); + + if (ret_val) + e_err(probe, "Error in setting MWI\n"); +} + +void e1000_pci_clear_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + + pci_clear_mwi(adapter->pdev); +} + +int e1000_pcix_get_mmrbc(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return pcix_get_mmrbc(adapter->pdev); +} + +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) +{ + struct e1000_adapter *adapter = hw->back; + pcix_set_mmrbc(adapter->pdev, mmrbc); +} + +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) +{ + outl(value, port); +} + +static bool e1000_vlan_used(struct e1000_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + return true; + return false; +} + +static void __e1000_vlan_mode(struct e1000_adapter *adapter, + netdev_features_t features) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = er32(CTRL); + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + ctrl |= E1000_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~E1000_CTRL_VME; + } + ew32(CTRL, ctrl); +} +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, adapter->netdev->features); + if (filter_on) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_CFIEN; + if (!(adapter->netdev->flags & IFF_PROMISC)) + rctl |= E1000_RCTL_VFE; + ew32(RCTL, rctl); + e1000_update_mng_vlan(adapter); + } else { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_VFE; + ew32(RCTL, rctl); + } + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, features); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + (vid == adapter->mng_vlan_id)) + return 0; + + if 
(!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, true); + + /* add VID to filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta |= (1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + + /* remove VID from filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + clear_bit(vid, adapter->active_vlans); + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, false); + + return 0; +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + if (!e1000_vlan_used(adapter)) + return; + + e1000_vlan_filter_on_off(adapter, true); + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_hw *hw = &adapter->hw; + + hw->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 gbps Full duplex */ + if ((hw->media_type == e1000_media_type_fiber) && + spd != SPEED_1000 && + dplx != DUPLEX_FULL) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_10_half; + break; + case SPEED_10 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_10_full; + break; + case SPEED_100 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_100_half; + break; + case SPEED_100 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_100_full; + break; + case SPEED_1000 + DUPLEX_FULL: + hw->autoneg = 1; + hw->autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + hw->mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + e_err(probe, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + u32 wufc = adapter->wol; + + if (adapter->ecdev) { + return -EBUSY; + } + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + } + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_rx_mode(netdev); + + rctl = er32(RCTL); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) + rctl |= E1000_RCTL_MPE; + + /* enable receives in the hardware */ + ew32(RCTL, rctl | E1000_RCTL_EN); + + if (hw->mac_type >= 
e1000_82540) { + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC | + E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + } + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + ew32(WUC, E1000_WUC_PME_EN); + ew32(WUFC, wufc); + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + } + + e1000_release_manageability(adapter); + + *enable_wake = !!wufc; + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->en_mng_pt) + *enable_wake = true; + + if (netif_running(netdev)) + e1000_free_irq(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + return 0; +} + +static int __maybe_unused e1000_suspend(struct device *dev) +{ + int retval; + struct pci_dev *pdev = to_pci_dev(dev); + bool wake; + + retval = __e1000_shutdown(pdev, &wake); + device_set_wakeup_enable(dev, wake); + + return retval; +} + +static int __maybe_unused e1000_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + if (adapter->ecdev) { + return -EBUSY; + } + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot enable PCI device from suspend\n"); + return err; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + e1000_power_up_phy(adapter); + e1000_reset(adapter); + ew32(WUS, ~0); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) + e1000_up(adapter); + + if (!adapter->ecdev) { + netif_device_attach(netdev); + } + + return 0; +} + +static void e1000_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __e1000_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (disable_hardirq(adapter->pdev->irq)) + e1000_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
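+ * It requests a slot reset by returning PCI_ERS_RESULT_NEED_RESET, or
+ * returns PCI_ERS_RESULT_DISCONNECT if the failure is permanent.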
+ */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e1000_down(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000_reset(adapter); + ew32(WUS, ~0); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) { + if (e1000_up(adapter)) { + pr_info("can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +/* e1000_main.c */ diff --git a/devices/e1000/e1000_main-5.10-orig.c b/devices/e1000/e1000_main-5.10-orig.c new file mode 100644 index 00000000..5e28cf4f --- /dev/null +++ b/devices/e1000/e1000_main-5.10-orig.c @@ -0,0 +1,5315 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +#include "e1000.h" +#include +#include +#include +#include +#include + +char e1000_driver_name[] = "e1000"; +static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; +static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; + +/* e1000_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * Macro expands to... 
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + */ +static const struct pci_device_id e1000_pci_tbl[] = { + INTEL_E1000_ETHERNET_DEVICE(0x1000), + INTEL_E1000_ETHERNET_DEVICE(0x1001), + INTEL_E1000_ETHERNET_DEVICE(0x1004), + INTEL_E1000_ETHERNET_DEVICE(0x1008), + INTEL_E1000_ETHERNET_DEVICE(0x1009), + INTEL_E1000_ETHERNET_DEVICE(0x100C), + INTEL_E1000_ETHERNET_DEVICE(0x100D), + INTEL_E1000_ETHERNET_DEVICE(0x100E), + INTEL_E1000_ETHERNET_DEVICE(0x100F), + INTEL_E1000_ETHERNET_DEVICE(0x1010), + INTEL_E1000_ETHERNET_DEVICE(0x1011), + INTEL_E1000_ETHERNET_DEVICE(0x1012), + INTEL_E1000_ETHERNET_DEVICE(0x1013), + INTEL_E1000_ETHERNET_DEVICE(0x1014), + INTEL_E1000_ETHERNET_DEVICE(0x1015), + INTEL_E1000_ETHERNET_DEVICE(0x1016), + INTEL_E1000_ETHERNET_DEVICE(0x1017), + INTEL_E1000_ETHERNET_DEVICE(0x1018), + INTEL_E1000_ETHERNET_DEVICE(0x1019), + INTEL_E1000_ETHERNET_DEVICE(0x101A), + INTEL_E1000_ETHERNET_DEVICE(0x101D), + INTEL_E1000_ETHERNET_DEVICE(0x101E), + INTEL_E1000_ETHERNET_DEVICE(0x1026), + INTEL_E1000_ETHERNET_DEVICE(0x1027), + INTEL_E1000_ETHERNET_DEVICE(0x1028), + INTEL_E1000_ETHERNET_DEVICE(0x1075), + INTEL_E1000_ETHERNET_DEVICE(0x1076), + INTEL_E1000_ETHERNET_DEVICE(0x1077), + INTEL_E1000_ETHERNET_DEVICE(0x1078), + INTEL_E1000_ETHERNET_DEVICE(0x1079), + INTEL_E1000_ETHERNET_DEVICE(0x107A), + INTEL_E1000_ETHERNET_DEVICE(0x107B), + INTEL_E1000_ETHERNET_DEVICE(0x107C), + INTEL_E1000_ETHERNET_DEVICE(0x108A), + INTEL_E1000_ETHERNET_DEVICE(0x1099), + INTEL_E1000_ETHERNET_DEVICE(0x10B5), + INTEL_E1000_ETHERNET_DEVICE(0x2E6E), + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr); +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr); +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +void e1000_update_stats(struct e1000_adapter *adapter); + +static int e1000_init_module(void); +static void e1000_exit_module(void); +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void e1000_remove(struct pci_dev *pdev); +static int e1000_alloc_queues(struct e1000_adapter *adapter); +static int e1000_sw_init(struct e1000_adapter *adapter); +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); +static void e1000_configure_tx(struct e1000_adapter *adapter); +static void e1000_configure_rx(struct e1000_adapter *adapter); +static void e1000_setup_rctl(struct e1000_adapter *adapter); +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static void e1000_set_rx_mode(struct net_device 
*netdev); +static void e1000_update_phy_info_task(struct work_struct *work); +static void e1000_watchdog(struct work_struct *work); +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +static int e1000_change_mtu(struct net_device *netdev, int new_mtu); +static int e1000_set_mac(struct net_device *netdev, void *p); +static irqreturn_t e1000_intr(int irq, void *data); +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static int e1000_clean(struct napi_struct *napi, int budget); +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ +} +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); +static void e1000_enter_82542_rst(struct e1000_adapter *adapter); +static void e1000_leave_82542_rst(struct e1000_adapter *adapter); +static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue); +static void e1000_reset_task(struct work_struct *work); +static void e1000_smartspeed(struct e1000_adapter *adapter); +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb); + +static bool e1000_vlan_used(struct e1000_adapter *adapter); +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on); +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static void e1000_restore_vlan(struct e1000_adapter *adapter); + +static int __maybe_unused e1000_suspend(struct device *dev); +static int __maybe_unused e1000_resume(struct device *dev); +static void e1000_shutdown(struct pci_dev *pdev); + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void e1000_netpoll (struct net_device *netdev); +#endif + +#define COPYBREAK_DEFAULT 256 +static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state); +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); +static void e1000_io_resume(struct pci_dev *pdev); + +static const struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume); + +static struct pci_driver e1000_driver = { + .name = e1000_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = e1000_remove, + .driver = { + .pm = 
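/* Editor's note -- illustrative only, not part of this patch: the
 * copybreak module parameter declared above trades one memcpy for
 * faster buffer reuse.  Frames no longer than copybreak bytes are
 * copied into a small freshly allocated skb so the original DMA
 * buffer can be returned to the receive ring immediately; larger
 * frames are handed up in the buffer they arrived in.  A hedged
 * usage sketch (parameter name and default come from the code above,
 * the values below are the editor's examples):
 *
 *     modprobe e1000 copybreak=128   # copy only frames <= 128 bytes
 *     modprobe e1000 copybreak=0     # disable copying entirely
 *
 * Because the parameter is registered with mode 0644 it can also be
 * changed at runtime via /sys/module/e1000/parameters/copybreak. */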
&e1000_pm_ops, + }, + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); +MODULE_LICENSE("GPL v2"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +/** + * e1000_get_hw_dev - helper function for getting netdev + * @hw: pointer to HW struct + * + * return device used by hardware layer to print debugging information + * + **/ +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init e1000_init_module(void) +{ + int ret; + pr_info("%s\n", e1000_driver_string); + + pr_info("%s\n", e1000_copyright); + + ret = pci_register_driver(&e1000_driver); + if (copybreak != COPYBREAK_DEFAULT) { + if (copybreak == 0) + pr_info("copybreak disabled\n"); + else + pr_info("copybreak enabled for " + "packets <= %u bytes\n", copybreak); + } + return ret; +} + +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} + +module_exit(e1000_exit_module); + +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + irq_handler_t handler = e1000_intr; + int irq_flags = IRQF_SHARED; + int err; + + err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (err) { + e_err(probe, "Unable to allocate interrupt Error: %d\n", err); + } + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + synchronize_irq(adapter->pdev->irq); +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMS, IMS_ENABLE_MASK); + E1000_WRITE_FLUSH(); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u16 vid = hw->mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (!e1000_vlan_used(adapter)) + return; + + if (!test_bit(vid, adapter->active_vlans)) { + if (hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + old_vid); + } else { + adapter->mng_vlan_id = vid; + } +} + +static void e1000_init_manageability(struct 
e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + ew32(MANC, manc); + } +} + +static void e1000_release_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + + ew32(MANC, manc); + } +} + +/** + * e1000_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + e1000_set_rx_mode(netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + /* call E1000_DESC_UNUSED which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct e1000_rx_ring *ring = &adapter->rx_ring[i]; + adapter->alloc_rx_buf(adapter, ring, + E1000_DESC_UNUSED(ring)); + } +} + +int e1000_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->flags); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_wake_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + return 0; +} + +/** + * e1000_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000_reset *** + **/ +void e1000_power_up_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 mii_reg = 0; + + /* Just clear the power down bit to wake the phy back up */ + if (hw->media_type == e1000_media_type_copper) { + /* according to the manual, the phy will retain its + * settings across a power-down/up cycle + */ + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + } +} + +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is true * + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active + */ + if (!adapter->wol && hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + u16 mii_reg = 0; + + switch (hw->mac_type) { + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (er32(MANC) & E1000_MANC_SMBUS_EN) + goto out; + break; + default: + goto out; + } + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + msleep(1); + } +out: + return; +} + +static void e1000_down_and_stop(struct e1000_adapter *adapter) +{ + set_bit(__E1000_DOWN, &adapter->flags); + + 
cancel_delayed_work_sync(&adapter->watchdog_task); + + /* + * Since the watchdog task can reschedule other tasks, we should cancel + * it first, otherwise we can run into the situation when a work is + * still running after the adapter has been turned down. + */ + + cancel_delayed_work_sync(&adapter->phy_info_task); + cancel_delayed_work_sync(&adapter->fifo_stall_task); + + /* Only kill reset task if adapter is not resetting */ + if (!test_bit(__E1000_RESETTING, &adapter->flags)) + cancel_work_sync(&adapter->reset_task); +} + +void e1000_down(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl, tctl; + + /* disable receives in the hardware */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_tx_disable(netdev); + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + /* flush both disables and wait for them to finish */ + E1000_WRITE_FLUSH(); + msleep(10); + + /* Set the carrier off after transmits have been disabled in the + * hardware, to avoid race conditions with e1000_watchdog() (which + * may be running concurrently to us, checking for the carrier + * bit to decide whether it should enable transmits again). Such + * a race condition would result into transmission being disabled + * in the hardware until the next IFF_DOWN+IFF_UP cycle. + */ + netif_carrier_off(netdev); + + napi_disable(&adapter->napi); + + e1000_irq_disable(adapter); + + /* Setting DOWN must be after irq_disable to prevent + * a screaming interrupt. Setting DOWN also prevents + * tasks from rescheduling. + */ + e1000_down_and_stop(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + e1000_reset(adapter); + e1000_clean_all_tx_rings(adapter); + e1000_clean_all_rx_rings(adapter); +} + +void e1000_reinit_locked(struct e1000_adapter *adapter) +{ + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + /* only run the task if not already down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) { + e1000_down(adapter); + e1000_up(adapter); + } + + clear_bit(__E1000_RESETTING, &adapter->flags); +} + +void e1000_reset(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pba = 0, tx_space, min_tx_space, min_rx_space; + bool legacy_pba_adjust = false; + u16 hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. 
+ */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + case e1000_82540: + case e1000_82541: + case e1000_82541_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_48K; + break; + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + pba = E1000_PBA_48K; + break; + case e1000_82547: + case e1000_82547_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_30K; + break; + case e1000_undefined: + case e1000_num_macs: + break; + } + + if (legacy_pba_adjust) { + if (hw->max_frame_size > E1000_RXBUFFER_8192) + pba -= 8; /* allocate more FIFO for Tx */ + + if (hw->mac_type == e1000_82547) { + adapter->tx_fifo_head = 0; + adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; + adapter->tx_fifo_size = + (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; + atomic_set(&adapter->tx_fifo_stall, 0); + } + } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* adjust PBA for jumbo frames */ + ew32(PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (hw->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = hw->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + + /* PCI/PCIx hardware has PBA alignment constraints */ + switch (hw->mac_type) { + case e1000_82545 ... e1000_82546_rev_3: + pba &= ~(E1000_PBA_8K - 1); + break; + default: + break; + } + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment or use Early Receive if available + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + } + + ew32(PBA, pba); + + /* flow control settings: + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. 
+ * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame + */ + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - hw->max_frame_size)); + + hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ + hw->fc_low_water = hw->fc_high_water - 8; + hw->fc_pause_time = E1000_FC_PAUSE_TIME; + hw->fc_send_xon = 1; + hw->fc = hw->original_fc; + + /* Allow time for pending master requests to run */ + e1000_reset_hw(hw); + if (hw->mac_type >= e1000_82544) + ew32(WUC, 0); + + if (e1000_init_hw(hw)) + e_dev_err("Hardware Error\n"); + e1000_update_mng_vlan(adapter); + + /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ + if (hw->mac_type >= e1000_82544 && + hw->autoneg == 1 && + hw->autoneg_advertised == ADVERTISE_1000_FULL) { + u32 ctrl = er32(CTRL); + /* clear phy power management bit if we are in gig only mode, + * which if enabled will attempt negotiation to 100Mb, which + * can cause a loss of link at power off or driver unload + */ + ctrl &= ~E1000_CTRL_SWDPIN3; + ew32(CTRL, ctrl); + } + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETHERNET_IEEE_VLAN_TYPE); + + e1000_reset_adaptive(hw); + e1000_phy_get_info(hw, &adapter->phy_info); + + e1000_release_manageability(adapter); +} + +/* Dump the eeprom for users having checksum issues */ +static void e1000_dump_eeprom(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_eeprom eeprom; + const struct ethtool_ops *ops = netdev->ethtool_ops; + u8 *data; + int i; + u16 csum_old, csum_new = 0; + + eeprom.len = ops->get_eeprom_len(netdev); + eeprom.offset = 0; + + data = kmalloc(eeprom.len, GFP_KERNEL); + if (!data) + return; + + ops->get_eeprom(netdev, &eeprom, data); + + csum_old = (data[EEPROM_CHECKSUM_REG * 2]) + + (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8); + for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2) + csum_new += data[i] + (data[i + 1] << 8); + csum_new = EEPROM_SUM - csum_new; + + pr_err("/*********************/\n"); + pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old); + pr_err("Calculated : 0x%04x\n", csum_new); + + pr_err("Offset Values\n"); + pr_err("======== ======\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); + + pr_err("Include this output when contacting your support provider.\n"); + pr_err("This is not a software error! Something bad happened to\n"); + pr_err("your hardware or EEPROM image. Ignoring this problem could\n"); + pr_err("result in further problems, possibly loss of data,\n"); + pr_err("corruption or system hangs!\n"); + pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n"); + pr_err("which is invalid and requires you to set the proper MAC\n"); + pr_err("address manually before continuing to enable this network\n"); + pr_err("device. 
Please inspect the EEPROM dump and report the\n"); + pr_err("issue to your hardware vendor or Intel Customer Support.\n"); + pr_err("/*********************/\n"); + + kfree(data); +} + +/** + * e1000_is_need_ioport - determine if an adapter needs ioport resources or not + * @pdev: PCI device information struct + * + * Return true if an adapter needs ioport resources + **/ +static int e1000_is_need_ioport(struct pci_dev *pdev) +{ + switch (pdev->device) { + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541ER_LOM: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + return true; + default: + return false; + } +} + +static netdev_features_t e1000_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int e1000_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + e1000_vlan_mode(netdev, features); + + if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + adapter->rx_csum = !!(features & NETIF_F_RXCSUM); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + + return 1; +} + +static const struct net_device_ops e1000_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_set_rx_mode = e1000_set_rx_mode, + .ndo_set_mac_address = e1000_set_mac, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_change_mtu = e1000_change_mtu, + .ndo_do_ioctl = e1000_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif + .ndo_fix_features = e1000_fix_features, + .ndo_set_features = e1000_set_features, +}; + +/** + * e1000_init_hw_struct - initialize members of hw struct + * @adapter: board private struct + * @hw: structure used by e1000_hw.c + * + * Factors out initialization of the e1000_hw struct to its own function + * that can be called very early at init (just after struct allocation). + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + * Returns negative error codes if MAC type setup fails. 
+ */ +static int e1000_init_hw_struct(struct e1000_adapter *adapter, + struct e1000_hw *hw) +{ + struct pci_dev *pdev = adapter->pdev; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + hw->max_frame_size = adapter->netdev->mtu + + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; + + /* identify the MAC */ + if (e1000_set_mac_type(hw)) { + e_err(probe, "Unknown MAC Type\n"); + return -EIO; + } + + switch (hw->mac_type) { + default: + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->phy_init_script = 1; + break; + } + + e1000_set_media_type(hw); + e1000_get_bus_info(hw); + + hw->wait_autoneg_complete = false; + hw->tbi_compatibility_en = true; + hw->adaptive_ifs = true; + + /* Copper options */ + + if (hw->media_type == e1000_media_type_copper) { + hw->mdix = AUTO_ALL_MODES; + hw->disable_polarity_correction = false; + hw->master_slave = E1000_MASTER_SLAVE; + } + + return 0; +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter = NULL; + struct e1000_hw *hw; + + static int cards_found; + static int global_quad_port_a; /* global ksp3 port a indication */ + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 tmp = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + int bars, need_ioport; + bool disable_dev = false; + + /* do not allocate ioport bars when not needed */ + need_ioport = e1000_is_need_ioport(pdev); + if (need_ioport) { + bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); + err = pci_enable_device(pdev); + } else { + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_enable_device_mem(pdev); + } + if (err) + return err; + + err = pci_request_selected_regions(pdev, bars, e1000_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + adapter->bars = bars; + adapter->need_ioport = need_ioport; + + hw = &adapter->hw; + hw->back = adapter; + + err = -EIO; + hw->hw_addr = pci_ioremap_bar(pdev, BAR_0); + if (!hw->hw_addr) + goto err_ioremap; + + if (adapter->need_ioport) { + for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + hw->io_base = pci_resource_start(pdev, i); + break; + } + } + } + + /* make ready for any if (hw->...) 
below */ + err = e1000_init_hw_struct(adapter, hw); + if (err) + goto err_sw_init; + + /* there is a workaround being applied below that limits + * 64-bit DMA addresses to 64-bit hardware. There are some + * 32-bit adapters that Tx hang when given 64-bit DMA addresses + */ + pci_using_dac = 0; + if ((hw->bus_type == e1000_bus_type_pcix) && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + pr_err("No usable DMA config, aborting\n"); + goto err_dma; + } + } + + netdev->netdev_ops = &e1000_netdev_ops; + e1000_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + + /* setup the private structure */ + + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + err = -EIO; + if (hw->mac_type == e1000_ce4100) { + hw->ce4100_gbe_mdio_base_virt = + ioremap(pci_resource_start(pdev, BAR_1), + pci_resource_len(pdev, BAR_1)); + + if (!hw->ce4100_gbe_mdio_base_virt) + goto err_mdio_ioremap; + } + + if (hw->mac_type >= e1000_82543) { + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER; + } + + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_82547)) + netdev->hw_features |= NETIF_F_TSO; + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->features |= netdev->hw_features; + netdev->hw_features |= (NETIF_F_RXCSUM | + NETIF_F_RXALL | + NETIF_F_RXFCS); + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + netdev->vlan_features |= (NETIF_F_TSO | + NETIF_F_HW_CSUM | + NETIF_F_SG); + + /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */ + if (hw->device_id != E1000_DEV_ID_82545EM_COPPER || + hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE) + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* MTU range: 46 - 16110 */ + netdev->min_mtu = ETH_ZLEN - ETH_HLEN; + netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); + + adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); + + /* initialize eeprom parameters */ + if (e1000_init_eeprom_params(hw)) { + e_err(probe, "EEPROM initialization failed\n"); + goto err_eeprom; + } + + /* before reading the EEPROM, reset the controller to + * put the device in a known good starting state + */ + + e1000_reset_hw(hw); + + /* make sure the EEPROM is good */ + if (e1000_validate_eeprom_checksum(hw) < 0) { + e_err(probe, "The EEPROM Checksum Is Not Valid\n"); + e1000_dump_eeprom(adapter); + /* set MAC address to all zeroes to invalidate and temporary + * disable this device for the user. 
This blocks regular + * traffic while still permitting ethtool ioctls from reaching + * the hardware as well as allowing the user to run the + * interface after manually setting a hw addr using + * `ip set address` + */ + memset(hw->mac_addr, 0, netdev->addr_len); + } else { + /* copy the MAC address out of the EEPROM */ + if (e1000_read_mac_addr(hw)) + e_err(probe, "EEPROM Read Error\n"); + } + /* don't block initialization here due to bad MAC address */ + memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) + e_err(probe, "Invalid MAC Address\n"); + + + INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog); + INIT_DELAYED_WORK(&adapter->fifo_stall_task, + e1000_82547_tx_fifo_stall_task); + INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); + INIT_WORK(&adapter->reset_task, e1000_reset_task); + + e1000_check_options(adapter); + + /* Initial Wake on LAN setting + * If APM wake is enabled in the EEPROM, + * enable the ACPI Magic Packet filter + */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + break; + case e1000_82544: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_82544_APM; + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + break; + } + fallthrough; + default: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + break; + } + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82546GB_PCIE: + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->eeprom_wol = 0; + else + adapter->quad_port_a = true; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + } + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* Auto detect PHY address */ + if (hw->mac_type == e1000_ce4100) { + for (i = 0; i < 32; i++) { + hw->phy_addr = i; + e1000_read_phy_reg(hw, PHY_ID2, &tmp); + + if (tmp != 0 && tmp != 0xFF) + break; + } + + if (i >= 32) + goto err_eeprom; + } + + /* reset the hardware with the new settings */ + e1000_reset(adapter); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + e1000_vlan_filter_on_off(adapter, false); + + /* print bus type/speed/width info */ + e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", + ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), + ((hw->bus_speed == e1000_bus_speed_133) ? 133 : + (hw->bus_speed == e1000_bus_speed_120) ? 120 : + (hw->bus_speed == e1000_bus_speed_100) ? 100 : + (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33), + ((hw->bus_width == e1000_bus_width_64) ? 
64 : 32), + netdev->dev_addr); + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); + + cards_found++; + return 0; + +err_register: +err_eeprom: + e1000_phy_hw_reset(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_dma: +err_sw_init: +err_mdio_ioremap: + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); +err_ioremap: + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, bars); +err_pci_reg: + if (!adapter || disable_dev) + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. That could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool disable_dev; + + e1000_down_and_stop(adapter); + e1000_release_manageability(adapter); + + unregister_netdev(netdev); + + e1000_phy_hw_reset(hw); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + if (hw->mac_type == e1000_ce4100) + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_selected_regions(pdev, adapter->bars); + + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); + + if (disable_dev) + pci_disable_device(pdev); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * e1000_init_hw_struct MUST be called before this function + **/ +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + + if (e1000_alloc_queues(adapter)) { + e_err(probe, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + e1000_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__E1000_DOWN, &adapter->flags); + + return 0; +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. + **/ +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct e1000_tx_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct e1000_rx_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). 
At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog task is started, + * and the stack is notified that the interface is ready. + **/ +int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->flags)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + e1000_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { + e1000_update_mng_vlan(adapter); + } + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as e1000_up() */ + clear_bit(__E1000_DOWN, &adapter->flags); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_start_queue(netdev); + + /* fire a link status change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + + return E1000_SUCCESS; + +err_req_irq: + e1000_power_down_phy(adapter); + e1000_free_all_rx_resources(adapter); +err_setup_rx: + e1000_free_all_tx_resources(adapter); +err_setup_tx: + e1000_reset(adapter); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ **/ +int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int count = E1000_CHECK_RESET_COUNT; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(count < 0); + + /* signal that we're down so that the reset task will no longer run */ + set_bit(__E1000_DOWN, &adapter->flags); + clear_bit(__E1000_RESETTING, &adapter->flags); + + e1000_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); + + e1000_free_all_tx_resources(adapter); + e1000_free_all_rx_resources(adapter); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + } + + return 0; +} + +/** + * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary + * @adapter: address of board private structure + * @start: address of beginning of memory + * @len: length of memory + **/ +static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, + unsigned long len) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long begin = (unsigned long)start; + unsigned long end = begin + len; + + /* First rev 82545 and 82546 need to not allow any memory + * write location to cross 64k boundary due to errata 23 + */ + if (hw->mac_type == e1000_82545 || + hw->mac_type == e1000_ce4100 || + hw->mac_type == e1000_82546) { + return ((begin ^ (end - 1)) >> 16) == 0; + } + + return true; +} + +/** + * e1000_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @txdr: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct e1000_tx_buffer) * txdr->count; + txdr->buffer_info = vzalloc(size); + if (!txdr->buffer_info) + return -ENOMEM; + + /* round up to nearest 4K */ + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { +setup_tx_desc_die: + vfree(txdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + void *olddesc = txdr->desc; + dma_addr_t olddma = txdr->dma; + e_err(tx_err, "txdr align check failed: %u bytes at %p\n", + txdr->size, txdr->desc); + /* Try again, without freeing the previous */ + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, + &txdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + goto setup_tx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory " + "for the transmit descriptor ring\n"); + vfree(txdr->buffer_info); + return -ENOMEM; + } else { + /* Free old allocation, new allocation 
was successful */ + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + } + } + memset(txdr->desc, 0, txdr->size); + + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + + return 0; +} + +/** + * e1000_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_tx_resources(adapter, + &adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + u64 tdba; + struct e1000_hw *hw = &adapter->hw; + u32 tdlen, tctl, tipg; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + + switch (adapter->num_tx_queues) { + case 1: + default: + tdba = adapter->tx_ring[0].dma; + tdlen = adapter->tx_ring[0].count * + sizeof(struct e1000_tx_desc); + ew32(TDLEN, tdlen); + ew32(TDBAH, (tdba >> 32)); + ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); + ew32(TDT, 0); + ew32(TDH, 0); + adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? + E1000_TDH : E1000_82542_TDH); + adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? + E1000_TDT : E1000_82542_TDT); + break; + } + + /* Set the default values for the Tx Inter Packet Gap timer */ + if ((hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes)) + tipg = DEFAULT_82543_TIPG_IPGT_FIBER; + else + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + tipg = DEFAULT_82542_TIPG_IPGT; + ipgr1 = DEFAULT_82542_TIPG_IPGR1; + ipgr2 = DEFAULT_82542_TIPG_IPGR2; + break; + default: + ipgr1 = DEFAULT_82543_TIPG_IPGR1; + ipgr2 = DEFAULT_82543_TIPG_IPGR2; + break; + } + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + + ew32(TIDV, adapter->tx_int_delay); + if (hw->mac_type >= e1000_82540) + ew32(TADV, adapter->tx_abs_int_delay); + + /* Program the Transmit Control Register */ + + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + e1000_config_collision_dist(hw); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + if (hw->mac_type < e1000_82543) + adapter->txd_cmd |= E1000_TXD_CMD_RPS; + else + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + /* Cache if we're 82544 running in PCI-X because we'll + * need this to apply a workaround later in the send path. 
+ */ + if (hw->mac_type == e1000_82544 && + hw->bus_type == e1000_bus_type_pcix) + adapter->pcix_82544 = true; + + ew32(TCTL, tctl); + +} + +/** + * e1000_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rxdr: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct e1000_rx_buffer) * rxdr->count; + rxdr->buffer_info = vzalloc(size); + if (!rxdr->buffer_info) + return -ENOMEM; + + desc_len = sizeof(struct e1000_rx_desc); + + /* Round up to nearest 4K */ + + rxdr->size = rxdr->count * desc_len; + rxdr->size = ALIGN(rxdr->size, 4096); + + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { +setup_rx_desc_die: + vfree(rxdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + void *olddesc = rxdr->desc; + dma_addr_t olddma = rxdr->dma; + e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", + rxdr->size, rxdr->desc); + /* Try again, without freeing the previous */ + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, + &rxdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + goto setup_rx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory for " + "the Rx descriptor ring\n"); + goto setup_rx_desc_die; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + } + } + memset(rxdr->desc, 0, rxdr->size); + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + rxdr->rx_skb_top = NULL; + + return 0; +} + +/** + * e1000_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_rx_resources(adapter, + &adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = er32(RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + + rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + + if (hw->tbi_compatibility_on == 1) + rctl |= E1000_RCTL_SBP; + else + rctl &= ~E1000_RCTL_SBP; + + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case E1000_RXBUFFER_2048: + default: + 
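/* Editor's note -- illustrative only, not part of this patch: both
 * e1000_setup_tx_resources() and e1000_setup_rx_resources() above round
 * the descriptor ring size up to a 4 KiB multiple with ALIGN(size, 4096)
 * before calling dma_alloc_coherent().  Worked numbers (the editor's):
 * each legacy descriptor is 16 bytes, so a typical 256-descriptor ring
 * is exactly 256 * 16 = 4096 bytes and stays unchanged, while a
 * hypothetical 80-descriptor ring (80 * 16 = 1280 bytes) would be padded
 * up to 4096.  The generous rounding keeps the ring page aligned, which
 * comfortably satisfies the hardware's alignment rules for the ring base
 * and length registers. */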
rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + ew32(RCTL, rctl); +} + +/** + * e1000_configure_rx - Configure 8254x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + u64 rdba; + struct e1000_hw *hw = &adapter->hw; + u32 rdlen, rctl, rxcsum; + + if (adapter->netdev->mtu > ETH_DATA_LEN) { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + if (hw->mac_type >= e1000_82540) { + ew32(RADV, adapter->rx_abs_int_delay); + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + } + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + switch (adapter->num_rx_queues) { + case 1: + default: + rdba = adapter->rx_ring[0].dma; + ew32(RDLEN, rdlen); + ew32(RDBAH, (rdba >> 32)); + ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); + ew32(RDT, 0); + ew32(RDH, 0); + adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? + E1000_RDH : E1000_82542_RDH); + adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 
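/* Editor's note -- illustrative only, not part of this patch: the
 * interrupt-throttle write in e1000_configure_rx() above,
 * ew32(ITR, 1000000000 / (adapter->itr * 256)), converts a target
 * interrupt rate into the ITR register's units, which are 256 ns
 * increments on these parts.  For adapter->itr interrupts per second:
 *
 *     interval_ns  = 1000000000 / itr
 *     ITR value    = interval_ns / 256 = 1000000000 / (itr * 256)
 *
 * e.g. itr = 8000 ints/s gives 1000000000 / (8000 * 256) = 488, i.e. a
 * minimum gap of about 488 * 256 ns ~= 125 us between interrupts.
 * (The worked numbers are the editor's, added for clarity.) */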
+ E1000_RDT : E1000_82542_RDT); + break; + } + + /* Enable 82543 Receive Checksum Offload for TCP and UDP */ + if (hw->mac_type >= e1000_82543) { + rxcsum = er32(RXCSUM); + if (adapter->rx_csum) + rxcsum |= E1000_RXCSUM_TUOFL; + else + /* don't need to clear IPPCSE as it defaults to 0 */ + rxcsum &= ~E1000_RXCSUM_TUOFL; + ew32(RXCSUM, rxcsum); + } + + /* Enable Receives */ + ew32(RCTL, rctl | E1000_RCTL_EN); +} + +/** + * e1000_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * e1000_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void e1000_free_all_tx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); +} + +static void +e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_tx_buffer *buffer_info) +{ + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; + /* buffer_info must be completely set up in the transmit path */ +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + netdev_reset_queue(adapter->netdev); + size = sizeof(struct e1000_tx_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->last_tx_tso = false; + + writel(0, hw->hw_addr + tx_ring->tdh); + writel(0, hw->hw_addr + tx_ring->tdt); +} + +/** + * e1000_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * e1000_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_rx_ring(adapter, rx_ring); + 
+ vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * e1000_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +void e1000_free_all_rx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) +static unsigned int e1000_frag_len(const struct e1000_adapter *a) +{ + return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static void *e1000_alloc_frag(const struct e1000_adapter *a) +{ + unsigned int len = e1000_frag_len(a); + u8 *data = netdev_alloc_frag(len); + + if (likely(data)) + data += E1000_HEADROOM; + return data; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_rx_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx netfrags */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (adapter->clean_rx == e1000_clean_rx_irq) { + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.data) { + skb_free_frag(buffer_info->rxbuf.data); + buffer_info->rxbuf.data = NULL; + } + } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { + if (buffer_info->dma) + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.page) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + } + } + + buffer_info->dma = 0; + } + + /* there also may be some cached data from a chained receive */ + napi_free_frags(&adapter->napi); + rx_ring->rx_skb_top = NULL; + + size = sizeof(struct e1000_rx_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, hw->hw_addr + rx_ring->rdh); + writel(0, hw->hw_addr + rx_ring->rdt); +} + +/** + * e1000_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +/* The 82542 2.0 (revision 2) needs to have the receive unit in reset + * and memory write and invalidate disabled for certain operations + */ +static void e1000_enter_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + e1000_pci_clear_mwi(hw); + + rctl = er32(RCTL); + rctl |= E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (netif_running(netdev)) + e1000_clean_all_rx_rings(adapter); +} + +static void e1000_leave_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + rctl = 
er32(RCTL); + rctl &= ~E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + + if (netif_running(netdev)) { + /* No need to loop, because 82542 supports only 1 queue */ + struct e1000_rx_ring *ring = &adapter->rx_ring[0]; + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); + } +} + +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); + + e1000_rar_set(hw, hw->mac_addr, 0); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + return 0; +} + +/** + * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void e1000_set_rx_mode(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + bool use_uc = false; + u32 rctl; + u32 hash_value; + int i, rar_entries = E1000_RAR_ENTRIES; + int mta_reg_count = E1000_NUM_MTA_REGISTERS; + u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); + + if (!mcarray) + return; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = er32(RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + rctl &= ~E1000_RCTL_VFE; + } else { + if (netdev->flags & IFF_ALLMULTI) + rctl |= E1000_RCTL_MPE; + else + rctl &= ~E1000_RCTL_MPE; + /* Enable VLAN filter if there is a VLAN */ + if (e1000_vlan_used(adapter)) + rctl |= E1000_RCTL_VFE; + } + + if (netdev_uc_count(netdev) > rar_entries - 1) { + rctl |= E1000_RCTL_UPE; + } else if (!(netdev->flags & IFF_PROMISC)) { + rctl &= ~E1000_RCTL_UPE; + use_uc = true; + } + + ew32(RCTL, rctl); + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + /* load the first 14 addresses into the exact filters 1-14. Unicast + * addresses take precedence to avoid disabling unicast filtering + * when possible. 
+ * + * RAR 0 is used for the station MAC address + * if there are not 14 addresses, go ahead and clear the filters + */ + i = 1; + if (use_uc) + netdev_for_each_uc_addr(ha, netdev) { + if (i == rar_entries) + break; + e1000_rar_set(hw, ha->addr, i++); + } + + netdev_for_each_mc_addr(ha, netdev) { + if (i == rar_entries) { + /* load any remaining addresses into the hash table */ + u32 hash_reg, hash_bit, mta; + hash_value = e1000_hash_mc_addr(hw, ha->addr); + hash_reg = (hash_value >> 5) & 0x7F; + hash_bit = hash_value & 0x1F; + mta = (1 << hash_bit); + mcarray[hash_reg] |= mta; + } else { + e1000_rar_set(hw, ha->addr, i++); + } + } + + for (; i < rar_entries; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); + E1000_WRITE_FLUSH(); + } + + /* write the hash table completely, write from bottom to avoid + * both stupid write combining chipsets, and flushing each write + */ + for (i = mta_reg_count - 1; i >= 0 ; i--) { + /* If we are on an 82544 has an errata where writing odd + * offsets overwrites the previous even offset, but writing + * backwards over the range solves the issue by always + * writing the odd offset first + */ + E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); + } + E1000_WRITE_FLUSH(); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + kfree(mcarray); +} + +/** + * e1000_update_phy_info_task - get phy info + * @work: work struct contained inside adapter struct + * + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void e1000_update_phy_info_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + phy_info_task.work); + + e1000_phy_get_info(&adapter->hw, &adapter->phy_info); +} + +/** + * e1000_82547_tx_fifo_stall_task - task to complete work + * @work: work struct contained inside adapter struct + **/ +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + fifo_stall_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 tctl; + + if (atomic_read(&adapter->tx_fifo_stall)) { + if ((er32(TDT) == er32(TDH)) && + (er32(TDFT) == er32(TDFH)) && + (er32(TDFTS) == er32(TDFHS))) { + tctl = er32(TCTL); + ew32(TCTL, tctl & ~E1000_TCTL_EN); + ew32(TDFT, adapter->tx_head_addr); + ew32(TDFH, adapter->tx_head_addr); + ew32(TDFTS, adapter->tx_head_addr); + ew32(TDFHS, adapter->tx_head_addr); + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); + + adapter->tx_fifo_head = 0; + atomic_set(&adapter->tx_fifo_stall, 0); + netif_wake_queue(netdev); + } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } + } +} + +bool e1000_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or rx + * sequence error interrupt (except on intel ce4100). 
+ * get_link_status will stay false until the + * e1000_check_for_link establishes link for copper adapters + * ONLY + */ + switch (hw->media_type) { + case e1000_media_type_copper: + if (hw->mac_type == e1000_ce4100) + hw->get_link_status = 1; + if (hw->get_link_status) { + e1000_check_for_link(hw); + link_active = !hw->get_link_status; + } else { + link_active = true; + } + break; + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_active = hw->serdes_has_link; + break; + default: + break; + } + + return link_active; +} + +/** + * e1000_watchdog - work function + * @work: work struct contained inside adapter struct + **/ +static void e1000_watchdog(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + watchdog_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_ring *txdr = adapter->tx_ring; + u32 link, tctl; + + link = e1000_has_link(adapter); + if ((netif_carrier_ok(netdev)) && link) + goto link_up; + + if (link) { + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + /* update snapshot of PHY registers on LSC */ + e1000_get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = er32(CTRL); + pr_info("%s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & + E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & + E1000_CTRL_RFCE) ? "RX" : ((ctrl & + E1000_CTRL_TFCE) ? "TX" : "None"))); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + /* maybe add some timeout factor ? */ + break; + } + + /* enable transmits in the hardware */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + netif_carrier_on(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + adapter->smartspeed = 0; + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + pr_info("%s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + } + + e1000_smartspeed(adapter); + } + +link_up: + e1000_update_stats(adapter); + + hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + hw->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; + adapter->gorcl_old = adapter->stats.gorcl; + adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; + adapter->gotcl_old = adapter->stats.gotcl; + + e1000_update_adaptive(hw); + + if (!netif_carrier_ok(netdev)) { + if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
+ */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* exit immediately since reset is imminent */ + return; + } + } + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { + /* Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; + u32 dif = (adapter->gotcl > adapter->gorcl ? + adapter->gotcl - adapter->gorcl : + adapter->gorcl - adapter->gotcl) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + ew32(ITR, 1000000000 / (itr * 256)); + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + ew32(ICS, E1000_ICS_RXDMT0); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = true; + + /* Reschedule the task */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see e1000_param.c) + **/ +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, int bytes) +{ + unsigned int retval = itr_setting; + struct e1000_hw *hw = &adapter->hw; + + if (unlikely(hw->mac_type < e1000_82540)) + goto update_itr_done; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* jumbo frames get bulk treatment*/ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* jumbo frames need bulk latency setting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) + retval = bulk_latency; + else if (packets <= 2 && bytes < 512) + retval = lowest_latency; + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + if (unlikely(hw->mac_type < e1000_82540)) + return; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (unlikely(adapter->link_speed != SPEED_1000)) { + current_itr = 0; + new_itr = 4000; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + ew32(ITR, 1000000000 / (new_itr * 256)); + } +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_NO_FCS 0x00000010 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + + if (skb_is_gso(skb)) { + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_is_gso_v6(skb)) { + tcp_v6_gso_csum_prep(skb); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (++i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn(drv, "checksum_partial proto=%x!\n", + skb->protocol); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + 
context_desc->upper_setup.tcp_fields.tucse = 0;
+ context_desc->tcp_seg_setup.data = 0;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ if (unlikely(++i == tx_ring->count))
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ return true;
+}
+
+#define E1000_MAX_TXD_PWR 12
+#define E1000_MAX_DATA_PER_TXD (1 << E1000_MAX_TXD_PWR)
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring,
+ struct sk_buff *skb, unsigned int first,
+ unsigned int max_per_txd, unsigned int nr_frags,
+ unsigned int mss)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_tx_buffer *buffer_info;
+ unsigned int len = skb_headlen(skb);
+ unsigned int offset = 0, size, count = 0, i;
+ unsigned int f, bytecount, segs;
+
+ i = tx_ring->next_to_use;
+
+ while (len) {
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+ /* Workaround for Controller erratum --
+ * descriptor for non-tso packet in a linear SKB that follows a
+ * tso gets written back prematurely before the data is fully
+ * DMA'd to the controller
+ */
+ if (!skb->data_len && tx_ring->last_tx_tso &&
+ !skb_is_gso(skb)) {
+ tx_ring->last_tx_tso = false;
+ size -= 4;
+ }
+
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc
+ */
+ if (unlikely(mss && !nr_frags && size == len && size > 8))
+ size -= 4;
+ /* work-around for errata 10 and it applies
+ * to all controllers in PCI-X mode
+ * The fix is to make sure that the first descriptor of a
+ * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
+ */
+ if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+ (size > 2015) && count == 0))
+ size = 2015;
+
+ /* Workaround for potential 82544 hang in PCI-X. Avoid
+ * terminating buffers within evenly-aligned dwords.
+ */
+ if (unlikely(adapter->pcix_82544 &&
+ !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+ size > 4))
+ size -= 4;
+
+ buffer_info->length = size;
+ /* set time_stamp *before* dma to help avoid a possible race */
+ buffer_info->time_stamp = jiffies;
+ buffer_info->mapped_as_page = false;
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+ buffer_info->next_to_watch = i;
+
+ len -= size;
+ offset += size;
+ count++;
+ if (len) {
+ i++;
+ if (unlikely(i == tx_ring->count))
+ i = 0;
+ }
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+ len = skb_frag_size(frag);
+ offset = 0;
+
+ while (len) {
+ unsigned long bufend;
+ i++;
+ if (unlikely(i == tx_ring->count))
+ i = 0;
+
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc
+ */
+ if (unlikely(mss && f == (nr_frags-1) &&
+ size == len && size > 8))
+ size -= 4;
+ /* Workaround for potential 82544 hang in PCI-X.
+ * Avoid terminating buffers within evenly-aligned
+ * dwords. 
+ */ + bufend = (unsigned long) + page_to_phys(skb_frag_page(frag)); + bufend += offset + size - 1; + if (unlikely(adapter->pcix_82544 && + !(bufend & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = true; + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, + offset, size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + } + } + + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i == 0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + return 0; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, int tx_flags, + int count) +{ + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_tx_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + txd_lower &= ~(E1000_TXD_CMD_IFCS); + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + if (unlikely(++i == tx_ring->count)) + i = 0; + } + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + + tx_ring->next_to_use = i; +} + +/* 82547 workaround to avoid controller hang in half-duplex environment. + * The workaround is to avoid queuing a large packet that would span + * the internal Tx FIFO ring boundary by notifying the stack to resend + * the packet at a later time. This gives the Tx FIFO an opportunity to + * flush all packets. When that occurs, we reset the Tx FIFO pointers + * to the beginning of the Tx FIFO. 
+ */ + +#define E1000_FIFO_HDR 0x10 +#define E1000_82547_PAD_LEN 0x3E0 + +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; + u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; + + skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); + + if (adapter->link_duplex != HALF_DUPLEX) + goto no_fifo_stall_required; + + if (atomic_read(&adapter->tx_fifo_stall)) + return 1; + + if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { + atomic_set(&adapter->tx_fifo_stall, 1); + return 1; + } + +no_fifo_stall_required: + adapter->tx_fifo_head += skb_fifo_len; + if (adapter->tx_fifo_head >= adapter->tx_fifo_size) + adapter->tx_fifo_head -= adapter->tx_fifo_size; + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + + netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(E1000_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, + struct e1000_tx_ring *tx_ring, int size) +{ + if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __e1000_maybe_stop_tx(netdev, size); +} + +#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X)) +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *tx_ring; + unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + __be16 protocol = vlan_get_protocol(skb); + + /* This goes back to the question of how to logically map a Tx queue + * to a flow. Right now, performance is impacted slightly negatively + * if using multiple Tx queues. If the stack breaks away from a + * single qdisc implementation, we can look at this again. + */ + tx_ring = adapter->tx_ring; + + /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, + * packets may get corrupted during padding by HW. + * To WA this issue, pad all small packets manually. + */ + if (eth_skb_pad(skb)) + return NETDEV_TX_OK; + + mss = skb_shinfo(skb)->gso_size; + /* The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. 
+ */ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && hdr_len == len) { + switch (hw->mac_type) { + case e1000_82544: { + unsigned int pull_size; + + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements + * NOTE: this is a TSO only workaround + * if end byte alignment not correct move us + * into the next dword + */ + if ((unsigned long)(skb_tail_pointer(skb) - 1) + & 4) + break; + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err(drv, "__pskb_pull_tail " + "failed.\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + break; + } + default: + /* do nothing */ + break; + } + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + /* Controller Erratum workaround */ + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) + count++; + + count += TXD_USE_COUNT(len, max_txd_pwr); + + if (adapter->pcix_82544) + count++; + + /* work-around for errata 10 and it applies to all controllers + * in PCI-X mode, so add one more descriptor to the count + */ + if (unlikely((hw->bus_type == e1000_bus_type_pcix) && + (len > 2015))) + count++; + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), + max_txd_pwr); + if (adapter->pcix_82544) + count += nr_frags; + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time + */ + if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) + return NETDEV_TX_BUSY; + + if (unlikely((hw->mac_type == e1000_82547) && + (e1000_82547_fifo_workaround(adapter, skb)))) { + netif_stop_queue(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->fifo_stall_task, 1); + return NETDEV_TX_BUSY; + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << + E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, tx_ring, skb, protocol); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (likely(tso)) { + if (likely(hw->mac_type != e1000_82544)) + tx_ring->last_tx_tso = true; + tx_flags |= E1000_TX_FLAGS_TSO; + } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) + tx_flags |= E1000_TX_FLAGS_CSUM; + + if (protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + if (unlikely(skb->no_fcs)) + tx_flags |= E1000_TX_FLAGS_NO_FCS; + + count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, + nr_frags, mss); + + if (count) { + /* The descriptors needed is higher than other Intel drivers + * due to a number of workarounds. The breakdown is below: + * Data descriptors: MAX_SKB_FRAGS + 1 + * Context Descriptor: 1 + * Keep head from touching tail: 2 + * Workarounds: 3 + */ + int desc_needed = MAX_SKB_FRAGS + 7; + + netdev_sent_queue(netdev, skb->len); + skb_tx_timestamp(skb); + + e1000_tx_queue(adapter, tx_ring, tx_flags, count); + + /* 82544 potentially requires twice as many data descriptors + * in order to guarantee buffers don't end on evenly-aligned + * dwords + */ + if (adapter->pcix_82544) + desc_needed += MAX_SKB_FRAGS + 1; + + /* Make sure there is space in the ring for the next send. 
*/ + e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); + + if (!netdev_xmit_more() || + netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { + writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); + } + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +#define NUM_REGS 38 /* 1 based count */ +static void e1000_regdump(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 regs[NUM_REGS]; + u32 *regs_buff = regs; + int i = 0; + + static const char * const reg_name[] = { + "CTRL", "STATUS", + "RCTL", "RDLEN", "RDH", "RDT", "RDTR", + "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", + "TIDV", "TXDCTL", "TADV", "TARC0", + "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", + "TXDCTL1", "TARC1", + "CTRL_EXT", "ERT", "RDBAL", "RDBAH", + "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", + "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" + }; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDBAL); + regs_buff[9] = er32(TDBAH); + regs_buff[10] = er32(TDLEN); + regs_buff[11] = er32(TDH); + regs_buff[12] = er32(TDT); + regs_buff[13] = er32(TIDV); + regs_buff[14] = er32(TXDCTL); + regs_buff[15] = er32(TADV); + regs_buff[16] = er32(TARC0); + + regs_buff[17] = er32(TDBAL1); + regs_buff[18] = er32(TDBAH1); + regs_buff[19] = er32(TDLEN1); + regs_buff[20] = er32(TDH1); + regs_buff[21] = er32(TDT1); + regs_buff[22] = er32(TXDCTL1); + regs_buff[23] = er32(TARC1); + regs_buff[24] = er32(CTRL_EXT); + regs_buff[25] = er32(ERT); + regs_buff[26] = er32(RDBAL0); + regs_buff[27] = er32(RDBAH0); + regs_buff[28] = er32(TDFH); + regs_buff[29] = er32(TDFT); + regs_buff[30] = er32(TDFHS); + regs_buff[31] = er32(TDFTS); + regs_buff[32] = er32(TDFPC); + regs_buff[33] = er32(RDFH); + regs_buff[34] = er32(RDFT); + regs_buff[35] = er32(RDFHS); + regs_buff[36] = er32(RDFTS); + regs_buff[37] = er32(RDFPC); + + pr_info("Register dump\n"); + for (i = 0; i < NUM_REGS; i++) + pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); +} + +/* + * e1000_dump: Print registers, tx ring and rx ring + */ +static void e1000_dump(struct e1000_adapter *adapter) +{ + /* this code doesn't handle multiple rings */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + + if (!netif_msg_hw(adapter)) + return; + + /* Print Registers */ + e1000_regdump(adapter); + + /* transmit dump */ + pr_info("TX Desc ring0 dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * +----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * 
+----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); + pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); + + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)tx_desc; + const char *type; + + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + type = "NTC/U"; + else if (i == tx_ring->next_to_use) + type = "NTU"; + else if (i == tx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", + ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i, + le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->length, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, buffer_info->skb, type); + } + +rx_ring_summary: + /* receive dump */ + pr_info("\nRX Desc ring dump\n"); + + /* Legacy Receive Descriptor Format + * + * +-----------------------------------------------------+ + * | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * | VLAN Tag | Errors | Status 0 | Packet csum | Length | + * +-----------------------------------------------------+ + * 63 48 47 40 39 32 31 16 15 0 + */ + pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); + + if (!netif_msg_rx_status(adapter)) + goto exit; + + for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)rx_desc; + const char *type; + + if (i == rx_ring->next_to_use) + type = "NTU"; + else if (i == rx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", + i, le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->rxbuf.data, type); + } /* for */ + + /* dump the descriptor caches */ + /* rx */ + pr_info("Rx descriptor cache in 64bit format\n"); + for (i = 0x6000; i <= 0x63FF ; i += 0x10) { + pr_info("R%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } + /* tx */ + pr_info("Tx descriptor cache in 64bit format\n"); + for (i = 0x7000; i <= 0x73FF ; i += 0x10) { + pr_info("T%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } +exit: + return; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: number of the Tx queue that hung (unused) + 
**/ +static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = + container_of(work, struct e1000_adapter, reset_task); + + e_err(drv, "Reset adapter\n"); + e1000_reinit_locked(adapter); +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Adapter-specific max frame size limits. */ + switch (hw->mac_type) { + case e1000_undefined ... e1000_82542_rev2_1: + if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { + e_err(probe, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + break; + default: + /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ + break; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + /* e1000_down has a dependency on max_frame_size */ + hw->max_frame_size = max_frame; + if (netif_running(netdev)) { + /* prevent buffers from being reallocated */ + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; + e1000_down(adapter); + } + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. RXBUFFER_2048 --> size-4096 slab + * however with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= E1000_RXBUFFER_2048) + adapter->rx_buffer_len = E1000_RXBUFFER_2048; + else +#if (PAGE_SIZE >= E1000_RXBUFFER_16384) + adapter->rx_buffer_len = E1000_RXBUFFER_16384; +#elif (PAGE_SIZE >= E1000_RXBUFFER_4096) + adapter->rx_buffer_len = PAGE_SIZE; +#endif + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if (!hw->tbi_compatibility_on && + ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || + (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + e1000_up(adapter); + else + e1000_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->flags); + + return 0; +} + +/** + * e1000_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void e1000_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned long flags; + u16 phy_tmp; + +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + spin_lock_irqsave(&adapter->stats_lock, flags); + + /* these counters are modified from e1000_tbi_adjust_stats, + * called from the interrupt context, so they must only + * be written while holding adapter->stats_lock + */ + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorcl += er32(GORCL); + adapter->stats.gorch += er32(GORCH); + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.prc64 += er32(PRC64); + adapter->stats.prc127 += er32(PRC127); + adapter->stats.prc255 += er32(PRC255); + adapter->stats.prc511 += er32(PRC511); + adapter->stats.prc1023 += er32(PRC1023); + adapter->stats.prc1522 += er32(PRC1522); + + adapter->stats.symerrs += er32(SYMERRS); + adapter->stats.mpc += er32(MPC); + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + adapter->stats.sec += er32(SEC); + adapter->stats.rlec += er32(RLEC); + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.fcruc += er32(FCRUC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotcl += er32(GOTCL); + adapter->stats.gotch += er32(GOTCH); + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + adapter->stats.rfc += er32(RFC); + adapter->stats.rjc += er32(RJC); + adapter->stats.torl += er32(TORL); + adapter->stats.torh += er32(TORH); + adapter->stats.totl += er32(TOTL); + adapter->stats.toth += er32(TOTH); + adapter->stats.tpr += er32(TPR); + + adapter->stats.ptc64 += er32(PTC64); + adapter->stats.ptc127 += er32(PTC127); + adapter->stats.ptc255 += er32(PTC255); + adapter->stats.ptc511 += er32(PTC511); + adapter->stats.ptc1023 += er32(PTC1023); + adapter->stats.ptc1522 += er32(PTC1522); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* used for adaptive IFS */ + + hw->tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->tx_packet_delta; + hw->collision_delta = er32(COLC); + adapter->stats.colc += hw->collision_delta; + + if (hw->mac_type >= e1000_82543) { + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.tncrs += er32(TNCRS); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + } + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; + netdev->stats.rx_length_errors = adapter->stats.rlerrc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; + netdev->stats.tx_errors = adapter->stats.txerrc; + netdev->stats.tx_aborted_errors = 
adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + if (hw->bad_tx_carr_stats_fd && + adapter->link_duplex == FULL_DUPLEX) { + netdev->stats.tx_carrier_errors = 0; + adapter->stats.tncrs = 0; + } + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Phy Stats */ + if (hw->media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && + (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + + if ((hw->mac_type <= e1000_82546) && + (hw->phy_type == e1000_phy_m88) && + !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) + adapter->phy_stats.receive_errors += phy_tmp; + } + + /* Management Stats */ + if (hw->has_smbus) { + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + } + + spin_unlock_irqrestore(&adapter->stats_lock, flags); +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (unlikely((!icr))) + return IRQ_NONE; /* Not our interrupt */ + + /* we might have caused the interrupt, but the above + * read cleared it, and just in case the driver is + * down there is nothing to do so return handled + */ + if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) + return IRQ_HANDLED; + + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { + hw->get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 1); + } + + /* disable interrupts, without the synchronize_irq bit */ + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + + if (likely(napi_schedule_prep(&adapter->napi))) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } else { + /* this really should not happen! 
if it does it is basically a + * bug, but not a hard error, so enable ints and continue + */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return IRQ_HANDLED; +} + +/** + * e1000_clean - NAPI Rx polling callback + * @napi: napi struct containing references to driver info + * @budget: budget given to driver for receive packets + **/ +static int e1000_clean(struct napi_struct *napi, int budget) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); + int tx_clean_complete = 0, work_done = 0; + + tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); + + adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); + + if (!tx_clean_complete || work_done == budget) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) { + if (likely(adapter->itr_setting & 3)) + e1000_set_itr(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return work_done; +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * @tx_ring: ring to clean + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; + unsigned int bytes_compl = 0, pkts_compl = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + dma_rmb(); /* read buffer_info after eop_desc */ + for ( ; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + + } + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + tx_desc->upper.data = 0; + + if (unlikely(++i == tx_ring->count)) + i = 0; + } + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, + * which will reuse the cleaned buffers. + */ + smp_store_release(&tx_ring->next_to_clean, i); + + netdev_completed_queue(netdev, pkts_compl, bytes_compl); + +#define TX_WAKE_THRESHOLD 32 + if (unlikely(count && netif_carrier_ok(netdev) && + E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->flags))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[eop].time_stamp && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%lu>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + (unsigned long)(tx_ring - adapter->tx_ring), + readl(hw->hw_addr + tx_ring->tdh), + readl(hw->hw_addr + tx_ring->tdt), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); + e1000_dump(adapter); + netif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + netdev->stats.tx_bytes += total_tx_bytes; + netdev->stats.tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_rx_checksum - Receive Checksum Offload for 82543 + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @skb: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + skb_checksum_none_assert(skb); + + /* 82543 or newer only */ + if (unlikely(hw->mac_type < e1000_82543)) + return; + /* Ignore Checksum bit is set */ + if (unlikely(status & E1000_RXD_STAT_IXSM)) + return; + /* TCP/UDP checksum error bit is set */ + if (unlikely(errors & E1000_RXD_ERR_TCPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + /* TCP/UDP Checksum has not been calculated */ + if (!(status & E1000_RXD_STAT_TCPCS)) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + if (likely(status & E1000_RXD_STAT_TCPCS)) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + adapter->hw_csum_good++; +} + +/** + * e1000_consume_page - helper function for jumbo Rx path + * @bi: software descriptor shadow data + * @skb: skb being modified + * @length: length of data being added + **/ +static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->rxbuf.page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; +} + +/** + * e1000_receive_skb - helper function to handle rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + */ +static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, + __le16 vlan, struct sk_buff *skb) +{ + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (status & E1000_RXD_STAT_VP) { + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + 
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + napi_gro_receive(&adapter->napi, skb); +} + +/** + * e1000_tbi_adjust_stats + * @hw: Struct containing variables accessed by shared code + * @stats: point to stats struct + * @frame_len: The length of the frame in question + * @mac_addr: The Ethernet destination address of the frame in question + * + * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT + */ +static void e1000_tbi_adjust_stats(struct e1000_hw *hw, + struct e1000_hw_stats *stats, + u32 frame_len, const u8 *mac_addr) +{ + u64 carry_bit; + + /* First adjust the frame length. */ + frame_len--; + /* We need to adjust the statistics counters, since the hardware + * counters overcount this packet as a CRC error and undercount + * the packet as a good packet + */ + /* This packet should not be counted as a CRC error. */ + stats->crcerrs--; + /* This packet does count as a Good Packet Received. */ + stats->gprc++; + + /* Adjust the Good Octets received counters */ + carry_bit = 0x80000000 & stats->gorcl; + stats->gorcl += frame_len; + /* If the high bit of Gorcl (the low 32 bits of the Good Octets + * Received Count) was one before the addition, + * AND it is zero after, then we lost the carry out, + * need to add one to Gorch (Good Octets Received Count High). + * This could be simplified if all environments supported + * 64-bit integers. + */ + if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) + stats->gorch++; + /* Is this a broadcast or multicast? Check broadcast first, + * since the test for a multicast frame will test positive on + * a broadcast frame. + */ + if (is_broadcast_ether_addr(mac_addr)) + stats->bprc++; + else if (is_multicast_ether_addr(mac_addr)) + stats->mprc++; + + if (frame_len == hw->max_frame_size) { + /* In this case, the hardware has overcounted the number of + * oversize frames. + */ + if (stats->roc > 0) + stats->roc--; + } + + /* Adjust the bin counters when the extra byte put the frame in the + * wrong bin. Remember that the frame_len was adjusted above. 
+ */ + if (frame_len == 64) { + stats->prc64++; + stats->prc127--; + } else if (frame_len == 127) { + stats->prc127++; + stats->prc255--; + } else if (frame_len == 255) { + stats->prc255++; + stats->prc511--; + } else if (frame_len == 511) { + stats->prc511++; + stats->prc1023--; + } else if (frame_len == 1023) { + stats->prc1023++; + stats->prc1522--; + } else if (frame_len == 1522) { + stats->prc1522++; + } +} + +static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, + u8 status, u8 errors, + u32 length, const u8 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u8 last_byte = *(data + length - 1); + + if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { + unsigned long irq_flags; + + spin_lock_irqsave(&adapter->stats_lock, irq_flags); + e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); + + return true; + } + + return false; +} + +static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, + unsigned int bufsz) +{ + struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); + + if (unlikely(!skb)) + adapter->alloc_rx_buff_failed++; + return skb; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + * @work_to_do: max amount of work allowed for this call to do + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + */ +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + + if (++i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + u8 *mapped = page_address(buffer_info->rxbuf.page); + + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, mapped)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + /* an error means any chain goes out the window + * too + */ + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + } + +#define rxtop rx_ring->rx_skb_top +process_skb: + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = napi_get_frags(&adapter->napi); + if (!rxtop) + 
break; + + skb_fill_page_desc(rxtop, 0, + buffer_info->rxbuf.page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + struct page *p; + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page + */ + p = buffer_info->rxbuf.page; + if (length <= copybreak) { + u8 *vaddr; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + length -= 4; + skb = e1000_alloc_rx_skb(adapter, + length); + if (!skb) + break; + + vaddr = kmap_atomic(p); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr); + /* re-use the page, so don't erase + * buffer_info->rxbuf.page + */ + skb_put(skb, length); + e1000_rx_checksum(adapter, + status | rx_desc->errors << 24, + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_receive_skb(adapter, status, + rx_desc->special, skb); + goto next_desc; + } else { + skb = napi_get_frags(&adapter->napi); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + skb_fill_page_desc(skb, 0, p, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += (skb->len - 4); /* don't count FCS */ + if (likely(!(netdev->features & NETIF_F_RXFCS))) + pskb_trim(skb, skb->len - 4); + total_rx_packets++; + + if (status & E1000_RXD_STAT_VP) { + __le16 vlan = rx_desc->special; + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + napi_gro_frags(&adapter->napi); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/* this should improve performance for small packets with large amounts + * of reassembly being done in the stack + */ +static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, + struct e1000_rx_buffer *buffer_info, + u32 length, const void *data) +{ + struct sk_buff *skb; + + if (length > copybreak) + return NULL; + + skb = e1000_alloc_rx_skb(adapter, length); + if (!skb) + return NULL; + + dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, + length, DMA_FROM_DEVICE); + + skb_put_data(skb, data, length); + + return skb; +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + 
* @work_to_do: max amount of work allowed for this call to do + */ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 *data; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + length = le16_to_cpu(rx_desc->length); + + data = buffer_info->rxbuf.data; + prefetch(data); + skb = e1000_copybreak(adapter, buffer_info, length, data); + if (!skb) { + unsigned int frag_len = e1000_frag_len(adapter); + + skb = build_skb(data - E1000_HEADROOM, frag_len); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + skb_reserve(skb, E1000_HEADROOM); + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + buffer_info->rxbuf.data = NULL; + } + + if (++i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + + /* !EOP means multiple descriptors were used to store a single + * packet, if thats the case we need to toss it. In fact, we + * to toss every packet with the EOP bit clear and the next + * frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(status & E1000_RXD_STAT_EOP))) + adapter->discarding = true; + + if (adapter->discarding) { + /* All receives must fit into a single buffer */ + netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); + dev_kfree_skb(skb); + if (status & E1000_RXD_STAT_EOP) + adapter->discarding = false; + goto next_desc; + } + + if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, data)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + dev_kfree_skb(skb); + goto next_desc; + } + } + +process_skb: + total_rx_bytes += (length - 4); /* don't count FCS */ + total_rx_packets++; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + /* adjust length to remove Ethernet CRC, this must be + * done after the TBI_ACCEPT workaround above + */ + length -= 4; + + if (buffer_info->rxbuf.data == NULL) + skb_put(skb, length); + else /* copybreak skb */ + skb_trim(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + e1000_receive_skb(adapter, status, rx_desc->special, skb); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if 
(cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @adapter: address of board private structure + * @rx_ring: pointer to receive ring structure + * @cleaned_count: number of buffers to allocate this pass + **/ +static void +e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, int cleaned_count) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + /* allocate a new page if necessary */ + if (!buffer_info->rxbuf.page) { + buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->rxbuf.page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) { + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->rxbuf.page, 0, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + dma_wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + * @rx_ring: pointer to ring struct + * @cleaned_count: number of new Rx buffers to try to allocate + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + void *data; + + if (buffer_info->rxbuf.data) + goto skip; + + data = e1000_alloc_frag(adapter); + if (!data) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + void *olddata = data; + e_err(rx_err, "skb align check failed: %u bytes at " + "%p\n", bufsz, data); + /* Try again, without freeing the previous */ + data = e1000_alloc_frag(adapter); + /* Failed allocation, critical failure */ + if (!data) { + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + /* give up */ + skb_free_frag(data); + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + /* Use new allocation */ + skb_free_frag(olddata); + } + buffer_info->dma = dma_map_single(&pdev->dev, + data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + skb_free_frag(data); + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + + /* XXX if it was allocated cleanly it will never map to a + * boundary crossing + */ + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, + (void *)(unsigned long)buffer_info->dma, + adapter->rx_buffer_len)) { + e_err(rx_err, "dma align check failed: %u bytes at " + "%p\n", adapter->rx_buffer_len, + (void *)(unsigned long)buffer_info->dma); + + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + + skb_free_frag(data); + buffer_info->rxbuf.data = NULL; + buffer_info->dma = 0; + + adapter->alloc_rx_buff_failed++; + break; + } + buffer_info->rxbuf.data = data; + skip: + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + writel(i, hw->hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 
+ * @adapter: address of board private structure + **/ +static void e1000_smartspeed(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_status; + u16 phy_ctrl; + + if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || + !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) + return; + + if (adapter->smartspeed == 0) { + /* If Master/Slave config fault is asserted twice, + * we assume back-to-back + */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + if (phy_ctrl & CR_1000T_MS_ENABLE) { + phy_ctrl &= ~CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, + phy_ctrl); + adapter->smartspeed++; + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, + &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, + phy_ctrl); + } + } + return; + } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { + /* If still no link, perhaps using 2/3 pair cable */ + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + phy_ctrl |= CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); + } + } + /* Restart process after E1000_SMARTSPEED_MAX iterations */ + if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) + adapter->smartspeed = 0; +} + +/** + * e1000_ioctl - handle ioctl calls + * @netdev: pointer to our netdev + * @ifr: pointer to interface request structure + * @cmd: ioctl data + **/ +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * e1000_mii_ioctl - + * @netdev: pointer to our netdev + * @ifr: pointer to interface request structure + * @cmd: ioctl data + **/ +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct mii_ioctl_data *data = if_mii(ifr); + int retval; + u16 mii_reg; + unsigned long flags; + + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + break; + case SIOCGMIIREG: + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, + &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + break; + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) + return -EFAULT; + mii_reg = data->val_in; + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_write_phy_reg(hw, data->reg_num, + mii_reg)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + if (hw->media_type == e1000_media_type_copper) { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (mii_reg & MII_CR_AUTO_NEG_EN) { + hw->autoneg = 1; + hw->autoneg_advertised = 0x2F; + } else { + u32 speed; + if (mii_reg & 
0x40) + speed = SPEED_1000; + else if (mii_reg & 0x2000) + speed = SPEED_100; + else + speed = SPEED_10; + retval = e1000_set_spd_dplx( + adapter, speed, + ((mii_reg & 0x100) + ? DUPLEX_FULL : + DUPLEX_HALF)); + if (retval) + return retval; + } + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + case M88E1000_PHY_SPEC_CTRL: + case M88E1000_EXT_PHY_SPEC_CTRL: + if (e1000_phy_reset(hw)) + return -EIO; + break; + } + } else { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + } + } + break; + default: + return -EOPNOTSUPP; + } + return E1000_SUCCESS; +} + +void e1000_pci_set_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + int ret_val = pci_set_mwi(adapter->pdev); + + if (ret_val) + e_err(probe, "Error in setting MWI\n"); +} + +void e1000_pci_clear_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + + pci_clear_mwi(adapter->pdev); +} + +int e1000_pcix_get_mmrbc(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return pcix_get_mmrbc(adapter->pdev); +} + +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) +{ + struct e1000_adapter *adapter = hw->back; + pcix_set_mmrbc(adapter->pdev, mmrbc); +} + +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) +{ + outl(value, port); +} + +static bool e1000_vlan_used(struct e1000_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + return true; + return false; +} + +static void __e1000_vlan_mode(struct e1000_adapter *adapter, + netdev_features_t features) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = er32(CTRL); + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + ctrl |= E1000_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~E1000_CTRL_VME; + } + ew32(CTRL, ctrl); +} +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, adapter->netdev->features); + if (filter_on) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_CFIEN; + if (!(adapter->netdev->flags & IFF_PROMISC)) + rctl |= E1000_RCTL_VFE; + ew32(RCTL, rctl); + e1000_update_mng_vlan(adapter); + } else { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_VFE; + ew32(RCTL, rctl); + } + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, features); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + (vid == adapter->mng_vlan_id)) + return 0; + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, true); + + /* add VID to filter table */ + 
index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta |= (1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + + /* remove VID from filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + clear_bit(vid, adapter->active_vlans); + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, false); + + return 0; +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + if (!e1000_vlan_used(adapter)) + return; + + e1000_vlan_filter_on_off(adapter, true); + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_hw *hw = &adapter->hw; + + hw->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 gbps Full duplex */ + if ((hw->media_type == e1000_media_type_fiber) && + spd != SPEED_1000 && + dplx != DUPLEX_FULL) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_10_half; + break; + case SPEED_10 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_10_full; + break; + case SPEED_100 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_100_half; + break; + case SPEED_100 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_100_full; + break; + case SPEED_1000 + DUPLEX_FULL: + hw->autoneg = 1; + hw->autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + hw->mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + e_err(probe, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + u32 wufc = adapter->wol; + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + } + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_rx_mode(netdev); + + rctl = er32(RCTL); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) + rctl |= E1000_RCTL_MPE; + + /* enable receives in the hardware */ + ew32(RCTL, rctl | E1000_RCTL_EN); + + if (hw->mac_type >= e1000_82540) { + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define 
E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC | + E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + } + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + ew32(WUC, E1000_WUC_PME_EN); + ew32(WUFC, wufc); + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + } + + e1000_release_manageability(adapter); + + *enable_wake = !!wufc; + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->en_mng_pt) + *enable_wake = true; + + if (netif_running(netdev)) + e1000_free_irq(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + return 0; +} + +static int __maybe_unused e1000_suspend(struct device *dev) +{ + int retval; + struct pci_dev *pdev = to_pci_dev(dev); + bool wake; + + retval = __e1000_shutdown(pdev, &wake); + device_set_wakeup_enable(dev, wake); + + return retval; +} + +static int __maybe_unused e1000_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot enable PCI device from suspend\n"); + return err; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + e1000_power_up_phy(adapter); + e1000_reset(adapter); + ew32(WUS, ~0); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) + e1000_up(adapter); + + netif_device_attach(netdev); + + return 0; +} + +static void e1000_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __e1000_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (disable_hardirq(adapter->pdev->irq)) + e1000_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e1000_down(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + /* Request a slot slot reset. 
 */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the e1000_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	int err;
+
+	if (adapter->need_ioport)
+		err = pci_enable_device(pdev);
+	else
+		err = pci_enable_device_mem(pdev);
+	if (err) {
+		pr_err("Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	/* flush memory to make sure state is correct */
+	smp_mb__before_atomic();
+	clear_bit(__E1000_DISABLED, &adapter->flags);
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	e1000_reset(adapter);
+	ew32(WUS, ~0);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the e1000_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	e1000_init_manageability(adapter);
+
+	if (netif_running(netdev)) {
+		if (e1000_up(adapter)) {
+			pr_info("can't bring device back up after reset\n");
+			return;
+		}
+	}
+
+	netif_device_attach(netdev);
+}
+
+/* e1000_main.c */
diff --git a/devices/e1000/e1000_main-5.14-ethercat.c b/devices/e1000/e1000_main-5.14-ethercat.c
new file mode 100644
index 00000000..20f154c6
--- /dev/null
+++ b/devices/e1000/e1000_main-5.14-ethercat.c
@@ -0,0 +1,5473 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 1999 - 2006 Intel Corporation. */
+
+#include "e1000-5.14-ethercat.h"
+#include <net/ip6_checksum.h>
+#include <linux/io.h>
+#include <linux/prefetch.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+char e1000_driver_name[] = "ec_e1000";
+static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
+static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
+
+/* e1000_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * Macro expands to...
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + */ +static const struct pci_device_id e1000_pci_tbl[] = { + INTEL_E1000_ETHERNET_DEVICE(0x1000), + INTEL_E1000_ETHERNET_DEVICE(0x1001), + INTEL_E1000_ETHERNET_DEVICE(0x1004), + INTEL_E1000_ETHERNET_DEVICE(0x1008), + INTEL_E1000_ETHERNET_DEVICE(0x1009), + INTEL_E1000_ETHERNET_DEVICE(0x100C), + INTEL_E1000_ETHERNET_DEVICE(0x100D), + INTEL_E1000_ETHERNET_DEVICE(0x100E), + INTEL_E1000_ETHERNET_DEVICE(0x100F), + INTEL_E1000_ETHERNET_DEVICE(0x1010), + INTEL_E1000_ETHERNET_DEVICE(0x1011), + INTEL_E1000_ETHERNET_DEVICE(0x1012), + INTEL_E1000_ETHERNET_DEVICE(0x1013), + INTEL_E1000_ETHERNET_DEVICE(0x1014), + INTEL_E1000_ETHERNET_DEVICE(0x1015), + INTEL_E1000_ETHERNET_DEVICE(0x1016), + INTEL_E1000_ETHERNET_DEVICE(0x1017), + INTEL_E1000_ETHERNET_DEVICE(0x1018), + INTEL_E1000_ETHERNET_DEVICE(0x1019), + INTEL_E1000_ETHERNET_DEVICE(0x101A), + INTEL_E1000_ETHERNET_DEVICE(0x101D), + INTEL_E1000_ETHERNET_DEVICE(0x101E), + INTEL_E1000_ETHERNET_DEVICE(0x1026), + INTEL_E1000_ETHERNET_DEVICE(0x1027), + INTEL_E1000_ETHERNET_DEVICE(0x1028), + INTEL_E1000_ETHERNET_DEVICE(0x1075), + INTEL_E1000_ETHERNET_DEVICE(0x1076), + INTEL_E1000_ETHERNET_DEVICE(0x1077), + INTEL_E1000_ETHERNET_DEVICE(0x1078), + INTEL_E1000_ETHERNET_DEVICE(0x1079), + INTEL_E1000_ETHERNET_DEVICE(0x107A), + INTEL_E1000_ETHERNET_DEVICE(0x107B), + INTEL_E1000_ETHERNET_DEVICE(0x107C), + INTEL_E1000_ETHERNET_DEVICE(0x108A), + INTEL_E1000_ETHERNET_DEVICE(0x1099), + INTEL_E1000_ETHERNET_DEVICE(0x10B5), + INTEL_E1000_ETHERNET_DEVICE(0x2E6E), + /* required last entry */ + {0,} +}; + +// do not auto-load driver +// MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr); +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr); +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +void e1000_update_stats(struct e1000_adapter *adapter); + +static int e1000_init_module(void); +static void e1000_exit_module(void); +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void e1000_remove(struct pci_dev *pdev); +static int e1000_alloc_queues(struct e1000_adapter *adapter); +static int e1000_sw_init(struct e1000_adapter *adapter); +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); +static void e1000_configure_tx(struct e1000_adapter *adapter); +static void e1000_configure_rx(struct e1000_adapter *adapter); +static void e1000_setup_rctl(struct e1000_adapter *adapter); +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static void 
e1000_set_rx_mode(struct net_device *netdev); +static void e1000_update_phy_info_task(struct work_struct *work); +static void e1000_watchdog(struct work_struct *work); +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +static int e1000_change_mtu(struct net_device *netdev, int new_mtu); +static int e1000_set_mac(struct net_device *netdev, void *p); +void ec_poll(struct net_device *); +static irqreturn_t e1000_intr(int irq, void *data); +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static int e1000_clean(struct napi_struct *napi, int budget); +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ +} +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); +static void e1000_enter_82542_rst(struct e1000_adapter *adapter); +static void e1000_leave_82542_rst(struct e1000_adapter *adapter); +static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue); +static void e1000_reset_task(struct work_struct *work); +static void e1000_smartspeed(struct e1000_adapter *adapter); +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb); + +static bool e1000_vlan_used(struct e1000_adapter *adapter); +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on); +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static void e1000_restore_vlan(struct e1000_adapter *adapter); + +static int __maybe_unused e1000_suspend(struct device *dev); +static int __maybe_unused e1000_resume(struct device *dev); +static void e1000_shutdown(struct pci_dev *pdev); + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void e1000_netpoll (struct net_device *netdev); +#endif + +#define COPYBREAK_DEFAULT 256 +static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state); +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); +static void e1000_io_resume(struct pci_dev *pdev); + +static const struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume); + +static struct pci_driver e1000_driver = { + .name = e1000_driver_name, + .id_table = e1000_pci_tbl, + 
.probe = e1000_probe, + .remove = e1000_remove, + .driver = { + .pm = &e1000_pm_ops, + }, + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver, EtherCAT enabled"); +MODULE_LICENSE("GPL v2"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +/** + * e1000_get_hw_dev - helper function for getting netdev + * @hw: pointer to HW struct + * + * return device used by hardware layer to print debugging information + * + **/ +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init e1000_init_module(void) +{ + int ret; + pr_info("%s\n", e1000_driver_string); + + pr_info("%s\n", e1000_copyright); + + ret = pci_register_driver(&e1000_driver); + if (copybreak != COPYBREAK_DEFAULT) { + if (copybreak == 0) + pr_info("copybreak disabled\n"); + else + pr_info("copybreak enabled for " + "packets <= %u bytes\n", copybreak); + } + return ret; +} + +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} + +module_exit(e1000_exit_module); + +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + irq_handler_t handler = e1000_intr; + int irq_flags = IRQF_SHARED; + int err; + + if (adapter->ecdev) { + return 0; + } + + err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (err) { + e_err(probe, "Unable to allocate interrupt Error: %d\n", err); + } + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->ecdev) { + return; + } + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->ecdev) { + return; + } + + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + synchronize_irq(adapter->pdev->irq); +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->ecdev) { + return; + } + + ew32(IMS, IMS_ENABLE_MASK); + E1000_WRITE_FLUSH(); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u16 vid = hw->mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (!e1000_vlan_used(adapter)) + return; + + if (!test_bit(vid, adapter->active_vlans)) { + if (hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + if ((old_vid != 
(u16)E1000_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + old_vid); + } else { + adapter->mng_vlan_id = vid; + } +} + +static void e1000_init_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + ew32(MANC, manc); + } +} + +static void e1000_release_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + + ew32(MANC, manc); + } +} + +/** + * e1000_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + e1000_set_rx_mode(netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + /* call E1000_DESC_UNUSED which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct e1000_rx_ring *ring = &adapter->rx_ring[i]; + if (adapter->ecdev) { + /* fill rx ring completely! */ + adapter->alloc_rx_buf(adapter, ring, ring->count); + } else { + /* this one leaves the last ring element unallocated! */ + adapter->alloc_rx_buf(adapter, ring, + E1000_DESC_UNUSED(ring)); + } + } +} + +int e1000_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->flags); + + if (!adapter->ecdev) { + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_wake_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + } + return 0; +} + +/** + * e1000_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000_reset *** + **/ +void e1000_power_up_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 mii_reg = 0; + + /* Just clear the power down bit to wake the phy back up */ + if (hw->media_type == e1000_media_type_copper) { + /* according to the manual, the phy will retain its + * settings across a power-down/up cycle + */ + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + } +} + +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is true * + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active + */ + if (!adapter->wol && hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + u16 mii_reg = 0; + + switch (hw->mac_type) { + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case 
e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (er32(MANC) & E1000_MANC_SMBUS_EN) + goto out; + break; + default: + goto out; + } + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + msleep(1); + } +out: + return; +} + +static void e1000_down_and_stop(struct e1000_adapter *adapter) +{ + set_bit(__E1000_DOWN, &adapter->flags); + + if (!adapter->ecdev) { + cancel_delayed_work_sync(&adapter->watchdog_task); + } + + /* + * Since the watchdog task can reschedule other tasks, we should cancel + * it first, otherwise we can run into the situation when a work is + * still running after the adapter has been turned down. + */ + + if (!adapter->ecdev) { + cancel_delayed_work_sync(&adapter->phy_info_task); + cancel_delayed_work_sync(&adapter->fifo_stall_task); + } + + /* Only kill reset task if adapter is not resetting */ + if (!adapter->ecdev && !test_bit(__E1000_RESETTING, &adapter->flags)) + cancel_work_sync(&adapter->reset_task); +} + +void e1000_down(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl, tctl; + + /* disable receives in the hardware */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + if (!adapter->ecdev) { + netif_tx_disable(netdev); + } + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + /* flush both disables and wait for them to finish */ + E1000_WRITE_FLUSH(); + msleep(10); + + /* Set the carrier off after transmits have been disabled in the + * hardware, to avoid race conditions with e1000_watchdog() (which + * may be running concurrently to us, checking for the carrier + * bit to decide whether it should enable transmits again). Such + * a race condition would result into transmission being disabled + * in the hardware until the next IFF_DOWN+IFF_UP cycle. + */ + + if (!adapter->ecdev) { + netif_carrier_off(netdev); + napi_disable(&adapter->napi); + + e1000_irq_disable(adapter); + } + + /* Setting DOWN must be after irq_disable to prevent + * a screaming interrupt. Setting DOWN also prevents + * tasks from rescheduling. + */ + e1000_down_and_stop(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + e1000_reset(adapter); + e1000_clean_all_tx_rings(adapter); + e1000_clean_all_rx_rings(adapter); +} + +void e1000_reinit_locked(struct e1000_adapter *adapter) +{ + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + /* only run the task if not already down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) { + e1000_down(adapter); + e1000_up(adapter); + } + + clear_bit(__E1000_RESETTING, &adapter->flags); +} + +void e1000_reset(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pba = 0, tx_space, min_tx_space, min_rx_space; + bool legacy_pba_adjust = false; + u16 hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. 
+ */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + case e1000_82540: + case e1000_82541: + case e1000_82541_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_48K; + break; + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + pba = E1000_PBA_48K; + break; + case e1000_82547: + case e1000_82547_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_30K; + break; + case e1000_undefined: + case e1000_num_macs: + break; + } + + if (legacy_pba_adjust) { + if (hw->max_frame_size > E1000_RXBUFFER_8192) + pba -= 8; /* allocate more FIFO for Tx */ + + if (hw->mac_type == e1000_82547) { + adapter->tx_fifo_head = 0; + adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; + adapter->tx_fifo_size = + (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; + atomic_set(&adapter->tx_fifo_stall, 0); + } + } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* adjust PBA for jumbo frames */ + ew32(PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (hw->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = hw->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + + /* PCI/PCIx hardware has PBA alignment constraints */ + switch (hw->mac_type) { + case e1000_82545 ... e1000_82546_rev_3: + pba &= ~(E1000_PBA_8K - 1); + break; + default: + break; + } + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment or use Early Receive if available + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + } + + ew32(PBA, pba); + + /* flow control settings: + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. 
+ * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame + */ + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - hw->max_frame_size)); + + hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ + hw->fc_low_water = hw->fc_high_water - 8; + hw->fc_pause_time = E1000_FC_PAUSE_TIME; + hw->fc_send_xon = 1; + hw->fc = hw->original_fc; + + /* Allow time for pending master requests to run */ + e1000_reset_hw(hw); + if (hw->mac_type >= e1000_82544) + ew32(WUC, 0); + + if (e1000_init_hw(hw)) + e_dev_err("Hardware Error\n"); + e1000_update_mng_vlan(adapter); + + /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ + if (hw->mac_type >= e1000_82544 && + hw->autoneg == 1 && + hw->autoneg_advertised == ADVERTISE_1000_FULL) { + u32 ctrl = er32(CTRL); + /* clear phy power management bit if we are in gig only mode, + * which if enabled will attempt negotiation to 100Mb, which + * can cause a loss of link at power off or driver unload + */ + ctrl &= ~E1000_CTRL_SWDPIN3; + ew32(CTRL, ctrl); + } + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETHERNET_IEEE_VLAN_TYPE); + + e1000_reset_adaptive(hw); + e1000_phy_get_info(hw, &adapter->phy_info); + + e1000_release_manageability(adapter); +} + +/* Dump the eeprom for users having checksum issues */ +static void e1000_dump_eeprom(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_eeprom eeprom; + const struct ethtool_ops *ops = netdev->ethtool_ops; + u8 *data; + int i; + u16 csum_old, csum_new = 0; + + eeprom.len = ops->get_eeprom_len(netdev); + eeprom.offset = 0; + + data = kmalloc(eeprom.len, GFP_KERNEL); + if (!data) + return; + + ops->get_eeprom(netdev, &eeprom, data); + + csum_old = (data[EEPROM_CHECKSUM_REG * 2]) + + (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8); + for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2) + csum_new += data[i] + (data[i + 1] << 8); + csum_new = EEPROM_SUM - csum_new; + + pr_err("/*********************/\n"); + pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old); + pr_err("Calculated : 0x%04x\n", csum_new); + + pr_err("Offset Values\n"); + pr_err("======== ======\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); + + pr_err("Include this output when contacting your support provider.\n"); + pr_err("This is not a software error! Something bad happened to\n"); + pr_err("your hardware or EEPROM image. Ignoring this problem could\n"); + pr_err("result in further problems, possibly loss of data,\n"); + pr_err("corruption or system hangs!\n"); + pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n"); + pr_err("which is invalid and requires you to set the proper MAC\n"); + pr_err("address manually before continuing to enable this network\n"); + pr_err("device. 
Please inspect the EEPROM dump and report the\n"); + pr_err("issue to your hardware vendor or Intel Customer Support.\n"); + pr_err("/*********************/\n"); + + kfree(data); +} + +/** + * e1000_is_need_ioport - determine if an adapter needs ioport resources or not + * @pdev: PCI device information struct + * + * Return true if an adapter needs ioport resources + **/ +static int e1000_is_need_ioport(struct pci_dev *pdev) +{ + switch (pdev->device) { + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541ER_LOM: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + return true; + default: + return false; + } +} + +static netdev_features_t e1000_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int e1000_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + e1000_vlan_mode(netdev, features); + + if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + adapter->rx_csum = !!(features & NETIF_F_RXCSUM); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + + return 1; +} + +static const struct net_device_ops e1000_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_set_rx_mode = e1000_set_rx_mode, + .ndo_set_mac_address = e1000_set_mac, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_change_mtu = e1000_change_mtu, + .ndo_do_ioctl = e1000_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif + .ndo_fix_features = e1000_fix_features, + .ndo_set_features = e1000_set_features, +}; + +/** + * e1000_init_hw_struct - initialize members of hw struct + * @adapter: board private struct + * @hw: structure used by e1000_hw.c + * + * Factors out initialization of the e1000_hw struct to its own function + * that can be called very early at init (just after struct allocation). + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + * Returns negative error codes if MAC type setup fails. 
+ */ +static int e1000_init_hw_struct(struct e1000_adapter *adapter, + struct e1000_hw *hw) +{ + struct pci_dev *pdev = adapter->pdev; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + hw->max_frame_size = adapter->netdev->mtu + + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; + + /* identify the MAC */ + if (e1000_set_mac_type(hw)) { + e_err(probe, "Unknown MAC Type\n"); + return -EIO; + } + + switch (hw->mac_type) { + default: + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->phy_init_script = 1; + break; + } + + e1000_set_media_type(hw); + e1000_get_bus_info(hw); + + hw->wait_autoneg_complete = false; + hw->tbi_compatibility_en = true; + hw->adaptive_ifs = true; + + /* Copper options */ + + if (hw->media_type == e1000_media_type_copper) { + hw->mdix = AUTO_ALL_MODES; + hw->disable_polarity_correction = false; + hw->master_slave = E1000_MASTER_SLAVE; + } + + return 0; +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter = NULL; + struct e1000_hw *hw; + + static int cards_found; + static int global_quad_port_a; /* global ksp3 port a indication */ + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 tmp = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + int bars, need_ioport; + bool disable_dev = false; + + /* do not allocate ioport bars when not needed */ + need_ioport = e1000_is_need_ioport(pdev); + if (need_ioport) { + bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); + err = pci_enable_device(pdev); + } else { + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_enable_device_mem(pdev); + } + if (err) + return err; + + err = pci_request_selected_regions(pdev, bars, e1000_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + adapter->bars = bars; + adapter->need_ioport = need_ioport; + + hw = &adapter->hw; + hw->back = adapter; + + err = -EIO; + hw->hw_addr = pci_ioremap_bar(pdev, BAR_0); + if (!hw->hw_addr) + goto err_ioremap; + + if (adapter->need_ioport) { + for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + hw->io_base = pci_resource_start(pdev, i); + break; + } + } + } + + /* make ready for any if (hw->...) 
below */ + err = e1000_init_hw_struct(adapter, hw); + if (err) + goto err_sw_init; + + /* there is a workaround being applied below that limits + * 64-bit DMA addresses to 64-bit hardware. There are some + * 32-bit adapters that Tx hang when given 64-bit DMA addresses + */ + pci_using_dac = 0; + if ((hw->bus_type == e1000_bus_type_pcix) && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + pr_err("No usable DMA config, aborting\n"); + goto err_dma; + } + } + + netdev->netdev_ops = &e1000_netdev_ops; + e1000_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + + /* setup the private structure */ + + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + err = -EIO; + if (hw->mac_type == e1000_ce4100) { + hw->ce4100_gbe_mdio_base_virt = + ioremap(pci_resource_start(pdev, BAR_1), + pci_resource_len(pdev, BAR_1)); + + if (!hw->ce4100_gbe_mdio_base_virt) + goto err_mdio_ioremap; + } + + if (hw->mac_type >= e1000_82543) { + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER; + } + + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_82547)) + netdev->hw_features |= NETIF_F_TSO; + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->features |= netdev->hw_features; + netdev->hw_features |= (NETIF_F_RXCSUM | + NETIF_F_RXALL | + NETIF_F_RXFCS); + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + netdev->vlan_features |= (NETIF_F_TSO | + NETIF_F_HW_CSUM | + NETIF_F_SG); + + /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */ + if (hw->device_id != E1000_DEV_ID_82545EM_COPPER || + hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE) + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* MTU range: 46 - 16110 */ + netdev->min_mtu = ETH_ZLEN - ETH_HLEN; + netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); + + adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); + + /* initialize eeprom parameters */ + if (e1000_init_eeprom_params(hw)) { + e_err(probe, "EEPROM initialization failed\n"); + goto err_eeprom; + } + + /* before reading the EEPROM, reset the controller to + * put the device in a known good starting state + */ + + e1000_reset_hw(hw); + + /* make sure the EEPROM is good */ + if (e1000_validate_eeprom_checksum(hw) < 0) { + e_err(probe, "The EEPROM Checksum Is Not Valid\n"); + e1000_dump_eeprom(adapter); + /* set MAC address to all zeroes to invalidate and temporary + * disable this device for the user. 
This blocks regular + * traffic while still permitting ethtool ioctls from reaching + * the hardware as well as allowing the user to run the + * interface after manually setting a hw addr using + * `ip set address` + */ + memset(hw->mac_addr, 0, netdev->addr_len); + } else { + /* copy the MAC address out of the EEPROM */ + if (e1000_read_mac_addr(hw)) + e_err(probe, "EEPROM Read Error\n"); + } + /* don't block initialization here due to bad MAC address */ + memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) + e_err(probe, "Invalid MAC Address\n"); + + + INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog); + INIT_DELAYED_WORK(&adapter->fifo_stall_task, + e1000_82547_tx_fifo_stall_task); + INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); + INIT_WORK(&adapter->reset_task, e1000_reset_task); + + e1000_check_options(adapter); + + /* Initial Wake on LAN setting + * If APM wake is enabled in the EEPROM, + * enable the ACPI Magic Packet filter + */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + break; + case e1000_82544: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_82544_APM; + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + break; + } + fallthrough; + default: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + break; + } + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82546GB_PCIE: + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->eeprom_wol = 0; + else + adapter->quad_port_a = true; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + } + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* Auto detect PHY address */ + if (hw->mac_type == e1000_ce4100) { + for (i = 0; i < 32; i++) { + hw->phy_addr = i; + e1000_read_phy_reg(hw, PHY_ID2, &tmp); + + if (tmp != 0 && tmp != 0xFF) + break; + } + + if (i >= 32) + goto err_eeprom; + } + + /* reset the hardware with the new settings */ + e1000_reset(adapter); + + // offer device to EtherCAT master module + adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); + if (adapter->ecdev) { + err = ecdev_open(adapter->ecdev); + if (err) { + ecdev_withdraw(adapter->ecdev); + goto err_register; + } + } else { + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + } + + e1000_vlan_filter_on_off(adapter, false); + + /* print bus type/speed/width info */ + e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", + ((hw->bus_type == e1000_bus_type_pcix) ? 
"-X" : ""), + ((hw->bus_speed == e1000_bus_speed_133) ? 133 : + (hw->bus_speed == e1000_bus_speed_120) ? 120 : + (hw->bus_speed == e1000_bus_speed_100) ? 100 : + (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33), + ((hw->bus_width == e1000_bus_width_64) ? 64 : 32), + netdev->dev_addr); + + if (!adapter->ecdev) { + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + } + + e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); + + cards_found++; + return 0; + +err_register: +err_eeprom: + e1000_phy_hw_reset(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_dma: +err_sw_init: +err_mdio_ioremap: + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); +err_ioremap: + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, bars); +err_pci_reg: + if (!adapter || disable_dev) + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. That could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool disable_dev; + + e1000_down_and_stop(adapter); + e1000_release_manageability(adapter); + + if (adapter->ecdev) { + ecdev_close(adapter->ecdev); + ecdev_withdraw(adapter->ecdev); + } else { + unregister_netdev(netdev); + } + + e1000_phy_hw_reset(hw); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + if (hw->mac_type == e1000_ce4100) + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_selected_regions(pdev, adapter->bars); + + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); + + if (disable_dev) + pci_disable_device(pdev); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * e1000_init_hw_struct MUST be called before this function + **/ +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + + if (e1000_alloc_queues(adapter)) { + e_err(probe, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + e1000_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__E1000_DOWN, &adapter->flags); + + return 0; +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. 
+ **/ +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct e1000_tx_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct e1000_rx_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog task is started, + * and the stack is notified that the interface is ready. + **/ +int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->flags)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + e1000_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { + e1000_update_mng_vlan(adapter); + } + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as e1000_up() */ + clear_bit(__E1000_DOWN, &adapter->flags); + + if (!adapter->ecdev) { + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_start_queue(netdev); + } + + /* fire a link status change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + + return E1000_SUCCESS; + +err_req_irq: + e1000_power_down_phy(adapter); + e1000_free_all_rx_resources(adapter); +err_setup_rx: + e1000_free_all_tx_resources(adapter); +err_setup_tx: + e1000_reset(adapter); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ **/ +int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int count = E1000_CHECK_RESET_COUNT; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(count < 0); + + /* signal that we're down so that the reset task will no longer run */ + set_bit(__E1000_DOWN, &adapter->flags); + clear_bit(__E1000_RESETTING, &adapter->flags); + + e1000_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); + + e1000_free_all_tx_resources(adapter); + e1000_free_all_rx_resources(adapter); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + } + + return 0; +} + +/** + * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary + * @adapter: address of board private structure + * @start: address of beginning of memory + * @len: length of memory + **/ +static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, + unsigned long len) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long begin = (unsigned long)start; + unsigned long end = begin + len; + + /* First rev 82545 and 82546 need to not allow any memory + * write location to cross 64k boundary due to errata 23 + */ + if (hw->mac_type == e1000_82545 || + hw->mac_type == e1000_ce4100 || + hw->mac_type == e1000_82546) { + return ((begin ^ (end - 1)) >> 16) == 0; + } + + return true; +} + +/** + * e1000_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @txdr: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct e1000_tx_buffer) * txdr->count; + txdr->buffer_info = vzalloc(size); + if (!txdr->buffer_info) + return -ENOMEM; + + /* round up to nearest 4K */ + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { +setup_tx_desc_die: + vfree(txdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + void *olddesc = txdr->desc; + dma_addr_t olddma = txdr->dma; + e_err(tx_err, "txdr align check failed: %u bytes at %p\n", + txdr->size, txdr->desc); + /* Try again, without freeing the previous */ + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, + &txdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + goto setup_tx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory " + "for the transmit descriptor ring\n"); + vfree(txdr->buffer_info); + return -ENOMEM; + } else { + /* Free old allocation, new allocation 
was successful */ + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + } + } + memset(txdr->desc, 0, txdr->size); + + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + + return 0; +} + +/** + * e1000_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_tx_resources(adapter, + &adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + u64 tdba; + struct e1000_hw *hw = &adapter->hw; + u32 tdlen, tctl, tipg; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + + switch (adapter->num_tx_queues) { + case 1: + default: + tdba = adapter->tx_ring[0].dma; + tdlen = adapter->tx_ring[0].count * + sizeof(struct e1000_tx_desc); + ew32(TDLEN, tdlen); + ew32(TDBAH, (tdba >> 32)); + ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); + ew32(TDT, 0); + ew32(TDH, 0); + adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? + E1000_TDH : E1000_82542_TDH); + adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? + E1000_TDT : E1000_82542_TDT); + break; + } + + /* Set the default values for the Tx Inter Packet Gap timer */ + if ((hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes)) + tipg = DEFAULT_82543_TIPG_IPGT_FIBER; + else + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + tipg = DEFAULT_82542_TIPG_IPGT; + ipgr1 = DEFAULT_82542_TIPG_IPGR1; + ipgr2 = DEFAULT_82542_TIPG_IPGR2; + break; + default: + ipgr1 = DEFAULT_82543_TIPG_IPGR1; + ipgr2 = DEFAULT_82543_TIPG_IPGR2; + break; + } + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + + ew32(TIDV, adapter->tx_int_delay); + if (hw->mac_type >= e1000_82540) + ew32(TADV, adapter->tx_abs_int_delay); + + /* Program the Transmit Control Register */ + + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + e1000_config_collision_dist(hw); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + if (hw->mac_type < e1000_82543) + adapter->txd_cmd |= E1000_TXD_CMD_RPS; + else + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + /* Cache if we're 82544 running in PCI-X because we'll + * need this to apply a workaround later in the send path. 
+ */ + if (hw->mac_type == e1000_82544 && + hw->bus_type == e1000_bus_type_pcix) + adapter->pcix_82544 = true; + + ew32(TCTL, tctl); + +} + +/** + * e1000_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rxdr: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct e1000_rx_buffer) * rxdr->count; + rxdr->buffer_info = vzalloc(size); + if (!rxdr->buffer_info) + return -ENOMEM; + + desc_len = sizeof(struct e1000_rx_desc); + + /* Round up to nearest 4K */ + + rxdr->size = rxdr->count * desc_len; + rxdr->size = ALIGN(rxdr->size, 4096); + + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { +setup_rx_desc_die: + vfree(rxdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + void *olddesc = rxdr->desc; + dma_addr_t olddma = rxdr->dma; + e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", + rxdr->size, rxdr->desc); + /* Try again, without freeing the previous */ + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, + &rxdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + goto setup_rx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory for " + "the Rx descriptor ring\n"); + goto setup_rx_desc_die; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + } + } + memset(rxdr->desc, 0, rxdr->size); + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + rxdr->rx_skb_top = NULL; + + return 0; +} + +/** + * e1000_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_rx_resources(adapter, + &adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = er32(RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + + rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + + if (hw->tbi_compatibility_on == 1) + rctl |= E1000_RCTL_SBP; + else + rctl &= ~E1000_RCTL_SBP; + + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case E1000_RXBUFFER_2048: + default: + 
rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + ew32(RCTL, rctl); +} + +/** + * e1000_configure_rx - Configure 8254x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + u64 rdba; + struct e1000_hw *hw = &adapter->hw; + u32 rdlen, rctl, rxcsum; + + if (!adapter->ecdev && adapter->netdev->mtu > ETH_DATA_LEN) { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + if (hw->mac_type >= e1000_82540) { + ew32(RADV, adapter->rx_abs_int_delay); + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + } + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + switch (adapter->num_rx_queues) { + case 1: + default: + rdba = adapter->rx_ring[0].dma; + ew32(RDLEN, rdlen); + ew32(RDBAH, (rdba >> 32)); + ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); + ew32(RDT, 0); + ew32(RDH, 0); + adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? + E1000_RDH : E1000_82542_RDH); + adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 
+ E1000_RDT : E1000_82542_RDT); + break; + } + + /* Enable 82543 Receive Checksum Offload for TCP and UDP */ + if (hw->mac_type >= e1000_82543) { + rxcsum = er32(RXCSUM); + if (adapter->rx_csum) + rxcsum |= E1000_RXCSUM_TUOFL; + else + /* don't need to clear IPPCSE as it defaults to 0 */ + rxcsum &= ~E1000_RXCSUM_TUOFL; + ew32(RXCSUM, rxcsum); + } + + /* Enable Receives */ + ew32(RCTL, rctl | E1000_RCTL_EN); +} + +/** + * e1000_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * e1000_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void e1000_free_all_tx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); +} + +static void +e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_tx_buffer *buffer_info) +{ + if (adapter->ecdev) { + return; + } + + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; + /* buffer_info must be completely set up in the transmit path */ +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + netdev_reset_queue(adapter->netdev); + size = sizeof(struct e1000_tx_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->last_tx_tso = false; + + writel(0, hw->hw_addr + tx_ring->tdh); + writel(0, hw->hw_addr + tx_ring->tdt); +} + +/** + * e1000_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * e1000_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + 
e1000_clean_rx_ring(adapter, rx_ring); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * e1000_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +void e1000_free_all_rx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) +static unsigned int e1000_frag_len(const struct e1000_adapter *a) +{ + return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static void *e1000_alloc_frag(const struct e1000_adapter *a) +{ + unsigned int len = e1000_frag_len(a); + u8 *data = netdev_alloc_frag(len); + + if (likely(data)) + data += E1000_HEADROOM; + return data; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_rx_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx netfrags */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (adapter->clean_rx == e1000_clean_rx_irq) { + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.data) { + skb_free_frag(buffer_info->rxbuf.data); + buffer_info->rxbuf.data = NULL; + } + } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { + if (buffer_info->dma) + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.page) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + } + } + + buffer_info->dma = 0; + } + + /* there also may be some cached data from a chained receive */ + napi_free_frags(&adapter->napi); + rx_ring->rx_skb_top = NULL; + + size = sizeof(struct e1000_rx_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, hw->hw_addr + rx_ring->rdh); + writel(0, hw->hw_addr + rx_ring->rdt); +} + +/** + * e1000_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +/* The 82542 2.0 (revision 2) needs to have the receive unit in reset + * and memory write and invalidate disabled for certain operations + */ +static void e1000_enter_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + e1000_pci_clear_mwi(hw); + + rctl = er32(RCTL); + rctl |= E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (!adapter->ecdev && netif_running(netdev)) + e1000_clean_all_rx_rings(adapter); +} + +static void e1000_leave_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct 
net_device *netdev = adapter->netdev; + u32 rctl; + + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + + if (!adapter->netdev && netif_running(netdev)) { + /* No need to loop, because 82542 supports only 1 queue */ + struct e1000_rx_ring *ring = &adapter->rx_ring[0]; + e1000_configure_rx(adapter); + if (adapter->ecdev) { + /* fill rx ring completely! */ + adapter->alloc_rx_buf(adapter, ring, ring->count); + } else { + /* this one leaves the last ring element unallocated! */ + adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); + } + + } +} + +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); + + e1000_rar_set(hw, hw->mac_addr, 0); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + return 0; +} + +/** + * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void e1000_set_rx_mode(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + bool use_uc = false; + u32 rctl; + u32 hash_value; + int i, rar_entries = E1000_RAR_ENTRIES; + int mta_reg_count = E1000_NUM_MTA_REGISTERS; + u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); + + if (!mcarray) + return; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = er32(RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + rctl &= ~E1000_RCTL_VFE; + } else { + if (netdev->flags & IFF_ALLMULTI) + rctl |= E1000_RCTL_MPE; + else + rctl &= ~E1000_RCTL_MPE; + /* Enable VLAN filter if there is a VLAN */ + if (e1000_vlan_used(adapter)) + rctl |= E1000_RCTL_VFE; + } + + if (netdev_uc_count(netdev) > rar_entries - 1) { + rctl |= E1000_RCTL_UPE; + } else if (!(netdev->flags & IFF_PROMISC)) { + rctl &= ~E1000_RCTL_UPE; + use_uc = true; + } + + ew32(RCTL, rctl); + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + /* load the first 14 addresses into the exact filters 1-14. Unicast + * addresses take precedence to avoid disabling unicast filtering + * when possible. 
+ * + * RAR 0 is used for the station MAC address + * if there are not 14 addresses, go ahead and clear the filters + */ + i = 1; + if (use_uc) + netdev_for_each_uc_addr(ha, netdev) { + if (i == rar_entries) + break; + e1000_rar_set(hw, ha->addr, i++); + } + + netdev_for_each_mc_addr(ha, netdev) { + if (i == rar_entries) { + /* load any remaining addresses into the hash table */ + u32 hash_reg, hash_bit, mta; + hash_value = e1000_hash_mc_addr(hw, ha->addr); + hash_reg = (hash_value >> 5) & 0x7F; + hash_bit = hash_value & 0x1F; + mta = (1 << hash_bit); + mcarray[hash_reg] |= mta; + } else { + e1000_rar_set(hw, ha->addr, i++); + } + } + + for (; i < rar_entries; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); + E1000_WRITE_FLUSH(); + } + + /* write the hash table completely, write from bottom to avoid + * both stupid write combining chipsets, and flushing each write + */ + for (i = mta_reg_count - 1; i >= 0 ; i--) { + /* If we are on an 82544 has an errata where writing odd + * offsets overwrites the previous even offset, but writing + * backwards over the range solves the issue by always + * writing the odd offset first + */ + E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); + } + E1000_WRITE_FLUSH(); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + kfree(mcarray); +} + +/** + * e1000_update_phy_info_task - get phy info + * @work: work struct contained inside adapter struct + * + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void e1000_update_phy_info_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + phy_info_task.work); + + e1000_phy_get_info(&adapter->hw, &adapter->phy_info); +} + +/** + * e1000_82547_tx_fifo_stall_task - task to complete work + * @work: work struct contained inside adapter struct + **/ +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + fifo_stall_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 tctl; + + if (atomic_read(&adapter->tx_fifo_stall)) { + if ((er32(TDT) == er32(TDH)) && + (er32(TDFT) == er32(TDFH)) && + (er32(TDFTS) == er32(TDFHS))) { + tctl = er32(TCTL); + ew32(TCTL, tctl & ~E1000_TCTL_EN); + ew32(TDFT, adapter->tx_head_addr); + ew32(TDFH, adapter->tx_head_addr); + ew32(TDFTS, adapter->tx_head_addr); + ew32(TDFHS, adapter->tx_head_addr); + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); + + adapter->tx_fifo_head = 0; + atomic_set(&adapter->tx_fifo_stall, 0); + netif_wake_queue(netdev); + } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } + } +} + +bool e1000_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or rx + * sequence error interrupt (except on intel ce4100). 
+ * get_link_status will stay false until the + * e1000_check_for_link establishes link for copper adapters + * ONLY + */ + switch (hw->media_type) { + case e1000_media_type_copper: + if (hw->mac_type == e1000_ce4100) + hw->get_link_status = 1; + if (hw->get_link_status) { + e1000_check_for_link(hw); + link_active = !hw->get_link_status; + } else { + link_active = true; + } + break; + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_active = hw->serdes_has_link; + break; + default: + break; + } + + return link_active; +} + +/** + * e1000_watchdog - work function + * @work: work struct contained inside adapter struct + **/ +static void e1000_watchdog(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + watchdog_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_ring *txdr = adapter->tx_ring; + u32 link, tctl; + + link = e1000_has_link(adapter); + if (!adapter->ecdev && (netif_carrier_ok(netdev)) && link) + goto link_up; + + if (link) { + if ((adapter->ecdev && !ecdev_get_link(adapter->ecdev)) || + (!adapter->ecdev && !netif_carrier_ok(netdev))) { + u32 ctrl; + /* update snapshot of PHY registers on LSC */ + e1000_get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = er32(CTRL); + pr_info("%s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & + E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & + E1000_CTRL_RFCE) ? "RX" : ((ctrl & + E1000_CTRL_TFCE) ? "TX" : "None"))); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + /* maybe add some timeout factor ? 
*/ + break; + } + + /* enable transmits in the hardware */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + if (adapter->ecdev) { + ecdev_set_link(adapter->ecdev, 1); + } + else { + netif_carrier_on(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + } + adapter->smartspeed = 0; + } + } else { + if ((adapter->ecdev && ecdev_get_link(adapter->ecdev)) + || (!adapter->ecdev && netif_carrier_ok(netdev))) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + pr_info("%s NIC Link is Down\n", + netdev->name); + + if (adapter->ecdev) { + ecdev_set_link(adapter->ecdev, 0); + } else { + netif_carrier_off(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + } + } + + e1000_smartspeed(adapter); + } + +link_up: + e1000_update_stats(adapter); + + hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + hw->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; + adapter->gorcl_old = adapter->stats.gorcl; + adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; + adapter->gotcl_old = adapter->stats.gotcl; + + e1000_update_adaptive(hw); + + if (!adapter->ecdev && !netif_carrier_ok(netdev)) { + if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* exit immediately since reset is imminent */ + return; + } + } + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { + /* Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; + u32 dif = (adapter->gotcl > adapter->gorcl ? + adapter->gotcl - adapter->gorcl : + adapter->gorcl - adapter->gotcl) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + ew32(ITR, 1000000000 / (itr * 256)); + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + ew32(ICS, E1000_ICS_RXDMT0); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = true; + + /* Reschedule the task */ + if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see e1000_param.c) + **/ +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, int bytes) +{ + unsigned int retval = itr_setting; + struct e1000_hw *hw = &adapter->hw; + + if (unlikely(hw->mac_type < e1000_82540)) + goto update_itr_done; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* jumbo frames get bulk treatment*/ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* jumbo frames need bulk latency setting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) + retval = bulk_latency; + else if (packets <= 2 && bytes < 512) + retval = lowest_latency; + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + if (unlikely(hw->mac_type < e1000_82540)) + return; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (unlikely(adapter->link_speed != SPEED_1000)) { + new_itr = 4000; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + ew32(ITR, 1000000000 / (new_itr * 256)); + } +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_NO_FCS 0x00000010 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + + if (skb_is_gso(skb)) { + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_is_gso_v6(skb)) { + tcp_v6_gso_csum_prep(skb); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (++i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn(drv, "checksum_partial proto=%x!\n", + skb->protocol); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + 
context_desc->upper_setup.tcp_fields.tucse = 0;
+	context_desc->tcp_seg_setup.data = 0;
+	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+	buffer_info->time_stamp = jiffies;
+	buffer_info->next_to_watch = i;
+
+	if (unlikely(++i == tx_ring->count))
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
+	return true;
+}
+
+#define E1000_MAX_TXD_PWR	12
+#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+			struct e1000_tx_ring *tx_ring,
+			struct sk_buff *skb, unsigned int first,
+			unsigned int max_per_txd, unsigned int nr_frags,
+			unsigned int mss)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_tx_buffer *buffer_info;
+	unsigned int len = skb_headlen(skb);
+	unsigned int offset = 0, size, count = 0, i;
+	unsigned int f, bytecount, segs;
+
+	i = tx_ring->next_to_use;
+
+	while (len) {
+		buffer_info = &tx_ring->buffer_info[i];
+		size = min(len, max_per_txd);
+		/* Workaround for Controller erratum --
+		 * descriptor for non-tso packet in a linear SKB that follows a
+		 * tso gets written back prematurely before the data is fully
+		 * DMA'd to the controller
+		 */
+		if (!skb->data_len && tx_ring->last_tx_tso &&
+		    !skb_is_gso(skb)) {
+			tx_ring->last_tx_tso = false;
+			size -= 4;
+		}
+
+		/* Workaround for premature desc write-backs
+		 * in TSO mode. Append 4-byte sentinel desc
+		 */
+		if (unlikely(mss && !nr_frags && size == len && size > 8))
+			size -= 4;
+		/* work-around for errata 10 and it applies
+		 * to all controllers in PCI-X mode
+		 * The fix is to make sure that the first descriptor of a
+		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
+		 */
+		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+			     (size > 2015) && count == 0))
+			size = 2015;
+
+		/* Workaround for potential 82544 hang in PCI-X. Avoid
+		 * terminating buffers within evenly-aligned dwords.
+		 */
+		if (unlikely(adapter->pcix_82544 &&
+			     !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+			     size > 4))
+			size -= 4;
+
+		buffer_info->length = size;
+		/* set time_stamp *before* dma to help avoid a possible race */
+		buffer_info->time_stamp = jiffies;
+		buffer_info->mapped_as_page = false;
+		buffer_info->dma = dma_map_single(&pdev->dev,
+						  skb->data + offset,
+						  size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+			goto dma_error;
+		buffer_info->next_to_watch = i;
+
+		len -= size;
+		offset += size;
+		count++;
+		if (len) {
+			i++;
+			if (unlikely(i == tx_ring->count))
+				i = 0;
+		}
+	}
+
+	for (f = 0; f < nr_frags; f++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+		len = skb_frag_size(frag);
+		offset = 0;
+
+		while (len) {
+			unsigned long bufend;
+			i++;
+			if (unlikely(i == tx_ring->count))
+				i = 0;
+
+			buffer_info = &tx_ring->buffer_info[i];
+			size = min(len, max_per_txd);
+			/* Workaround for premature desc write-backs
+			 * in TSO mode. Append 4-byte sentinel desc
+			 */
+			if (unlikely(mss && f == (nr_frags-1) &&
+				     size == len && size > 8))
+				size -= 4;
+			/* Workaround for potential 82544 hang in PCI-X.
+			 * Avoid terminating buffers within evenly-aligned
+			 * dwords.
+ */ + bufend = (unsigned long) + page_to_phys(skb_frag_page(frag)); + bufend += offset + size - 1; + if (unlikely(adapter->pcix_82544 && + !(bufend & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = true; + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, + offset, size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + } + } + + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i == 0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + return 0; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, int tx_flags, + int count) +{ + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_tx_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + txd_lower &= ~(E1000_TXD_CMD_IFCS); + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + if (unlikely(++i == tx_ring->count)) + i = 0; + } + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + + tx_ring->next_to_use = i; +} + +/* 82547 workaround to avoid controller hang in half-duplex environment. + * The workaround is to avoid queuing a large packet that would span + * the internal Tx FIFO ring boundary by notifying the stack to resend + * the packet at a later time. This gives the Tx FIFO an opportunity to + * flush all packets. When that occurs, we reset the Tx FIFO pointers + * to the beginning of the Tx FIFO. 
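+ * (The pointer reset itself is done by the fifo_stall_task, which the
+ * non-EtherCAT transmit path schedules below when this check reports a
+ * stall.)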
+ */ + +#define E1000_FIFO_HDR 0x10 +#define E1000_82547_PAD_LEN 0x3E0 + +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; + u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; + + skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); + + if (adapter->link_duplex != HALF_DUPLEX) + goto no_fifo_stall_required; + + if (atomic_read(&adapter->tx_fifo_stall)) + return 1; + + if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { + atomic_set(&adapter->tx_fifo_stall, 1); + return 1; + } + +no_fifo_stall_required: + adapter->tx_fifo_head += skb_fifo_len; + if (adapter->tx_fifo_head >= adapter->tx_fifo_size) + adapter->tx_fifo_head -= adapter->tx_fifo_size; + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + + if (adapter->ecdev) { + return -EBUSY; + } + + netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(E1000_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, + struct e1000_tx_ring *tx_ring, int size) +{ + if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __e1000_maybe_stop_tx(netdev, size); +} + +#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X)) +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *tx_ring; + unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + __be16 protocol = vlan_get_protocol(skb); + + /* This goes back to the question of how to logically map a Tx queue + * to a flow. Right now, performance is impacted slightly negatively + * if using multiple Tx queues. If the stack breaks away from a + * single qdisc implementation, we can look at this again. + */ + tx_ring = adapter->tx_ring; + + /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, + * packets may get corrupted during padding by HW. + * To WA this issue, pad all small packets manually. + */ + if (eth_skb_pad(skb)) + return NETDEV_TX_OK; + + mss = skb_shinfo(skb)->gso_size; + /* The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. 
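+	 * (For example, an mss of 1000 lowers max_per_txd below to
+	 * 1000 << 2 = 4000 bytes.)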
+ */ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && hdr_len == len) { + switch (hw->mac_type) { + case e1000_82544: { + unsigned int pull_size; + + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements + * NOTE: this is a TSO only workaround + * if end byte alignment not correct move us + * into the next dword + */ + if ((unsigned long)(skb_tail_pointer(skb) - 1) + & 4) + break; + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err(drv, "__pskb_pull_tail " + "failed.\n"); + if (!adapter->ecdev) { + dev_kfree_skb_any(skb); + } + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + break; + } + default: + /* do nothing */ + break; + } + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + /* Controller Erratum workaround */ + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) + count++; + + count += TXD_USE_COUNT(len, max_txd_pwr); + + if (adapter->pcix_82544) + count++; + + /* work-around for errata 10 and it applies to all controllers + * in PCI-X mode, so add one more descriptor to the count + */ + if (unlikely((hw->bus_type == e1000_bus_type_pcix) && + (len > 2015))) + count++; + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), + max_txd_pwr); + if (adapter->pcix_82544) + count += nr_frags; + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time + */ + if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) + return NETDEV_TX_BUSY; + + if (unlikely((hw->mac_type == e1000_82547) && + (e1000_82547_fifo_workaround(adapter, skb)))) { + if (!adapter->ecdev) { + netif_stop_queue(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } + return NETDEV_TX_BUSY; + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << + E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, tx_ring, skb, protocol); + if (tso < 0) { + if (!adapter->ecdev) { + dev_kfree_skb_any(skb); + } + return NETDEV_TX_OK; + } + + if (likely(tso)) { + if (likely(hw->mac_type != e1000_82544)) + tx_ring->last_tx_tso = true; + tx_flags |= E1000_TX_FLAGS_TSO; + } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) + tx_flags |= E1000_TX_FLAGS_CSUM; + + if (protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + if (unlikely(skb->no_fcs)) + tx_flags |= E1000_TX_FLAGS_NO_FCS; + + count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, + nr_frags, mss); + + if (count) { + /* The descriptors needed is higher than other Intel drivers + * due to a number of workarounds. 
The breakdown is below: + * Data descriptors: MAX_SKB_FRAGS + 1 + * Context Descriptor: 1 + * Keep head from touching tail: 2 + * Workarounds: 3 + */ + int desc_needed = MAX_SKB_FRAGS + 7; + + if (!adapter->ecdev) { + netdev_sent_queue(netdev, skb->len); + skb_tx_timestamp(skb); + } + + e1000_tx_queue(adapter, tx_ring, tx_flags, count); + + /* 82544 potentially requires twice as many data descriptors + * in order to guarantee buffers don't end on evenly-aligned + * dwords + */ + if (adapter->pcix_82544) + desc_needed += MAX_SKB_FRAGS + 1; + + /* Make sure there is space in the ring for the next send. */ + e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); + + if (adapter->ecdev || !netdev_xmit_more() || + netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { + writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); + } + } else { + if (!adapter->ecdev) { + dev_kfree_skb_any(skb); + } + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +#define NUM_REGS 38 /* 1 based count */ +static void e1000_regdump(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 regs[NUM_REGS]; + u32 *regs_buff = regs; + int i = 0; + + static const char * const reg_name[] = { + "CTRL", "STATUS", + "RCTL", "RDLEN", "RDH", "RDT", "RDTR", + "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", + "TIDV", "TXDCTL", "TADV", "TARC0", + "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", + "TXDCTL1", "TARC1", + "CTRL_EXT", "ERT", "RDBAL", "RDBAH", + "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", + "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" + }; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDBAL); + regs_buff[9] = er32(TDBAH); + regs_buff[10] = er32(TDLEN); + regs_buff[11] = er32(TDH); + regs_buff[12] = er32(TDT); + regs_buff[13] = er32(TIDV); + regs_buff[14] = er32(TXDCTL); + regs_buff[15] = er32(TADV); + regs_buff[16] = er32(TARC0); + + regs_buff[17] = er32(TDBAL1); + regs_buff[18] = er32(TDBAH1); + regs_buff[19] = er32(TDLEN1); + regs_buff[20] = er32(TDH1); + regs_buff[21] = er32(TDT1); + regs_buff[22] = er32(TXDCTL1); + regs_buff[23] = er32(TARC1); + regs_buff[24] = er32(CTRL_EXT); + regs_buff[25] = er32(ERT); + regs_buff[26] = er32(RDBAL0); + regs_buff[27] = er32(RDBAH0); + regs_buff[28] = er32(TDFH); + regs_buff[29] = er32(TDFT); + regs_buff[30] = er32(TDFHS); + regs_buff[31] = er32(TDFTS); + regs_buff[32] = er32(TDFPC); + regs_buff[33] = er32(RDFH); + regs_buff[34] = er32(RDFT); + regs_buff[35] = er32(RDFHS); + regs_buff[36] = er32(RDFTS); + regs_buff[37] = er32(RDFPC); + + pr_info("Register dump\n"); + for (i = 0; i < NUM_REGS; i++) + pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); +} + +/* + * e1000_dump: Print registers, tx ring and rx ring + */ +static void e1000_dump(struct e1000_adapter *adapter) +{ + /* this code doesn't handle multiple rings */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + + if (!netif_msg_hw(adapter)) + return; + + /* Print Registers */ + e1000_regdump(adapter); + + /* transmit dump */ + pr_info("TX Desc ring0 dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on 
Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * +----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); + pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); + + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)tx_desc; + const char *type; + + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + type = "NTC/U"; + else if (i == tx_ring->next_to_use) + type = "NTU"; + else if (i == tx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", + ((le64_to_cpu(u->b) & (1<<20)) ? 
'd' : 'c'), i, + le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->length, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, buffer_info->skb, type); + } + +rx_ring_summary: + /* receive dump */ + pr_info("\nRX Desc ring dump\n"); + + /* Legacy Receive Descriptor Format + * + * +-----------------------------------------------------+ + * | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * | VLAN Tag | Errors | Status 0 | Packet csum | Length | + * +-----------------------------------------------------+ + * 63 48 47 40 39 32 31 16 15 0 + */ + pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); + + if (!netif_msg_rx_status(adapter)) + goto exit; + + for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)rx_desc; + const char *type; + + if (i == rx_ring->next_to_use) + type = "NTU"; + else if (i == rx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", + i, le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->rxbuf.data, type); + } /* for */ + + /* dump the descriptor caches */ + /* rx */ + pr_info("Rx descriptor cache in 64bit format\n"); + for (i = 0x6000; i <= 0x63FF ; i += 0x10) { + pr_info("R%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } + /* tx */ + pr_info("Tx descriptor cache in 64bit format\n"); + for (i = 0x7000; i <= 0x73FF ; i += 0x10) { + pr_info("T%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } +exit: + return; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: number of the Tx queue that hung (unused) + **/ +static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = + container_of(work, struct e1000_adapter, reset_task); + + e_err(drv, "Reset adapter\n"); + e1000_reinit_locked(adapter); +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + if (adapter->ecdev) { + return -EBUSY; + } + + /* Adapter-specific max frame size limits. */ + switch (hw->mac_type) { + case e1000_undefined ... e1000_82542_rev2_1: + if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { + e_err(probe, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + break; + default: + /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. 
*/ + break; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + /* e1000_down has a dependency on max_frame_size */ + hw->max_frame_size = max_frame; + if (netif_running(netdev)) { + /* prevent buffers from being reallocated */ + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; + e1000_down(adapter); + } + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. RXBUFFER_2048 --> size-4096 slab + * however with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= E1000_RXBUFFER_2048) + adapter->rx_buffer_len = E1000_RXBUFFER_2048; + else +#if (PAGE_SIZE >= E1000_RXBUFFER_16384) + adapter->rx_buffer_len = E1000_RXBUFFER_16384; +#elif (PAGE_SIZE >= E1000_RXBUFFER_4096) + adapter->rx_buffer_len = PAGE_SIZE; +#endif + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if (!hw->tbi_compatibility_on && + ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || + (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + e1000_up(adapter); + else + e1000_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->flags); + + return 0; +} + +/** + * e1000_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void e1000_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned long flags = 0; + u16 phy_tmp; + +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
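+	 * (adapter->link_speed is 0 while the link is down or the adapter
+	 * is resetting, hence the early return below.)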
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + if (!adapter->ecdev) { + spin_lock_irqsave(&adapter->stats_lock, flags); + } + + /* these counters are modified from e1000_tbi_adjust_stats, + * called from the interrupt context, so they must only + * be written while holding adapter->stats_lock + */ + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorcl += er32(GORCL); + adapter->stats.gorch += er32(GORCH); + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.prc64 += er32(PRC64); + adapter->stats.prc127 += er32(PRC127); + adapter->stats.prc255 += er32(PRC255); + adapter->stats.prc511 += er32(PRC511); + adapter->stats.prc1023 += er32(PRC1023); + adapter->stats.prc1522 += er32(PRC1522); + + adapter->stats.symerrs += er32(SYMERRS); + adapter->stats.mpc += er32(MPC); + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + adapter->stats.sec += er32(SEC); + adapter->stats.rlec += er32(RLEC); + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.fcruc += er32(FCRUC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotcl += er32(GOTCL); + adapter->stats.gotch += er32(GOTCH); + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + adapter->stats.rfc += er32(RFC); + adapter->stats.rjc += er32(RJC); + adapter->stats.torl += er32(TORL); + adapter->stats.torh += er32(TORH); + adapter->stats.totl += er32(TOTL); + adapter->stats.toth += er32(TOTH); + adapter->stats.tpr += er32(TPR); + + adapter->stats.ptc64 += er32(PTC64); + adapter->stats.ptc127 += er32(PTC127); + adapter->stats.ptc255 += er32(PTC255); + adapter->stats.ptc511 += er32(PTC511); + adapter->stats.ptc1023 += er32(PTC1023); + adapter->stats.ptc1522 += er32(PTC1522); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* used for adaptive IFS */ + + hw->tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->tx_packet_delta; + hw->collision_delta = er32(COLC); + adapter->stats.colc += hw->collision_delta; + + if (hw->mac_type >= e1000_82543) { + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.tncrs += er32(TNCRS); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + } + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; + netdev->stats.rx_length_errors = adapter->stats.rlerrc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; + netdev->stats.tx_errors = adapter->stats.txerrc; + 
netdev->stats.tx_aborted_errors = adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + if (hw->bad_tx_carr_stats_fd && + adapter->link_duplex == FULL_DUPLEX) { + netdev->stats.tx_carrier_errors = 0; + adapter->stats.tncrs = 0; + } + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Phy Stats */ + if (hw->media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && + (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + + if ((hw->mac_type <= e1000_82546) && + (hw->phy_type == e1000_phy_m88) && + !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) + adapter->phy_stats.receive_errors += phy_tmp; + } + + /* Management Stats */ + if (hw->has_smbus) { + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + } + + if (!adapter->ecdev) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + } +} + +void ec_poll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) { + e1000_watchdog(&adapter->watchdog_task.work); + adapter->ec_watchdog_jiffies = jiffies; + } + + e1000_intr(0, netdev); +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (unlikely((!icr))) + return IRQ_NONE; /* Not our interrupt */ + + /* we might have caused the interrupt, but the above + * read cleared it, and just in case the driver is + * down there is nothing to do so return handled + */ + if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) + return IRQ_HANDLED; + + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { + hw->get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 1); + } + + if (adapter->ecdev) { + int i, ec_work_done = 0; + for (i = 0; i < E1000_MAX_INTR; i++) { + if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring, + &ec_work_done, 100) && + !e1000_clean_tx_irq(adapter, adapter->tx_ring))) { + break; + } + } + } else { + /* disable interrupts, without the synchronize_irq bit */ + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + + if (likely(napi_schedule_prep(&adapter->napi))) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } else { + /* this really should not happen! 
if it does it is basically a + * bug, but not a hard error, so enable ints and continue + */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + } + + return IRQ_HANDLED; +} + +/** + * e1000_clean - NAPI Rx polling callback + * @napi: napi struct containing references to driver info + * @budget: budget given to driver for receive packets + **/ +static int e1000_clean(struct napi_struct *napi, int budget) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); + int tx_clean_complete = 0, work_done = 0; + + tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); + + adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); + + if (!tx_clean_complete || work_done == budget) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) { + if (likely(adapter->itr_setting & 3)) + e1000_set_itr(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return work_done; +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * @tx_ring: ring to clean + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; + unsigned int bytes_compl = 0, pkts_compl = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + dma_rmb(); /* read buffer_info after eop_desc */ + for ( ; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + + } + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + tx_desc->upper.data = 0; + + if (unlikely(++i == tx_ring->count)) + i = 0; + } + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, + * which will reuse the cleaned buffers. + */ + smp_store_release(&tx_ring->next_to_clean, i); + + if (!adapter->ecdev) { + netdev_completed_queue(netdev, pkts_compl, bytes_compl); + } + +#define TX_WAKE_THRESHOLD 32 + if (!adapter->ecdev && unlikely(count && netif_carrier_ok(netdev) && + E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
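+		 * (This barrier pairs with the smp_mb() in
+		 * __e1000_maybe_stop_tx().)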
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->flags))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (!adapter->ecdev && adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[eop].time_stamp && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%lu>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + (unsigned long)(tx_ring - adapter->tx_ring), + readl(hw->hw_addr + tx_ring->tdh), + readl(hw->hw_addr + tx_ring->tdt), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); + e1000_dump(adapter); + netif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + netdev->stats.tx_bytes += total_tx_bytes; + netdev->stats.tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_rx_checksum - Receive Checksum Offload for 82543 + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @skb: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + skb_checksum_none_assert(skb); + + /* 82543 or newer only */ + if (unlikely(hw->mac_type < e1000_82543)) + return; + /* Ignore Checksum bit is set */ + if (unlikely(status & E1000_RXD_STAT_IXSM)) + return; + /* TCP/UDP checksum error bit is set */ + if (unlikely(errors & E1000_RXD_ERR_TCPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + /* TCP/UDP Checksum has not been calculated */ + if (!(status & E1000_RXD_STAT_TCPCS)) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + if (likely(status & E1000_RXD_STAT_TCPCS)) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + adapter->hw_csum_good++; +} + +/** + * e1000_consume_page - helper function for jumbo Rx path + * @bi: software descriptor shadow data + * @skb: skb being modified + * @length: length of data being added + **/ +static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->rxbuf.page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; +} + +/** + * e1000_receive_skb - helper function to handle rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + */ +static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, + __le16 vlan, struct sk_buff *skb) +{ + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (status & E1000_RXD_STAT_VP) { + u16 vid = le16_to_cpu(vlan) & 
E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + napi_gro_receive(&adapter->napi, skb); +} + +/** + * e1000_tbi_adjust_stats + * @hw: Struct containing variables accessed by shared code + * @stats: point to stats struct + * @frame_len: The length of the frame in question + * @mac_addr: The Ethernet destination address of the frame in question + * + * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT + */ +static void e1000_tbi_adjust_stats(struct e1000_hw *hw, + struct e1000_hw_stats *stats, + u32 frame_len, const u8 *mac_addr) +{ + u64 carry_bit; + + /* First adjust the frame length. */ + frame_len--; + /* We need to adjust the statistics counters, since the hardware + * counters overcount this packet as a CRC error and undercount + * the packet as a good packet + */ + /* This packet should not be counted as a CRC error. */ + stats->crcerrs--; + /* This packet does count as a Good Packet Received. */ + stats->gprc++; + + /* Adjust the Good Octets received counters */ + carry_bit = 0x80000000 & stats->gorcl; + stats->gorcl += frame_len; + /* If the high bit of Gorcl (the low 32 bits of the Good Octets + * Received Count) was one before the addition, + * AND it is zero after, then we lost the carry out, + * need to add one to Gorch (Good Octets Received Count High). + * This could be simplified if all environments supported + * 64-bit integers. + */ + if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) + stats->gorch++; + /* Is this a broadcast or multicast? Check broadcast first, + * since the test for a multicast frame will test positive on + * a broadcast frame. + */ + if (is_broadcast_ether_addr(mac_addr)) + stats->bprc++; + else if (is_multicast_ether_addr(mac_addr)) + stats->mprc++; + + if (frame_len == hw->max_frame_size) { + /* In this case, the hardware has overcounted the number of + * oversize frames. + */ + if (stats->roc > 0) + stats->roc--; + } + + /* Adjust the bin counters when the extra byte put the frame in the + * wrong bin. Remember that the frame_len was adjusted above. 
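+	 * For example, a frame whose adjusted length is exactly 64 bytes is
+	 * taken out of the prc127 bin and counted in prc64 instead.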
+ */ + if (frame_len == 64) { + stats->prc64++; + stats->prc127--; + } else if (frame_len == 127) { + stats->prc127++; + stats->prc255--; + } else if (frame_len == 255) { + stats->prc255++; + stats->prc511--; + } else if (frame_len == 511) { + stats->prc511++; + stats->prc1023--; + } else if (frame_len == 1023) { + stats->prc1023++; + stats->prc1522--; + } else if (frame_len == 1522) { + stats->prc1522++; + } +} + +static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, + u8 status, u8 errors, + u32 length, const u8 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u8 last_byte = *(data + length - 1); + + if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { + unsigned long irq_flags; + + spin_lock_irqsave(&adapter->stats_lock, irq_flags); + e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); + + return true; + } + + return false; +} + +static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, + unsigned int bufsz) +{ + struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); + + if (unlikely(!skb)) + adapter->alloc_rx_buff_failed++; + return skb; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + * @work_to_do: max amount of work allowed for this call to do + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + */ +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + + if (++i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + u8 *mapped = page_address(buffer_info->rxbuf.page); + + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, mapped)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + /* an error means any chain goes out the window + * too + */ + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + } + +#define rxtop rx_ring->rx_skb_top +process_skb: + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = napi_get_frags(&adapter->napi); + if (!rxtop) + 
break; + + skb_fill_page_desc(rxtop, 0, + buffer_info->rxbuf.page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + struct page *p; + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page + */ + p = buffer_info->rxbuf.page; + if (length <= copybreak) { + u8 *vaddr; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + length -= 4; + skb = e1000_alloc_rx_skb(adapter, + length); + if (!skb) + break; + + vaddr = kmap_atomic(p); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr); + /* re-use the page, so don't erase + * buffer_info->rxbuf.page + */ + skb_put(skb, length); + e1000_rx_checksum(adapter, + status | rx_desc->errors << 24, + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_receive_skb(adapter, status, + rx_desc->special, skb); + goto next_desc; + } else { + skb = napi_get_frags(&adapter->napi); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + skb_fill_page_desc(skb, 0, p, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += (skb->len - 4); /* don't count FCS */ + if (likely(!(netdev->features & NETIF_F_RXFCS))) + pskb_trim(skb, skb->len - 4); + total_rx_packets++; + + if (status & E1000_RXD_STAT_VP) { + __le16 vlan = rx_desc->special; + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + napi_gro_frags(&adapter->napi); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/* this should improve performance for small packets with large amounts + * of reassembly being done in the stack + */ +static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, + struct e1000_rx_buffer *buffer_info, + u32 length, const void *data) +{ + struct sk_buff *skb; + + if (length > copybreak) + return NULL; + + skb = e1000_alloc_rx_skb(adapter, length); + if (!skb) + return NULL; + + dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, + length, DMA_FROM_DEVICE); + + skb_put_data(skb, data, length); + + return skb; +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + 
* @work_to_do: max amount of work allowed for this call to do + */ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 *data; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + length = le16_to_cpu(rx_desc->length); + + data = buffer_info->rxbuf.data; + if (!adapter->ecdev) { + prefetch(data); + skb = e1000_copybreak(adapter, buffer_info, length, data); + if (!skb) { + unsigned int frag_len = e1000_frag_len(adapter); + + skb = build_skb(data - E1000_HEADROOM, frag_len); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + skb_reserve(skb, E1000_HEADROOM); + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + buffer_info->rxbuf.data = NULL; + } + } + + if (++i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + + /* !EOP means multiple descriptors were used to store a single + * packet, if thats the case we need to toss it. In fact, we + * to toss every packet with the EOP bit clear and the next + * frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(status & E1000_RXD_STAT_EOP))) + adapter->discarding = true; + + if (adapter->discarding) { + /* All receives must fit into a single buffer */ + netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); + if (!adapter->ecdev) { + dev_kfree_skb(skb); + } + if (status & E1000_RXD_STAT_EOP) + adapter->discarding = false; + goto next_desc; + } + + if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, data)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + if (!adapter->ecdev) { + dev_kfree_skb(skb); + } + goto next_desc; + } + } + +process_skb: + total_rx_bytes += (length - 4); /* don't count FCS */ + total_rx_packets++; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + /* adjust length to remove Ethernet CRC, this must be + * done after the TBI_ACCEPT workaround above + */ + length -= 4; + + if (!adapter->ecdev) { + if (buffer_info->rxbuf.data == NULL) + skb_put(skb, length); + else /* copybreak skb */ + skb_trim(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + } + + if (adapter->ecdev) { + dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + ecdev_receive(adapter->ecdev, data, length); + + // No need to detect link status as + // long as frames are received: Reset watchdog. 
+ adapter->ec_watchdog_jiffies = jiffies; + + dma_sync_single_for_device(&adapter->pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + } else { + e1000_receive_skb(adapter, status, rx_desc->special, skb); + } + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @adapter: address of board private structure + * @rx_ring: pointer to receive ring structure + * @cleaned_count: number of buffers to allocate this pass + **/ +static void +e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, int cleaned_count) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + /* allocate a new page if necessary */ + if (!buffer_info->rxbuf.page) { + buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->rxbuf.page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) { + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->rxbuf.page, 0, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + dma_wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + * @rx_ring: pointer to ring struct + * @cleaned_count: number of new Rx buffers to try to allocate + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + void *data; + + if (buffer_info->rxbuf.data) + goto skip; + + data = e1000_alloc_frag(adapter); + if (!data) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + void *olddata = data; + e_err(rx_err, "skb align check failed: %u bytes at " + "%p\n", bufsz, data); + /* Try again, without freeing the previous */ + data = e1000_alloc_frag(adapter); + /* Failed allocation, critical failure */ + if (!data) { + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + /* give up */ + skb_free_frag(data); + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + /* Use new allocation */ + skb_free_frag(olddata); + } + buffer_info->dma = dma_map_single(&pdev->dev, + data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + skb_free_frag(data); + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + + /* XXX if it was allocated cleanly it will never map to a + * boundary crossing + */ + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, + (void *)(unsigned long)buffer_info->dma, + adapter->rx_buffer_len)) { + e_err(rx_err, "dma align check failed: %u bytes at " + "%p\n", adapter->rx_buffer_len, + (void *)(unsigned long)buffer_info->dma); + + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + + skb_free_frag(data); + buffer_info->rxbuf.data = NULL; + buffer_info->dma = 0; + + adapter->alloc_rx_buff_failed++; + break; + } + buffer_info->rxbuf.data = data; + skip: + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + writel(i, hw->hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 
+ * @adapter: address of board private structure + **/ +static void e1000_smartspeed(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_status; + u16 phy_ctrl; + + if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || + !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) + return; + + if (adapter->smartspeed == 0) { + /* If Master/Slave config fault is asserted twice, + * we assume back-to-back + */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + if (phy_ctrl & CR_1000T_MS_ENABLE) { + phy_ctrl &= ~CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, + phy_ctrl); + adapter->smartspeed++; + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, + &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, + phy_ctrl); + } + } + return; + } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { + /* If still no link, perhaps using 2/3 pair cable */ + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + phy_ctrl |= CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); + } + } + /* Restart process after E1000_SMARTSPEED_MAX iterations */ + if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) + adapter->smartspeed = 0; +} + +/** + * e1000_ioctl - handle ioctl calls + * @netdev: pointer to our netdev + * @ifr: pointer to interface request structure + * @cmd: ioctl data + **/ +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * e1000_mii_ioctl - + * @netdev: pointer to our netdev + * @ifr: pointer to interface request structure + * @cmd: ioctl data + **/ +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct mii_ioctl_data *data = if_mii(ifr); + int retval; + u16 mii_reg; + unsigned long flags; + + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + break; + case SIOCGMIIREG: + if (adapter->ecdev) { + return -EPERM; + } + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, + &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + break; + case SIOCSMIIREG: + if (adapter->ecdev) { + return -EPERM; + } + if (data->reg_num & ~(0x1F)) + return -EFAULT; + mii_reg = data->val_in; + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_write_phy_reg(hw, data->reg_num, + mii_reg)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + if (hw->media_type == e1000_media_type_copper) { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (mii_reg & MII_CR_AUTO_NEG_EN) { + 
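+				/* re-enable autoneg; 0x2F advertises 10/100
+				 * half+full duplex and 1000 full duplex
+				 */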
hw->autoneg = 1; + hw->autoneg_advertised = 0x2F; + } else { + u32 speed; + if (mii_reg & 0x40) + speed = SPEED_1000; + else if (mii_reg & 0x2000) + speed = SPEED_100; + else + speed = SPEED_10; + retval = e1000_set_spd_dplx( + adapter, speed, + ((mii_reg & 0x100) + ? DUPLEX_FULL : + DUPLEX_HALF)); + if (retval) + return retval; + } + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + case M88E1000_PHY_SPEC_CTRL: + case M88E1000_EXT_PHY_SPEC_CTRL: + if (e1000_phy_reset(hw)) + return -EIO; + break; + } + } else { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + } + } + break; + default: + return -EOPNOTSUPP; + } + return E1000_SUCCESS; +} + +void e1000_pci_set_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + int ret_val = pci_set_mwi(adapter->pdev); + + if (ret_val) + e_err(probe, "Error in setting MWI\n"); +} + +void e1000_pci_clear_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + + pci_clear_mwi(adapter->pdev); +} + +int e1000_pcix_get_mmrbc(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return pcix_get_mmrbc(adapter->pdev); +} + +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) +{ + struct e1000_adapter *adapter = hw->back; + pcix_set_mmrbc(adapter->pdev, mmrbc); +} + +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) +{ + outl(value, port); +} + +static bool e1000_vlan_used(struct e1000_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + return true; + return false; +} + +static void __e1000_vlan_mode(struct e1000_adapter *adapter, + netdev_features_t features) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = er32(CTRL); + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + ctrl |= E1000_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~E1000_CTRL_VME; + } + ew32(CTRL, ctrl); +} +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, adapter->netdev->features); + if (filter_on) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_CFIEN; + if (!(adapter->netdev->flags & IFF_PROMISC)) + rctl |= E1000_RCTL_VFE; + ew32(RCTL, rctl); + e1000_update_mng_vlan(adapter); + } else { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_VFE; + ew32(RCTL, rctl); + } + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, features); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + (vid == adapter->mng_vlan_id)) + return 0; + + if 
(!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, true); + + /* add VID to filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta |= (1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + + /* remove VID from filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + clear_bit(vid, adapter->active_vlans); + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, false); + + return 0; +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + if (!e1000_vlan_used(adapter)) + return; + + e1000_vlan_filter_on_off(adapter, true); + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_hw *hw = &adapter->hw; + + hw->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 gbps Full duplex */ + if ((hw->media_type == e1000_media_type_fiber) && + spd != SPEED_1000 && + dplx != DUPLEX_FULL) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_10_half; + break; + case SPEED_10 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_10_full; + break; + case SPEED_100 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_100_half; + break; + case SPEED_100 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_100_full; + break; + case SPEED_1000 + DUPLEX_FULL: + hw->autoneg = 1; + hw->autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + hw->mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + e_err(probe, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + u32 wufc = adapter->wol; + + if (adapter->ecdev) { + return -EBUSY; + } + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + } + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_rx_mode(netdev); + + rctl = er32(RCTL); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) + rctl |= E1000_RCTL_MPE; + + /* enable receives in the hardware */ + ew32(RCTL, rctl | E1000_RCTL_EN); + + if (hw->mac_type >= 
e1000_82540) { + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC | + E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + } + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + ew32(WUC, E1000_WUC_PME_EN); + ew32(WUFC, wufc); + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + } + + e1000_release_manageability(adapter); + + *enable_wake = !!wufc; + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->en_mng_pt) + *enable_wake = true; + + if (netif_running(netdev)) + e1000_free_irq(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + return 0; +} + +static int __maybe_unused e1000_suspend(struct device *dev) +{ + int retval; + struct pci_dev *pdev = to_pci_dev(dev); + bool wake; + + retval = __e1000_shutdown(pdev, &wake); + device_set_wakeup_enable(dev, wake); + + return retval; +} + +static int __maybe_unused e1000_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + if (adapter->ecdev) { + return -EBUSY; + } + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot enable PCI device from suspend\n"); + return err; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + e1000_power_up_phy(adapter); + e1000_reset(adapter); + ew32(WUS, ~0); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) + e1000_up(adapter); + + if (!adapter->ecdev) { + netif_device_attach(netdev); + } + + return 0; +} + +static void e1000_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __e1000_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (disable_hardirq(adapter->pdev->irq)) + e1000_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
+ */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e1000_down(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000_reset(adapter); + ew32(WUS, ~0); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) { + if (e1000_up(adapter)) { + pr_info("can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +/* e1000_main.c */ diff --git a/devices/e1000/e1000_main-5.14-orig.c b/devices/e1000/e1000_main-5.14-orig.c new file mode 100644 index 00000000..c2a10912 --- /dev/null +++ b/devices/e1000/e1000_main-5.14-orig.c @@ -0,0 +1,5314 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +#include "e1000.h" +#include +#include +#include +#include +#include + +char e1000_driver_name[] = "e1000"; +static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; +static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; + +/* e1000_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * Macro expands to... 
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + */ +static const struct pci_device_id e1000_pci_tbl[] = { + INTEL_E1000_ETHERNET_DEVICE(0x1000), + INTEL_E1000_ETHERNET_DEVICE(0x1001), + INTEL_E1000_ETHERNET_DEVICE(0x1004), + INTEL_E1000_ETHERNET_DEVICE(0x1008), + INTEL_E1000_ETHERNET_DEVICE(0x1009), + INTEL_E1000_ETHERNET_DEVICE(0x100C), + INTEL_E1000_ETHERNET_DEVICE(0x100D), + INTEL_E1000_ETHERNET_DEVICE(0x100E), + INTEL_E1000_ETHERNET_DEVICE(0x100F), + INTEL_E1000_ETHERNET_DEVICE(0x1010), + INTEL_E1000_ETHERNET_DEVICE(0x1011), + INTEL_E1000_ETHERNET_DEVICE(0x1012), + INTEL_E1000_ETHERNET_DEVICE(0x1013), + INTEL_E1000_ETHERNET_DEVICE(0x1014), + INTEL_E1000_ETHERNET_DEVICE(0x1015), + INTEL_E1000_ETHERNET_DEVICE(0x1016), + INTEL_E1000_ETHERNET_DEVICE(0x1017), + INTEL_E1000_ETHERNET_DEVICE(0x1018), + INTEL_E1000_ETHERNET_DEVICE(0x1019), + INTEL_E1000_ETHERNET_DEVICE(0x101A), + INTEL_E1000_ETHERNET_DEVICE(0x101D), + INTEL_E1000_ETHERNET_DEVICE(0x101E), + INTEL_E1000_ETHERNET_DEVICE(0x1026), + INTEL_E1000_ETHERNET_DEVICE(0x1027), + INTEL_E1000_ETHERNET_DEVICE(0x1028), + INTEL_E1000_ETHERNET_DEVICE(0x1075), + INTEL_E1000_ETHERNET_DEVICE(0x1076), + INTEL_E1000_ETHERNET_DEVICE(0x1077), + INTEL_E1000_ETHERNET_DEVICE(0x1078), + INTEL_E1000_ETHERNET_DEVICE(0x1079), + INTEL_E1000_ETHERNET_DEVICE(0x107A), + INTEL_E1000_ETHERNET_DEVICE(0x107B), + INTEL_E1000_ETHERNET_DEVICE(0x107C), + INTEL_E1000_ETHERNET_DEVICE(0x108A), + INTEL_E1000_ETHERNET_DEVICE(0x1099), + INTEL_E1000_ETHERNET_DEVICE(0x10B5), + INTEL_E1000_ETHERNET_DEVICE(0x2E6E), + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr); +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr); +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +void e1000_update_stats(struct e1000_adapter *adapter); + +static int e1000_init_module(void); +static void e1000_exit_module(void); +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void e1000_remove(struct pci_dev *pdev); +static int e1000_alloc_queues(struct e1000_adapter *adapter); +static int e1000_sw_init(struct e1000_adapter *adapter); +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); +static void e1000_configure_tx(struct e1000_adapter *adapter); +static void e1000_configure_rx(struct e1000_adapter *adapter); +static void e1000_setup_rctl(struct e1000_adapter *adapter); +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static void e1000_set_rx_mode(struct net_device 
*netdev); +static void e1000_update_phy_info_task(struct work_struct *work); +static void e1000_watchdog(struct work_struct *work); +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +static int e1000_change_mtu(struct net_device *netdev, int new_mtu); +static int e1000_set_mac(struct net_device *netdev, void *p); +static irqreturn_t e1000_intr(int irq, void *data); +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static int e1000_clean(struct napi_struct *napi, int budget); +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ +} +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); +static void e1000_enter_82542_rst(struct e1000_adapter *adapter); +static void e1000_leave_82542_rst(struct e1000_adapter *adapter); +static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue); +static void e1000_reset_task(struct work_struct *work); +static void e1000_smartspeed(struct e1000_adapter *adapter); +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb); + +static bool e1000_vlan_used(struct e1000_adapter *adapter); +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on); +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static void e1000_restore_vlan(struct e1000_adapter *adapter); + +static int __maybe_unused e1000_suspend(struct device *dev); +static int __maybe_unused e1000_resume(struct device *dev); +static void e1000_shutdown(struct pci_dev *pdev); + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void e1000_netpoll (struct net_device *netdev); +#endif + +#define COPYBREAK_DEFAULT 256 +static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state); +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); +static void e1000_io_resume(struct pci_dev *pdev); + +static const struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume); + +static struct pci_driver e1000_driver = { + .name = e1000_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = e1000_remove, + .driver = { + .pm = 
&e1000_pm_ops, + }, + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); +MODULE_LICENSE("GPL v2"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +/** + * e1000_get_hw_dev - helper function for getting netdev + * @hw: pointer to HW struct + * + * return device used by hardware layer to print debugging information + * + **/ +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init e1000_init_module(void) +{ + int ret; + pr_info("%s\n", e1000_driver_string); + + pr_info("%s\n", e1000_copyright); + + ret = pci_register_driver(&e1000_driver); + if (copybreak != COPYBREAK_DEFAULT) { + if (copybreak == 0) + pr_info("copybreak disabled\n"); + else + pr_info("copybreak enabled for " + "packets <= %u bytes\n", copybreak); + } + return ret; +} + +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} + +module_exit(e1000_exit_module); + +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + irq_handler_t handler = e1000_intr; + int irq_flags = IRQF_SHARED; + int err; + + err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (err) { + e_err(probe, "Unable to allocate interrupt Error: %d\n", err); + } + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + synchronize_irq(adapter->pdev->irq); +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMS, IMS_ENABLE_MASK); + E1000_WRITE_FLUSH(); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u16 vid = hw->mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (!e1000_vlan_used(adapter)) + return; + + if (!test_bit(vid, adapter->active_vlans)) { + if (hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + old_vid); + } else { + adapter->mng_vlan_id = vid; + } +} + +static void e1000_init_manageability(struct 
e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + ew32(MANC, manc); + } +} + +static void e1000_release_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + + ew32(MANC, manc); + } +} + +/** + * e1000_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + e1000_set_rx_mode(netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + /* call E1000_DESC_UNUSED which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct e1000_rx_ring *ring = &adapter->rx_ring[i]; + adapter->alloc_rx_buf(adapter, ring, + E1000_DESC_UNUSED(ring)); + } +} + +int e1000_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->flags); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_wake_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + return 0; +} + +/** + * e1000_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000_reset *** + **/ +void e1000_power_up_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 mii_reg = 0; + + /* Just clear the power down bit to wake the phy back up */ + if (hw->media_type == e1000_media_type_copper) { + /* according to the manual, the phy will retain its + * settings across a power-down/up cycle + */ + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + } +} + +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is true * + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active + */ + if (!adapter->wol && hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + u16 mii_reg = 0; + + switch (hw->mac_type) { + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (er32(MANC) & E1000_MANC_SMBUS_EN) + goto out; + break; + default: + goto out; + } + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + msleep(1); + } +out: + return; +} + +static void e1000_down_and_stop(struct e1000_adapter *adapter) +{ + set_bit(__E1000_DOWN, &adapter->flags); + + 
cancel_delayed_work_sync(&adapter->watchdog_task); + + /* + * Since the watchdog task can reschedule other tasks, we should cancel + * it first, otherwise we can run into the situation when a work is + * still running after the adapter has been turned down. + */ + + cancel_delayed_work_sync(&adapter->phy_info_task); + cancel_delayed_work_sync(&adapter->fifo_stall_task); + + /* Only kill reset task if adapter is not resetting */ + if (!test_bit(__E1000_RESETTING, &adapter->flags)) + cancel_work_sync(&adapter->reset_task); +} + +void e1000_down(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl, tctl; + + /* disable receives in the hardware */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_tx_disable(netdev); + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + /* flush both disables and wait for them to finish */ + E1000_WRITE_FLUSH(); + msleep(10); + + /* Set the carrier off after transmits have been disabled in the + * hardware, to avoid race conditions with e1000_watchdog() (which + * may be running concurrently to us, checking for the carrier + * bit to decide whether it should enable transmits again). Such + * a race condition would result into transmission being disabled + * in the hardware until the next IFF_DOWN+IFF_UP cycle. + */ + netif_carrier_off(netdev); + + napi_disable(&adapter->napi); + + e1000_irq_disable(adapter); + + /* Setting DOWN must be after irq_disable to prevent + * a screaming interrupt. Setting DOWN also prevents + * tasks from rescheduling. + */ + e1000_down_and_stop(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + e1000_reset(adapter); + e1000_clean_all_tx_rings(adapter); + e1000_clean_all_rx_rings(adapter); +} + +void e1000_reinit_locked(struct e1000_adapter *adapter) +{ + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + /* only run the task if not already down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) { + e1000_down(adapter); + e1000_up(adapter); + } + + clear_bit(__E1000_RESETTING, &adapter->flags); +} + +void e1000_reset(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pba = 0, tx_space, min_tx_space, min_rx_space; + bool legacy_pba_adjust = false; + u16 hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. 
+ */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + case e1000_82540: + case e1000_82541: + case e1000_82541_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_48K; + break; + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + pba = E1000_PBA_48K; + break; + case e1000_82547: + case e1000_82547_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_30K; + break; + case e1000_undefined: + case e1000_num_macs: + break; + } + + if (legacy_pba_adjust) { + if (hw->max_frame_size > E1000_RXBUFFER_8192) + pba -= 8; /* allocate more FIFO for Tx */ + + if (hw->mac_type == e1000_82547) { + adapter->tx_fifo_head = 0; + adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; + adapter->tx_fifo_size = + (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; + atomic_set(&adapter->tx_fifo_stall, 0); + } + } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* adjust PBA for jumbo frames */ + ew32(PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (hw->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = hw->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + + /* PCI/PCIx hardware has PBA alignment constraints */ + switch (hw->mac_type) { + case e1000_82545 ... e1000_82546_rev_3: + pba &= ~(E1000_PBA_8K - 1); + break; + default: + break; + } + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment or use Early Receive if available + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + } + + ew32(PBA, pba); + + /* flow control settings: + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. 
+ * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame + */ + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - hw->max_frame_size)); + + hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ + hw->fc_low_water = hw->fc_high_water - 8; + hw->fc_pause_time = E1000_FC_PAUSE_TIME; + hw->fc_send_xon = 1; + hw->fc = hw->original_fc; + + /* Allow time for pending master requests to run */ + e1000_reset_hw(hw); + if (hw->mac_type >= e1000_82544) + ew32(WUC, 0); + + if (e1000_init_hw(hw)) + e_dev_err("Hardware Error\n"); + e1000_update_mng_vlan(adapter); + + /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ + if (hw->mac_type >= e1000_82544 && + hw->autoneg == 1 && + hw->autoneg_advertised == ADVERTISE_1000_FULL) { + u32 ctrl = er32(CTRL); + /* clear phy power management bit if we are in gig only mode, + * which if enabled will attempt negotiation to 100Mb, which + * can cause a loss of link at power off or driver unload + */ + ctrl &= ~E1000_CTRL_SWDPIN3; + ew32(CTRL, ctrl); + } + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETHERNET_IEEE_VLAN_TYPE); + + e1000_reset_adaptive(hw); + e1000_phy_get_info(hw, &adapter->phy_info); + + e1000_release_manageability(adapter); +} + +/* Dump the eeprom for users having checksum issues */ +static void e1000_dump_eeprom(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_eeprom eeprom; + const struct ethtool_ops *ops = netdev->ethtool_ops; + u8 *data; + int i; + u16 csum_old, csum_new = 0; + + eeprom.len = ops->get_eeprom_len(netdev); + eeprom.offset = 0; + + data = kmalloc(eeprom.len, GFP_KERNEL); + if (!data) + return; + + ops->get_eeprom(netdev, &eeprom, data); + + csum_old = (data[EEPROM_CHECKSUM_REG * 2]) + + (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8); + for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2) + csum_new += data[i] + (data[i + 1] << 8); + csum_new = EEPROM_SUM - csum_new; + + pr_err("/*********************/\n"); + pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old); + pr_err("Calculated : 0x%04x\n", csum_new); + + pr_err("Offset Values\n"); + pr_err("======== ======\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); + + pr_err("Include this output when contacting your support provider.\n"); + pr_err("This is not a software error! Something bad happened to\n"); + pr_err("your hardware or EEPROM image. Ignoring this problem could\n"); + pr_err("result in further problems, possibly loss of data,\n"); + pr_err("corruption or system hangs!\n"); + pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n"); + pr_err("which is invalid and requires you to set the proper MAC\n"); + pr_err("address manually before continuing to enable this network\n"); + pr_err("device. 
Please inspect the EEPROM dump and report the\n"); + pr_err("issue to your hardware vendor or Intel Customer Support.\n"); + pr_err("/*********************/\n"); + + kfree(data); +} + +/** + * e1000_is_need_ioport - determine if an adapter needs ioport resources or not + * @pdev: PCI device information struct + * + * Return true if an adapter needs ioport resources + **/ +static int e1000_is_need_ioport(struct pci_dev *pdev) +{ + switch (pdev->device) { + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541ER_LOM: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + return true; + default: + return false; + } +} + +static netdev_features_t e1000_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int e1000_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + e1000_vlan_mode(netdev, features); + + if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + adapter->rx_csum = !!(features & NETIF_F_RXCSUM); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + + return 1; +} + +static const struct net_device_ops e1000_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_set_rx_mode = e1000_set_rx_mode, + .ndo_set_mac_address = e1000_set_mac, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_change_mtu = e1000_change_mtu, + .ndo_do_ioctl = e1000_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif + .ndo_fix_features = e1000_fix_features, + .ndo_set_features = e1000_set_features, +}; + +/** + * e1000_init_hw_struct - initialize members of hw struct + * @adapter: board private struct + * @hw: structure used by e1000_hw.c + * + * Factors out initialization of the e1000_hw struct to its own function + * that can be called very early at init (just after struct allocation). + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + * Returns negative error codes if MAC type setup fails. 
+ */ +static int e1000_init_hw_struct(struct e1000_adapter *adapter, + struct e1000_hw *hw) +{ + struct pci_dev *pdev = adapter->pdev; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + hw->max_frame_size = adapter->netdev->mtu + + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; + + /* identify the MAC */ + if (e1000_set_mac_type(hw)) { + e_err(probe, "Unknown MAC Type\n"); + return -EIO; + } + + switch (hw->mac_type) { + default: + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->phy_init_script = 1; + break; + } + + e1000_set_media_type(hw); + e1000_get_bus_info(hw); + + hw->wait_autoneg_complete = false; + hw->tbi_compatibility_en = true; + hw->adaptive_ifs = true; + + /* Copper options */ + + if (hw->media_type == e1000_media_type_copper) { + hw->mdix = AUTO_ALL_MODES; + hw->disable_polarity_correction = false; + hw->master_slave = E1000_MASTER_SLAVE; + } + + return 0; +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter = NULL; + struct e1000_hw *hw; + + static int cards_found; + static int global_quad_port_a; /* global ksp3 port a indication */ + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 tmp = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + int bars, need_ioport; + bool disable_dev = false; + + /* do not allocate ioport bars when not needed */ + need_ioport = e1000_is_need_ioport(pdev); + if (need_ioport) { + bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); + err = pci_enable_device(pdev); + } else { + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_enable_device_mem(pdev); + } + if (err) + return err; + + err = pci_request_selected_regions(pdev, bars, e1000_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + adapter->bars = bars; + adapter->need_ioport = need_ioport; + + hw = &adapter->hw; + hw->back = adapter; + + err = -EIO; + hw->hw_addr = pci_ioremap_bar(pdev, BAR_0); + if (!hw->hw_addr) + goto err_ioremap; + + if (adapter->need_ioport) { + for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + hw->io_base = pci_resource_start(pdev, i); + break; + } + } + } + + /* make ready for any if (hw->...) 
below */ + err = e1000_init_hw_struct(adapter, hw); + if (err) + goto err_sw_init; + + /* there is a workaround being applied below that limits + * 64-bit DMA addresses to 64-bit hardware. There are some + * 32-bit adapters that Tx hang when given 64-bit DMA addresses + */ + pci_using_dac = 0; + if ((hw->bus_type == e1000_bus_type_pcix) && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + pr_err("No usable DMA config, aborting\n"); + goto err_dma; + } + } + + netdev->netdev_ops = &e1000_netdev_ops; + e1000_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + + /* setup the private structure */ + + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + err = -EIO; + if (hw->mac_type == e1000_ce4100) { + hw->ce4100_gbe_mdio_base_virt = + ioremap(pci_resource_start(pdev, BAR_1), + pci_resource_len(pdev, BAR_1)); + + if (!hw->ce4100_gbe_mdio_base_virt) + goto err_mdio_ioremap; + } + + if (hw->mac_type >= e1000_82543) { + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER; + } + + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_82547)) + netdev->hw_features |= NETIF_F_TSO; + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->features |= netdev->hw_features; + netdev->hw_features |= (NETIF_F_RXCSUM | + NETIF_F_RXALL | + NETIF_F_RXFCS); + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + netdev->vlan_features |= (NETIF_F_TSO | + NETIF_F_HW_CSUM | + NETIF_F_SG); + + /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */ + if (hw->device_id != E1000_DEV_ID_82545EM_COPPER || + hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE) + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* MTU range: 46 - 16110 */ + netdev->min_mtu = ETH_ZLEN - ETH_HLEN; + netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); + + adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); + + /* initialize eeprom parameters */ + if (e1000_init_eeprom_params(hw)) { + e_err(probe, "EEPROM initialization failed\n"); + goto err_eeprom; + } + + /* before reading the EEPROM, reset the controller to + * put the device in a known good starting state + */ + + e1000_reset_hw(hw); + + /* make sure the EEPROM is good */ + if (e1000_validate_eeprom_checksum(hw) < 0) { + e_err(probe, "The EEPROM Checksum Is Not Valid\n"); + e1000_dump_eeprom(adapter); + /* set MAC address to all zeroes to invalidate and temporary + * disable this device for the user. 
This blocks regular + * traffic while still permitting ethtool ioctls from reaching + * the hardware as well as allowing the user to run the + * interface after manually setting a hw addr using + * `ip set address` + */ + memset(hw->mac_addr, 0, netdev->addr_len); + } else { + /* copy the MAC address out of the EEPROM */ + if (e1000_read_mac_addr(hw)) + e_err(probe, "EEPROM Read Error\n"); + } + /* don't block initialization here due to bad MAC address */ + memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) + e_err(probe, "Invalid MAC Address\n"); + + + INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog); + INIT_DELAYED_WORK(&adapter->fifo_stall_task, + e1000_82547_tx_fifo_stall_task); + INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); + INIT_WORK(&adapter->reset_task, e1000_reset_task); + + e1000_check_options(adapter); + + /* Initial Wake on LAN setting + * If APM wake is enabled in the EEPROM, + * enable the ACPI Magic Packet filter + */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + break; + case e1000_82544: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_82544_APM; + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + break; + } + fallthrough; + default: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + break; + } + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82546GB_PCIE: + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->eeprom_wol = 0; + else + adapter->quad_port_a = true; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + } + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* Auto detect PHY address */ + if (hw->mac_type == e1000_ce4100) { + for (i = 0; i < 32; i++) { + hw->phy_addr = i; + e1000_read_phy_reg(hw, PHY_ID2, &tmp); + + if (tmp != 0 && tmp != 0xFF) + break; + } + + if (i >= 32) + goto err_eeprom; + } + + /* reset the hardware with the new settings */ + e1000_reset(adapter); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + e1000_vlan_filter_on_off(adapter, false); + + /* print bus type/speed/width info */ + e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", + ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), + ((hw->bus_speed == e1000_bus_speed_133) ? 133 : + (hw->bus_speed == e1000_bus_speed_120) ? 120 : + (hw->bus_speed == e1000_bus_speed_100) ? 100 : + (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33), + ((hw->bus_width == e1000_bus_width_64) ? 
64 : 32), + netdev->dev_addr); + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); + + cards_found++; + return 0; + +err_register: +err_eeprom: + e1000_phy_hw_reset(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_dma: +err_sw_init: +err_mdio_ioremap: + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); +err_ioremap: + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, bars); +err_pci_reg: + if (!adapter || disable_dev) + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. That could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool disable_dev; + + e1000_down_and_stop(adapter); + e1000_release_manageability(adapter); + + unregister_netdev(netdev); + + e1000_phy_hw_reset(hw); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + if (hw->mac_type == e1000_ce4100) + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_selected_regions(pdev, adapter->bars); + + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); + + if (disable_dev) + pci_disable_device(pdev); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * e1000_init_hw_struct MUST be called before this function + **/ +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + + if (e1000_alloc_queues(adapter)) { + e_err(probe, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + e1000_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__E1000_DOWN, &adapter->flags); + + return 0; +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. + **/ +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct e1000_tx_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct e1000_rx_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). 
At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog task is started, + * and the stack is notified that the interface is ready. + **/ +int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->flags)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + e1000_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { + e1000_update_mng_vlan(adapter); + } + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as e1000_up() */ + clear_bit(__E1000_DOWN, &adapter->flags); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_start_queue(netdev); + + /* fire a link status change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + + return E1000_SUCCESS; + +err_req_irq: + e1000_power_down_phy(adapter); + e1000_free_all_rx_resources(adapter); +err_setup_rx: + e1000_free_all_tx_resources(adapter); +err_setup_tx: + e1000_reset(adapter); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ **/ +int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int count = E1000_CHECK_RESET_COUNT; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(count < 0); + + /* signal that we're down so that the reset task will no longer run */ + set_bit(__E1000_DOWN, &adapter->flags); + clear_bit(__E1000_RESETTING, &adapter->flags); + + e1000_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); + + e1000_free_all_tx_resources(adapter); + e1000_free_all_rx_resources(adapter); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + } + + return 0; +} + +/** + * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary + * @adapter: address of board private structure + * @start: address of beginning of memory + * @len: length of memory + **/ +static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, + unsigned long len) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long begin = (unsigned long)start; + unsigned long end = begin + len; + + /* First rev 82545 and 82546 need to not allow any memory + * write location to cross 64k boundary due to errata 23 + */ + if (hw->mac_type == e1000_82545 || + hw->mac_type == e1000_ce4100 || + hw->mac_type == e1000_82546) { + return ((begin ^ (end - 1)) >> 16) == 0; + } + + return true; +} + +/** + * e1000_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @txdr: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct e1000_tx_buffer) * txdr->count; + txdr->buffer_info = vzalloc(size); + if (!txdr->buffer_info) + return -ENOMEM; + + /* round up to nearest 4K */ + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { +setup_tx_desc_die: + vfree(txdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + void *olddesc = txdr->desc; + dma_addr_t olddma = txdr->dma; + e_err(tx_err, "txdr align check failed: %u bytes at %p\n", + txdr->size, txdr->desc); + /* Try again, without freeing the previous */ + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, + &txdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + goto setup_tx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory " + "for the transmit descriptor ring\n"); + vfree(txdr->buffer_info); + return -ENOMEM; + } else { + /* Free old allocation, new allocation 
was successful */ + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + } + } + memset(txdr->desc, 0, txdr->size); + + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + + return 0; +} + +/** + * e1000_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_tx_resources(adapter, + &adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + u64 tdba; + struct e1000_hw *hw = &adapter->hw; + u32 tdlen, tctl, tipg; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + + switch (adapter->num_tx_queues) { + case 1: + default: + tdba = adapter->tx_ring[0].dma; + tdlen = adapter->tx_ring[0].count * + sizeof(struct e1000_tx_desc); + ew32(TDLEN, tdlen); + ew32(TDBAH, (tdba >> 32)); + ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); + ew32(TDT, 0); + ew32(TDH, 0); + adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? + E1000_TDH : E1000_82542_TDH); + adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? + E1000_TDT : E1000_82542_TDT); + break; + } + + /* Set the default values for the Tx Inter Packet Gap timer */ + if ((hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes)) + tipg = DEFAULT_82543_TIPG_IPGT_FIBER; + else + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + tipg = DEFAULT_82542_TIPG_IPGT; + ipgr1 = DEFAULT_82542_TIPG_IPGR1; + ipgr2 = DEFAULT_82542_TIPG_IPGR2; + break; + default: + ipgr1 = DEFAULT_82543_TIPG_IPGR1; + ipgr2 = DEFAULT_82543_TIPG_IPGR2; + break; + } + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + + ew32(TIDV, adapter->tx_int_delay); + if (hw->mac_type >= e1000_82540) + ew32(TADV, adapter->tx_abs_int_delay); + + /* Program the Transmit Control Register */ + + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + e1000_config_collision_dist(hw); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + if (hw->mac_type < e1000_82543) + adapter->txd_cmd |= E1000_TXD_CMD_RPS; + else + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + /* Cache if we're 82544 running in PCI-X because we'll + * need this to apply a workaround later in the send path. 
+ */ + if (hw->mac_type == e1000_82544 && + hw->bus_type == e1000_bus_type_pcix) + adapter->pcix_82544 = true; + + ew32(TCTL, tctl); + +} + +/** + * e1000_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rxdr: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct e1000_rx_buffer) * rxdr->count; + rxdr->buffer_info = vzalloc(size); + if (!rxdr->buffer_info) + return -ENOMEM; + + desc_len = sizeof(struct e1000_rx_desc); + + /* Round up to nearest 4K */ + + rxdr->size = rxdr->count * desc_len; + rxdr->size = ALIGN(rxdr->size, 4096); + + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { +setup_rx_desc_die: + vfree(rxdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + void *olddesc = rxdr->desc; + dma_addr_t olddma = rxdr->dma; + e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", + rxdr->size, rxdr->desc); + /* Try again, without freeing the previous */ + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, + &rxdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + goto setup_rx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory for " + "the Rx descriptor ring\n"); + goto setup_rx_desc_die; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + } + } + memset(rxdr->desc, 0, rxdr->size); + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + rxdr->rx_skb_top = NULL; + + return 0; +} + +/** + * e1000_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_rx_resources(adapter, + &adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = er32(RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + + rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + + if (hw->tbi_compatibility_on == 1) + rctl |= E1000_RCTL_SBP; + else + rctl &= ~E1000_RCTL_SBP; + + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case E1000_RXBUFFER_2048: + default: + 
rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + ew32(RCTL, rctl); +} + +/** + * e1000_configure_rx - Configure 8254x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + u64 rdba; + struct e1000_hw *hw = &adapter->hw; + u32 rdlen, rctl, rxcsum; + + if (adapter->netdev->mtu > ETH_DATA_LEN) { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + if (hw->mac_type >= e1000_82540) { + ew32(RADV, adapter->rx_abs_int_delay); + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + } + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + switch (adapter->num_rx_queues) { + case 1: + default: + rdba = adapter->rx_ring[0].dma; + ew32(RDLEN, rdlen); + ew32(RDBAH, (rdba >> 32)); + ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); + ew32(RDT, 0); + ew32(RDH, 0); + adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? + E1000_RDH : E1000_82542_RDH); + adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 
+ E1000_RDT : E1000_82542_RDT); + break; + } + + /* Enable 82543 Receive Checksum Offload for TCP and UDP */ + if (hw->mac_type >= e1000_82543) { + rxcsum = er32(RXCSUM); + if (adapter->rx_csum) + rxcsum |= E1000_RXCSUM_TUOFL; + else + /* don't need to clear IPPCSE as it defaults to 0 */ + rxcsum &= ~E1000_RXCSUM_TUOFL; + ew32(RXCSUM, rxcsum); + } + + /* Enable Receives */ + ew32(RCTL, rctl | E1000_RCTL_EN); +} + +/** + * e1000_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * e1000_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void e1000_free_all_tx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); +} + +static void +e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_tx_buffer *buffer_info) +{ + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; + /* buffer_info must be completely set up in the transmit path */ +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + netdev_reset_queue(adapter->netdev); + size = sizeof(struct e1000_tx_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->last_tx_tso = false; + + writel(0, hw->hw_addr + tx_ring->tdh); + writel(0, hw->hw_addr + tx_ring->tdt); +} + +/** + * e1000_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * e1000_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_rx_ring(adapter, rx_ring); + 
+ vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * e1000_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +void e1000_free_all_rx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) +static unsigned int e1000_frag_len(const struct e1000_adapter *a) +{ + return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static void *e1000_alloc_frag(const struct e1000_adapter *a) +{ + unsigned int len = e1000_frag_len(a); + u8 *data = netdev_alloc_frag(len); + + if (likely(data)) + data += E1000_HEADROOM; + return data; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_rx_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx netfrags */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (adapter->clean_rx == e1000_clean_rx_irq) { + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.data) { + skb_free_frag(buffer_info->rxbuf.data); + buffer_info->rxbuf.data = NULL; + } + } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { + if (buffer_info->dma) + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.page) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + } + } + + buffer_info->dma = 0; + } + + /* there also may be some cached data from a chained receive */ + napi_free_frags(&adapter->napi); + rx_ring->rx_skb_top = NULL; + + size = sizeof(struct e1000_rx_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, hw->hw_addr + rx_ring->rdh); + writel(0, hw->hw_addr + rx_ring->rdt); +} + +/** + * e1000_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +/* The 82542 2.0 (revision 2) needs to have the receive unit in reset + * and memory write and invalidate disabled for certain operations + */ +static void e1000_enter_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + e1000_pci_clear_mwi(hw); + + rctl = er32(RCTL); + rctl |= E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (netif_running(netdev)) + e1000_clean_all_rx_rings(adapter); +} + +static void e1000_leave_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + rctl = 
er32(RCTL); + rctl &= ~E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + + if (netif_running(netdev)) { + /* No need to loop, because 82542 supports only 1 queue */ + struct e1000_rx_ring *ring = &adapter->rx_ring[0]; + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); + } +} + +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); + + e1000_rar_set(hw, hw->mac_addr, 0); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + return 0; +} + +/** + * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void e1000_set_rx_mode(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + bool use_uc = false; + u32 rctl; + u32 hash_value; + int i, rar_entries = E1000_RAR_ENTRIES; + int mta_reg_count = E1000_NUM_MTA_REGISTERS; + u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); + + if (!mcarray) + return; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = er32(RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + rctl &= ~E1000_RCTL_VFE; + } else { + if (netdev->flags & IFF_ALLMULTI) + rctl |= E1000_RCTL_MPE; + else + rctl &= ~E1000_RCTL_MPE; + /* Enable VLAN filter if there is a VLAN */ + if (e1000_vlan_used(adapter)) + rctl |= E1000_RCTL_VFE; + } + + if (netdev_uc_count(netdev) > rar_entries - 1) { + rctl |= E1000_RCTL_UPE; + } else if (!(netdev->flags & IFF_PROMISC)) { + rctl &= ~E1000_RCTL_UPE; + use_uc = true; + } + + ew32(RCTL, rctl); + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + /* load the first 14 addresses into the exact filters 1-14. Unicast + * addresses take precedence to avoid disabling unicast filtering + * when possible. 
+ * + * RAR 0 is used for the station MAC address + * if there are not 14 addresses, go ahead and clear the filters + */ + i = 1; + if (use_uc) + netdev_for_each_uc_addr(ha, netdev) { + if (i == rar_entries) + break; + e1000_rar_set(hw, ha->addr, i++); + } + + netdev_for_each_mc_addr(ha, netdev) { + if (i == rar_entries) { + /* load any remaining addresses into the hash table */ + u32 hash_reg, hash_bit, mta; + hash_value = e1000_hash_mc_addr(hw, ha->addr); + hash_reg = (hash_value >> 5) & 0x7F; + hash_bit = hash_value & 0x1F; + mta = (1 << hash_bit); + mcarray[hash_reg] |= mta; + } else { + e1000_rar_set(hw, ha->addr, i++); + } + } + + for (; i < rar_entries; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); + E1000_WRITE_FLUSH(); + } + + /* write the hash table completely, write from bottom to avoid + * both stupid write combining chipsets, and flushing each write + */ + for (i = mta_reg_count - 1; i >= 0 ; i--) { + /* If we are on an 82544 has an errata where writing odd + * offsets overwrites the previous even offset, but writing + * backwards over the range solves the issue by always + * writing the odd offset first + */ + E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); + } + E1000_WRITE_FLUSH(); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + kfree(mcarray); +} + +/** + * e1000_update_phy_info_task - get phy info + * @work: work struct contained inside adapter struct + * + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void e1000_update_phy_info_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + phy_info_task.work); + + e1000_phy_get_info(&adapter->hw, &adapter->phy_info); +} + +/** + * e1000_82547_tx_fifo_stall_task - task to complete work + * @work: work struct contained inside adapter struct + **/ +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + fifo_stall_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 tctl; + + if (atomic_read(&adapter->tx_fifo_stall)) { + if ((er32(TDT) == er32(TDH)) && + (er32(TDFT) == er32(TDFH)) && + (er32(TDFTS) == er32(TDFHS))) { + tctl = er32(TCTL); + ew32(TCTL, tctl & ~E1000_TCTL_EN); + ew32(TDFT, adapter->tx_head_addr); + ew32(TDFH, adapter->tx_head_addr); + ew32(TDFTS, adapter->tx_head_addr); + ew32(TDFHS, adapter->tx_head_addr); + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); + + adapter->tx_fifo_head = 0; + atomic_set(&adapter->tx_fifo_stall, 0); + netif_wake_queue(netdev); + } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } + } +} + +bool e1000_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or rx + * sequence error interrupt (except on intel ce4100). 
+ * get_link_status will stay false until the + * e1000_check_for_link establishes link for copper adapters + * ONLY + */ + switch (hw->media_type) { + case e1000_media_type_copper: + if (hw->mac_type == e1000_ce4100) + hw->get_link_status = 1; + if (hw->get_link_status) { + e1000_check_for_link(hw); + link_active = !hw->get_link_status; + } else { + link_active = true; + } + break; + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_active = hw->serdes_has_link; + break; + default: + break; + } + + return link_active; +} + +/** + * e1000_watchdog - work function + * @work: work struct contained inside adapter struct + **/ +static void e1000_watchdog(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + watchdog_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_ring *txdr = adapter->tx_ring; + u32 link, tctl; + + link = e1000_has_link(adapter); + if ((netif_carrier_ok(netdev)) && link) + goto link_up; + + if (link) { + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + /* update snapshot of PHY registers on LSC */ + e1000_get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = er32(CTRL); + pr_info("%s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & + E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & + E1000_CTRL_RFCE) ? "RX" : ((ctrl & + E1000_CTRL_TFCE) ? "TX" : "None"))); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + /* maybe add some timeout factor ? */ + break; + } + + /* enable transmits in the hardware */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + netif_carrier_on(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + adapter->smartspeed = 0; + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + pr_info("%s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + } + + e1000_smartspeed(adapter); + } + +link_up: + e1000_update_stats(adapter); + + hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + hw->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; + adapter->gorcl_old = adapter->stats.gorcl; + adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; + adapter->gotcl_old = adapter->stats.gotcl; + + e1000_update_adaptive(hw); + + if (!netif_carrier_ok(netdev)) { + if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
+ */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* exit immediately since reset is imminent */ + return; + } + } + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { + /* Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; + u32 dif = (adapter->gotcl > adapter->gorcl ? + adapter->gotcl - adapter->gorcl : + adapter->gorcl - adapter->gotcl) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + ew32(ITR, 1000000000 / (itr * 256)); + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + ew32(ICS, E1000_ICS_RXDMT0); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = true; + + /* Reschedule the task */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see e1000_param.c) + **/ +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, int bytes) +{ + unsigned int retval = itr_setting; + struct e1000_hw *hw = &adapter->hw; + + if (unlikely(hw->mac_type < e1000_82540)) + goto update_itr_done; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* jumbo frames get bulk treatment*/ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* jumbo frames need bulk latency setting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) + retval = bulk_latency; + else if (packets <= 2 && bytes < 512) + retval = lowest_latency; + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + if (unlikely(hw->mac_type < e1000_82540)) + return; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (unlikely(adapter->link_speed != SPEED_1000)) { + new_itr = 4000; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + ew32(ITR, 1000000000 / (new_itr * 256)); + } +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_NO_FCS 0x00000010 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + + if (skb_is_gso(skb)) { + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_is_gso_v6(skb)) { + tcp_v6_gso_csum_prep(skb); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (++i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn(drv, "checksum_partial proto=%x!\n", + skb->protocol); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + 
context_desc->upper_setup.tcp_fields.tucse = 0;
+ context_desc->tcp_seg_setup.data = 0;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ if (unlikely(++i == tx_ring->count))
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ return true;
+}
+
+#define E1000_MAX_TXD_PWR 12
+#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring,
+ struct sk_buff *skb, unsigned int first,
+ unsigned int max_per_txd, unsigned int nr_frags,
+ unsigned int mss)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_tx_buffer *buffer_info;
+ unsigned int len = skb_headlen(skb);
+ unsigned int offset = 0, size, count = 0, i;
+ unsigned int f, bytecount, segs;
+
+ i = tx_ring->next_to_use;
+
+ while (len) {
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+ /* Workaround for Controller erratum --
+ * descriptor for non-tso packet in a linear SKB that follows a
+ * tso gets written back prematurely before the data is fully
+ * DMA'd to the controller
+ */
+ if (!skb->data_len && tx_ring->last_tx_tso &&
+ !skb_is_gso(skb)) {
+ tx_ring->last_tx_tso = false;
+ size -= 4;
+ }
+
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc
+ */
+ if (unlikely(mss && !nr_frags && size == len && size > 8))
+ size -= 4;
+ /* work-around for errata 10 and it applies
+ * to all controllers in PCI-X mode
+ * The fix is to make sure that the first descriptor of a
+ * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
+ */
+ if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+ (size > 2015) && count == 0))
+ size = 2015;
+
+ /* Workaround for potential 82544 hang in PCI-X. Avoid
+ * terminating buffers within evenly-aligned dwords.
+ */
+ if (unlikely(adapter->pcix_82544 &&
+ !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+ size > 4))
+ size -= 4;
+
+ buffer_info->length = size;
+ /* set time_stamp *before* dma to help avoid a possible race */
+ buffer_info->time_stamp = jiffies;
+ buffer_info->mapped_as_page = false;
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+ buffer_info->next_to_watch = i;
+
+ len -= size;
+ offset += size;
+ count++;
+ if (len) {
+ i++;
+ if (unlikely(i == tx_ring->count))
+ i = 0;
+ }
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+ len = skb_frag_size(frag);
+ offset = 0;
+
+ while (len) {
+ unsigned long bufend;
+ i++;
+ if (unlikely(i == tx_ring->count))
+ i = 0;
+
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc
+ */
+ if (unlikely(mss && f == (nr_frags-1) &&
+ size == len && size > 8))
+ size -= 4;
+ /* Workaround for potential 82544 hang in PCI-X.
+ * Avoid terminating buffers within evenly-aligned
+ * dwords. 
+ */ + bufend = (unsigned long) + page_to_phys(skb_frag_page(frag)); + bufend += offset + size - 1; + if (unlikely(adapter->pcix_82544 && + !(bufend & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = true; + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, + offset, size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + } + } + + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i == 0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + return 0; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, int tx_flags, + int count) +{ + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_tx_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + txd_lower &= ~(E1000_TXD_CMD_IFCS); + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + if (unlikely(++i == tx_ring->count)) + i = 0; + } + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + + tx_ring->next_to_use = i; +} + +/* 82547 workaround to avoid controller hang in half-duplex environment. + * The workaround is to avoid queuing a large packet that would span + * the internal Tx FIFO ring boundary by notifying the stack to resend + * the packet at a later time. This gives the Tx FIFO an opportunity to + * flush all packets. When that occurs, we reset the Tx FIFO pointers + * to the beginning of the Tx FIFO. 
+ */ + +#define E1000_FIFO_HDR 0x10 +#define E1000_82547_PAD_LEN 0x3E0 + +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; + u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; + + skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); + + if (adapter->link_duplex != HALF_DUPLEX) + goto no_fifo_stall_required; + + if (atomic_read(&adapter->tx_fifo_stall)) + return 1; + + if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { + atomic_set(&adapter->tx_fifo_stall, 1); + return 1; + } + +no_fifo_stall_required: + adapter->tx_fifo_head += skb_fifo_len; + if (adapter->tx_fifo_head >= adapter->tx_fifo_size) + adapter->tx_fifo_head -= adapter->tx_fifo_size; + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + + netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(E1000_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, + struct e1000_tx_ring *tx_ring, int size) +{ + if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __e1000_maybe_stop_tx(netdev, size); +} + +#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X)) +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *tx_ring; + unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + __be16 protocol = vlan_get_protocol(skb); + + /* This goes back to the question of how to logically map a Tx queue + * to a flow. Right now, performance is impacted slightly negatively + * if using multiple Tx queues. If the stack breaks away from a + * single qdisc implementation, we can look at this again. + */ + tx_ring = adapter->tx_ring; + + /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, + * packets may get corrupted during padding by HW. + * To WA this issue, pad all small packets manually. + */ + if (eth_skb_pad(skb)) + return NETDEV_TX_OK; + + mss = skb_shinfo(skb)->gso_size; + /* The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. 
+ */ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && hdr_len == len) { + switch (hw->mac_type) { + case e1000_82544: { + unsigned int pull_size; + + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements + * NOTE: this is a TSO only workaround + * if end byte alignment not correct move us + * into the next dword + */ + if ((unsigned long)(skb_tail_pointer(skb) - 1) + & 4) + break; + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err(drv, "__pskb_pull_tail " + "failed.\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + break; + } + default: + /* do nothing */ + break; + } + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + /* Controller Erratum workaround */ + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) + count++; + + count += TXD_USE_COUNT(len, max_txd_pwr); + + if (adapter->pcix_82544) + count++; + + /* work-around for errata 10 and it applies to all controllers + * in PCI-X mode, so add one more descriptor to the count + */ + if (unlikely((hw->bus_type == e1000_bus_type_pcix) && + (len > 2015))) + count++; + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), + max_txd_pwr); + if (adapter->pcix_82544) + count += nr_frags; + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time + */ + if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) + return NETDEV_TX_BUSY; + + if (unlikely((hw->mac_type == e1000_82547) && + (e1000_82547_fifo_workaround(adapter, skb)))) { + netif_stop_queue(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->fifo_stall_task, 1); + return NETDEV_TX_BUSY; + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << + E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, tx_ring, skb, protocol); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (likely(tso)) { + if (likely(hw->mac_type != e1000_82544)) + tx_ring->last_tx_tso = true; + tx_flags |= E1000_TX_FLAGS_TSO; + } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) + tx_flags |= E1000_TX_FLAGS_CSUM; + + if (protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + if (unlikely(skb->no_fcs)) + tx_flags |= E1000_TX_FLAGS_NO_FCS; + + count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, + nr_frags, mss); + + if (count) { + /* The descriptors needed is higher than other Intel drivers + * due to a number of workarounds. The breakdown is below: + * Data descriptors: MAX_SKB_FRAGS + 1 + * Context Descriptor: 1 + * Keep head from touching tail: 2 + * Workarounds: 3 + */ + int desc_needed = MAX_SKB_FRAGS + 7; + + netdev_sent_queue(netdev, skb->len); + skb_tx_timestamp(skb); + + e1000_tx_queue(adapter, tx_ring, tx_flags, count); + + /* 82544 potentially requires twice as many data descriptors + * in order to guarantee buffers don't end on evenly-aligned + * dwords + */ + if (adapter->pcix_82544) + desc_needed += MAX_SKB_FRAGS + 1; + + /* Make sure there is space in the ring for the next send. 
*/ + e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); + + if (!netdev_xmit_more() || + netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { + writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); + } + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +#define NUM_REGS 38 /* 1 based count */ +static void e1000_regdump(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 regs[NUM_REGS]; + u32 *regs_buff = regs; + int i = 0; + + static const char * const reg_name[] = { + "CTRL", "STATUS", + "RCTL", "RDLEN", "RDH", "RDT", "RDTR", + "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", + "TIDV", "TXDCTL", "TADV", "TARC0", + "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", + "TXDCTL1", "TARC1", + "CTRL_EXT", "ERT", "RDBAL", "RDBAH", + "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", + "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" + }; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDBAL); + regs_buff[9] = er32(TDBAH); + regs_buff[10] = er32(TDLEN); + regs_buff[11] = er32(TDH); + regs_buff[12] = er32(TDT); + regs_buff[13] = er32(TIDV); + regs_buff[14] = er32(TXDCTL); + regs_buff[15] = er32(TADV); + regs_buff[16] = er32(TARC0); + + regs_buff[17] = er32(TDBAL1); + regs_buff[18] = er32(TDBAH1); + regs_buff[19] = er32(TDLEN1); + regs_buff[20] = er32(TDH1); + regs_buff[21] = er32(TDT1); + regs_buff[22] = er32(TXDCTL1); + regs_buff[23] = er32(TARC1); + regs_buff[24] = er32(CTRL_EXT); + regs_buff[25] = er32(ERT); + regs_buff[26] = er32(RDBAL0); + regs_buff[27] = er32(RDBAH0); + regs_buff[28] = er32(TDFH); + regs_buff[29] = er32(TDFT); + regs_buff[30] = er32(TDFHS); + regs_buff[31] = er32(TDFTS); + regs_buff[32] = er32(TDFPC); + regs_buff[33] = er32(RDFH); + regs_buff[34] = er32(RDFT); + regs_buff[35] = er32(RDFHS); + regs_buff[36] = er32(RDFTS); + regs_buff[37] = er32(RDFPC); + + pr_info("Register dump\n"); + for (i = 0; i < NUM_REGS; i++) + pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); +} + +/* + * e1000_dump: Print registers, tx ring and rx ring + */ +static void e1000_dump(struct e1000_adapter *adapter) +{ + /* this code doesn't handle multiple rings */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + + if (!netif_msg_hw(adapter)) + return; + + /* Print Registers */ + e1000_regdump(adapter); + + /* transmit dump */ + pr_info("TX Desc ring0 dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * +----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * 
+----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); + pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); + + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)tx_desc; + const char *type; + + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + type = "NTC/U"; + else if (i == tx_ring->next_to_use) + type = "NTU"; + else if (i == tx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", + ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i, + le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->length, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, buffer_info->skb, type); + } + +rx_ring_summary: + /* receive dump */ + pr_info("\nRX Desc ring dump\n"); + + /* Legacy Receive Descriptor Format + * + * +-----------------------------------------------------+ + * | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * | VLAN Tag | Errors | Status 0 | Packet csum | Length | + * +-----------------------------------------------------+ + * 63 48 47 40 39 32 31 16 15 0 + */ + pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); + + if (!netif_msg_rx_status(adapter)) + goto exit; + + for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)rx_desc; + const char *type; + + if (i == rx_ring->next_to_use) + type = "NTU"; + else if (i == rx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", + i, le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->rxbuf.data, type); + } /* for */ + + /* dump the descriptor caches */ + /* rx */ + pr_info("Rx descriptor cache in 64bit format\n"); + for (i = 0x6000; i <= 0x63FF ; i += 0x10) { + pr_info("R%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } + /* tx */ + pr_info("Tx descriptor cache in 64bit format\n"); + for (i = 0x7000; i <= 0x73FF ; i += 0x10) { + pr_info("T%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } +exit: + return; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: number of the Tx queue that hung (unused) + 
**/ +static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = + container_of(work, struct e1000_adapter, reset_task); + + e_err(drv, "Reset adapter\n"); + e1000_reinit_locked(adapter); +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Adapter-specific max frame size limits. */ + switch (hw->mac_type) { + case e1000_undefined ... e1000_82542_rev2_1: + if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { + e_err(probe, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + break; + default: + /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ + break; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + /* e1000_down has a dependency on max_frame_size */ + hw->max_frame_size = max_frame; + if (netif_running(netdev)) { + /* prevent buffers from being reallocated */ + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; + e1000_down(adapter); + } + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. RXBUFFER_2048 --> size-4096 slab + * however with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= E1000_RXBUFFER_2048) + adapter->rx_buffer_len = E1000_RXBUFFER_2048; + else +#if (PAGE_SIZE >= E1000_RXBUFFER_16384) + adapter->rx_buffer_len = E1000_RXBUFFER_16384; +#elif (PAGE_SIZE >= E1000_RXBUFFER_4096) + adapter->rx_buffer_len = PAGE_SIZE; +#endif + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if (!hw->tbi_compatibility_on && + ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || + (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + e1000_up(adapter); + else + e1000_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->flags); + + return 0; +} + +/** + * e1000_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void e1000_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned long flags; + u16 phy_tmp; + +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + spin_lock_irqsave(&adapter->stats_lock, flags); + + /* these counters are modified from e1000_tbi_adjust_stats, + * called from the interrupt context, so they must only + * be written while holding adapter->stats_lock + */ + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorcl += er32(GORCL); + adapter->stats.gorch += er32(GORCH); + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.prc64 += er32(PRC64); + adapter->stats.prc127 += er32(PRC127); + adapter->stats.prc255 += er32(PRC255); + adapter->stats.prc511 += er32(PRC511); + adapter->stats.prc1023 += er32(PRC1023); + adapter->stats.prc1522 += er32(PRC1522); + + adapter->stats.symerrs += er32(SYMERRS); + adapter->stats.mpc += er32(MPC); + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + adapter->stats.sec += er32(SEC); + adapter->stats.rlec += er32(RLEC); + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.fcruc += er32(FCRUC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotcl += er32(GOTCL); + adapter->stats.gotch += er32(GOTCH); + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + adapter->stats.rfc += er32(RFC); + adapter->stats.rjc += er32(RJC); + adapter->stats.torl += er32(TORL); + adapter->stats.torh += er32(TORH); + adapter->stats.totl += er32(TOTL); + adapter->stats.toth += er32(TOTH); + adapter->stats.tpr += er32(TPR); + + adapter->stats.ptc64 += er32(PTC64); + adapter->stats.ptc127 += er32(PTC127); + adapter->stats.ptc255 += er32(PTC255); + adapter->stats.ptc511 += er32(PTC511); + adapter->stats.ptc1023 += er32(PTC1023); + adapter->stats.ptc1522 += er32(PTC1522); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* used for adaptive IFS */ + + hw->tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->tx_packet_delta; + hw->collision_delta = er32(COLC); + adapter->stats.colc += hw->collision_delta; + + if (hw->mac_type >= e1000_82543) { + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.tncrs += er32(TNCRS); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + } + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; + netdev->stats.rx_length_errors = adapter->stats.rlerrc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; + netdev->stats.tx_errors = adapter->stats.txerrc; + netdev->stats.tx_aborted_errors = 
adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + if (hw->bad_tx_carr_stats_fd && + adapter->link_duplex == FULL_DUPLEX) { + netdev->stats.tx_carrier_errors = 0; + adapter->stats.tncrs = 0; + } + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Phy Stats */ + if (hw->media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && + (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + + if ((hw->mac_type <= e1000_82546) && + (hw->phy_type == e1000_phy_m88) && + !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) + adapter->phy_stats.receive_errors += phy_tmp; + } + + /* Management Stats */ + if (hw->has_smbus) { + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + } + + spin_unlock_irqrestore(&adapter->stats_lock, flags); +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (unlikely((!icr))) + return IRQ_NONE; /* Not our interrupt */ + + /* we might have caused the interrupt, but the above + * read cleared it, and just in case the driver is + * down there is nothing to do so return handled + */ + if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) + return IRQ_HANDLED; + + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { + hw->get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 1); + } + + /* disable interrupts, without the synchronize_irq bit */ + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + + if (likely(napi_schedule_prep(&adapter->napi))) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } else { + /* this really should not happen! 
if it does it is basically a + * bug, but not a hard error, so enable ints and continue + */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return IRQ_HANDLED; +} + +/** + * e1000_clean - NAPI Rx polling callback + * @napi: napi struct containing references to driver info + * @budget: budget given to driver for receive packets + **/ +static int e1000_clean(struct napi_struct *napi, int budget) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); + int tx_clean_complete = 0, work_done = 0; + + tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); + + adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); + + if (!tx_clean_complete || work_done == budget) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) { + if (likely(adapter->itr_setting & 3)) + e1000_set_itr(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return work_done; +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * @tx_ring: ring to clean + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; + unsigned int bytes_compl = 0, pkts_compl = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + dma_rmb(); /* read buffer_info after eop_desc */ + for ( ; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + + } + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + tx_desc->upper.data = 0; + + if (unlikely(++i == tx_ring->count)) + i = 0; + } + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, + * which will reuse the cleaned buffers. + */ + smp_store_release(&tx_ring->next_to_clean, i); + + netdev_completed_queue(netdev, pkts_compl, bytes_compl); + +#define TX_WAKE_THRESHOLD 32 + if (unlikely(count && netif_carrier_ok(netdev) && + E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
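+ * The smp_mb() below makes the updated next_to_clean visible before netif_queue_stopped() is checked, so a concurrent queue stop is not missed.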
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->flags))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[eop].time_stamp && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%lu>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + (unsigned long)(tx_ring - adapter->tx_ring), + readl(hw->hw_addr + tx_ring->tdh), + readl(hw->hw_addr + tx_ring->tdt), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); + e1000_dump(adapter); + netif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + netdev->stats.tx_bytes += total_tx_bytes; + netdev->stats.tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_rx_checksum - Receive Checksum Offload for 82543 + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @skb: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + skb_checksum_none_assert(skb); + + /* 82543 or newer only */ + if (unlikely(hw->mac_type < e1000_82543)) + return; + /* Ignore Checksum bit is set */ + if (unlikely(status & E1000_RXD_STAT_IXSM)) + return; + /* TCP/UDP checksum error bit is set */ + if (unlikely(errors & E1000_RXD_ERR_TCPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + /* TCP/UDP Checksum has not been calculated */ + if (!(status & E1000_RXD_STAT_TCPCS)) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + if (likely(status & E1000_RXD_STAT_TCPCS)) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + adapter->hw_csum_good++; +} + +/** + * e1000_consume_page - helper function for jumbo Rx path + * @bi: software descriptor shadow data + * @skb: skb being modified + * @length: length of data being added + **/ +static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->rxbuf.page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; +} + +/** + * e1000_receive_skb - helper function to handle rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + */ +static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, + __le16 vlan, struct sk_buff *skb) +{ + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (status & E1000_RXD_STAT_VP) { + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + 
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + napi_gro_receive(&adapter->napi, skb); +} + +/** + * e1000_tbi_adjust_stats + * @hw: Struct containing variables accessed by shared code + * @stats: point to stats struct + * @frame_len: The length of the frame in question + * @mac_addr: The Ethernet destination address of the frame in question + * + * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT + */ +static void e1000_tbi_adjust_stats(struct e1000_hw *hw, + struct e1000_hw_stats *stats, + u32 frame_len, const u8 *mac_addr) +{ + u64 carry_bit; + + /* First adjust the frame length. */ + frame_len--; + /* We need to adjust the statistics counters, since the hardware + * counters overcount this packet as a CRC error and undercount + * the packet as a good packet + */ + /* This packet should not be counted as a CRC error. */ + stats->crcerrs--; + /* This packet does count as a Good Packet Received. */ + stats->gprc++; + + /* Adjust the Good Octets received counters */ + carry_bit = 0x80000000 & stats->gorcl; + stats->gorcl += frame_len; + /* If the high bit of Gorcl (the low 32 bits of the Good Octets + * Received Count) was one before the addition, + * AND it is zero after, then we lost the carry out, + * need to add one to Gorch (Good Octets Received Count High). + * This could be simplified if all environments supported + * 64-bit integers. + */ + if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) + stats->gorch++; + /* Is this a broadcast or multicast? Check broadcast first, + * since the test for a multicast frame will test positive on + * a broadcast frame. + */ + if (is_broadcast_ether_addr(mac_addr)) + stats->bprc++; + else if (is_multicast_ether_addr(mac_addr)) + stats->mprc++; + + if (frame_len == hw->max_frame_size) { + /* In this case, the hardware has overcounted the number of + * oversize frames. + */ + if (stats->roc > 0) + stats->roc--; + } + + /* Adjust the bin counters when the extra byte put the frame in the + * wrong bin. Remember that the frame_len was adjusted above. 
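+ * For example, an adjusted frame_len of exactly 64 means the hardware counted the frame in the 65..127 bin, so prc64 is incremented and prc127 decremented below.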
+ */ + if (frame_len == 64) { + stats->prc64++; + stats->prc127--; + } else if (frame_len == 127) { + stats->prc127++; + stats->prc255--; + } else if (frame_len == 255) { + stats->prc255++; + stats->prc511--; + } else if (frame_len == 511) { + stats->prc511++; + stats->prc1023--; + } else if (frame_len == 1023) { + stats->prc1023++; + stats->prc1522--; + } else if (frame_len == 1522) { + stats->prc1522++; + } +} + +static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, + u8 status, u8 errors, + u32 length, const u8 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u8 last_byte = *(data + length - 1); + + if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { + unsigned long irq_flags; + + spin_lock_irqsave(&adapter->stats_lock, irq_flags); + e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); + + return true; + } + + return false; +} + +static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, + unsigned int bufsz) +{ + struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); + + if (unlikely(!skb)) + adapter->alloc_rx_buff_failed++; + return skb; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + * @work_to_do: max amount of work allowed for this call to do + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + */ +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + + if (++i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + u8 *mapped = page_address(buffer_info->rxbuf.page); + + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, mapped)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + /* an error means any chain goes out the window + * too + */ + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + } + +#define rxtop rx_ring->rx_skb_top +process_skb: + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = napi_get_frags(&adapter->napi); + if (!rxtop) + 
break; + + skb_fill_page_desc(rxtop, 0, + buffer_info->rxbuf.page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + struct page *p; + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page + */ + p = buffer_info->rxbuf.page; + if (length <= copybreak) { + u8 *vaddr; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + length -= 4; + skb = e1000_alloc_rx_skb(adapter, + length); + if (!skb) + break; + + vaddr = kmap_atomic(p); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr); + /* re-use the page, so don't erase + * buffer_info->rxbuf.page + */ + skb_put(skb, length); + e1000_rx_checksum(adapter, + status | rx_desc->errors << 24, + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_receive_skb(adapter, status, + rx_desc->special, skb); + goto next_desc; + } else { + skb = napi_get_frags(&adapter->napi); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + skb_fill_page_desc(skb, 0, p, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += (skb->len - 4); /* don't count FCS */ + if (likely(!(netdev->features & NETIF_F_RXFCS))) + pskb_trim(skb, skb->len - 4); + total_rx_packets++; + + if (status & E1000_RXD_STAT_VP) { + __le16 vlan = rx_desc->special; + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + napi_gro_frags(&adapter->napi); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/* this should improve performance for small packets with large amounts + * of reassembly being done in the stack + */ +static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, + struct e1000_rx_buffer *buffer_info, + u32 length, const void *data) +{ + struct sk_buff *skb; + + if (length > copybreak) + return NULL; + + skb = e1000_alloc_rx_skb(adapter, length); + if (!skb) + return NULL; + + dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, + length, DMA_FROM_DEVICE); + + skb_put_data(skb, data, length); + + return skb; +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + 
* @work_to_do: max amount of work allowed for this call to do + */ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 *data; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + length = le16_to_cpu(rx_desc->length); + + data = buffer_info->rxbuf.data; + prefetch(data); + skb = e1000_copybreak(adapter, buffer_info, length, data); + if (!skb) { + unsigned int frag_len = e1000_frag_len(adapter); + + skb = build_skb(data - E1000_HEADROOM, frag_len); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + skb_reserve(skb, E1000_HEADROOM); + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + buffer_info->rxbuf.data = NULL; + } + + if (++i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + + /* !EOP means multiple descriptors were used to store a single + * packet, if thats the case we need to toss it. In fact, we + * to toss every packet with the EOP bit clear and the next + * frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(status & E1000_RXD_STAT_EOP))) + adapter->discarding = true; + + if (adapter->discarding) { + /* All receives must fit into a single buffer */ + netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); + dev_kfree_skb(skb); + if (status & E1000_RXD_STAT_EOP) + adapter->discarding = false; + goto next_desc; + } + + if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, data)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + dev_kfree_skb(skb); + goto next_desc; + } + } + +process_skb: + total_rx_bytes += (length - 4); /* don't count FCS */ + total_rx_packets++; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + /* adjust length to remove Ethernet CRC, this must be + * done after the TBI_ACCEPT workaround above + */ + length -= 4; + + if (buffer_info->rxbuf.data == NULL) + skb_put(skb, length); + else /* copybreak skb */ + skb_trim(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + e1000_receive_skb(adapter, status, rx_desc->special, skb); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if 
(cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @adapter: address of board private structure + * @rx_ring: pointer to receive ring structure + * @cleaned_count: number of buffers to allocate this pass + **/ +static void +e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, int cleaned_count) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + /* allocate a new page if necessary */ + if (!buffer_info->rxbuf.page) { + buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->rxbuf.page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) { + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->rxbuf.page, 0, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
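+ * dma_wmb() orders the descriptor writes above against the RDT tail-pointer write that follows.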
+ */ + dma_wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + * @rx_ring: pointer to ring struct + * @cleaned_count: number of new Rx buffers to try to allocate + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + void *data; + + if (buffer_info->rxbuf.data) + goto skip; + + data = e1000_alloc_frag(adapter); + if (!data) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + void *olddata = data; + e_err(rx_err, "skb align check failed: %u bytes at " + "%p\n", bufsz, data); + /* Try again, without freeing the previous */ + data = e1000_alloc_frag(adapter); + /* Failed allocation, critical failure */ + if (!data) { + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + /* give up */ + skb_free_frag(data); + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + /* Use new allocation */ + skb_free_frag(olddata); + } + buffer_info->dma = dma_map_single(&pdev->dev, + data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + skb_free_frag(data); + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + + /* XXX if it was allocated cleanly it will never map to a + * boundary crossing + */ + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, + (void *)(unsigned long)buffer_info->dma, + adapter->rx_buffer_len)) { + e_err(rx_err, "dma align check failed: %u bytes at " + "%p\n", adapter->rx_buffer_len, + (void *)(unsigned long)buffer_info->dma); + + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + + skb_free_frag(data); + buffer_info->rxbuf.data = NULL; + buffer_info->dma = 0; + + adapter->alloc_rx_buff_failed++; + break; + } + buffer_info->rxbuf.data = data; + skip: + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + writel(i, hw->hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 
+ * @adapter: address of board private structure + **/ +static void e1000_smartspeed(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_status; + u16 phy_ctrl; + + if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || + !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) + return; + + if (adapter->smartspeed == 0) { + /* If Master/Slave config fault is asserted twice, + * we assume back-to-back + */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + if (phy_ctrl & CR_1000T_MS_ENABLE) { + phy_ctrl &= ~CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, + phy_ctrl); + adapter->smartspeed++; + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, + &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, + phy_ctrl); + } + } + return; + } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { + /* If still no link, perhaps using 2/3 pair cable */ + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + phy_ctrl |= CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); + } + } + /* Restart process after E1000_SMARTSPEED_MAX iterations */ + if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) + adapter->smartspeed = 0; +} + +/** + * e1000_ioctl - handle ioctl calls + * @netdev: pointer to our netdev + * @ifr: pointer to interface request structure + * @cmd: ioctl data + **/ +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * e1000_mii_ioctl - + * @netdev: pointer to our netdev + * @ifr: pointer to interface request structure + * @cmd: ioctl data + **/ +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct mii_ioctl_data *data = if_mii(ifr); + int retval; + u16 mii_reg; + unsigned long flags; + + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + break; + case SIOCGMIIREG: + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, + &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + break; + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) + return -EFAULT; + mii_reg = data->val_in; + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_write_phy_reg(hw, data->reg_num, + mii_reg)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + if (hw->media_type == e1000_media_type_copper) { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (mii_reg & MII_CR_AUTO_NEG_EN) { + hw->autoneg = 1; + hw->autoneg_advertised = 0x2F; + } else { + u32 speed; + if (mii_reg & 
0x40) + speed = SPEED_1000; + else if (mii_reg & 0x2000) + speed = SPEED_100; + else + speed = SPEED_10; + retval = e1000_set_spd_dplx( + adapter, speed, + ((mii_reg & 0x100) + ? DUPLEX_FULL : + DUPLEX_HALF)); + if (retval) + return retval; + } + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + case M88E1000_PHY_SPEC_CTRL: + case M88E1000_EXT_PHY_SPEC_CTRL: + if (e1000_phy_reset(hw)) + return -EIO; + break; + } + } else { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + } + } + break; + default: + return -EOPNOTSUPP; + } + return E1000_SUCCESS; +} + +void e1000_pci_set_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + int ret_val = pci_set_mwi(adapter->pdev); + + if (ret_val) + e_err(probe, "Error in setting MWI\n"); +} + +void e1000_pci_clear_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + + pci_clear_mwi(adapter->pdev); +} + +int e1000_pcix_get_mmrbc(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return pcix_get_mmrbc(adapter->pdev); +} + +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) +{ + struct e1000_adapter *adapter = hw->back; + pcix_set_mmrbc(adapter->pdev, mmrbc); +} + +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) +{ + outl(value, port); +} + +static bool e1000_vlan_used(struct e1000_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + return true; + return false; +} + +static void __e1000_vlan_mode(struct e1000_adapter *adapter, + netdev_features_t features) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = er32(CTRL); + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + ctrl |= E1000_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~E1000_CTRL_VME; + } + ew32(CTRL, ctrl); +} +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, adapter->netdev->features); + if (filter_on) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_CFIEN; + if (!(adapter->netdev->flags & IFF_PROMISC)) + rctl |= E1000_RCTL_VFE; + ew32(RCTL, rctl); + e1000_update_mng_vlan(adapter); + } else { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_VFE; + ew32(RCTL, rctl); + } + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, features); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + (vid == adapter->mng_vlan_id)) + return 0; + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, true); + + /* add VID to filter table */ + 
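+ /* Each VFTA register holds 32 VLAN bits: bits 5..11 of the VID select one of 128 registers, bits 0..4 select the bit within it. */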
index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta |= (1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + + /* remove VID from filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + clear_bit(vid, adapter->active_vlans); + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, false); + + return 0; +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + if (!e1000_vlan_used(adapter)) + return; + + e1000_vlan_filter_on_off(adapter, true); + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_hw *hw = &adapter->hw; + + hw->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 gbps Full duplex */ + if ((hw->media_type == e1000_media_type_fiber) && + spd != SPEED_1000 && + dplx != DUPLEX_FULL) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_10_half; + break; + case SPEED_10 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_10_full; + break; + case SPEED_100 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_100_half; + break; + case SPEED_100 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_100_full; + break; + case SPEED_1000 + DUPLEX_FULL: + hw->autoneg = 1; + hw->autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + hw->mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + e_err(probe, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + u32 wufc = adapter->wol; + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + } + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_rx_mode(netdev); + + rctl = er32(RCTL); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) + rctl |= E1000_RCTL_MPE; + + /* enable receives in the hardware */ + ew32(RCTL, rctl | E1000_RCTL_EN); + + if (hw->mac_type >= e1000_82540) { + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define 
E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC | + E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + } + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + ew32(WUC, E1000_WUC_PME_EN); + ew32(WUFC, wufc); + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + } + + e1000_release_manageability(adapter); + + *enable_wake = !!wufc; + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->en_mng_pt) + *enable_wake = true; + + if (netif_running(netdev)) + e1000_free_irq(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + return 0; +} + +static int __maybe_unused e1000_suspend(struct device *dev) +{ + int retval; + struct pci_dev *pdev = to_pci_dev(dev); + bool wake; + + retval = __e1000_shutdown(pdev, &wake); + device_set_wakeup_enable(dev, wake); + + return retval; +} + +static int __maybe_unused e1000_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot enable PCI device from suspend\n"); + return err; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + e1000_power_up_phy(adapter); + e1000_reset(adapter); + ew32(WUS, ~0); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) + e1000_up(adapter); + + netif_device_attach(netdev); + + return 0; +} + +static void e1000_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __e1000_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (disable_hardirq(adapter->pdev->irq)) + e1000_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e1000_down(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + /* Request a slot reset. 
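+ * The PCI error recovery core responds by invoking the driver's .slot_reset handler, e1000_io_slot_reset(), below.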
*/ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000_reset(adapter); + ew32(WUS, ~0); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) { + if (e1000_up(adapter)) { + pr_info("can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +/* e1000_main.c */ diff --git a/devices/e1000/e1000_main-5.15-ethercat.c b/devices/e1000/e1000_main-5.15-ethercat.c new file mode 100644 index 00000000..5f94a2c3 --- /dev/null +++ b/devices/e1000/e1000_main-5.15-ethercat.c @@ -0,0 +1,5473 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +#include "e1000-5.15-ethercat.h" +#include +#include +#include +#include +#include + +char e1000_driver_name[] = "ec_e1000"; +static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; +static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; + +/* e1000_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * Macro expands to... 
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + */ +static const struct pci_device_id e1000_pci_tbl[] = { + INTEL_E1000_ETHERNET_DEVICE(0x1000), + INTEL_E1000_ETHERNET_DEVICE(0x1001), + INTEL_E1000_ETHERNET_DEVICE(0x1004), + INTEL_E1000_ETHERNET_DEVICE(0x1008), + INTEL_E1000_ETHERNET_DEVICE(0x1009), + INTEL_E1000_ETHERNET_DEVICE(0x100C), + INTEL_E1000_ETHERNET_DEVICE(0x100D), + INTEL_E1000_ETHERNET_DEVICE(0x100E), + INTEL_E1000_ETHERNET_DEVICE(0x100F), + INTEL_E1000_ETHERNET_DEVICE(0x1010), + INTEL_E1000_ETHERNET_DEVICE(0x1011), + INTEL_E1000_ETHERNET_DEVICE(0x1012), + INTEL_E1000_ETHERNET_DEVICE(0x1013), + INTEL_E1000_ETHERNET_DEVICE(0x1014), + INTEL_E1000_ETHERNET_DEVICE(0x1015), + INTEL_E1000_ETHERNET_DEVICE(0x1016), + INTEL_E1000_ETHERNET_DEVICE(0x1017), + INTEL_E1000_ETHERNET_DEVICE(0x1018), + INTEL_E1000_ETHERNET_DEVICE(0x1019), + INTEL_E1000_ETHERNET_DEVICE(0x101A), + INTEL_E1000_ETHERNET_DEVICE(0x101D), + INTEL_E1000_ETHERNET_DEVICE(0x101E), + INTEL_E1000_ETHERNET_DEVICE(0x1026), + INTEL_E1000_ETHERNET_DEVICE(0x1027), + INTEL_E1000_ETHERNET_DEVICE(0x1028), + INTEL_E1000_ETHERNET_DEVICE(0x1075), + INTEL_E1000_ETHERNET_DEVICE(0x1076), + INTEL_E1000_ETHERNET_DEVICE(0x1077), + INTEL_E1000_ETHERNET_DEVICE(0x1078), + INTEL_E1000_ETHERNET_DEVICE(0x1079), + INTEL_E1000_ETHERNET_DEVICE(0x107A), + INTEL_E1000_ETHERNET_DEVICE(0x107B), + INTEL_E1000_ETHERNET_DEVICE(0x107C), + INTEL_E1000_ETHERNET_DEVICE(0x108A), + INTEL_E1000_ETHERNET_DEVICE(0x1099), + INTEL_E1000_ETHERNET_DEVICE(0x10B5), + INTEL_E1000_ETHERNET_DEVICE(0x2E6E), + /* required last entry */ + {0,} +}; + +// do not auto-load driver +// MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr); +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr); +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +void e1000_update_stats(struct e1000_adapter *adapter); + +static int e1000_init_module(void); +static void e1000_exit_module(void); +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void e1000_remove(struct pci_dev *pdev); +static int e1000_alloc_queues(struct e1000_adapter *adapter); +static int e1000_sw_init(struct e1000_adapter *adapter); +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); +static void e1000_configure_tx(struct e1000_adapter *adapter); +static void e1000_configure_rx(struct e1000_adapter *adapter); +static void e1000_setup_rctl(struct e1000_adapter *adapter); +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static void 
e1000_set_rx_mode(struct net_device *netdev); +static void e1000_update_phy_info_task(struct work_struct *work); +static void e1000_watchdog(struct work_struct *work); +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +static int e1000_change_mtu(struct net_device *netdev, int new_mtu); +static int e1000_set_mac(struct net_device *netdev, void *p); +void ec_poll(struct net_device *); +static irqreturn_t e1000_intr(int irq, void *data); +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static int e1000_clean(struct napi_struct *napi, int budget); +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ +} +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); +static void e1000_enter_82542_rst(struct e1000_adapter *adapter); +static void e1000_leave_82542_rst(struct e1000_adapter *adapter); +static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue); +static void e1000_reset_task(struct work_struct *work); +static void e1000_smartspeed(struct e1000_adapter *adapter); +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb); + +static bool e1000_vlan_used(struct e1000_adapter *adapter); +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on); +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static void e1000_restore_vlan(struct e1000_adapter *adapter); + +static int __maybe_unused e1000_suspend(struct device *dev); +static int __maybe_unused e1000_resume(struct device *dev); +static void e1000_shutdown(struct pci_dev *pdev); + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void e1000_netpoll (struct net_device *netdev); +#endif + +#define COPYBREAK_DEFAULT 256 +static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state); +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); +static void e1000_io_resume(struct pci_dev *pdev); + +static const struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume); + +static struct pci_driver e1000_driver = { + .name = e1000_driver_name, + .id_table = e1000_pci_tbl, + 
.probe = e1000_probe, + .remove = e1000_remove, + .driver = { + .pm = &e1000_pm_ops, + }, + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver, EtherCAT enabled"); +MODULE_LICENSE("GPL v2"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +/** + * e1000_get_hw_dev - helper function for getting netdev + * @hw: pointer to HW struct + * + * return device used by hardware layer to print debugging information + * + **/ +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init e1000_init_module(void) +{ + int ret; + pr_info("%s\n", e1000_driver_string); + + pr_info("%s\n", e1000_copyright); + + ret = pci_register_driver(&e1000_driver); + if (copybreak != COPYBREAK_DEFAULT) { + if (copybreak == 0) + pr_info("copybreak disabled\n"); + else + pr_info("copybreak enabled for " + "packets <= %u bytes\n", copybreak); + } + return ret; +} + +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} + +module_exit(e1000_exit_module); + +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + irq_handler_t handler = e1000_intr; + int irq_flags = IRQF_SHARED; + int err; + + if (adapter->ecdev) { + return 0; + } + + err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (err) { + e_err(probe, "Unable to allocate interrupt Error: %d\n", err); + } + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->ecdev) { + return; + } + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->ecdev) { + return; + } + + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + synchronize_irq(adapter->pdev->irq); +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->ecdev) { + return; + } + + ew32(IMS, IMS_ENABLE_MASK); + E1000_WRITE_FLUSH(); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u16 vid = hw->mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (!e1000_vlan_used(adapter)) + return; + + if (!test_bit(vid, adapter->active_vlans)) { + if (hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + if ((old_vid != 
(u16)E1000_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + old_vid); + } else { + adapter->mng_vlan_id = vid; + } +} + +static void e1000_init_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + ew32(MANC, manc); + } +} + +static void e1000_release_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + + ew32(MANC, manc); + } +} + +/** + * e1000_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + e1000_set_rx_mode(netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + /* call E1000_DESC_UNUSED which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct e1000_rx_ring *ring = &adapter->rx_ring[i]; + if (adapter->ecdev) { + /* fill rx ring completely! */ + adapter->alloc_rx_buf(adapter, ring, ring->count); + } else { + /* this one leaves the last ring element unallocated! */ + adapter->alloc_rx_buf(adapter, ring, + E1000_DESC_UNUSED(ring)); + } + } +} + +int e1000_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->flags); + + if (!adapter->ecdev) { + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_wake_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + } + return 0; +} + +/** + * e1000_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000_reset *** + **/ +void e1000_power_up_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 mii_reg = 0; + + /* Just clear the power down bit to wake the phy back up */ + if (hw->media_type == e1000_media_type_copper) { + /* according to the manual, the phy will retain its + * settings across a power-down/up cycle + */ + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + } +} + +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is true * + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active + */ + if (!adapter->wol && hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + u16 mii_reg = 0; + + switch (hw->mac_type) { + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case 
e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (er32(MANC) & E1000_MANC_SMBUS_EN) + goto out; + break; + default: + goto out; + } + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + msleep(1); + } +out: + return; +} + +static void e1000_down_and_stop(struct e1000_adapter *adapter) +{ + set_bit(__E1000_DOWN, &adapter->flags); + + if (!adapter->ecdev) { + cancel_delayed_work_sync(&adapter->watchdog_task); + } + + /* + * Since the watchdog task can reschedule other tasks, we should cancel + * it first, otherwise we can run into the situation when a work is + * still running after the adapter has been turned down. + */ + + if (!adapter->ecdev) { + cancel_delayed_work_sync(&adapter->phy_info_task); + cancel_delayed_work_sync(&adapter->fifo_stall_task); + } + + /* Only kill reset task if adapter is not resetting */ + if (!adapter->ecdev && !test_bit(__E1000_RESETTING, &adapter->flags)) + cancel_work_sync(&adapter->reset_task); +} + +void e1000_down(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl, tctl; + + /* disable receives in the hardware */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + if (!adapter->ecdev) { + netif_tx_disable(netdev); + } + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + /* flush both disables and wait for them to finish */ + E1000_WRITE_FLUSH(); + msleep(10); + + /* Set the carrier off after transmits have been disabled in the + * hardware, to avoid race conditions with e1000_watchdog() (which + * may be running concurrently to us, checking for the carrier + * bit to decide whether it should enable transmits again). Such + * a race condition would result into transmission being disabled + * in the hardware until the next IFF_DOWN+IFF_UP cycle. + */ + + if (!adapter->ecdev) { + netif_carrier_off(netdev); + napi_disable(&adapter->napi); + + e1000_irq_disable(adapter); + } + + /* Setting DOWN must be after irq_disable to prevent + * a screaming interrupt. Setting DOWN also prevents + * tasks from rescheduling. + */ + e1000_down_and_stop(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + e1000_reset(adapter); + e1000_clean_all_tx_rings(adapter); + e1000_clean_all_rx_rings(adapter); +} + +void e1000_reinit_locked(struct e1000_adapter *adapter) +{ + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + /* only run the task if not already down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) { + e1000_down(adapter); + e1000_up(adapter); + } + + clear_bit(__E1000_RESETTING, &adapter->flags); +} + +void e1000_reset(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pba = 0, tx_space, min_tx_space, min_rx_space; + bool legacy_pba_adjust = false; + u16 hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. 
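+ * PBA is the Packet Buffer Allocation register; its upper and lower 16 bits
+ * hold the Tx and Rx FIFO shares in KB, which the jumbo-frame path below
+ * rebalances.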
+ */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + case e1000_82540: + case e1000_82541: + case e1000_82541_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_48K; + break; + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + pba = E1000_PBA_48K; + break; + case e1000_82547: + case e1000_82547_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_30K; + break; + case e1000_undefined: + case e1000_num_macs: + break; + } + + if (legacy_pba_adjust) { + if (hw->max_frame_size > E1000_RXBUFFER_8192) + pba -= 8; /* allocate more FIFO for Tx */ + + if (hw->mac_type == e1000_82547) { + adapter->tx_fifo_head = 0; + adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; + adapter->tx_fifo_size = + (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; + atomic_set(&adapter->tx_fifo_stall, 0); + } + } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* adjust PBA for jumbo frames */ + ew32(PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (hw->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = hw->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + + /* PCI/PCIx hardware has PBA alignment constraints */ + switch (hw->mac_type) { + case e1000_82545 ... e1000_82546_rev_3: + pba &= ~(E1000_PBA_8K - 1); + break; + default: + break; + } + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment or use Early Receive if available + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + } + + ew32(PBA, pba); + + /* flow control settings: + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. 
+ * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame + */ + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - hw->max_frame_size)); + + hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ + hw->fc_low_water = hw->fc_high_water - 8; + hw->fc_pause_time = E1000_FC_PAUSE_TIME; + hw->fc_send_xon = 1; + hw->fc = hw->original_fc; + + /* Allow time for pending master requests to run */ + e1000_reset_hw(hw); + if (hw->mac_type >= e1000_82544) + ew32(WUC, 0); + + if (e1000_init_hw(hw)) + e_dev_err("Hardware Error\n"); + e1000_update_mng_vlan(adapter); + + /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ + if (hw->mac_type >= e1000_82544 && + hw->autoneg == 1 && + hw->autoneg_advertised == ADVERTISE_1000_FULL) { + u32 ctrl = er32(CTRL); + /* clear phy power management bit if we are in gig only mode, + * which if enabled will attempt negotiation to 100Mb, which + * can cause a loss of link at power off or driver unload + */ + ctrl &= ~E1000_CTRL_SWDPIN3; + ew32(CTRL, ctrl); + } + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETHERNET_IEEE_VLAN_TYPE); + + e1000_reset_adaptive(hw); + e1000_phy_get_info(hw, &adapter->phy_info); + + e1000_release_manageability(adapter); +} + +/* Dump the eeprom for users having checksum issues */ +static void e1000_dump_eeprom(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_eeprom eeprom; + const struct ethtool_ops *ops = netdev->ethtool_ops; + u8 *data; + int i; + u16 csum_old, csum_new = 0; + + eeprom.len = ops->get_eeprom_len(netdev); + eeprom.offset = 0; + + data = kmalloc(eeprom.len, GFP_KERNEL); + if (!data) + return; + + ops->get_eeprom(netdev, &eeprom, data); + + csum_old = (data[EEPROM_CHECKSUM_REG * 2]) + + (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8); + for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2) + csum_new += data[i] + (data[i + 1] << 8); + csum_new = EEPROM_SUM - csum_new; + + pr_err("/*********************/\n"); + pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old); + pr_err("Calculated : 0x%04x\n", csum_new); + + pr_err("Offset Values\n"); + pr_err("======== ======\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); + + pr_err("Include this output when contacting your support provider.\n"); + pr_err("This is not a software error! Something bad happened to\n"); + pr_err("your hardware or EEPROM image. Ignoring this problem could\n"); + pr_err("result in further problems, possibly loss of data,\n"); + pr_err("corruption or system hangs!\n"); + pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n"); + pr_err("which is invalid and requires you to set the proper MAC\n"); + pr_err("address manually before continuing to enable this network\n"); + pr_err("device. 
Please inspect the EEPROM dump and report the\n"); + pr_err("issue to your hardware vendor or Intel Customer Support.\n"); + pr_err("/*********************/\n"); + + kfree(data); +} + +/** + * e1000_is_need_ioport - determine if an adapter needs ioport resources or not + * @pdev: PCI device information struct + * + * Return true if an adapter needs ioport resources + **/ +static int e1000_is_need_ioport(struct pci_dev *pdev) +{ + switch (pdev->device) { + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541ER_LOM: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + return true; + default: + return false; + } +} + +static netdev_features_t e1000_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int e1000_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + e1000_vlan_mode(netdev, features); + + if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + adapter->rx_csum = !!(features & NETIF_F_RXCSUM); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + + return 1; +} + +static const struct net_device_ops e1000_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_set_rx_mode = e1000_set_rx_mode, + .ndo_set_mac_address = e1000_set_mac, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_change_mtu = e1000_change_mtu, + .ndo_eth_ioctl = e1000_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif + .ndo_fix_features = e1000_fix_features, + .ndo_set_features = e1000_set_features, +}; + +/** + * e1000_init_hw_struct - initialize members of hw struct + * @adapter: board private struct + * @hw: structure used by e1000_hw.c + * + * Factors out initialization of the e1000_hw struct to its own function + * that can be called very early at init (just after struct allocation). + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + * Returns negative error codes if MAC type setup fails. 
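+ * In this driver it is called from e1000_probe() before e1000_sw_init().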
+ */ +static int e1000_init_hw_struct(struct e1000_adapter *adapter, + struct e1000_hw *hw) +{ + struct pci_dev *pdev = adapter->pdev; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + hw->max_frame_size = adapter->netdev->mtu + + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; + + /* identify the MAC */ + if (e1000_set_mac_type(hw)) { + e_err(probe, "Unknown MAC Type\n"); + return -EIO; + } + + switch (hw->mac_type) { + default: + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->phy_init_script = 1; + break; + } + + e1000_set_media_type(hw); + e1000_get_bus_info(hw); + + hw->wait_autoneg_complete = false; + hw->tbi_compatibility_en = true; + hw->adaptive_ifs = true; + + /* Copper options */ + + if (hw->media_type == e1000_media_type_copper) { + hw->mdix = AUTO_ALL_MODES; + hw->disable_polarity_correction = false; + hw->master_slave = E1000_MASTER_SLAVE; + } + + return 0; +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter = NULL; + struct e1000_hw *hw; + + static int cards_found; + static int global_quad_port_a; /* global ksp3 port a indication */ + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 tmp = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + int bars, need_ioport; + bool disable_dev = false; + + /* do not allocate ioport bars when not needed */ + need_ioport = e1000_is_need_ioport(pdev); + if (need_ioport) { + bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); + err = pci_enable_device(pdev); + } else { + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_enable_device_mem(pdev); + } + if (err) + return err; + + err = pci_request_selected_regions(pdev, bars, e1000_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + adapter->bars = bars; + adapter->need_ioport = need_ioport; + + hw = &adapter->hw; + hw->back = adapter; + + err = -EIO; + hw->hw_addr = pci_ioremap_bar(pdev, BAR_0); + if (!hw->hw_addr) + goto err_ioremap; + + if (adapter->need_ioport) { + for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + hw->io_base = pci_resource_start(pdev, i); + break; + } + } + } + + /* make ready for any if (hw->...) 
below */ + err = e1000_init_hw_struct(adapter, hw); + if (err) + goto err_sw_init; + + /* there is a workaround being applied below that limits + * 64-bit DMA addresses to 64-bit hardware. There are some + * 32-bit adapters that Tx hang when given 64-bit DMA addresses + */ + pci_using_dac = 0; + if ((hw->bus_type == e1000_bus_type_pcix) && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + pr_err("No usable DMA config, aborting\n"); + goto err_dma; + } + } + + netdev->netdev_ops = &e1000_netdev_ops; + e1000_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + + /* setup the private structure */ + + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + err = -EIO; + if (hw->mac_type == e1000_ce4100) { + hw->ce4100_gbe_mdio_base_virt = + ioremap(pci_resource_start(pdev, BAR_1), + pci_resource_len(pdev, BAR_1)); + + if (!hw->ce4100_gbe_mdio_base_virt) + goto err_mdio_ioremap; + } + + if (hw->mac_type >= e1000_82543) { + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER; + } + + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_82547)) + netdev->hw_features |= NETIF_F_TSO; + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->features |= netdev->hw_features; + netdev->hw_features |= (NETIF_F_RXCSUM | + NETIF_F_RXALL | + NETIF_F_RXFCS); + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + netdev->vlan_features |= (NETIF_F_TSO | + NETIF_F_HW_CSUM | + NETIF_F_SG); + + /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */ + if (hw->device_id != E1000_DEV_ID_82545EM_COPPER || + hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE) + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* MTU range: 46 - 16110 */ + netdev->min_mtu = ETH_ZLEN - ETH_HLEN; + netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); + + adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); + + /* initialize eeprom parameters */ + if (e1000_init_eeprom_params(hw)) { + e_err(probe, "EEPROM initialization failed\n"); + goto err_eeprom; + } + + /* before reading the EEPROM, reset the controller to + * put the device in a known good starting state + */ + + e1000_reset_hw(hw); + + /* make sure the EEPROM is good */ + if (e1000_validate_eeprom_checksum(hw) < 0) { + e_err(probe, "The EEPROM Checksum Is Not Valid\n"); + e1000_dump_eeprom(adapter); + /* set MAC address to all zeroes to invalidate and temporary + * disable this device for the user. 
This blocks regular + * traffic while still permitting ethtool ioctls from reaching + * the hardware as well as allowing the user to run the + * interface after manually setting a hw addr using + * `ip set address` + */ + memset(hw->mac_addr, 0, netdev->addr_len); + } else { + /* copy the MAC address out of the EEPROM */ + if (e1000_read_mac_addr(hw)) + e_err(probe, "EEPROM Read Error\n"); + } + /* don't block initialization here due to bad MAC address */ + memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) + e_err(probe, "Invalid MAC Address\n"); + + + INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog); + INIT_DELAYED_WORK(&adapter->fifo_stall_task, + e1000_82547_tx_fifo_stall_task); + INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); + INIT_WORK(&adapter->reset_task, e1000_reset_task); + + e1000_check_options(adapter); + + /* Initial Wake on LAN setting + * If APM wake is enabled in the EEPROM, + * enable the ACPI Magic Packet filter + */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + break; + case e1000_82544: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_82544_APM; + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + break; + } + fallthrough; + default: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + break; + } + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82546GB_PCIE: + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->eeprom_wol = 0; + else + adapter->quad_port_a = true; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + } + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* Auto detect PHY address */ + if (hw->mac_type == e1000_ce4100) { + for (i = 0; i < 32; i++) { + hw->phy_addr = i; + e1000_read_phy_reg(hw, PHY_ID2, &tmp); + + if (tmp != 0 && tmp != 0xFF) + break; + } + + if (i >= 32) + goto err_eeprom; + } + + /* reset the hardware with the new settings */ + e1000_reset(adapter); + + // offer device to EtherCAT master module + adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); + if (adapter->ecdev) { + err = ecdev_open(adapter->ecdev); + if (err) { + ecdev_withdraw(adapter->ecdev); + goto err_register; + } + } else { + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + } + + e1000_vlan_filter_on_off(adapter, false); + + /* print bus type/speed/width info */ + e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", + ((hw->bus_type == e1000_bus_type_pcix) ? 
"-X" : ""), + ((hw->bus_speed == e1000_bus_speed_133) ? 133 : + (hw->bus_speed == e1000_bus_speed_120) ? 120 : + (hw->bus_speed == e1000_bus_speed_100) ? 100 : + (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33), + ((hw->bus_width == e1000_bus_width_64) ? 64 : 32), + netdev->dev_addr); + + if (!adapter->ecdev) { + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + } + + e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); + + cards_found++; + return 0; + +err_register: +err_eeprom: + e1000_phy_hw_reset(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_dma: +err_sw_init: +err_mdio_ioremap: + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); +err_ioremap: + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, bars); +err_pci_reg: + if (!adapter || disable_dev) + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. That could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool disable_dev; + + e1000_down_and_stop(adapter); + e1000_release_manageability(adapter); + + if (adapter->ecdev) { + ecdev_close(adapter->ecdev); + ecdev_withdraw(adapter->ecdev); + } else { + unregister_netdev(netdev); + } + + e1000_phy_hw_reset(hw); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + if (hw->mac_type == e1000_ce4100) + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_selected_regions(pdev, adapter->bars); + + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); + + if (disable_dev) + pci_disable_device(pdev); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * e1000_init_hw_struct MUST be called before this function + **/ +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + + if (e1000_alloc_queues(adapter)) { + e_err(probe, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + e1000_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__E1000_DOWN, &adapter->flags); + + return 0; +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. 
+ **/ +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct e1000_tx_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct e1000_rx_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog task is started, + * and the stack is notified that the interface is ready. + **/ +int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->flags)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + e1000_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { + e1000_update_mng_vlan(adapter); + } + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as e1000_up() */ + clear_bit(__E1000_DOWN, &adapter->flags); + + if (!adapter->ecdev) { + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_start_queue(netdev); + } + + /* fire a link status change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + + return E1000_SUCCESS; + +err_req_irq: + e1000_power_down_phy(adapter); + e1000_free_all_rx_resources(adapter); +err_setup_rx: + e1000_free_all_tx_resources(adapter); +err_setup_tx: + e1000_reset(adapter); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
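+ * The loop at the top waits, up to E1000_CHECK_RESET_COUNT times, for the
+ * __E1000_RESETTING flag so an in-progress reset can finish before teardown.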
+ **/ +int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int count = E1000_CHECK_RESET_COUNT; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(count < 0); + + /* signal that we're down so that the reset task will no longer run */ + set_bit(__E1000_DOWN, &adapter->flags); + clear_bit(__E1000_RESETTING, &adapter->flags); + + e1000_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); + + e1000_free_all_tx_resources(adapter); + e1000_free_all_rx_resources(adapter); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + } + + return 0; +} + +/** + * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary + * @adapter: address of board private structure + * @start: address of beginning of memory + * @len: length of memory + **/ +static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, + unsigned long len) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long begin = (unsigned long)start; + unsigned long end = begin + len; + + /* First rev 82545 and 82546 need to not allow any memory + * write location to cross 64k boundary due to errata 23 + */ + if (hw->mac_type == e1000_82545 || + hw->mac_type == e1000_ce4100 || + hw->mac_type == e1000_82546) { + return ((begin ^ (end - 1)) >> 16) == 0; + } + + return true; +} + +/** + * e1000_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @txdr: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct e1000_tx_buffer) * txdr->count; + txdr->buffer_info = vzalloc(size); + if (!txdr->buffer_info) + return -ENOMEM; + + /* round up to nearest 4K */ + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { +setup_tx_desc_die: + vfree(txdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + void *olddesc = txdr->desc; + dma_addr_t olddma = txdr->dma; + e_err(tx_err, "txdr align check failed: %u bytes at %p\n", + txdr->size, txdr->desc); + /* Try again, without freeing the previous */ + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, + &txdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + goto setup_tx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory " + "for the transmit descriptor ring\n"); + vfree(txdr->buffer_info); + return -ENOMEM; + } else { + /* Free old allocation, new allocation 
was successful */ + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + } + } + memset(txdr->desc, 0, txdr->size); + + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + + return 0; +} + +/** + * e1000_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_tx_resources(adapter, + &adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + u64 tdba; + struct e1000_hw *hw = &adapter->hw; + u32 tdlen, tctl, tipg; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + + switch (adapter->num_tx_queues) { + case 1: + default: + tdba = adapter->tx_ring[0].dma; + tdlen = adapter->tx_ring[0].count * + sizeof(struct e1000_tx_desc); + ew32(TDLEN, tdlen); + ew32(TDBAH, (tdba >> 32)); + ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); + ew32(TDT, 0); + ew32(TDH, 0); + adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? + E1000_TDH : E1000_82542_TDH); + adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? + E1000_TDT : E1000_82542_TDT); + break; + } + + /* Set the default values for the Tx Inter Packet Gap timer */ + if ((hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes)) + tipg = DEFAULT_82543_TIPG_IPGT_FIBER; + else + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + tipg = DEFAULT_82542_TIPG_IPGT; + ipgr1 = DEFAULT_82542_TIPG_IPGR1; + ipgr2 = DEFAULT_82542_TIPG_IPGR2; + break; + default: + ipgr1 = DEFAULT_82543_TIPG_IPGR1; + ipgr2 = DEFAULT_82543_TIPG_IPGR2; + break; + } + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + + ew32(TIDV, adapter->tx_int_delay); + if (hw->mac_type >= e1000_82540) + ew32(TADV, adapter->tx_abs_int_delay); + + /* Program the Transmit Control Register */ + + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + e1000_config_collision_dist(hw); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + if (hw->mac_type < e1000_82543) + adapter->txd_cmd |= E1000_TXD_CMD_RPS; + else + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + /* Cache if we're 82544 running in PCI-X because we'll + * need this to apply a workaround later in the send path. 
+ */ + if (hw->mac_type == e1000_82544 && + hw->bus_type == e1000_bus_type_pcix) + adapter->pcix_82544 = true; + + ew32(TCTL, tctl); + +} + +/** + * e1000_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rxdr: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct e1000_rx_buffer) * rxdr->count; + rxdr->buffer_info = vzalloc(size); + if (!rxdr->buffer_info) + return -ENOMEM; + + desc_len = sizeof(struct e1000_rx_desc); + + /* Round up to nearest 4K */ + + rxdr->size = rxdr->count * desc_len; + rxdr->size = ALIGN(rxdr->size, 4096); + + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { +setup_rx_desc_die: + vfree(rxdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + void *olddesc = rxdr->desc; + dma_addr_t olddma = rxdr->dma; + e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", + rxdr->size, rxdr->desc); + /* Try again, without freeing the previous */ + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, + &rxdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + goto setup_rx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory for " + "the Rx descriptor ring\n"); + goto setup_rx_desc_die; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + } + } + memset(rxdr->desc, 0, rxdr->size); + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + rxdr->rx_skb_top = NULL; + + return 0; +} + +/** + * e1000_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_rx_resources(adapter, + &adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = er32(RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + + rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + + if (hw->tbi_compatibility_on == 1) + rctl |= E1000_RCTL_SBP; + else + rctl &= ~E1000_RCTL_SBP; + + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case E1000_RXBUFFER_2048: + default: + 
rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + ew32(RCTL, rctl); +} + +/** + * e1000_configure_rx - Configure 8254x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + u64 rdba; + struct e1000_hw *hw = &adapter->hw; + u32 rdlen, rctl, rxcsum; + + if (!adapter->ecdev && adapter->netdev->mtu > ETH_DATA_LEN) { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + if (hw->mac_type >= e1000_82540) { + ew32(RADV, adapter->rx_abs_int_delay); + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + } + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + switch (adapter->num_rx_queues) { + case 1: + default: + rdba = adapter->rx_ring[0].dma; + ew32(RDLEN, rdlen); + ew32(RDBAH, (rdba >> 32)); + ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); + ew32(RDT, 0); + ew32(RDH, 0); + adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? + E1000_RDH : E1000_82542_RDH); + adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 
+ E1000_RDT : E1000_82542_RDT); + break; + } + + /* Enable 82543 Receive Checksum Offload for TCP and UDP */ + if (hw->mac_type >= e1000_82543) { + rxcsum = er32(RXCSUM); + if (adapter->rx_csum) + rxcsum |= E1000_RXCSUM_TUOFL; + else + /* don't need to clear IPPCSE as it defaults to 0 */ + rxcsum &= ~E1000_RXCSUM_TUOFL; + ew32(RXCSUM, rxcsum); + } + + /* Enable Receives */ + ew32(RCTL, rctl | E1000_RCTL_EN); +} + +/** + * e1000_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * e1000_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void e1000_free_all_tx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); +} + +static void +e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_tx_buffer *buffer_info) +{ + if (adapter->ecdev) { + return; + } + + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; + /* buffer_info must be completely set up in the transmit path */ +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + netdev_reset_queue(adapter->netdev); + size = sizeof(struct e1000_tx_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->last_tx_tso = false; + + writel(0, hw->hw_addr + tx_ring->tdh); + writel(0, hw->hw_addr + tx_ring->tdt); +} + +/** + * e1000_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * e1000_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + 
e1000_clean_rx_ring(adapter, rx_ring); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * e1000_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +void e1000_free_all_rx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) +static unsigned int e1000_frag_len(const struct e1000_adapter *a) +{ + return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static void *e1000_alloc_frag(const struct e1000_adapter *a) +{ + unsigned int len = e1000_frag_len(a); + u8 *data = netdev_alloc_frag(len); + + if (likely(data)) + data += E1000_HEADROOM; + return data; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_rx_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx netfrags */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (adapter->clean_rx == e1000_clean_rx_irq) { + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.data) { + skb_free_frag(buffer_info->rxbuf.data); + buffer_info->rxbuf.data = NULL; + } + } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { + if (buffer_info->dma) + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.page) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + } + } + + buffer_info->dma = 0; + } + + /* there also may be some cached data from a chained receive */ + napi_free_frags(&adapter->napi); + rx_ring->rx_skb_top = NULL; + + size = sizeof(struct e1000_rx_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, hw->hw_addr + rx_ring->rdh); + writel(0, hw->hw_addr + rx_ring->rdt); +} + +/** + * e1000_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +/* The 82542 2.0 (revision 2) needs to have the receive unit in reset + * and memory write and invalidate disabled for certain operations + */ +static void e1000_enter_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + e1000_pci_clear_mwi(hw); + + rctl = er32(RCTL); + rctl |= E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (!adapter->ecdev && netif_running(netdev)) + e1000_clean_all_rx_rings(adapter); +} + +static void e1000_leave_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct 
net_device *netdev = adapter->netdev; + u32 rctl; + + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + + if (!adapter->netdev && netif_running(netdev)) { + /* No need to loop, because 82542 supports only 1 queue */ + struct e1000_rx_ring *ring = &adapter->rx_ring[0]; + e1000_configure_rx(adapter); + if (adapter->ecdev) { + /* fill rx ring completely! */ + adapter->alloc_rx_buf(adapter, ring, ring->count); + } else { + /* this one leaves the last ring element unallocated! */ + adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); + } + + } +} + +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); + + e1000_rar_set(hw, hw->mac_addr, 0); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + return 0; +} + +/** + * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void e1000_set_rx_mode(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + bool use_uc = false; + u32 rctl; + u32 hash_value; + int i, rar_entries = E1000_RAR_ENTRIES; + int mta_reg_count = E1000_NUM_MTA_REGISTERS; + u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); + + if (!mcarray) + return; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = er32(RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + rctl &= ~E1000_RCTL_VFE; + } else { + if (netdev->flags & IFF_ALLMULTI) + rctl |= E1000_RCTL_MPE; + else + rctl &= ~E1000_RCTL_MPE; + /* Enable VLAN filter if there is a VLAN */ + if (e1000_vlan_used(adapter)) + rctl |= E1000_RCTL_VFE; + } + + if (netdev_uc_count(netdev) > rar_entries - 1) { + rctl |= E1000_RCTL_UPE; + } else if (!(netdev->flags & IFF_PROMISC)) { + rctl &= ~E1000_RCTL_UPE; + use_uc = true; + } + + ew32(RCTL, rctl); + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + /* load the first 14 addresses into the exact filters 1-14. Unicast + * addresses take precedence to avoid disabling unicast filtering + * when possible. 
+ * + * RAR 0 is used for the station MAC address + * if there are not 14 addresses, go ahead and clear the filters + */ + i = 1; + if (use_uc) + netdev_for_each_uc_addr(ha, netdev) { + if (i == rar_entries) + break; + e1000_rar_set(hw, ha->addr, i++); + } + + netdev_for_each_mc_addr(ha, netdev) { + if (i == rar_entries) { + /* load any remaining addresses into the hash table */ + u32 hash_reg, hash_bit, mta; + hash_value = e1000_hash_mc_addr(hw, ha->addr); + hash_reg = (hash_value >> 5) & 0x7F; + hash_bit = hash_value & 0x1F; + mta = (1 << hash_bit); + mcarray[hash_reg] |= mta; + } else { + e1000_rar_set(hw, ha->addr, i++); + } + } + + for (; i < rar_entries; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); + E1000_WRITE_FLUSH(); + } + + /* write the hash table completely, write from bottom to avoid + * both stupid write combining chipsets, and flushing each write + */ + for (i = mta_reg_count - 1; i >= 0 ; i--) { + /* If we are on an 82544 has an errata where writing odd + * offsets overwrites the previous even offset, but writing + * backwards over the range solves the issue by always + * writing the odd offset first + */ + E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); + } + E1000_WRITE_FLUSH(); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + kfree(mcarray); +} + +/** + * e1000_update_phy_info_task - get phy info + * @work: work struct contained inside adapter struct + * + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void e1000_update_phy_info_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + phy_info_task.work); + + e1000_phy_get_info(&adapter->hw, &adapter->phy_info); +} + +/** + * e1000_82547_tx_fifo_stall_task - task to complete work + * @work: work struct contained inside adapter struct + **/ +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + fifo_stall_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 tctl; + + if (atomic_read(&adapter->tx_fifo_stall)) { + if ((er32(TDT) == er32(TDH)) && + (er32(TDFT) == er32(TDFH)) && + (er32(TDFTS) == er32(TDFHS))) { + tctl = er32(TCTL); + ew32(TCTL, tctl & ~E1000_TCTL_EN); + ew32(TDFT, adapter->tx_head_addr); + ew32(TDFH, adapter->tx_head_addr); + ew32(TDFTS, adapter->tx_head_addr); + ew32(TDFHS, adapter->tx_head_addr); + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); + + adapter->tx_fifo_head = 0; + atomic_set(&adapter->tx_fifo_stall, 0); + netif_wake_queue(netdev); + } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } + } +} + +bool e1000_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or rx + * sequence error interrupt (except on intel ce4100). 
+ * get_link_status will stay false until the + * e1000_check_for_link establishes link for copper adapters + * ONLY + */ + switch (hw->media_type) { + case e1000_media_type_copper: + if (hw->mac_type == e1000_ce4100) + hw->get_link_status = 1; + if (hw->get_link_status) { + e1000_check_for_link(hw); + link_active = !hw->get_link_status; + } else { + link_active = true; + } + break; + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_active = hw->serdes_has_link; + break; + default: + break; + } + + return link_active; +} + +/** + * e1000_watchdog - work function + * @work: work struct contained inside adapter struct + **/ +static void e1000_watchdog(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + watchdog_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_ring *txdr = adapter->tx_ring; + u32 link, tctl; + + link = e1000_has_link(adapter); + if (!adapter->ecdev && (netif_carrier_ok(netdev)) && link) + goto link_up; + + if (link) { + if ((adapter->ecdev && !ecdev_get_link(adapter->ecdev)) || + (!adapter->ecdev && !netif_carrier_ok(netdev))) { + u32 ctrl; + /* update snapshot of PHY registers on LSC */ + e1000_get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = er32(CTRL); + pr_info("%s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & + E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & + E1000_CTRL_RFCE) ? "RX" : ((ctrl & + E1000_CTRL_TFCE) ? "TX" : "None"))); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + /* maybe add some timeout factor ? 
*/ + break; + } + + /* enable transmits in the hardware */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + if (adapter->ecdev) { + ecdev_set_link(adapter->ecdev, 1); + } + else { + netif_carrier_on(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + } + adapter->smartspeed = 0; + } + } else { + if ((adapter->ecdev && ecdev_get_link(adapter->ecdev)) + || (!adapter->ecdev && netif_carrier_ok(netdev))) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + pr_info("%s NIC Link is Down\n", + netdev->name); + + if (adapter->ecdev) { + ecdev_set_link(adapter->ecdev, 0); + } else { + netif_carrier_off(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + } + } + + e1000_smartspeed(adapter); + } + +link_up: + e1000_update_stats(adapter); + + hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + hw->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; + adapter->gorcl_old = adapter->stats.gorcl; + adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; + adapter->gotcl_old = adapter->stats.gotcl; + + e1000_update_adaptive(hw); + + if (!adapter->ecdev && !netif_carrier_ok(netdev)) { + if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* exit immediately since reset is imminent */ + return; + } + } + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { + /* Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; + u32 dif = (adapter->gotcl > adapter->gorcl ? + adapter->gotcl - adapter->gorcl : + adapter->gorcl - adapter->gotcl) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + ew32(ITR, 1000000000 / (itr * 256)); + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + ew32(ICS, E1000_ICS_RXDMT0); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = true; + + /* Reschedule the task */ + if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
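+ * The value returned is one of the latency_range buckets defined above;
+ * e1000_set_itr() then maps it to an interrupt rate and programs the ITR
+ * register.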
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see e1000_param.c) + **/ +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, int bytes) +{ + unsigned int retval = itr_setting; + struct e1000_hw *hw = &adapter->hw; + + if (unlikely(hw->mac_type < e1000_82540)) + goto update_itr_done; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* jumbo frames get bulk treatment*/ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* jumbo frames need bulk latency setting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) + retval = bulk_latency; + else if (packets <= 2 && bytes < 512) + retval = lowest_latency; + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + if (unlikely(hw->mac_type < e1000_82540)) + return; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (unlikely(adapter->link_speed != SPEED_1000)) { + new_itr = 4000; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + ew32(ITR, 1000000000 / (new_itr * 256)); + } +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_NO_FCS 0x00000010 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + + if (skb_is_gso(skb)) { + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_is_gso_v6(skb)) { + tcp_v6_gso_csum_prep(skb); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (++i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn(drv, "checksum_partial proto=%x!\n", + skb->protocol); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + 
context_desc->upper_setup.tcp_fields.tucse = 0;
+	context_desc->tcp_seg_setup.data = 0;
+	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+	buffer_info->time_stamp = jiffies;
+	buffer_info->next_to_watch = i;
+
+	if (unlikely(++i == tx_ring->count))
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
+	return true;
+}
+
+#define E1000_MAX_TXD_PWR	12
+#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+			struct e1000_tx_ring *tx_ring,
+			struct sk_buff *skb, unsigned int first,
+			unsigned int max_per_txd, unsigned int nr_frags,
+			unsigned int mss)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_tx_buffer *buffer_info;
+	unsigned int len = skb_headlen(skb);
+	unsigned int offset = 0, size, count = 0, i;
+	unsigned int f, bytecount, segs;
+
+	i = tx_ring->next_to_use;
+
+	while (len) {
+		buffer_info = &tx_ring->buffer_info[i];
+		size = min(len, max_per_txd);
+		/* Workaround for Controller erratum --
+		 * descriptor for non-tso packet in a linear SKB that follows a
+		 * tso gets written back prematurely before the data is fully
+		 * DMA'd to the controller
+		 */
+		if (!skb->data_len && tx_ring->last_tx_tso &&
+		    !skb_is_gso(skb)) {
+			tx_ring->last_tx_tso = false;
+			size -= 4;
+		}
+
+		/* Workaround for premature desc write-backs
+		 * in TSO mode.  Append 4-byte sentinel desc
+		 */
+		if (unlikely(mss && !nr_frags && size == len && size > 8))
+			size -= 4;
+		/* work-around for errata 10 and it applies
+		 * to all controllers in PCI-X mode
+		 * The fix is to make sure that the first descriptor of a
+		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
+		 */
+		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+			     (size > 2015) && count == 0))
+			size = 2015;
+
+		/* Workaround for potential 82544 hang in PCI-X.  Avoid
+		 * terminating buffers within evenly-aligned dwords.
+		 */
+		if (unlikely(adapter->pcix_82544 &&
+		    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+		    size > 4))
+			size -= 4;
+
+		buffer_info->length = size;
+		/* set time_stamp *before* dma to help avoid a possible race */
+		buffer_info->time_stamp = jiffies;
+		buffer_info->mapped_as_page = false;
+		buffer_info->dma = dma_map_single(&pdev->dev,
+						  skb->data + offset,
+						  size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+			goto dma_error;
+		buffer_info->next_to_watch = i;
+
+		len -= size;
+		offset += size;
+		count++;
+		if (len) {
+			i++;
+			if (unlikely(i == tx_ring->count))
+				i = 0;
+		}
+	}
+
+	for (f = 0; f < nr_frags; f++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+		len = skb_frag_size(frag);
+		offset = 0;
+
+		while (len) {
+			unsigned long bufend;
+			i++;
+			if (unlikely(i == tx_ring->count))
+				i = 0;
+
+			buffer_info = &tx_ring->buffer_info[i];
+			size = min(len, max_per_txd);
+			/* Workaround for premature desc write-backs
+			 * in TSO mode.  Append 4-byte sentinel desc
+			 */
+			if (unlikely(mss && f == (nr_frags-1) &&
+				     size == len && size > 8))
+				size -= 4;
+			/* Workaround for potential 82544 hang in PCI-X.
+			 * Avoid terminating buffers within evenly-aligned
+			 * dwords.
+ */ + bufend = (unsigned long) + page_to_phys(skb_frag_page(frag)); + bufend += offset + size - 1; + if (unlikely(adapter->pcix_82544 && + !(bufend & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = true; + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, + offset, size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + } + } + + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i == 0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + return 0; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, int tx_flags, + int count) +{ + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_tx_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + txd_lower &= ~(E1000_TXD_CMD_IFCS); + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + if (unlikely(++i == tx_ring->count)) + i = 0; + } + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + + tx_ring->next_to_use = i; +} + +/* 82547 workaround to avoid controller hang in half-duplex environment. + * The workaround is to avoid queuing a large packet that would span + * the internal Tx FIFO ring boundary by notifying the stack to resend + * the packet at a later time. This gives the Tx FIFO an opportunity to + * flush all packets. When that occurs, we reset the Tx FIFO pointers + * to the beginning of the Tx FIFO. 
+ */ + +#define E1000_FIFO_HDR 0x10 +#define E1000_82547_PAD_LEN 0x3E0 + +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; + u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; + + skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); + + if (adapter->link_duplex != HALF_DUPLEX) + goto no_fifo_stall_required; + + if (atomic_read(&adapter->tx_fifo_stall)) + return 1; + + if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { + atomic_set(&adapter->tx_fifo_stall, 1); + return 1; + } + +no_fifo_stall_required: + adapter->tx_fifo_head += skb_fifo_len; + if (adapter->tx_fifo_head >= adapter->tx_fifo_size) + adapter->tx_fifo_head -= adapter->tx_fifo_size; + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + + if (adapter->ecdev) { + return -EBUSY; + } + + netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(E1000_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, + struct e1000_tx_ring *tx_ring, int size) +{ + if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __e1000_maybe_stop_tx(netdev, size); +} + +#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X)) +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *tx_ring; + unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + __be16 protocol = vlan_get_protocol(skb); + + /* This goes back to the question of how to logically map a Tx queue + * to a flow. Right now, performance is impacted slightly negatively + * if using multiple Tx queues. If the stack breaks away from a + * single qdisc implementation, we can look at this again. + */ + tx_ring = adapter->tx_ring; + + /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, + * packets may get corrupted during padding by HW. + * To WA this issue, pad all small packets manually. + */ + if (eth_skb_pad(skb)) + return NETDEV_TX_OK; + + mss = skb_shinfo(skb)->gso_size; + /* The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. 
+ */ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && hdr_len == len) { + switch (hw->mac_type) { + case e1000_82544: { + unsigned int pull_size; + + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements + * NOTE: this is a TSO only workaround + * if end byte alignment not correct move us + * into the next dword + */ + if ((unsigned long)(skb_tail_pointer(skb) - 1) + & 4) + break; + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err(drv, "__pskb_pull_tail " + "failed.\n"); + if (!adapter->ecdev) { + dev_kfree_skb_any(skb); + } + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + break; + } + default: + /* do nothing */ + break; + } + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + /* Controller Erratum workaround */ + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) + count++; + + count += TXD_USE_COUNT(len, max_txd_pwr); + + if (adapter->pcix_82544) + count++; + + /* work-around for errata 10 and it applies to all controllers + * in PCI-X mode, so add one more descriptor to the count + */ + if (unlikely((hw->bus_type == e1000_bus_type_pcix) && + (len > 2015))) + count++; + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), + max_txd_pwr); + if (adapter->pcix_82544) + count += nr_frags; + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time + */ + if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) + return NETDEV_TX_BUSY; + + if (unlikely((hw->mac_type == e1000_82547) && + (e1000_82547_fifo_workaround(adapter, skb)))) { + if (!adapter->ecdev) { + netif_stop_queue(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } + return NETDEV_TX_BUSY; + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << + E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, tx_ring, skb, protocol); + if (tso < 0) { + if (!adapter->ecdev) { + dev_kfree_skb_any(skb); + } + return NETDEV_TX_OK; + } + + if (likely(tso)) { + if (likely(hw->mac_type != e1000_82544)) + tx_ring->last_tx_tso = true; + tx_flags |= E1000_TX_FLAGS_TSO; + } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) + tx_flags |= E1000_TX_FLAGS_CSUM; + + if (protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + if (unlikely(skb->no_fcs)) + tx_flags |= E1000_TX_FLAGS_NO_FCS; + + count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, + nr_frags, mss); + + if (count) { + /* The descriptors needed is higher than other Intel drivers + * due to a number of workarounds. 
The breakdown is below: + * Data descriptors: MAX_SKB_FRAGS + 1 + * Context Descriptor: 1 + * Keep head from touching tail: 2 + * Workarounds: 3 + */ + int desc_needed = MAX_SKB_FRAGS + 7; + + if (!adapter->ecdev) { + netdev_sent_queue(netdev, skb->len); + skb_tx_timestamp(skb); + } + + e1000_tx_queue(adapter, tx_ring, tx_flags, count); + + /* 82544 potentially requires twice as many data descriptors + * in order to guarantee buffers don't end on evenly-aligned + * dwords + */ + if (adapter->pcix_82544) + desc_needed += MAX_SKB_FRAGS + 1; + + /* Make sure there is space in the ring for the next send. */ + e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); + + if (adapter->ecdev || !netdev_xmit_more() || + netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { + writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); + } + } else { + if (!adapter->ecdev) { + dev_kfree_skb_any(skb); + } + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +#define NUM_REGS 38 /* 1 based count */ +static void e1000_regdump(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 regs[NUM_REGS]; + u32 *regs_buff = regs; + int i = 0; + + static const char * const reg_name[] = { + "CTRL", "STATUS", + "RCTL", "RDLEN", "RDH", "RDT", "RDTR", + "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", + "TIDV", "TXDCTL", "TADV", "TARC0", + "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", + "TXDCTL1", "TARC1", + "CTRL_EXT", "ERT", "RDBAL", "RDBAH", + "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", + "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" + }; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDBAL); + regs_buff[9] = er32(TDBAH); + regs_buff[10] = er32(TDLEN); + regs_buff[11] = er32(TDH); + regs_buff[12] = er32(TDT); + regs_buff[13] = er32(TIDV); + regs_buff[14] = er32(TXDCTL); + regs_buff[15] = er32(TADV); + regs_buff[16] = er32(TARC0); + + regs_buff[17] = er32(TDBAL1); + regs_buff[18] = er32(TDBAH1); + regs_buff[19] = er32(TDLEN1); + regs_buff[20] = er32(TDH1); + regs_buff[21] = er32(TDT1); + regs_buff[22] = er32(TXDCTL1); + regs_buff[23] = er32(TARC1); + regs_buff[24] = er32(CTRL_EXT); + regs_buff[25] = er32(ERT); + regs_buff[26] = er32(RDBAL0); + regs_buff[27] = er32(RDBAH0); + regs_buff[28] = er32(TDFH); + regs_buff[29] = er32(TDFT); + regs_buff[30] = er32(TDFHS); + regs_buff[31] = er32(TDFTS); + regs_buff[32] = er32(TDFPC); + regs_buff[33] = er32(RDFH); + regs_buff[34] = er32(RDFT); + regs_buff[35] = er32(RDFHS); + regs_buff[36] = er32(RDFTS); + regs_buff[37] = er32(RDFPC); + + pr_info("Register dump\n"); + for (i = 0; i < NUM_REGS; i++) + pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); +} + +/* + * e1000_dump: Print registers, tx ring and rx ring + */ +static void e1000_dump(struct e1000_adapter *adapter) +{ + /* this code doesn't handle multiple rings */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + + if (!netif_msg_hw(adapter)) + return; + + /* Print Registers */ + e1000_regdump(adapter); + + /* transmit dump */ + pr_info("TX Desc ring0 dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on 
Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * +----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); + pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); + + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)tx_desc; + const char *type; + + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + type = "NTC/U"; + else if (i == tx_ring->next_to_use) + type = "NTU"; + else if (i == tx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", + ((le64_to_cpu(u->b) & (1<<20)) ? 
'd' : 'c'), i, + le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->length, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, buffer_info->skb, type); + } + +rx_ring_summary: + /* receive dump */ + pr_info("\nRX Desc ring dump\n"); + + /* Legacy Receive Descriptor Format + * + * +-----------------------------------------------------+ + * | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * | VLAN Tag | Errors | Status 0 | Packet csum | Length | + * +-----------------------------------------------------+ + * 63 48 47 40 39 32 31 16 15 0 + */ + pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); + + if (!netif_msg_rx_status(adapter)) + goto exit; + + for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)rx_desc; + const char *type; + + if (i == rx_ring->next_to_use) + type = "NTU"; + else if (i == rx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", + i, le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->rxbuf.data, type); + } /* for */ + + /* dump the descriptor caches */ + /* rx */ + pr_info("Rx descriptor cache in 64bit format\n"); + for (i = 0x6000; i <= 0x63FF ; i += 0x10) { + pr_info("R%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } + /* tx */ + pr_info("Tx descriptor cache in 64bit format\n"); + for (i = 0x7000; i <= 0x73FF ; i += 0x10) { + pr_info("T%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } +exit: + return; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: number of the Tx queue that hung (unused) + **/ +static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = + container_of(work, struct e1000_adapter, reset_task); + + e_err(drv, "Reset adapter\n"); + e1000_reinit_locked(adapter); +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + if (adapter->ecdev) { + return -EBUSY; + } + + /* Adapter-specific max frame size limits. */ + switch (hw->mac_type) { + case e1000_undefined ... e1000_82542_rev2_1: + if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { + e_err(probe, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + break; + default: + /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. 
*/ + break; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + /* e1000_down has a dependency on max_frame_size */ + hw->max_frame_size = max_frame; + if (netif_running(netdev)) { + /* prevent buffers from being reallocated */ + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; + e1000_down(adapter); + } + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. RXBUFFER_2048 --> size-4096 slab + * however with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= E1000_RXBUFFER_2048) + adapter->rx_buffer_len = E1000_RXBUFFER_2048; + else +#if (PAGE_SIZE >= E1000_RXBUFFER_16384) + adapter->rx_buffer_len = E1000_RXBUFFER_16384; +#elif (PAGE_SIZE >= E1000_RXBUFFER_4096) + adapter->rx_buffer_len = PAGE_SIZE; +#endif + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if (!hw->tbi_compatibility_on && + ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || + (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + e1000_up(adapter); + else + e1000_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->flags); + + return 0; +} + +/** + * e1000_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void e1000_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned long flags = 0; + u16 phy_tmp; + +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + if (!adapter->ecdev) { + spin_lock_irqsave(&adapter->stats_lock, flags); + } + + /* these counters are modified from e1000_tbi_adjust_stats, + * called from the interrupt context, so they must only + * be written while holding adapter->stats_lock + */ + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorcl += er32(GORCL); + adapter->stats.gorch += er32(GORCH); + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.prc64 += er32(PRC64); + adapter->stats.prc127 += er32(PRC127); + adapter->stats.prc255 += er32(PRC255); + adapter->stats.prc511 += er32(PRC511); + adapter->stats.prc1023 += er32(PRC1023); + adapter->stats.prc1522 += er32(PRC1522); + + adapter->stats.symerrs += er32(SYMERRS); + adapter->stats.mpc += er32(MPC); + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + adapter->stats.sec += er32(SEC); + adapter->stats.rlec += er32(RLEC); + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.fcruc += er32(FCRUC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotcl += er32(GOTCL); + adapter->stats.gotch += er32(GOTCH); + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + adapter->stats.rfc += er32(RFC); + adapter->stats.rjc += er32(RJC); + adapter->stats.torl += er32(TORL); + adapter->stats.torh += er32(TORH); + adapter->stats.totl += er32(TOTL); + adapter->stats.toth += er32(TOTH); + adapter->stats.tpr += er32(TPR); + + adapter->stats.ptc64 += er32(PTC64); + adapter->stats.ptc127 += er32(PTC127); + adapter->stats.ptc255 += er32(PTC255); + adapter->stats.ptc511 += er32(PTC511); + adapter->stats.ptc1023 += er32(PTC1023); + adapter->stats.ptc1522 += er32(PTC1522); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* used for adaptive IFS */ + + hw->tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->tx_packet_delta; + hw->collision_delta = er32(COLC); + adapter->stats.colc += hw->collision_delta; + + if (hw->mac_type >= e1000_82543) { + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.tncrs += er32(TNCRS); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + } + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; + netdev->stats.rx_length_errors = adapter->stats.rlerrc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; + netdev->stats.tx_errors = adapter->stats.txerrc; + 
netdev->stats.tx_aborted_errors = adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + if (hw->bad_tx_carr_stats_fd && + adapter->link_duplex == FULL_DUPLEX) { + netdev->stats.tx_carrier_errors = 0; + adapter->stats.tncrs = 0; + } + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Phy Stats */ + if (hw->media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && + (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + + if ((hw->mac_type <= e1000_82546) && + (hw->phy_type == e1000_phy_m88) && + !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) + adapter->phy_stats.receive_errors += phy_tmp; + } + + /* Management Stats */ + if (hw->has_smbus) { + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + } + + if (!adapter->ecdev) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + } +} + +void ec_poll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) { + e1000_watchdog(&adapter->watchdog_task.work); + adapter->ec_watchdog_jiffies = jiffies; + } + + e1000_intr(0, netdev); +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (unlikely((!icr))) + return IRQ_NONE; /* Not our interrupt */ + + /* we might have caused the interrupt, but the above + * read cleared it, and just in case the driver is + * down there is nothing to do so return handled + */ + if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) + return IRQ_HANDLED; + + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { + hw->get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 1); + } + + if (adapter->ecdev) { + int i, ec_work_done = 0; + for (i = 0; i < E1000_MAX_INTR; i++) { + if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring, + &ec_work_done, 100) && + !e1000_clean_tx_irq(adapter, adapter->tx_ring))) { + break; + } + } + } else { + /* disable interrupts, without the synchronize_irq bit */ + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + + if (likely(napi_schedule_prep(&adapter->napi))) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } else { + /* this really should not happen! 
if it does it is basically a + * bug, but not a hard error, so enable ints and continue + */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + } + + return IRQ_HANDLED; +} + +/** + * e1000_clean - NAPI Rx polling callback + * @napi: napi struct containing references to driver info + * @budget: budget given to driver for receive packets + **/ +static int e1000_clean(struct napi_struct *napi, int budget) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); + int tx_clean_complete = 0, work_done = 0; + + tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); + + adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); + + if (!tx_clean_complete || work_done == budget) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) { + if (likely(adapter->itr_setting & 3)) + e1000_set_itr(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return work_done; +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * @tx_ring: ring to clean + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; + unsigned int bytes_compl = 0, pkts_compl = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + dma_rmb(); /* read buffer_info after eop_desc */ + for ( ; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + + } + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + tx_desc->upper.data = 0; + + if (unlikely(++i == tx_ring->count)) + i = 0; + } + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, + * which will reuse the cleaned buffers. + */ + smp_store_release(&tx_ring->next_to_clean, i); + + if (!adapter->ecdev) { + netdev_completed_queue(netdev, pkts_compl, bytes_compl); + } + +#define TX_WAKE_THRESHOLD 32 + if (!adapter->ecdev && unlikely(count && netif_carrier_ok(netdev) && + E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->flags))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (!adapter->ecdev && adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[eop].time_stamp && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%lu>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + (unsigned long)(tx_ring - adapter->tx_ring), + readl(hw->hw_addr + tx_ring->tdh), + readl(hw->hw_addr + tx_ring->tdt), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); + e1000_dump(adapter); + netif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + netdev->stats.tx_bytes += total_tx_bytes; + netdev->stats.tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_rx_checksum - Receive Checksum Offload for 82543 + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @skb: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + skb_checksum_none_assert(skb); + + /* 82543 or newer only */ + if (unlikely(hw->mac_type < e1000_82543)) + return; + /* Ignore Checksum bit is set */ + if (unlikely(status & E1000_RXD_STAT_IXSM)) + return; + /* TCP/UDP checksum error bit is set */ + if (unlikely(errors & E1000_RXD_ERR_TCPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + /* TCP/UDP Checksum has not been calculated */ + if (!(status & E1000_RXD_STAT_TCPCS)) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + if (likely(status & E1000_RXD_STAT_TCPCS)) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + adapter->hw_csum_good++; +} + +/** + * e1000_consume_page - helper function for jumbo Rx path + * @bi: software descriptor shadow data + * @skb: skb being modified + * @length: length of data being added + **/ +static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->rxbuf.page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; +} + +/** + * e1000_receive_skb - helper function to handle rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + */ +static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, + __le16 vlan, struct sk_buff *skb) +{ + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (status & E1000_RXD_STAT_VP) { + u16 vid = le16_to_cpu(vlan) & 
E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + napi_gro_receive(&adapter->napi, skb); +} + +/** + * e1000_tbi_adjust_stats + * @hw: Struct containing variables accessed by shared code + * @stats: point to stats struct + * @frame_len: The length of the frame in question + * @mac_addr: The Ethernet destination address of the frame in question + * + * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT + */ +static void e1000_tbi_adjust_stats(struct e1000_hw *hw, + struct e1000_hw_stats *stats, + u32 frame_len, const u8 *mac_addr) +{ + u64 carry_bit; + + /* First adjust the frame length. */ + frame_len--; + /* We need to adjust the statistics counters, since the hardware + * counters overcount this packet as a CRC error and undercount + * the packet as a good packet + */ + /* This packet should not be counted as a CRC error. */ + stats->crcerrs--; + /* This packet does count as a Good Packet Received. */ + stats->gprc++; + + /* Adjust the Good Octets received counters */ + carry_bit = 0x80000000 & stats->gorcl; + stats->gorcl += frame_len; + /* If the high bit of Gorcl (the low 32 bits of the Good Octets + * Received Count) was one before the addition, + * AND it is zero after, then we lost the carry out, + * need to add one to Gorch (Good Octets Received Count High). + * This could be simplified if all environments supported + * 64-bit integers. + */ + if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) + stats->gorch++; + /* Is this a broadcast or multicast? Check broadcast first, + * since the test for a multicast frame will test positive on + * a broadcast frame. + */ + if (is_broadcast_ether_addr(mac_addr)) + stats->bprc++; + else if (is_multicast_ether_addr(mac_addr)) + stats->mprc++; + + if (frame_len == hw->max_frame_size) { + /* In this case, the hardware has overcounted the number of + * oversize frames. + */ + if (stats->roc > 0) + stats->roc--; + } + + /* Adjust the bin counters when the extra byte put the frame in the + * wrong bin. Remember that the frame_len was adjusted above. 
+ */ + if (frame_len == 64) { + stats->prc64++; + stats->prc127--; + } else if (frame_len == 127) { + stats->prc127++; + stats->prc255--; + } else if (frame_len == 255) { + stats->prc255++; + stats->prc511--; + } else if (frame_len == 511) { + stats->prc511++; + stats->prc1023--; + } else if (frame_len == 1023) { + stats->prc1023++; + stats->prc1522--; + } else if (frame_len == 1522) { + stats->prc1522++; + } +} + +static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, + u8 status, u8 errors, + u32 length, const u8 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u8 last_byte = *(data + length - 1); + + if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { + unsigned long irq_flags; + + spin_lock_irqsave(&adapter->stats_lock, irq_flags); + e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); + + return true; + } + + return false; +} + +static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, + unsigned int bufsz) +{ + struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); + + if (unlikely(!skb)) + adapter->alloc_rx_buff_failed++; + return skb; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + * @work_to_do: max amount of work allowed for this call to do + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + */ +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + + if (++i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + u8 *mapped = page_address(buffer_info->rxbuf.page); + + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, mapped)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + /* an error means any chain goes out the window + * too + */ + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + } + +#define rxtop rx_ring->rx_skb_top +process_skb: + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = napi_get_frags(&adapter->napi); + if (!rxtop) + 
break; + + skb_fill_page_desc(rxtop, 0, + buffer_info->rxbuf.page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + struct page *p; + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page + */ + p = buffer_info->rxbuf.page; + if (length <= copybreak) { + u8 *vaddr; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + length -= 4; + skb = e1000_alloc_rx_skb(adapter, + length); + if (!skb) + break; + + vaddr = kmap_atomic(p); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr); + /* re-use the page, so don't erase + * buffer_info->rxbuf.page + */ + skb_put(skb, length); + e1000_rx_checksum(adapter, + status | rx_desc->errors << 24, + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_receive_skb(adapter, status, + rx_desc->special, skb); + goto next_desc; + } else { + skb = napi_get_frags(&adapter->napi); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + skb_fill_page_desc(skb, 0, p, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += (skb->len - 4); /* don't count FCS */ + if (likely(!(netdev->features & NETIF_F_RXFCS))) + pskb_trim(skb, skb->len - 4); + total_rx_packets++; + + if (status & E1000_RXD_STAT_VP) { + __le16 vlan = rx_desc->special; + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + napi_gro_frags(&adapter->napi); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/* this should improve performance for small packets with large amounts + * of reassembly being done in the stack + */ +static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, + struct e1000_rx_buffer *buffer_info, + u32 length, const void *data) +{ + struct sk_buff *skb; + + if (length > copybreak) + return NULL; + + skb = e1000_alloc_rx_skb(adapter, length); + if (!skb) + return NULL; + + dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, + length, DMA_FROM_DEVICE); + + skb_put_data(skb, data, length); + + return skb; +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + 
* @work_to_do: max amount of work allowed for this call to do + */ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 *data; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + length = le16_to_cpu(rx_desc->length); + + data = buffer_info->rxbuf.data; + if (!adapter->ecdev) { + prefetch(data); + skb = e1000_copybreak(adapter, buffer_info, length, data); + if (!skb) { + unsigned int frag_len = e1000_frag_len(adapter); + + skb = build_skb(data - E1000_HEADROOM, frag_len); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + skb_reserve(skb, E1000_HEADROOM); + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + buffer_info->rxbuf.data = NULL; + } + } + + if (++i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + + /* !EOP means multiple descriptors were used to store a single + * packet, if thats the case we need to toss it. In fact, we + * to toss every packet with the EOP bit clear and the next + * frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(status & E1000_RXD_STAT_EOP))) + adapter->discarding = true; + + if (adapter->discarding) { + /* All receives must fit into a single buffer */ + netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); + if (!adapter->ecdev) { + dev_kfree_skb(skb); + } + if (status & E1000_RXD_STAT_EOP) + adapter->discarding = false; + goto next_desc; + } + + if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, data)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + if (!adapter->ecdev) { + dev_kfree_skb(skb); + } + goto next_desc; + } + } + +process_skb: + total_rx_bytes += (length - 4); /* don't count FCS */ + total_rx_packets++; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + /* adjust length to remove Ethernet CRC, this must be + * done after the TBI_ACCEPT workaround above + */ + length -= 4; + + if (!adapter->ecdev) { + if (buffer_info->rxbuf.data == NULL) + skb_put(skb, length); + else /* copybreak skb */ + skb_trim(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + } + + if (adapter->ecdev) { + dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + ecdev_receive(adapter->ecdev, data, length); + + // No need to detect link status as + // long as frames are received: Reset watchdog. 
+ adapter->ec_watchdog_jiffies = jiffies; + + dma_sync_single_for_device(&adapter->pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + } else { + e1000_receive_skb(adapter, status, rx_desc->special, skb); + } + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @adapter: address of board private structure + * @rx_ring: pointer to receive ring structure + * @cleaned_count: number of buffers to allocate this pass + **/ +static void +e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, int cleaned_count) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + /* allocate a new page if necessary */ + if (!buffer_info->rxbuf.page) { + buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->rxbuf.page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) { + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->rxbuf.page, 0, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + dma_wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + * @rx_ring: pointer to ring struct + * @cleaned_count: number of new Rx buffers to try to allocate + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + void *data; + + if (buffer_info->rxbuf.data) + goto skip; + + data = e1000_alloc_frag(adapter); + if (!data) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + void *olddata = data; + e_err(rx_err, "skb align check failed: %u bytes at " + "%p\n", bufsz, data); + /* Try again, without freeing the previous */ + data = e1000_alloc_frag(adapter); + /* Failed allocation, critical failure */ + if (!data) { + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + /* give up */ + skb_free_frag(data); + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + /* Use new allocation */ + skb_free_frag(olddata); + } + buffer_info->dma = dma_map_single(&pdev->dev, + data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + skb_free_frag(data); + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + + /* XXX if it was allocated cleanly it will never map to a + * boundary crossing + */ + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, + (void *)(unsigned long)buffer_info->dma, + adapter->rx_buffer_len)) { + e_err(rx_err, "dma align check failed: %u bytes at " + "%p\n", adapter->rx_buffer_len, + (void *)(unsigned long)buffer_info->dma); + + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + + skb_free_frag(data); + buffer_info->rxbuf.data = NULL; + buffer_info->dma = 0; + + adapter->alloc_rx_buff_failed++; + break; + } + buffer_info->rxbuf.data = data; + skip: + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + writel(i, hw->hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 
+ * @adapter: address of board private structure + **/ +static void e1000_smartspeed(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_status; + u16 phy_ctrl; + + if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || + !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) + return; + + if (adapter->smartspeed == 0) { + /* If Master/Slave config fault is asserted twice, + * we assume back-to-back + */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + if (phy_ctrl & CR_1000T_MS_ENABLE) { + phy_ctrl &= ~CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, + phy_ctrl); + adapter->smartspeed++; + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, + &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, + phy_ctrl); + } + } + return; + } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { + /* If still no link, perhaps using 2/3 pair cable */ + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + phy_ctrl |= CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); + } + } + /* Restart process after E1000_SMARTSPEED_MAX iterations */ + if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) + adapter->smartspeed = 0; +} + +/** + * e1000_ioctl - handle ioctl calls + * @netdev: pointer to our netdev + * @ifr: pointer to interface request structure + * @cmd: ioctl data + **/ +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * e1000_mii_ioctl - + * @netdev: pointer to our netdev + * @ifr: pointer to interface request structure + * @cmd: ioctl data + **/ +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct mii_ioctl_data *data = if_mii(ifr); + int retval; + u16 mii_reg; + unsigned long flags; + + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + break; + case SIOCGMIIREG: + if (adapter->ecdev) { + return -EPERM; + } + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, + &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + break; + case SIOCSMIIREG: + if (adapter->ecdev) { + return -EPERM; + } + if (data->reg_num & ~(0x1F)) + return -EFAULT; + mii_reg = data->val_in; + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_write_phy_reg(hw, data->reg_num, + mii_reg)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + if (hw->media_type == e1000_media_type_copper) { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (mii_reg & MII_CR_AUTO_NEG_EN) { + 
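+ /* 0x2F advertises 10/100 half/full duplex plus
+ * 1000 full; in the forced case below the BMCR
+ * bits are decoded directly (0x2000 = 100 Mb/s,
+ * 0x40 = 1000 Mb/s, 0x100 = full duplex).
+ */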
hw->autoneg = 1; + hw->autoneg_advertised = 0x2F; + } else { + u32 speed; + if (mii_reg & 0x40) + speed = SPEED_1000; + else if (mii_reg & 0x2000) + speed = SPEED_100; + else + speed = SPEED_10; + retval = e1000_set_spd_dplx( + adapter, speed, + ((mii_reg & 0x100) + ? DUPLEX_FULL : + DUPLEX_HALF)); + if (retval) + return retval; + } + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + case M88E1000_PHY_SPEC_CTRL: + case M88E1000_EXT_PHY_SPEC_CTRL: + if (e1000_phy_reset(hw)) + return -EIO; + break; + } + } else { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + } + } + break; + default: + return -EOPNOTSUPP; + } + return E1000_SUCCESS; +} + +void e1000_pci_set_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + int ret_val = pci_set_mwi(adapter->pdev); + + if (ret_val) + e_err(probe, "Error in setting MWI\n"); +} + +void e1000_pci_clear_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + + pci_clear_mwi(adapter->pdev); +} + +int e1000_pcix_get_mmrbc(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return pcix_get_mmrbc(adapter->pdev); +} + +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) +{ + struct e1000_adapter *adapter = hw->back; + pcix_set_mmrbc(adapter->pdev, mmrbc); +} + +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) +{ + outl(value, port); +} + +static bool e1000_vlan_used(struct e1000_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + return true; + return false; +} + +static void __e1000_vlan_mode(struct e1000_adapter *adapter, + netdev_features_t features) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = er32(CTRL); + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + ctrl |= E1000_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~E1000_CTRL_VME; + } + ew32(CTRL, ctrl); +} +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, adapter->netdev->features); + if (filter_on) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_CFIEN; + if (!(adapter->netdev->flags & IFF_PROMISC)) + rctl |= E1000_RCTL_VFE; + ew32(RCTL, rctl); + e1000_update_mng_vlan(adapter); + } else { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_VFE; + ew32(RCTL, rctl); + } + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, features); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + (vid == adapter->mng_vlan_id)) + return 0; + + if 
(!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, true); + + /* add VID to filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta |= (1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + + /* remove VID from filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + clear_bit(vid, adapter->active_vlans); + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, false); + + return 0; +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + if (!e1000_vlan_used(adapter)) + return; + + e1000_vlan_filter_on_off(adapter, true); + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_hw *hw = &adapter->hw; + + hw->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 gbps Full duplex */ + if ((hw->media_type == e1000_media_type_fiber) && + spd != SPEED_1000 && + dplx != DUPLEX_FULL) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_10_half; + break; + case SPEED_10 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_10_full; + break; + case SPEED_100 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_100_half; + break; + case SPEED_100 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_100_full; + break; + case SPEED_1000 + DUPLEX_FULL: + hw->autoneg = 1; + hw->autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + hw->mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + e_err(probe, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + u32 wufc = adapter->wol; + + if (adapter->ecdev) { + return -EBUSY; + } + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + } + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_rx_mode(netdev); + + rctl = er32(RCTL); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) + rctl |= E1000_RCTL_MPE; + + /* enable receives in the hardware */ + ew32(RCTL, rctl | E1000_RCTL_EN); + + if (hw->mac_type >= 
e1000_82540) { + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC | + E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + } + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + ew32(WUC, E1000_WUC_PME_EN); + ew32(WUFC, wufc); + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + } + + e1000_release_manageability(adapter); + + *enable_wake = !!wufc; + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->en_mng_pt) + *enable_wake = true; + + if (netif_running(netdev)) + e1000_free_irq(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + return 0; +} + +static int __maybe_unused e1000_suspend(struct device *dev) +{ + int retval; + struct pci_dev *pdev = to_pci_dev(dev); + bool wake; + + retval = __e1000_shutdown(pdev, &wake); + device_set_wakeup_enable(dev, wake); + + return retval; +} + +static int __maybe_unused e1000_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + if (adapter->ecdev) { + return -EBUSY; + } + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot enable PCI device from suspend\n"); + return err; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + e1000_power_up_phy(adapter); + e1000_reset(adapter); + ew32(WUS, ~0); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) + e1000_up(adapter); + + if (!adapter->ecdev) { + netif_device_attach(netdev); + } + + return 0; +} + +static void e1000_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __e1000_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (disable_hardirq(adapter->pdev->irq)) + e1000_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
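+ * It detaches the net device and requests a slot reset from the
+ * PCI core, or returns PCI_ERS_RESULT_DISCONNECT if the failure
+ * is permanent.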
+ */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e1000_down(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000_reset(adapter); + ew32(WUS, ~0); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) { + if (e1000_up(adapter)) { + pr_info("can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +/* e1000_main.c */ diff --git a/devices/e1000/e1000_main-5.15-orig.c b/devices/e1000/e1000_main-5.15-orig.c new file mode 100644 index 00000000..bed4f040 --- /dev/null +++ b/devices/e1000/e1000_main-5.15-orig.c @@ -0,0 +1,5314 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +#include "e1000.h" +#include +#include +#include +#include +#include + +char e1000_driver_name[] = "e1000"; +static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; +static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; + +/* e1000_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * Macro expands to... 
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + */ +static const struct pci_device_id e1000_pci_tbl[] = { + INTEL_E1000_ETHERNET_DEVICE(0x1000), + INTEL_E1000_ETHERNET_DEVICE(0x1001), + INTEL_E1000_ETHERNET_DEVICE(0x1004), + INTEL_E1000_ETHERNET_DEVICE(0x1008), + INTEL_E1000_ETHERNET_DEVICE(0x1009), + INTEL_E1000_ETHERNET_DEVICE(0x100C), + INTEL_E1000_ETHERNET_DEVICE(0x100D), + INTEL_E1000_ETHERNET_DEVICE(0x100E), + INTEL_E1000_ETHERNET_DEVICE(0x100F), + INTEL_E1000_ETHERNET_DEVICE(0x1010), + INTEL_E1000_ETHERNET_DEVICE(0x1011), + INTEL_E1000_ETHERNET_DEVICE(0x1012), + INTEL_E1000_ETHERNET_DEVICE(0x1013), + INTEL_E1000_ETHERNET_DEVICE(0x1014), + INTEL_E1000_ETHERNET_DEVICE(0x1015), + INTEL_E1000_ETHERNET_DEVICE(0x1016), + INTEL_E1000_ETHERNET_DEVICE(0x1017), + INTEL_E1000_ETHERNET_DEVICE(0x1018), + INTEL_E1000_ETHERNET_DEVICE(0x1019), + INTEL_E1000_ETHERNET_DEVICE(0x101A), + INTEL_E1000_ETHERNET_DEVICE(0x101D), + INTEL_E1000_ETHERNET_DEVICE(0x101E), + INTEL_E1000_ETHERNET_DEVICE(0x1026), + INTEL_E1000_ETHERNET_DEVICE(0x1027), + INTEL_E1000_ETHERNET_DEVICE(0x1028), + INTEL_E1000_ETHERNET_DEVICE(0x1075), + INTEL_E1000_ETHERNET_DEVICE(0x1076), + INTEL_E1000_ETHERNET_DEVICE(0x1077), + INTEL_E1000_ETHERNET_DEVICE(0x1078), + INTEL_E1000_ETHERNET_DEVICE(0x1079), + INTEL_E1000_ETHERNET_DEVICE(0x107A), + INTEL_E1000_ETHERNET_DEVICE(0x107B), + INTEL_E1000_ETHERNET_DEVICE(0x107C), + INTEL_E1000_ETHERNET_DEVICE(0x108A), + INTEL_E1000_ETHERNET_DEVICE(0x1099), + INTEL_E1000_ETHERNET_DEVICE(0x10B5), + INTEL_E1000_ETHERNET_DEVICE(0x2E6E), + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr); +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr); +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +void e1000_update_stats(struct e1000_adapter *adapter); + +static int e1000_init_module(void); +static void e1000_exit_module(void); +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void e1000_remove(struct pci_dev *pdev); +static int e1000_alloc_queues(struct e1000_adapter *adapter); +static int e1000_sw_init(struct e1000_adapter *adapter); +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); +static void e1000_configure_tx(struct e1000_adapter *adapter); +static void e1000_configure_rx(struct e1000_adapter *adapter); +static void e1000_setup_rctl(struct e1000_adapter *adapter); +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static void e1000_set_rx_mode(struct net_device 
*netdev); +static void e1000_update_phy_info_task(struct work_struct *work); +static void e1000_watchdog(struct work_struct *work); +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +static int e1000_change_mtu(struct net_device *netdev, int new_mtu); +static int e1000_set_mac(struct net_device *netdev, void *p); +static irqreturn_t e1000_intr(int irq, void *data); +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static int e1000_clean(struct napi_struct *napi, int budget); +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ +} +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); +static void e1000_enter_82542_rst(struct e1000_adapter *adapter); +static void e1000_leave_82542_rst(struct e1000_adapter *adapter); +static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue); +static void e1000_reset_task(struct work_struct *work); +static void e1000_smartspeed(struct e1000_adapter *adapter); +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb); + +static bool e1000_vlan_used(struct e1000_adapter *adapter); +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on); +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static void e1000_restore_vlan(struct e1000_adapter *adapter); + +static int __maybe_unused e1000_suspend(struct device *dev); +static int __maybe_unused e1000_resume(struct device *dev); +static void e1000_shutdown(struct pci_dev *pdev); + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void e1000_netpoll (struct net_device *netdev); +#endif + +#define COPYBREAK_DEFAULT 256 +static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state); +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); +static void e1000_io_resume(struct pci_dev *pdev); + +static const struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume); + +static struct pci_driver e1000_driver = { + .name = e1000_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = e1000_remove, + .driver = { + .pm = 
&e1000_pm_ops, + }, + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); +MODULE_LICENSE("GPL v2"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +/** + * e1000_get_hw_dev - helper function for getting netdev + * @hw: pointer to HW struct + * + * return device used by hardware layer to print debugging information + * + **/ +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init e1000_init_module(void) +{ + int ret; + pr_info("%s\n", e1000_driver_string); + + pr_info("%s\n", e1000_copyright); + + ret = pci_register_driver(&e1000_driver); + if (copybreak != COPYBREAK_DEFAULT) { + if (copybreak == 0) + pr_info("copybreak disabled\n"); + else + pr_info("copybreak enabled for " + "packets <= %u bytes\n", copybreak); + } + return ret; +} + +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} + +module_exit(e1000_exit_module); + +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + irq_handler_t handler = e1000_intr; + int irq_flags = IRQF_SHARED; + int err; + + err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (err) { + e_err(probe, "Unable to allocate interrupt Error: %d\n", err); + } + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + synchronize_irq(adapter->pdev->irq); +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMS, IMS_ENABLE_MASK); + E1000_WRITE_FLUSH(); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u16 vid = hw->mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (!e1000_vlan_used(adapter)) + return; + + if (!test_bit(vid, adapter->active_vlans)) { + if (hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + old_vid); + } else { + adapter->mng_vlan_id = vid; + } +} + +static void e1000_init_manageability(struct 
e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + ew32(MANC, manc); + } +} + +static void e1000_release_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + + ew32(MANC, manc); + } +} + +/** + * e1000_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + e1000_set_rx_mode(netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + /* call E1000_DESC_UNUSED which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct e1000_rx_ring *ring = &adapter->rx_ring[i]; + adapter->alloc_rx_buf(adapter, ring, + E1000_DESC_UNUSED(ring)); + } +} + +int e1000_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->flags); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_wake_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + return 0; +} + +/** + * e1000_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000_reset *** + **/ +void e1000_power_up_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 mii_reg = 0; + + /* Just clear the power down bit to wake the phy back up */ + if (hw->media_type == e1000_media_type_copper) { + /* according to the manual, the phy will retain its + * settings across a power-down/up cycle + */ + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + } +} + +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is true * + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active + */ + if (!adapter->wol && hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + u16 mii_reg = 0; + + switch (hw->mac_type) { + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (er32(MANC) & E1000_MANC_SMBUS_EN) + goto out; + break; + default: + goto out; + } + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + msleep(1); + } +out: + return; +} + +static void e1000_down_and_stop(struct e1000_adapter *adapter) +{ + set_bit(__E1000_DOWN, &adapter->flags); + + 
cancel_delayed_work_sync(&adapter->watchdog_task); + + /* + * Since the watchdog task can reschedule other tasks, we should cancel + * it first, otherwise we can run into the situation when a work is + * still running after the adapter has been turned down. + */ + + cancel_delayed_work_sync(&adapter->phy_info_task); + cancel_delayed_work_sync(&adapter->fifo_stall_task); + + /* Only kill reset task if adapter is not resetting */ + if (!test_bit(__E1000_RESETTING, &adapter->flags)) + cancel_work_sync(&adapter->reset_task); +} + +void e1000_down(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl, tctl; + + /* disable receives in the hardware */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_tx_disable(netdev); + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + /* flush both disables and wait for them to finish */ + E1000_WRITE_FLUSH(); + msleep(10); + + /* Set the carrier off after transmits have been disabled in the + * hardware, to avoid race conditions with e1000_watchdog() (which + * may be running concurrently to us, checking for the carrier + * bit to decide whether it should enable transmits again). Such + * a race condition would result into transmission being disabled + * in the hardware until the next IFF_DOWN+IFF_UP cycle. + */ + netif_carrier_off(netdev); + + napi_disable(&adapter->napi); + + e1000_irq_disable(adapter); + + /* Setting DOWN must be after irq_disable to prevent + * a screaming interrupt. Setting DOWN also prevents + * tasks from rescheduling. + */ + e1000_down_and_stop(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + e1000_reset(adapter); + e1000_clean_all_tx_rings(adapter); + e1000_clean_all_rx_rings(adapter); +} + +void e1000_reinit_locked(struct e1000_adapter *adapter) +{ + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + /* only run the task if not already down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) { + e1000_down(adapter); + e1000_up(adapter); + } + + clear_bit(__E1000_RESETTING, &adapter->flags); +} + +void e1000_reset(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pba = 0, tx_space, min_tx_space, min_rx_space; + bool legacy_pba_adjust = false; + u16 hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. 
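+ * (The PBA register splits the controller's packet buffer between
+ * the Rx and Tx FIFOs; the adjustments below trade Rx space for Tx
+ * space when larger frames are in use.)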
+ */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + case e1000_82540: + case e1000_82541: + case e1000_82541_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_48K; + break; + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + pba = E1000_PBA_48K; + break; + case e1000_82547: + case e1000_82547_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_30K; + break; + case e1000_undefined: + case e1000_num_macs: + break; + } + + if (legacy_pba_adjust) { + if (hw->max_frame_size > E1000_RXBUFFER_8192) + pba -= 8; /* allocate more FIFO for Tx */ + + if (hw->mac_type == e1000_82547) { + adapter->tx_fifo_head = 0; + adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; + adapter->tx_fifo_size = + (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; + atomic_set(&adapter->tx_fifo_stall, 0); + } + } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* adjust PBA for jumbo frames */ + ew32(PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (hw->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = hw->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + + /* PCI/PCIx hardware has PBA alignment constraints */ + switch (hw->mac_type) { + case e1000_82545 ... e1000_82546_rev_3: + pba &= ~(E1000_PBA_8K - 1); + break; + default: + break; + } + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment or use Early Receive if available + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + } + + ew32(PBA, pba); + + /* flow control settings: + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. 
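+ * (pba holds the Rx allocation in kilobytes at this point, hence
+ * the << 10 below to convert it to bytes.)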
+ * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame + */ + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - hw->max_frame_size)); + + hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ + hw->fc_low_water = hw->fc_high_water - 8; + hw->fc_pause_time = E1000_FC_PAUSE_TIME; + hw->fc_send_xon = 1; + hw->fc = hw->original_fc; + + /* Allow time for pending master requests to run */ + e1000_reset_hw(hw); + if (hw->mac_type >= e1000_82544) + ew32(WUC, 0); + + if (e1000_init_hw(hw)) + e_dev_err("Hardware Error\n"); + e1000_update_mng_vlan(adapter); + + /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ + if (hw->mac_type >= e1000_82544 && + hw->autoneg == 1 && + hw->autoneg_advertised == ADVERTISE_1000_FULL) { + u32 ctrl = er32(CTRL); + /* clear phy power management bit if we are in gig only mode, + * which if enabled will attempt negotiation to 100Mb, which + * can cause a loss of link at power off or driver unload + */ + ctrl &= ~E1000_CTRL_SWDPIN3; + ew32(CTRL, ctrl); + } + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETHERNET_IEEE_VLAN_TYPE); + + e1000_reset_adaptive(hw); + e1000_phy_get_info(hw, &adapter->phy_info); + + e1000_release_manageability(adapter); +} + +/* Dump the eeprom for users having checksum issues */ +static void e1000_dump_eeprom(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_eeprom eeprom; + const struct ethtool_ops *ops = netdev->ethtool_ops; + u8 *data; + int i; + u16 csum_old, csum_new = 0; + + eeprom.len = ops->get_eeprom_len(netdev); + eeprom.offset = 0; + + data = kmalloc(eeprom.len, GFP_KERNEL); + if (!data) + return; + + ops->get_eeprom(netdev, &eeprom, data); + + csum_old = (data[EEPROM_CHECKSUM_REG * 2]) + + (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8); + for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2) + csum_new += data[i] + (data[i + 1] << 8); + csum_new = EEPROM_SUM - csum_new; + + pr_err("/*********************/\n"); + pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old); + pr_err("Calculated : 0x%04x\n", csum_new); + + pr_err("Offset Values\n"); + pr_err("======== ======\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); + + pr_err("Include this output when contacting your support provider.\n"); + pr_err("This is not a software error! Something bad happened to\n"); + pr_err("your hardware or EEPROM image. Ignoring this problem could\n"); + pr_err("result in further problems, possibly loss of data,\n"); + pr_err("corruption or system hangs!\n"); + pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n"); + pr_err("which is invalid and requires you to set the proper MAC\n"); + pr_err("address manually before continuing to enable this network\n"); + pr_err("device. 
Please inspect the EEPROM dump and report the\n"); + pr_err("issue to your hardware vendor or Intel Customer Support.\n"); + pr_err("/*********************/\n"); + + kfree(data); +} + +/** + * e1000_is_need_ioport - determine if an adapter needs ioport resources or not + * @pdev: PCI device information struct + * + * Return true if an adapter needs ioport resources + **/ +static int e1000_is_need_ioport(struct pci_dev *pdev) +{ + switch (pdev->device) { + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541ER_LOM: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + return true; + default: + return false; + } +} + +static netdev_features_t e1000_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int e1000_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + e1000_vlan_mode(netdev, features); + + if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + adapter->rx_csum = !!(features & NETIF_F_RXCSUM); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + + return 1; +} + +static const struct net_device_ops e1000_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_set_rx_mode = e1000_set_rx_mode, + .ndo_set_mac_address = e1000_set_mac, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_change_mtu = e1000_change_mtu, + .ndo_eth_ioctl = e1000_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif + .ndo_fix_features = e1000_fix_features, + .ndo_set_features = e1000_set_features, +}; + +/** + * e1000_init_hw_struct - initialize members of hw struct + * @adapter: board private struct + * @hw: structure used by e1000_hw.c + * + * Factors out initialization of the e1000_hw struct to its own function + * that can be called very early at init (just after struct allocation). + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + * Returns negative error codes if MAC type setup fails. 
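+ * It must be called before e1000_sw_init() and before the DMA mask
+ * is chosen in e1000_probe(), which relies on the bus type detected
+ * here.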
+ */ +static int e1000_init_hw_struct(struct e1000_adapter *adapter, + struct e1000_hw *hw) +{ + struct pci_dev *pdev = adapter->pdev; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + hw->max_frame_size = adapter->netdev->mtu + + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; + + /* identify the MAC */ + if (e1000_set_mac_type(hw)) { + e_err(probe, "Unknown MAC Type\n"); + return -EIO; + } + + switch (hw->mac_type) { + default: + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->phy_init_script = 1; + break; + } + + e1000_set_media_type(hw); + e1000_get_bus_info(hw); + + hw->wait_autoneg_complete = false; + hw->tbi_compatibility_en = true; + hw->adaptive_ifs = true; + + /* Copper options */ + + if (hw->media_type == e1000_media_type_copper) { + hw->mdix = AUTO_ALL_MODES; + hw->disable_polarity_correction = false; + hw->master_slave = E1000_MASTER_SLAVE; + } + + return 0; +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter = NULL; + struct e1000_hw *hw; + + static int cards_found; + static int global_quad_port_a; /* global ksp3 port a indication */ + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 tmp = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + int bars, need_ioport; + bool disable_dev = false; + + /* do not allocate ioport bars when not needed */ + need_ioport = e1000_is_need_ioport(pdev); + if (need_ioport) { + bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); + err = pci_enable_device(pdev); + } else { + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_enable_device_mem(pdev); + } + if (err) + return err; + + err = pci_request_selected_regions(pdev, bars, e1000_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + adapter->bars = bars; + adapter->need_ioport = need_ioport; + + hw = &adapter->hw; + hw->back = adapter; + + err = -EIO; + hw->hw_addr = pci_ioremap_bar(pdev, BAR_0); + if (!hw->hw_addr) + goto err_ioremap; + + if (adapter->need_ioport) { + for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + hw->io_base = pci_resource_start(pdev, i); + break; + } + } + } + + /* make ready for any if (hw->...) 
below */ + err = e1000_init_hw_struct(adapter, hw); + if (err) + goto err_sw_init; + + /* there is a workaround being applied below that limits + * 64-bit DMA addresses to 64-bit hardware. There are some + * 32-bit adapters that Tx hang when given 64-bit DMA addresses + */ + pci_using_dac = 0; + if ((hw->bus_type == e1000_bus_type_pcix) && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + pr_err("No usable DMA config, aborting\n"); + goto err_dma; + } + } + + netdev->netdev_ops = &e1000_netdev_ops; + e1000_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + + /* setup the private structure */ + + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + err = -EIO; + if (hw->mac_type == e1000_ce4100) { + hw->ce4100_gbe_mdio_base_virt = + ioremap(pci_resource_start(pdev, BAR_1), + pci_resource_len(pdev, BAR_1)); + + if (!hw->ce4100_gbe_mdio_base_virt) + goto err_mdio_ioremap; + } + + if (hw->mac_type >= e1000_82543) { + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER; + } + + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_82547)) + netdev->hw_features |= NETIF_F_TSO; + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->features |= netdev->hw_features; + netdev->hw_features |= (NETIF_F_RXCSUM | + NETIF_F_RXALL | + NETIF_F_RXFCS); + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + netdev->vlan_features |= (NETIF_F_TSO | + NETIF_F_HW_CSUM | + NETIF_F_SG); + + /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */ + if (hw->device_id != E1000_DEV_ID_82545EM_COPPER || + hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE) + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* MTU range: 46 - 16110 */ + netdev->min_mtu = ETH_ZLEN - ETH_HLEN; + netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); + + adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); + + /* initialize eeprom parameters */ + if (e1000_init_eeprom_params(hw)) { + e_err(probe, "EEPROM initialization failed\n"); + goto err_eeprom; + } + + /* before reading the EEPROM, reset the controller to + * put the device in a known good starting state + */ + + e1000_reset_hw(hw); + + /* make sure the EEPROM is good */ + if (e1000_validate_eeprom_checksum(hw) < 0) { + e_err(probe, "The EEPROM Checksum Is Not Valid\n"); + e1000_dump_eeprom(adapter); + /* set MAC address to all zeroes to invalidate and temporary + * disable this device for the user. 
This blocks regular + * traffic while still permitting ethtool ioctls from reaching + * the hardware as well as allowing the user to run the + * interface after manually setting a hw addr using + * `ip set address` + */ + memset(hw->mac_addr, 0, netdev->addr_len); + } else { + /* copy the MAC address out of the EEPROM */ + if (e1000_read_mac_addr(hw)) + e_err(probe, "EEPROM Read Error\n"); + } + /* don't block initialization here due to bad MAC address */ + memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) + e_err(probe, "Invalid MAC Address\n"); + + + INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog); + INIT_DELAYED_WORK(&adapter->fifo_stall_task, + e1000_82547_tx_fifo_stall_task); + INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); + INIT_WORK(&adapter->reset_task, e1000_reset_task); + + e1000_check_options(adapter); + + /* Initial Wake on LAN setting + * If APM wake is enabled in the EEPROM, + * enable the ACPI Magic Packet filter + */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + break; + case e1000_82544: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_82544_APM; + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + break; + } + fallthrough; + default: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + break; + } + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82546GB_PCIE: + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->eeprom_wol = 0; + else + adapter->quad_port_a = true; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + } + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* Auto detect PHY address */ + if (hw->mac_type == e1000_ce4100) { + for (i = 0; i < 32; i++) { + hw->phy_addr = i; + e1000_read_phy_reg(hw, PHY_ID2, &tmp); + + if (tmp != 0 && tmp != 0xFF) + break; + } + + if (i >= 32) + goto err_eeprom; + } + + /* reset the hardware with the new settings */ + e1000_reset(adapter); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + e1000_vlan_filter_on_off(adapter, false); + + /* print bus type/speed/width info */ + e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", + ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), + ((hw->bus_speed == e1000_bus_speed_133) ? 133 : + (hw->bus_speed == e1000_bus_speed_120) ? 120 : + (hw->bus_speed == e1000_bus_speed_100) ? 100 : + (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33), + ((hw->bus_width == e1000_bus_width_64) ? 
64 : 32), + netdev->dev_addr); + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); + + cards_found++; + return 0; + +err_register: +err_eeprom: + e1000_phy_hw_reset(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_dma: +err_sw_init: +err_mdio_ioremap: + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); +err_ioremap: + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, bars); +err_pci_reg: + if (!adapter || disable_dev) + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. That could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool disable_dev; + + e1000_down_and_stop(adapter); + e1000_release_manageability(adapter); + + unregister_netdev(netdev); + + e1000_phy_hw_reset(hw); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + if (hw->mac_type == e1000_ce4100) + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_selected_regions(pdev, adapter->bars); + + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); + + if (disable_dev) + pci_disable_device(pdev); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * e1000_init_hw_struct MUST be called before this function + **/ +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + + if (e1000_alloc_queues(adapter)) { + e_err(probe, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + e1000_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__E1000_DOWN, &adapter->flags); + + return 0; +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. + **/ +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct e1000_tx_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct e1000_rx_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). 
At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog task is started, + * and the stack is notified that the interface is ready. + **/ +int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->flags)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + e1000_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { + e1000_update_mng_vlan(adapter); + } + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as e1000_up() */ + clear_bit(__E1000_DOWN, &adapter->flags); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_start_queue(netdev); + + /* fire a link status change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + + return E1000_SUCCESS; + +err_req_irq: + e1000_power_down_phy(adapter); + e1000_free_all_rx_resources(adapter); +err_setup_rx: + e1000_free_all_tx_resources(adapter); +err_setup_tx: + e1000_reset(adapter); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
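+ *
+ * A rough sketch of the shutdown ordering implemented below: wait (in
+ * 10-20 ms steps) for any in-flight reset to clear __E1000_RESETTING, set
+ * __E1000_DOWN so the reset task stops rescheduling itself, then tear down
+ * in order: e1000_down(), e1000_power_down_phy(), e1000_free_irq(), and
+ * finally the Tx/Rx descriptor resources.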
+ **/ +int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int count = E1000_CHECK_RESET_COUNT; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(count < 0); + + /* signal that we're down so that the reset task will no longer run */ + set_bit(__E1000_DOWN, &adapter->flags); + clear_bit(__E1000_RESETTING, &adapter->flags); + + e1000_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); + + e1000_free_all_tx_resources(adapter); + e1000_free_all_rx_resources(adapter); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + } + + return 0; +} + +/** + * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary + * @adapter: address of board private structure + * @start: address of beginning of memory + * @len: length of memory + **/ +static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, + unsigned long len) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long begin = (unsigned long)start; + unsigned long end = begin + len; + + /* First rev 82545 and 82546 need to not allow any memory + * write location to cross 64k boundary due to errata 23 + */ + if (hw->mac_type == e1000_82545 || + hw->mac_type == e1000_ce4100 || + hw->mac_type == e1000_82546) { + return ((begin ^ (end - 1)) >> 16) == 0; + } + + return true; +} + +/** + * e1000_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @txdr: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct e1000_tx_buffer) * txdr->count; + txdr->buffer_info = vzalloc(size); + if (!txdr->buffer_info) + return -ENOMEM; + + /* round up to nearest 4K */ + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { +setup_tx_desc_die: + vfree(txdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + void *olddesc = txdr->desc; + dma_addr_t olddma = txdr->dma; + e_err(tx_err, "txdr align check failed: %u bytes at %p\n", + txdr->size, txdr->desc); + /* Try again, without freeing the previous */ + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, + &txdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + goto setup_tx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory " + "for the transmit descriptor ring\n"); + vfree(txdr->buffer_info); + return -ENOMEM; + } else { + /* Free old allocation, new allocation 
was successful */ + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + } + } + memset(txdr->desc, 0, txdr->size); + + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + + return 0; +} + +/** + * e1000_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_tx_resources(adapter, + &adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + u64 tdba; + struct e1000_hw *hw = &adapter->hw; + u32 tdlen, tctl, tipg; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + + switch (adapter->num_tx_queues) { + case 1: + default: + tdba = adapter->tx_ring[0].dma; + tdlen = adapter->tx_ring[0].count * + sizeof(struct e1000_tx_desc); + ew32(TDLEN, tdlen); + ew32(TDBAH, (tdba >> 32)); + ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); + ew32(TDT, 0); + ew32(TDH, 0); + adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? + E1000_TDH : E1000_82542_TDH); + adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? + E1000_TDT : E1000_82542_TDT); + break; + } + + /* Set the default values for the Tx Inter Packet Gap timer */ + if ((hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes)) + tipg = DEFAULT_82543_TIPG_IPGT_FIBER; + else + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + tipg = DEFAULT_82542_TIPG_IPGT; + ipgr1 = DEFAULT_82542_TIPG_IPGR1; + ipgr2 = DEFAULT_82542_TIPG_IPGR2; + break; + default: + ipgr1 = DEFAULT_82543_TIPG_IPGR1; + ipgr2 = DEFAULT_82543_TIPG_IPGR2; + break; + } + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + + ew32(TIDV, adapter->tx_int_delay); + if (hw->mac_type >= e1000_82540) + ew32(TADV, adapter->tx_abs_int_delay); + + /* Program the Transmit Control Register */ + + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + e1000_config_collision_dist(hw); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + if (hw->mac_type < e1000_82543) + adapter->txd_cmd |= E1000_TXD_CMD_RPS; + else + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + /* Cache if we're 82544 running in PCI-X because we'll + * need this to apply a workaround later in the send path. 
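+ *
+ * That workaround lives in e1000_tx_map(): on an 82544 in PCI-X mode a DMA
+ * buffer must not end on an evenly-aligned dword, so any buffer whose last
+ * byte would land on such an address is shortened by 4 bytes and the
+ * remainder carried into the next descriptor. Condensed sketch of the
+ * check applied there (illustrative only):
+ *
+ *   end = (unsigned long)(skb->data + offset + size - 1);
+ *   if (adapter->pcix_82544 && !(end & 4) && size > 4)
+ *           size -= 4;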
+ */ + if (hw->mac_type == e1000_82544 && + hw->bus_type == e1000_bus_type_pcix) + adapter->pcix_82544 = true; + + ew32(TCTL, tctl); + +} + +/** + * e1000_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rxdr: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct e1000_rx_buffer) * rxdr->count; + rxdr->buffer_info = vzalloc(size); + if (!rxdr->buffer_info) + return -ENOMEM; + + desc_len = sizeof(struct e1000_rx_desc); + + /* Round up to nearest 4K */ + + rxdr->size = rxdr->count * desc_len; + rxdr->size = ALIGN(rxdr->size, 4096); + + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { +setup_rx_desc_die: + vfree(rxdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + void *olddesc = rxdr->desc; + dma_addr_t olddma = rxdr->dma; + e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", + rxdr->size, rxdr->desc); + /* Try again, without freeing the previous */ + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, + &rxdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + goto setup_rx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory for " + "the Rx descriptor ring\n"); + goto setup_rx_desc_die; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + } + } + memset(rxdr->desc, 0, rxdr->size); + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + rxdr->rx_skb_top = NULL; + + return 0; +} + +/** + * e1000_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_rx_resources(adapter, + &adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = er32(RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + + rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + + if (hw->tbi_compatibility_on == 1) + rctl |= E1000_RCTL_SBP; + else + rctl &= ~E1000_RCTL_SBP; + + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case E1000_RXBUFFER_2048: + default: + 
rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + ew32(RCTL, rctl); +} + +/** + * e1000_configure_rx - Configure 8254x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + u64 rdba; + struct e1000_hw *hw = &adapter->hw; + u32 rdlen, rctl, rxcsum; + + if (adapter->netdev->mtu > ETH_DATA_LEN) { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + if (hw->mac_type >= e1000_82540) { + ew32(RADV, adapter->rx_abs_int_delay); + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + } + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + switch (adapter->num_rx_queues) { + case 1: + default: + rdba = adapter->rx_ring[0].dma; + ew32(RDLEN, rdlen); + ew32(RDBAH, (rdba >> 32)); + ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); + ew32(RDT, 0); + ew32(RDH, 0); + adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? + E1000_RDH : E1000_82542_RDH); + adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 
+ E1000_RDT : E1000_82542_RDT); + break; + } + + /* Enable 82543 Receive Checksum Offload for TCP and UDP */ + if (hw->mac_type >= e1000_82543) { + rxcsum = er32(RXCSUM); + if (adapter->rx_csum) + rxcsum |= E1000_RXCSUM_TUOFL; + else + /* don't need to clear IPPCSE as it defaults to 0 */ + rxcsum &= ~E1000_RXCSUM_TUOFL; + ew32(RXCSUM, rxcsum); + } + + /* Enable Receives */ + ew32(RCTL, rctl | E1000_RCTL_EN); +} + +/** + * e1000_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * e1000_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void e1000_free_all_tx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); +} + +static void +e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_tx_buffer *buffer_info) +{ + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; + /* buffer_info must be completely set up in the transmit path */ +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + netdev_reset_queue(adapter->netdev); + size = sizeof(struct e1000_tx_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->last_tx_tso = false; + + writel(0, hw->hw_addr + tx_ring->tdh); + writel(0, hw->hw_addr + tx_ring->tdt); +} + +/** + * e1000_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * e1000_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_rx_ring(adapter, rx_ring); + 
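+ /* Teardown order: e1000_clean_rx_ring() above unmaps the DMA buffers and
+ * frees the page/frag data first; only then can the buffer_info array and
+ * the coherent descriptor ring itself be released.
+ */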
+ vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * e1000_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +void e1000_free_all_rx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) +static unsigned int e1000_frag_len(const struct e1000_adapter *a) +{ + return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static void *e1000_alloc_frag(const struct e1000_adapter *a) +{ + unsigned int len = e1000_frag_len(a); + u8 *data = netdev_alloc_frag(len); + + if (likely(data)) + data += E1000_HEADROOM; + return data; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_rx_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx netfrags */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (adapter->clean_rx == e1000_clean_rx_irq) { + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.data) { + skb_free_frag(buffer_info->rxbuf.data); + buffer_info->rxbuf.data = NULL; + } + } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { + if (buffer_info->dma) + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.page) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + } + } + + buffer_info->dma = 0; + } + + /* there also may be some cached data from a chained receive */ + napi_free_frags(&adapter->napi); + rx_ring->rx_skb_top = NULL; + + size = sizeof(struct e1000_rx_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, hw->hw_addr + rx_ring->rdh); + writel(0, hw->hw_addr + rx_ring->rdt); +} + +/** + * e1000_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +/* The 82542 2.0 (revision 2) needs to have the receive unit in reset + * and memory write and invalidate disabled for certain operations + */ +static void e1000_enter_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + e1000_pci_clear_mwi(hw); + + rctl = er32(RCTL); + rctl |= E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (netif_running(netdev)) + e1000_clean_all_rx_rings(adapter); +} + +static void e1000_leave_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + rctl = 
er32(RCTL); + rctl &= ~E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + + if (netif_running(netdev)) { + /* No need to loop, because 82542 supports only 1 queue */ + struct e1000_rx_ring *ring = &adapter->rx_ring[0]; + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); + } +} + +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); + + e1000_rar_set(hw, hw->mac_addr, 0); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + return 0; +} + +/** + * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void e1000_set_rx_mode(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + bool use_uc = false; + u32 rctl; + u32 hash_value; + int i, rar_entries = E1000_RAR_ENTRIES; + int mta_reg_count = E1000_NUM_MTA_REGISTERS; + u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); + + if (!mcarray) + return; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = er32(RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + rctl &= ~E1000_RCTL_VFE; + } else { + if (netdev->flags & IFF_ALLMULTI) + rctl |= E1000_RCTL_MPE; + else + rctl &= ~E1000_RCTL_MPE; + /* Enable VLAN filter if there is a VLAN */ + if (e1000_vlan_used(adapter)) + rctl |= E1000_RCTL_VFE; + } + + if (netdev_uc_count(netdev) > rar_entries - 1) { + rctl |= E1000_RCTL_UPE; + } else if (!(netdev->flags & IFF_PROMISC)) { + rctl &= ~E1000_RCTL_UPE; + use_uc = true; + } + + ew32(RCTL, rctl); + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + /* load the first 14 addresses into the exact filters 1-14. Unicast + * addresses take precedence to avoid disabling unicast filtering + * when possible. 
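+ *
+ * Illustrative mapping used below: RAR 0 keeps the station address, RARs
+ * 1-14 hold exact-match entries, and every additional multicast address is
+ * folded into the 32-bit MTA registers (128 of them, per the 0x7F mask):
+ *
+ *   hash_value = e1000_hash_mc_addr(hw, ha->addr);
+ *   hash_reg   = (hash_value >> 5) & 0x7F;   selects the MTA register
+ *   hash_bit   =  hash_value & 0x1F;         selects the bit within it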
+ * + * RAR 0 is used for the station MAC address + * if there are not 14 addresses, go ahead and clear the filters + */ + i = 1; + if (use_uc) + netdev_for_each_uc_addr(ha, netdev) { + if (i == rar_entries) + break; + e1000_rar_set(hw, ha->addr, i++); + } + + netdev_for_each_mc_addr(ha, netdev) { + if (i == rar_entries) { + /* load any remaining addresses into the hash table */ + u32 hash_reg, hash_bit, mta; + hash_value = e1000_hash_mc_addr(hw, ha->addr); + hash_reg = (hash_value >> 5) & 0x7F; + hash_bit = hash_value & 0x1F; + mta = (1 << hash_bit); + mcarray[hash_reg] |= mta; + } else { + e1000_rar_set(hw, ha->addr, i++); + } + } + + for (; i < rar_entries; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); + E1000_WRITE_FLUSH(); + } + + /* write the hash table completely, write from bottom to avoid + * both stupid write combining chipsets, and flushing each write + */ + for (i = mta_reg_count - 1; i >= 0 ; i--) { + /* If we are on an 82544 has an errata where writing odd + * offsets overwrites the previous even offset, but writing + * backwards over the range solves the issue by always + * writing the odd offset first + */ + E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); + } + E1000_WRITE_FLUSH(); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + kfree(mcarray); +} + +/** + * e1000_update_phy_info_task - get phy info + * @work: work struct contained inside adapter struct + * + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void e1000_update_phy_info_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + phy_info_task.work); + + e1000_phy_get_info(&adapter->hw, &adapter->phy_info); +} + +/** + * e1000_82547_tx_fifo_stall_task - task to complete work + * @work: work struct contained inside adapter struct + **/ +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + fifo_stall_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 tctl; + + if (atomic_read(&adapter->tx_fifo_stall)) { + if ((er32(TDT) == er32(TDH)) && + (er32(TDFT) == er32(TDFH)) && + (er32(TDFTS) == er32(TDFHS))) { + tctl = er32(TCTL); + ew32(TCTL, tctl & ~E1000_TCTL_EN); + ew32(TDFT, adapter->tx_head_addr); + ew32(TDFH, adapter->tx_head_addr); + ew32(TDFTS, adapter->tx_head_addr); + ew32(TDFHS, adapter->tx_head_addr); + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); + + adapter->tx_fifo_head = 0; + atomic_set(&adapter->tx_fifo_stall, 0); + netif_wake_queue(netdev); + } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } + } +} + +bool e1000_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or rx + * sequence error interrupt (except on intel ce4100). 
+ * get_link_status will stay false until the + * e1000_check_for_link establishes link for copper adapters + * ONLY + */ + switch (hw->media_type) { + case e1000_media_type_copper: + if (hw->mac_type == e1000_ce4100) + hw->get_link_status = 1; + if (hw->get_link_status) { + e1000_check_for_link(hw); + link_active = !hw->get_link_status; + } else { + link_active = true; + } + break; + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_active = hw->serdes_has_link; + break; + default: + break; + } + + return link_active; +} + +/** + * e1000_watchdog - work function + * @work: work struct contained inside adapter struct + **/ +static void e1000_watchdog(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + watchdog_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_ring *txdr = adapter->tx_ring; + u32 link, tctl; + + link = e1000_has_link(adapter); + if ((netif_carrier_ok(netdev)) && link) + goto link_up; + + if (link) { + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + /* update snapshot of PHY registers on LSC */ + e1000_get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = er32(CTRL); + pr_info("%s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & + E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & + E1000_CTRL_RFCE) ? "RX" : ((ctrl & + E1000_CTRL_TFCE) ? "TX" : "None"))); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + /* maybe add some timeout factor ? */ + break; + } + + /* enable transmits in the hardware */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + netif_carrier_on(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + adapter->smartspeed = 0; + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + pr_info("%s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + } + + e1000_smartspeed(adapter); + } + +link_up: + e1000_update_stats(adapter); + + hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + hw->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; + adapter->gorcl_old = adapter->stats.gorcl; + adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; + adapter->gotcl_old = adapter->stats.gotcl; + + e1000_update_adaptive(hw); + + if (!netif_carrier_ok(netdev)) { + if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
+ */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* exit immediately since reset is imminent */ + return; + } + } + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { + /* Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; + u32 dif = (adapter->gotcl > adapter->gorcl ? + adapter->gotcl - adapter->gorcl : + adapter->gorcl - adapter->gotcl) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + ew32(ITR, 1000000000 / (itr * 256)); + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + ew32(ICS, E1000_ICS_RXDMT0); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = true; + + /* Reschedule the task */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
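+ *
+ * Worked example of the register encoding used throughout this file (the
+ * 1000000000 / (itr * 256) writes): the ITR register holds the minimum
+ * inter-interrupt gap in 256 ns units, so a target of 8000 ints/s is
+ * programmed as 1000000000 / (8000 * 256) ~= 488, and 20000 ints/s as ~195.
+ *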
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see e1000_param.c) + **/ +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, int bytes) +{ + unsigned int retval = itr_setting; + struct e1000_hw *hw = &adapter->hw; + + if (unlikely(hw->mac_type < e1000_82540)) + goto update_itr_done; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* jumbo frames get bulk treatment*/ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* jumbo frames need bulk latency setting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) + retval = bulk_latency; + else if (packets <= 2 && bytes < 512) + retval = lowest_latency; + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + if (unlikely(hw->mac_type < e1000_82540)) + return; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (unlikely(adapter->link_speed != SPEED_1000)) { + new_itr = 4000; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + ew32(ITR, 1000000000 / (new_itr * 256)); + } +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_NO_FCS 0x00000010 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + + if (skb_is_gso(skb)) { + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_is_gso_v6(skb)) { + tcp_v6_gso_csum_prep(skb); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (++i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn(drv, "checksum_partial proto=%x!\n", + skb->protocol); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + 
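/* For reference: this context descriptor tells the MAC to start
+ * checksumming at byte 'tucss' (the transport header), to store the result
+ * at 'tucso' (tucss + skb->csum_offset) and, since 'tucse' is left 0, to
+ * checksum through to the end of the packet.
+ */ +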
context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.data = 0; + context_desc->cmd_and_length = cpu_to_le32(cmd_len); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (unlikely(++i == tx_ring->count)) + i = 0; + + tx_ring->next_to_use = i; + + return true; +} + +#define E1000_MAX_TXD_PWR 12 +#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) + +static int e1000_tx_map(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, + struct sk_buff *skb, unsigned int first, + unsigned int max_per_txd, unsigned int nr_frags, + unsigned int mss) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct e1000_tx_buffer *buffer_info; + unsigned int len = skb_headlen(skb); + unsigned int offset = 0, size, count = 0, i; + unsigned int f, bytecount, segs; + + i = tx_ring->next_to_use; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + /* Workaround for Controller erratum -- + * descriptor for non-tso packet in a linear SKB that follows a + * tso gets written back prematurely before the data is fully + * DMA'd to the controller + */ + if (!skb->data_len && tx_ring->last_tx_tso && + !skb_is_gso(skb)) { + tx_ring->last_tx_tso = false; + size -= 4; + } + + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc + */ + if (unlikely(mss && !nr_frags && size == len && size > 8)) + size -= 4; + /* work-around for errata 10 and it applies + * to all controllers in PCI-X mode + * The fix is to make sure that the first descriptor of a + * packet is smaller than 2048 - 16 - 16 (or 2016) bytes + */ + if (unlikely((hw->bus_type == e1000_bus_type_pcix) && + (size > 2015) && count == 0)) + size = 2015; + + /* Workaround for potential 82544 hang in PCI-X. Avoid + * terminating buffers within evenly-aligned dwords. + */ + if (unlikely(adapter->pcix_82544 && + !((unsigned long)(skb->data + offset + size - 1) & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + /* set time_stamp *before* dma to help avoid a possible race */ + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = false; + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data + offset, + size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + if (len) { + i++; + if (unlikely(i == tx_ring->count)) + i = 0; + } + } + + for (f = 0; f < nr_frags; f++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + + len = skb_frag_size(frag); + offset = 0; + + while (len) { + unsigned long bufend; + i++; + if (unlikely(i == tx_ring->count)) + i = 0; + + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc + */ + if (unlikely(mss && f == (nr_frags-1) && + size == len && size > 8)) + size -= 4; + /* Workaround for potential 82544 hang in PCI-X. + * Avoid terminating buffers within evenly-aligned + * dwords. 
+ */ + bufend = (unsigned long) + page_to_phys(skb_frag_page(frag)); + bufend += offset + size - 1; + if (unlikely(adapter->pcix_82544 && + !(bufend & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = true; + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, + offset, size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + } + } + + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i == 0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + return 0; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, int tx_flags, + int count) +{ + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_tx_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + txd_lower &= ~(E1000_TXD_CMD_IFCS); + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + if (unlikely(++i == tx_ring->count)) + i = 0; + } + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + + tx_ring->next_to_use = i; +} + +/* 82547 workaround to avoid controller hang in half-duplex environment. + * The workaround is to avoid queuing a large packet that would span + * the internal Tx FIFO ring boundary by notifying the stack to resend + * the packet at a later time. This gives the Tx FIFO an opportunity to + * flush all packets. When that occurs, we reset the Tx FIFO pointers + * to the beginning of the Tx FIFO. 
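+ *
+ * Sketch of the bookkeeping in e1000_82547_fifo_workaround() below: a frame
+ * occupies ALIGN(skb->len + E1000_FIFO_HDR, E1000_FIFO_HDR) bytes of FIFO.
+ * In half duplex, if that is >= E1000_82547_PAD_LEN plus the space left
+ * before the FIFO wraps (tx_fifo_size - tx_fifo_head), tx_fifo_stall is set
+ * and the frame is retried after the FIFO drains; otherwise tx_fifo_head
+ * simply advances modulo tx_fifo_size.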
+ */ + +#define E1000_FIFO_HDR 0x10 +#define E1000_82547_PAD_LEN 0x3E0 + +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; + u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; + + skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); + + if (adapter->link_duplex != HALF_DUPLEX) + goto no_fifo_stall_required; + + if (atomic_read(&adapter->tx_fifo_stall)) + return 1; + + if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { + atomic_set(&adapter->tx_fifo_stall, 1); + return 1; + } + +no_fifo_stall_required: + adapter->tx_fifo_head += skb_fifo_len; + if (adapter->tx_fifo_head >= adapter->tx_fifo_size) + adapter->tx_fifo_head -= adapter->tx_fifo_size; + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + + netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(E1000_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, + struct e1000_tx_ring *tx_ring, int size) +{ + if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __e1000_maybe_stop_tx(netdev, size); +} + +#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X)) +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *tx_ring; + unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + __be16 protocol = vlan_get_protocol(skb); + + /* This goes back to the question of how to logically map a Tx queue + * to a flow. Right now, performance is impacted slightly negatively + * if using multiple Tx queues. If the stack breaks away from a + * single qdisc implementation, we can look at this again. + */ + tx_ring = adapter->tx_ring; + + /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, + * packets may get corrupted during padding by HW. + * To WA this issue, pad all small packets manually. + */ + if (eth_skb_pad(skb)) + return NETDEV_TX_OK; + + mss = skb_shinfo(skb)->gso_size; + /* The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. 
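+ *
+ * Worked example (illustrative MSS values): with max_per_txd starting at
+ * E1000_MAX_DATA_PER_TXD (4096), an MSS of 1448 leaves it at
+ * min(1448 << 2, 4096) = 4096, while an MSS of 536 shrinks it to 2144, so
+ * a single buffer never spans more than four MSS-sized segments.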
+ */ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && hdr_len == len) { + switch (hw->mac_type) { + case e1000_82544: { + unsigned int pull_size; + + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements + * NOTE: this is a TSO only workaround + * if end byte alignment not correct move us + * into the next dword + */ + if ((unsigned long)(skb_tail_pointer(skb) - 1) + & 4) + break; + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err(drv, "__pskb_pull_tail " + "failed.\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + break; + } + default: + /* do nothing */ + break; + } + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + /* Controller Erratum workaround */ + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) + count++; + + count += TXD_USE_COUNT(len, max_txd_pwr); + + if (adapter->pcix_82544) + count++; + + /* work-around for errata 10 and it applies to all controllers + * in PCI-X mode, so add one more descriptor to the count + */ + if (unlikely((hw->bus_type == e1000_bus_type_pcix) && + (len > 2015))) + count++; + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), + max_txd_pwr); + if (adapter->pcix_82544) + count += nr_frags; + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time + */ + if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) + return NETDEV_TX_BUSY; + + if (unlikely((hw->mac_type == e1000_82547) && + (e1000_82547_fifo_workaround(adapter, skb)))) { + netif_stop_queue(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->fifo_stall_task, 1); + return NETDEV_TX_BUSY; + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << + E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, tx_ring, skb, protocol); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (likely(tso)) { + if (likely(hw->mac_type != e1000_82544)) + tx_ring->last_tx_tso = true; + tx_flags |= E1000_TX_FLAGS_TSO; + } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) + tx_flags |= E1000_TX_FLAGS_CSUM; + + if (protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + if (unlikely(skb->no_fcs)) + tx_flags |= E1000_TX_FLAGS_NO_FCS; + + count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, + nr_frags, mss); + + if (count) { + /* The descriptors needed is higher than other Intel drivers + * due to a number of workarounds. The breakdown is below: + * Data descriptors: MAX_SKB_FRAGS + 1 + * Context Descriptor: 1 + * Keep head from touching tail: 2 + * Workarounds: 3 + */ + int desc_needed = MAX_SKB_FRAGS + 7; + + netdev_sent_queue(netdev, skb->len); + skb_tx_timestamp(skb); + + e1000_tx_queue(adapter, tx_ring, tx_flags, count); + + /* 82544 potentially requires twice as many data descriptors + * in order to guarantee buffers don't end on evenly-aligned + * dwords + */ + if (adapter->pcix_82544) + desc_needed += MAX_SKB_FRAGS + 1; + + /* Make sure there is space in the ring for the next send. 
*/ + e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); + + if (!netdev_xmit_more() || + netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { + writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); + } + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +#define NUM_REGS 38 /* 1 based count */ +static void e1000_regdump(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 regs[NUM_REGS]; + u32 *regs_buff = regs; + int i = 0; + + static const char * const reg_name[] = { + "CTRL", "STATUS", + "RCTL", "RDLEN", "RDH", "RDT", "RDTR", + "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", + "TIDV", "TXDCTL", "TADV", "TARC0", + "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", + "TXDCTL1", "TARC1", + "CTRL_EXT", "ERT", "RDBAL", "RDBAH", + "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", + "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" + }; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDBAL); + regs_buff[9] = er32(TDBAH); + regs_buff[10] = er32(TDLEN); + regs_buff[11] = er32(TDH); + regs_buff[12] = er32(TDT); + regs_buff[13] = er32(TIDV); + regs_buff[14] = er32(TXDCTL); + regs_buff[15] = er32(TADV); + regs_buff[16] = er32(TARC0); + + regs_buff[17] = er32(TDBAL1); + regs_buff[18] = er32(TDBAH1); + regs_buff[19] = er32(TDLEN1); + regs_buff[20] = er32(TDH1); + regs_buff[21] = er32(TDT1); + regs_buff[22] = er32(TXDCTL1); + regs_buff[23] = er32(TARC1); + regs_buff[24] = er32(CTRL_EXT); + regs_buff[25] = er32(ERT); + regs_buff[26] = er32(RDBAL0); + regs_buff[27] = er32(RDBAH0); + regs_buff[28] = er32(TDFH); + regs_buff[29] = er32(TDFT); + regs_buff[30] = er32(TDFHS); + regs_buff[31] = er32(TDFTS); + regs_buff[32] = er32(TDFPC); + regs_buff[33] = er32(RDFH); + regs_buff[34] = er32(RDFT); + regs_buff[35] = er32(RDFHS); + regs_buff[36] = er32(RDFTS); + regs_buff[37] = er32(RDFPC); + + pr_info("Register dump\n"); + for (i = 0; i < NUM_REGS; i++) + pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); +} + +/* + * e1000_dump: Print registers, tx ring and rx ring + */ +static void e1000_dump(struct e1000_adapter *adapter) +{ + /* this code doesn't handle multiple rings */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + + if (!netif_msg_hw(adapter)) + return; + + /* Print Registers */ + e1000_regdump(adapter); + + /* transmit dump */ + pr_info("TX Desc ring0 dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * +----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * 
+----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); + pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); + + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)tx_desc; + const char *type; + + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + type = "NTC/U"; + else if (i == tx_ring->next_to_use) + type = "NTU"; + else if (i == tx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", + ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i, + le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->length, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, buffer_info->skb, type); + } + +rx_ring_summary: + /* receive dump */ + pr_info("\nRX Desc ring dump\n"); + + /* Legacy Receive Descriptor Format + * + * +-----------------------------------------------------+ + * | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * | VLAN Tag | Errors | Status 0 | Packet csum | Length | + * +-----------------------------------------------------+ + * 63 48 47 40 39 32 31 16 15 0 + */ + pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); + + if (!netif_msg_rx_status(adapter)) + goto exit; + + for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)rx_desc; + const char *type; + + if (i == rx_ring->next_to_use) + type = "NTU"; + else if (i == rx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", + i, le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->rxbuf.data, type); + } /* for */ + + /* dump the descriptor caches */ + /* rx */ + pr_info("Rx descriptor cache in 64bit format\n"); + for (i = 0x6000; i <= 0x63FF ; i += 0x10) { + pr_info("R%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } + /* tx */ + pr_info("Tx descriptor cache in 64bit format\n"); + for (i = 0x7000; i <= 0x73FF ; i += 0x10) { + pr_info("T%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } +exit: + return; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: number of the Tx queue that hung (unused) + 
**/ +static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = + container_of(work, struct e1000_adapter, reset_task); + + e_err(drv, "Reset adapter\n"); + e1000_reinit_locked(adapter); +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Adapter-specific max frame size limits. */ + switch (hw->mac_type) { + case e1000_undefined ... e1000_82542_rev2_1: + if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { + e_err(probe, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + break; + default: + /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ + break; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + /* e1000_down has a dependency on max_frame_size */ + hw->max_frame_size = max_frame; + if (netif_running(netdev)) { + /* prevent buffers from being reallocated */ + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; + e1000_down(adapter); + } + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. RXBUFFER_2048 --> size-4096 slab + * however with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= E1000_RXBUFFER_2048) + adapter->rx_buffer_len = E1000_RXBUFFER_2048; + else +#if (PAGE_SIZE >= E1000_RXBUFFER_16384) + adapter->rx_buffer_len = E1000_RXBUFFER_16384; +#elif (PAGE_SIZE >= E1000_RXBUFFER_4096) + adapter->rx_buffer_len = PAGE_SIZE; +#endif + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if (!hw->tbi_compatibility_on && + ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || + (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + e1000_up(adapter); + else + e1000_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->flags); + + return 0; +} + +/** + * e1000_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void e1000_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned long flags; + u16 phy_tmp; + +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
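+ * (adapter->link_speed is cleared while the interface is down, so the link_speed test below also covers the reset path.)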
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + spin_lock_irqsave(&adapter->stats_lock, flags); + + /* these counters are modified from e1000_tbi_adjust_stats, + * called from the interrupt context, so they must only + * be written while holding adapter->stats_lock + */ + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorcl += er32(GORCL); + adapter->stats.gorch += er32(GORCH); + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.prc64 += er32(PRC64); + adapter->stats.prc127 += er32(PRC127); + adapter->stats.prc255 += er32(PRC255); + adapter->stats.prc511 += er32(PRC511); + adapter->stats.prc1023 += er32(PRC1023); + adapter->stats.prc1522 += er32(PRC1522); + + adapter->stats.symerrs += er32(SYMERRS); + adapter->stats.mpc += er32(MPC); + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + adapter->stats.sec += er32(SEC); + adapter->stats.rlec += er32(RLEC); + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.fcruc += er32(FCRUC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotcl += er32(GOTCL); + adapter->stats.gotch += er32(GOTCH); + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + adapter->stats.rfc += er32(RFC); + adapter->stats.rjc += er32(RJC); + adapter->stats.torl += er32(TORL); + adapter->stats.torh += er32(TORH); + adapter->stats.totl += er32(TOTL); + adapter->stats.toth += er32(TOTH); + adapter->stats.tpr += er32(TPR); + + adapter->stats.ptc64 += er32(PTC64); + adapter->stats.ptc127 += er32(PTC127); + adapter->stats.ptc255 += er32(PTC255); + adapter->stats.ptc511 += er32(PTC511); + adapter->stats.ptc1023 += er32(PTC1023); + adapter->stats.ptc1522 += er32(PTC1522); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* used for adaptive IFS */ + + hw->tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->tx_packet_delta; + hw->collision_delta = er32(COLC); + adapter->stats.colc += hw->collision_delta; + + if (hw->mac_type >= e1000_82543) { + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.tncrs += er32(TNCRS); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + } + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; + netdev->stats.rx_length_errors = adapter->stats.rlerrc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; + netdev->stats.tx_errors = adapter->stats.txerrc; + netdev->stats.tx_aborted_errors = 
adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + if (hw->bad_tx_carr_stats_fd && + adapter->link_duplex == FULL_DUPLEX) { + netdev->stats.tx_carrier_errors = 0; + adapter->stats.tncrs = 0; + } + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Phy Stats */ + if (hw->media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && + (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + + if ((hw->mac_type <= e1000_82546) && + (hw->phy_type == e1000_phy_m88) && + !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) + adapter->phy_stats.receive_errors += phy_tmp; + } + + /* Management Stats */ + if (hw->has_smbus) { + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + } + + spin_unlock_irqrestore(&adapter->stats_lock, flags); +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (unlikely((!icr))) + return IRQ_NONE; /* Not our interrupt */ + + /* we might have caused the interrupt, but the above + * read cleared it, and just in case the driver is + * down there is nothing to do so return handled + */ + if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) + return IRQ_HANDLED; + + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { + hw->get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 1); + } + + /* disable interrupts, without the synchronize_irq bit */ + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + + if (likely(napi_schedule_prep(&adapter->napi))) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } else { + /* this really should not happen! 
if it does it is basically a + * bug, but not a hard error, so enable ints and continue + */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return IRQ_HANDLED; +} + +/** + * e1000_clean - NAPI Rx polling callback + * @napi: napi struct containing references to driver info + * @budget: budget given to driver for receive packets + **/ +static int e1000_clean(struct napi_struct *napi, int budget) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); + int tx_clean_complete = 0, work_done = 0; + + tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); + + adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); + + if (!tx_clean_complete || work_done == budget) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) { + if (likely(adapter->itr_setting & 3)) + e1000_set_itr(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return work_done; +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * @tx_ring: ring to clean + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; + unsigned int bytes_compl = 0, pkts_compl = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + dma_rmb(); /* read buffer_info after eop_desc */ + for ( ; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + + } + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + tx_desc->upper.data = 0; + + if (unlikely(++i == tx_ring->count)) + i = 0; + } + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, + * which will reuse the cleaned buffers. + */ + smp_store_release(&tx_ring->next_to_clean, i); + + netdev_completed_queue(netdev, pkts_compl, bytes_compl); + +#define TX_WAKE_THRESHOLD 32 + if (unlikely(count && netif_carrier_ok(netdev) && + E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
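+ * (The smp_mb() below makes the updated next_to_clean visible before the netif_queue_stopped() check, matching the queue-stop path in e1000_xmit_frame().)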
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->flags))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[eop].time_stamp && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%lu>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + (unsigned long)(tx_ring - adapter->tx_ring), + readl(hw->hw_addr + tx_ring->tdh), + readl(hw->hw_addr + tx_ring->tdt), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); + e1000_dump(adapter); + netif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + netdev->stats.tx_bytes += total_tx_bytes; + netdev->stats.tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_rx_checksum - Receive Checksum Offload for 82543 + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @skb: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + skb_checksum_none_assert(skb); + + /* 82543 or newer only */ + if (unlikely(hw->mac_type < e1000_82543)) + return; + /* Ignore Checksum bit is set */ + if (unlikely(status & E1000_RXD_STAT_IXSM)) + return; + /* TCP/UDP checksum error bit is set */ + if (unlikely(errors & E1000_RXD_ERR_TCPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + /* TCP/UDP Checksum has not been calculated */ + if (!(status & E1000_RXD_STAT_TCPCS)) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + if (likely(status & E1000_RXD_STAT_TCPCS)) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + adapter->hw_csum_good++; +} + +/** + * e1000_consume_page - helper function for jumbo Rx path + * @bi: software descriptor shadow data + * @skb: skb being modified + * @length: length of data being added + **/ +static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->rxbuf.page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; +} + +/** + * e1000_receive_skb - helper function to handle rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + */ +static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, + __le16 vlan, struct sk_buff *skb) +{ + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (status & E1000_RXD_STAT_VP) { + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + 
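/* re-attach the VLAN tag stripped by the hardware before handing the skb to GRO */ +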
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + napi_gro_receive(&adapter->napi, skb); +} + +/** + * e1000_tbi_adjust_stats + * @hw: Struct containing variables accessed by shared code + * @stats: point to stats struct + * @frame_len: The length of the frame in question + * @mac_addr: The Ethernet destination address of the frame in question + * + * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT + */ +static void e1000_tbi_adjust_stats(struct e1000_hw *hw, + struct e1000_hw_stats *stats, + u32 frame_len, const u8 *mac_addr) +{ + u64 carry_bit; + + /* First adjust the frame length. */ + frame_len--; + /* We need to adjust the statistics counters, since the hardware + * counters overcount this packet as a CRC error and undercount + * the packet as a good packet + */ + /* This packet should not be counted as a CRC error. */ + stats->crcerrs--; + /* This packet does count as a Good Packet Received. */ + stats->gprc++; + + /* Adjust the Good Octets received counters */ + carry_bit = 0x80000000 & stats->gorcl; + stats->gorcl += frame_len; + /* If the high bit of Gorcl (the low 32 bits of the Good Octets + * Received Count) was one before the addition, + * AND it is zero after, then we lost the carry out, + * need to add one to Gorch (Good Octets Received Count High). + * This could be simplified if all environments supported + * 64-bit integers. + */ + if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) + stats->gorch++; + /* Is this a broadcast or multicast? Check broadcast first, + * since the test for a multicast frame will test positive on + * a broadcast frame. + */ + if (is_broadcast_ether_addr(mac_addr)) + stats->bprc++; + else if (is_multicast_ether_addr(mac_addr)) + stats->mprc++; + + if (frame_len == hw->max_frame_size) { + /* In this case, the hardware has overcounted the number of + * oversize frames. + */ + if (stats->roc > 0) + stats->roc--; + } + + /* Adjust the bin counters when the extra byte put the frame in the + * wrong bin. Remember that the frame_len was adjusted above. 
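+ * For example, a true 64-byte frame is counted by the hardware as 65 bytes and lands in the 65..127 bin, so it is moved back from prc127 to prc64 below.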
+ */ + if (frame_len == 64) { + stats->prc64++; + stats->prc127--; + } else if (frame_len == 127) { + stats->prc127++; + stats->prc255--; + } else if (frame_len == 255) { + stats->prc255++; + stats->prc511--; + } else if (frame_len == 511) { + stats->prc511++; + stats->prc1023--; + } else if (frame_len == 1023) { + stats->prc1023++; + stats->prc1522--; + } else if (frame_len == 1522) { + stats->prc1522++; + } +} + +static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, + u8 status, u8 errors, + u32 length, const u8 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u8 last_byte = *(data + length - 1); + + if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { + unsigned long irq_flags; + + spin_lock_irqsave(&adapter->stats_lock, irq_flags); + e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); + + return true; + } + + return false; +} + +static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, + unsigned int bufsz) +{ + struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); + + if (unlikely(!skb)) + adapter->alloc_rx_buff_failed++; + return skb; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + * @work_to_do: max amount of work allowed for this call to do + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + */ +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + + if (++i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + u8 *mapped = page_address(buffer_info->rxbuf.page); + + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, mapped)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + /* an error means any chain goes out the window + * too + */ + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + } + +#define rxtop rx_ring->rx_skb_top +process_skb: + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = napi_get_frags(&adapter->napi); + if (!rxtop) + 
break; + + skb_fill_page_desc(rxtop, 0, + buffer_info->rxbuf.page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + struct page *p; + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page + */ + p = buffer_info->rxbuf.page; + if (length <= copybreak) { + u8 *vaddr; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + length -= 4; + skb = e1000_alloc_rx_skb(adapter, + length); + if (!skb) + break; + + vaddr = kmap_atomic(p); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr); + /* re-use the page, so don't erase + * buffer_info->rxbuf.page + */ + skb_put(skb, length); + e1000_rx_checksum(adapter, + status | rx_desc->errors << 24, + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_receive_skb(adapter, status, + rx_desc->special, skb); + goto next_desc; + } else { + skb = napi_get_frags(&adapter->napi); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + skb_fill_page_desc(skb, 0, p, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += (skb->len - 4); /* don't count FCS */ + if (likely(!(netdev->features & NETIF_F_RXFCS))) + pskb_trim(skb, skb->len - 4); + total_rx_packets++; + + if (status & E1000_RXD_STAT_VP) { + __le16 vlan = rx_desc->special; + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + napi_gro_frags(&adapter->napi); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/* this should improve performance for small packets with large amounts + * of reassembly being done in the stack + */ +static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, + struct e1000_rx_buffer *buffer_info, + u32 length, const void *data) +{ + struct sk_buff *skb; + + if (length > copybreak) + return NULL; + + skb = e1000_alloc_rx_skb(adapter, length); + if (!skb) + return NULL; + + dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, + length, DMA_FROM_DEVICE); + + skb_put_data(skb, data, length); + + return skb; +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + 
* @work_to_do: max amount of work allowed for this call to do + */ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 *data; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + length = le16_to_cpu(rx_desc->length); + + data = buffer_info->rxbuf.data; + prefetch(data); + skb = e1000_copybreak(adapter, buffer_info, length, data); + if (!skb) { + unsigned int frag_len = e1000_frag_len(adapter); + + skb = build_skb(data - E1000_HEADROOM, frag_len); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + skb_reserve(skb, E1000_HEADROOM); + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + buffer_info->rxbuf.data = NULL; + } + + if (++i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + + /* !EOP means multiple descriptors were used to store a single + * packet, if thats the case we need to toss it. In fact, we + * to toss every packet with the EOP bit clear and the next + * frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(status & E1000_RXD_STAT_EOP))) + adapter->discarding = true; + + if (adapter->discarding) { + /* All receives must fit into a single buffer */ + netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); + dev_kfree_skb(skb); + if (status & E1000_RXD_STAT_EOP) + adapter->discarding = false; + goto next_desc; + } + + if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, data)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + dev_kfree_skb(skb); + goto next_desc; + } + } + +process_skb: + total_rx_bytes += (length - 4); /* don't count FCS */ + total_rx_packets++; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + /* adjust length to remove Ethernet CRC, this must be + * done after the TBI_ACCEPT workaround above + */ + length -= 4; + + if (buffer_info->rxbuf.data == NULL) + skb_put(skb, length); + else /* copybreak skb */ + skb_trim(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + e1000_receive_skb(adapter, status, rx_desc->special, skb); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if 
(cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @adapter: address of board private structure + * @rx_ring: pointer to receive ring structure + * @cleaned_count: number of buffers to allocate this pass + **/ +static void +e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, int cleaned_count) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + /* allocate a new page if necessary */ + if (!buffer_info->rxbuf.page) { + buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->rxbuf.page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) { + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->rxbuf.page, 0, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
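+ * The dma_wmb() orders the descriptor writes above against the RDT tail write that follows.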
+ */ + dma_wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + * @rx_ring: pointer to ring struct + * @cleaned_count: number of new Rx buffers to try to allocate + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + void *data; + + if (buffer_info->rxbuf.data) + goto skip; + + data = e1000_alloc_frag(adapter); + if (!data) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + void *olddata = data; + e_err(rx_err, "skb align check failed: %u bytes at " + "%p\n", bufsz, data); + /* Try again, without freeing the previous */ + data = e1000_alloc_frag(adapter); + /* Failed allocation, critical failure */ + if (!data) { + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + /* give up */ + skb_free_frag(data); + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + /* Use new allocation */ + skb_free_frag(olddata); + } + buffer_info->dma = dma_map_single(&pdev->dev, + data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + skb_free_frag(data); + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + + /* XXX if it was allocated cleanly it will never map to a + * boundary crossing + */ + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, + (void *)(unsigned long)buffer_info->dma, + adapter->rx_buffer_len)) { + e_err(rx_err, "dma align check failed: %u bytes at " + "%p\n", adapter->rx_buffer_len, + (void *)(unsigned long)buffer_info->dma); + + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + + skb_free_frag(data); + buffer_info->rxbuf.data = NULL; + buffer_info->dma = 0; + + adapter->alloc_rx_buff_failed++; + break; + } + buffer_info->rxbuf.data = data; + skip: + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + writel(i, hw->hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 
+ * @adapter: address of board private structure + **/ +static void e1000_smartspeed(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_status; + u16 phy_ctrl; + + if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || + !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) + return; + + if (adapter->smartspeed == 0) { + /* If Master/Slave config fault is asserted twice, + * we assume back-to-back + */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + if (phy_ctrl & CR_1000T_MS_ENABLE) { + phy_ctrl &= ~CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, + phy_ctrl); + adapter->smartspeed++; + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, + &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, + phy_ctrl); + } + } + return; + } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { + /* If still no link, perhaps using 2/3 pair cable */ + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + phy_ctrl |= CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); + } + } + /* Restart process after E1000_SMARTSPEED_MAX iterations */ + if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) + adapter->smartspeed = 0; +} + +/** + * e1000_ioctl - handle ioctl calls + * @netdev: pointer to our netdev + * @ifr: pointer to interface request structure + * @cmd: ioctl data + **/ +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * e1000_mii_ioctl - + * @netdev: pointer to our netdev + * @ifr: pointer to interface request structure + * @cmd: ioctl data + **/ +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct mii_ioctl_data *data = if_mii(ifr); + int retval; + u16 mii_reg; + unsigned long flags; + + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + break; + case SIOCGMIIREG: + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, + &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + break; + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) + return -EFAULT; + mii_reg = data->val_in; + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_write_phy_reg(hw, data->reg_num, + mii_reg)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + if (hw->media_type == e1000_media_type_copper) { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (mii_reg & MII_CR_AUTO_NEG_EN) { + hw->autoneg = 1; + hw->autoneg_advertised = 0x2F; + } else { + u32 speed; + if (mii_reg & 
0x40) + speed = SPEED_1000; + else if (mii_reg & 0x2000) + speed = SPEED_100; + else + speed = SPEED_10; + retval = e1000_set_spd_dplx( + adapter, speed, + ((mii_reg & 0x100) + ? DUPLEX_FULL : + DUPLEX_HALF)); + if (retval) + return retval; + } + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + case M88E1000_PHY_SPEC_CTRL: + case M88E1000_EXT_PHY_SPEC_CTRL: + if (e1000_phy_reset(hw)) + return -EIO; + break; + } + } else { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + } + } + break; + default: + return -EOPNOTSUPP; + } + return E1000_SUCCESS; +} + +void e1000_pci_set_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + int ret_val = pci_set_mwi(adapter->pdev); + + if (ret_val) + e_err(probe, "Error in setting MWI\n"); +} + +void e1000_pci_clear_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + + pci_clear_mwi(adapter->pdev); +} + +int e1000_pcix_get_mmrbc(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return pcix_get_mmrbc(adapter->pdev); +} + +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) +{ + struct e1000_adapter *adapter = hw->back; + pcix_set_mmrbc(adapter->pdev, mmrbc); +} + +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) +{ + outl(value, port); +} + +static bool e1000_vlan_used(struct e1000_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + return true; + return false; +} + +static void __e1000_vlan_mode(struct e1000_adapter *adapter, + netdev_features_t features) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = er32(CTRL); + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + ctrl |= E1000_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~E1000_CTRL_VME; + } + ew32(CTRL, ctrl); +} +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, adapter->netdev->features); + if (filter_on) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_CFIEN; + if (!(adapter->netdev->flags & IFF_PROMISC)) + rctl |= E1000_RCTL_VFE; + ew32(RCTL, rctl); + e1000_update_mng_vlan(adapter); + } else { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_VFE; + ew32(RCTL, rctl); + } + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, features); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + (vid == adapter->mng_vlan_id)) + return 0; + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, true); + + /* add VID to filter table */ + 
index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta |= (1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + + /* remove VID from filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + clear_bit(vid, adapter->active_vlans); + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, false); + + return 0; +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + if (!e1000_vlan_used(adapter)) + return; + + e1000_vlan_filter_on_off(adapter, true); + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_hw *hw = &adapter->hw; + + hw->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 gbps Full duplex */ + if ((hw->media_type == e1000_media_type_fiber) && + spd != SPEED_1000 && + dplx != DUPLEX_FULL) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_10_half; + break; + case SPEED_10 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_10_full; + break; + case SPEED_100 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_100_half; + break; + case SPEED_100 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_100_full; + break; + case SPEED_1000 + DUPLEX_FULL: + hw->autoneg = 1; + hw->autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + hw->mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + e_err(probe, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + u32 wufc = adapter->wol; + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + } + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_rx_mode(netdev); + + rctl = er32(RCTL); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) + rctl |= E1000_RCTL_MPE; + + /* enable receives in the hardware */ + ew32(RCTL, rctl | E1000_RCTL_EN); + + if (hw->mac_type >= e1000_82540) { + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define 
E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC | + E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + } + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + ew32(WUC, E1000_WUC_PME_EN); + ew32(WUFC, wufc); + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + } + + e1000_release_manageability(adapter); + + *enable_wake = !!wufc; + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->en_mng_pt) + *enable_wake = true; + + if (netif_running(netdev)) + e1000_free_irq(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + return 0; +} + +static int __maybe_unused e1000_suspend(struct device *dev) +{ + int retval; + struct pci_dev *pdev = to_pci_dev(dev); + bool wake; + + retval = __e1000_shutdown(pdev, &wake); + device_set_wakeup_enable(dev, wake); + + return retval; +} + +static int __maybe_unused e1000_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot enable PCI device from suspend\n"); + return err; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + e1000_power_up_phy(adapter); + e1000_reset(adapter); + ew32(WUS, ~0); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) + e1000_up(adapter); + + netif_device_attach(netdev); + + return 0; +} + +static void e1000_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __e1000_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (disable_hardirq(adapter->pdev->irq)) + e1000_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e1000_down(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + /* Request a slot reset. 
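+ * The PCI error recovery core is expected to call e1000_io_slot_reset() next to re-enable and reset the device.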
*/ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000_reset(adapter); + ew32(WUS, ~0); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) { + if (e1000_up(adapter)) { + pr_info("can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +/* e1000_main.c */ diff --git a/devices/e1000/e1000_main-6.1-ethercat.c b/devices/e1000/e1000_main-6.1-ethercat.c new file mode 100644 index 00000000..ce79ffa8 --- /dev/null +++ b/devices/e1000/e1000_main-6.1-ethercat.c @@ -0,0 +1,5475 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +#include "e1000-6.1-ethercat.h" +#include +#include +#include +#include +#include + +char e1000_driver_name[] = "ec_e1000"; +static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; +static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; + +/* e1000_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * Macro expands to... 
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + */ +static const struct pci_device_id e1000_pci_tbl[] = { + INTEL_E1000_ETHERNET_DEVICE(0x1000), + INTEL_E1000_ETHERNET_DEVICE(0x1001), + INTEL_E1000_ETHERNET_DEVICE(0x1004), + INTEL_E1000_ETHERNET_DEVICE(0x1008), + INTEL_E1000_ETHERNET_DEVICE(0x1009), + INTEL_E1000_ETHERNET_DEVICE(0x100C), + INTEL_E1000_ETHERNET_DEVICE(0x100D), + INTEL_E1000_ETHERNET_DEVICE(0x100E), + INTEL_E1000_ETHERNET_DEVICE(0x100F), + INTEL_E1000_ETHERNET_DEVICE(0x1010), + INTEL_E1000_ETHERNET_DEVICE(0x1011), + INTEL_E1000_ETHERNET_DEVICE(0x1012), + INTEL_E1000_ETHERNET_DEVICE(0x1013), + INTEL_E1000_ETHERNET_DEVICE(0x1014), + INTEL_E1000_ETHERNET_DEVICE(0x1015), + INTEL_E1000_ETHERNET_DEVICE(0x1016), + INTEL_E1000_ETHERNET_DEVICE(0x1017), + INTEL_E1000_ETHERNET_DEVICE(0x1018), + INTEL_E1000_ETHERNET_DEVICE(0x1019), + INTEL_E1000_ETHERNET_DEVICE(0x101A), + INTEL_E1000_ETHERNET_DEVICE(0x101D), + INTEL_E1000_ETHERNET_DEVICE(0x101E), + INTEL_E1000_ETHERNET_DEVICE(0x1026), + INTEL_E1000_ETHERNET_DEVICE(0x1027), + INTEL_E1000_ETHERNET_DEVICE(0x1028), + INTEL_E1000_ETHERNET_DEVICE(0x1075), + INTEL_E1000_ETHERNET_DEVICE(0x1076), + INTEL_E1000_ETHERNET_DEVICE(0x1077), + INTEL_E1000_ETHERNET_DEVICE(0x1078), + INTEL_E1000_ETHERNET_DEVICE(0x1079), + INTEL_E1000_ETHERNET_DEVICE(0x107A), + INTEL_E1000_ETHERNET_DEVICE(0x107B), + INTEL_E1000_ETHERNET_DEVICE(0x107C), + INTEL_E1000_ETHERNET_DEVICE(0x108A), + INTEL_E1000_ETHERNET_DEVICE(0x1099), + INTEL_E1000_ETHERNET_DEVICE(0x10B5), + INTEL_E1000_ETHERNET_DEVICE(0x2E6E), + /* required last entry */ + {0,} +}; + +// do not auto-load driver +// MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr); +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr); +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +void e1000_update_stats(struct e1000_adapter *adapter); + +static int e1000_init_module(void); +static void e1000_exit_module(void); +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void e1000_remove(struct pci_dev *pdev); +static int e1000_alloc_queues(struct e1000_adapter *adapter); +static int e1000_sw_init(struct e1000_adapter *adapter); +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); +static void e1000_configure_tx(struct e1000_adapter *adapter); +static void e1000_configure_rx(struct e1000_adapter *adapter); +static void e1000_setup_rctl(struct e1000_adapter *adapter); +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static void 
e1000_set_rx_mode(struct net_device *netdev); +static void e1000_update_phy_info_task(struct work_struct *work); +static void e1000_watchdog(struct work_struct *work); +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +static int e1000_change_mtu(struct net_device *netdev, int new_mtu); +static int e1000_set_mac(struct net_device *netdev, void *p); +void ec_poll(struct net_device *); +static irqreturn_t e1000_intr(int irq, void *data); +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static int e1000_clean(struct napi_struct *napi, int budget); +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ +} +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); +static void e1000_enter_82542_rst(struct e1000_adapter *adapter); +static void e1000_leave_82542_rst(struct e1000_adapter *adapter); +static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue); +static void e1000_reset_task(struct work_struct *work); +static void e1000_smartspeed(struct e1000_adapter *adapter); +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb); + +static bool e1000_vlan_used(struct e1000_adapter *adapter); +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on); +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static void e1000_restore_vlan(struct e1000_adapter *adapter); + +static int __maybe_unused e1000_suspend(struct device *dev); +static int __maybe_unused e1000_resume(struct device *dev); +static void e1000_shutdown(struct pci_dev *pdev); + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void e1000_netpoll (struct net_device *netdev); +#endif + +#define COPYBREAK_DEFAULT 256 +static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state); +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); +static void e1000_io_resume(struct pci_dev *pdev); + +static const struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume); + +static struct pci_driver e1000_driver = { + .name = e1000_driver_name, + .id_table = e1000_pci_tbl, + 
.probe = e1000_probe, + .remove = e1000_remove, + .driver = { + .pm = &e1000_pm_ops, + }, + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver, EtherCAT enabled"); +MODULE_LICENSE("GPL v2"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +/** + * e1000_get_hw_dev - helper function for getting netdev + * @hw: pointer to HW struct + * + * return device used by hardware layer to print debugging information + * + **/ +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init e1000_init_module(void) +{ + int ret; + pr_info("%s\n", e1000_driver_string); + + pr_info("%s\n", e1000_copyright); + + ret = pci_register_driver(&e1000_driver); + if (copybreak != COPYBREAK_DEFAULT) { + if (copybreak == 0) + pr_info("copybreak disabled\n"); + else + pr_info("copybreak enabled for " + "packets <= %u bytes\n", copybreak); + } + return ret; +} + +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} + +module_exit(e1000_exit_module); + +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + irq_handler_t handler = e1000_intr; + int irq_flags = IRQF_SHARED; + int err; + + if (adapter->ecdev) { + return 0; + } + + err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (err) { + e_err(probe, "Unable to allocate interrupt Error: %d\n", err); + } + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->ecdev) { + return; + } + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->ecdev) { + return; + } + + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + synchronize_irq(adapter->pdev->irq); +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->ecdev) { + return; + } + + ew32(IMS, IMS_ENABLE_MASK); + E1000_WRITE_FLUSH(); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u16 vid = hw->mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (!e1000_vlan_used(adapter)) + return; + + if (!test_bit(vid, adapter->active_vlans)) { + if (hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + if ((old_vid != 
(u16)E1000_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + old_vid); + } else { + adapter->mng_vlan_id = vid; + } +} + +static void e1000_init_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + ew32(MANC, manc); + } +} + +static void e1000_release_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + + ew32(MANC, manc); + } +} + +/** + * e1000_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + e1000_set_rx_mode(netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + /* call E1000_DESC_UNUSED which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct e1000_rx_ring *ring = &adapter->rx_ring[i]; + if (adapter->ecdev) { + /* fill rx ring completely! */ + adapter->alloc_rx_buf(adapter, ring, ring->count); + } else { + /* this one leaves the last ring element unallocated! */ + adapter->alloc_rx_buf(adapter, ring, + E1000_DESC_UNUSED(ring)); + } + } +} + +int e1000_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->flags); + + if (!adapter->ecdev) { + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_wake_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + } + return 0; +} + +/** + * e1000_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000_reset *** + **/ +void e1000_power_up_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 mii_reg = 0; + + /* Just clear the power down bit to wake the phy back up */ + if (hw->media_type == e1000_media_type_copper) { + /* according to the manual, the phy will retain its + * settings across a power-down/up cycle + */ + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + } +} + +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is true * + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active + */ + if (!adapter->wol && hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + u16 mii_reg = 0; + + switch (hw->mac_type) { + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case 
e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (er32(MANC) & E1000_MANC_SMBUS_EN) + goto out; + break; + default: + goto out; + } + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + msleep(1); + } +out: + return; +} + +static void e1000_down_and_stop(struct e1000_adapter *adapter) +{ + set_bit(__E1000_DOWN, &adapter->flags); + + if (!adapter->ecdev) { + cancel_delayed_work_sync(&adapter->watchdog_task); + } + + /* + * Since the watchdog task can reschedule other tasks, we should cancel + * it first, otherwise we can run into the situation when a work is + * still running after the adapter has been turned down. + */ + + if (!adapter->ecdev) { + cancel_delayed_work_sync(&adapter->phy_info_task); + cancel_delayed_work_sync(&adapter->fifo_stall_task); + } + + /* Only kill reset task if adapter is not resetting */ + if (!adapter->ecdev && !test_bit(__E1000_RESETTING, &adapter->flags)) + cancel_work_sync(&adapter->reset_task); +} + +void e1000_down(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl, tctl; + + /* disable receives in the hardware */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + if (!adapter->ecdev) { + netif_tx_disable(netdev); + } + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + /* flush both disables and wait for them to finish */ + E1000_WRITE_FLUSH(); + msleep(10); + + /* Set the carrier off after transmits have been disabled in the + * hardware, to avoid race conditions with e1000_watchdog() (which + * may be running concurrently to us, checking for the carrier + * bit to decide whether it should enable transmits again). Such + * a race condition would result into transmission being disabled + * in the hardware until the next IFF_DOWN+IFF_UP cycle. + */ + + if (!adapter->ecdev) { + netif_carrier_off(netdev); + napi_disable(&adapter->napi); + + e1000_irq_disable(adapter); + } + + /* Setting DOWN must be after irq_disable to prevent + * a screaming interrupt. Setting DOWN also prevents + * tasks from rescheduling. + */ + e1000_down_and_stop(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + e1000_reset(adapter); + e1000_clean_all_tx_rings(adapter); + e1000_clean_all_rx_rings(adapter); +} + +void e1000_reinit_locked(struct e1000_adapter *adapter) +{ + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + /* only run the task if not already down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) { + e1000_down(adapter); + e1000_up(adapter); + } + + clear_bit(__E1000_RESETTING, &adapter->flags); +} + +void e1000_reset(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pba = 0, tx_space, min_tx_space, min_rx_space; + bool legacy_pba_adjust = false; + u16 hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. 
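+	 * PBA is the adapter's on-chip Packet Buffer Allocation (in KB); the
+	 * lower 16 bits describe the Rx FIFO and the upper 16 bits the Tx
+	 * FIFO, as used further down in this function.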
+ */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + case e1000_82540: + case e1000_82541: + case e1000_82541_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_48K; + break; + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + pba = E1000_PBA_48K; + break; + case e1000_82547: + case e1000_82547_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_30K; + break; + case e1000_undefined: + case e1000_num_macs: + break; + } + + if (legacy_pba_adjust) { + if (hw->max_frame_size > E1000_RXBUFFER_8192) + pba -= 8; /* allocate more FIFO for Tx */ + + if (hw->mac_type == e1000_82547) { + adapter->tx_fifo_head = 0; + adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; + adapter->tx_fifo_size = + (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; + atomic_set(&adapter->tx_fifo_stall, 0); + } + } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* adjust PBA for jumbo frames */ + ew32(PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (hw->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = hw->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + + /* PCI/PCIx hardware has PBA alignment constraints */ + switch (hw->mac_type) { + case e1000_82545 ... e1000_82546_rev_3: + pba &= ~(E1000_PBA_8K - 1); + break; + default: + break; + } + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment or use Early Receive if available + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + } + + ew32(PBA, pba); + + /* flow control settings: + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. 
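+	 * As a worked example (assuming a 48 KB Rx FIFO and a 1522 byte
+	 * maximum frame): min(49152 * 9 / 10, 49152 - 1522) = 44236, which
+	 * the 8-byte mask below rounds down to 44232.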
+ * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame + */ + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - hw->max_frame_size)); + + hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ + hw->fc_low_water = hw->fc_high_water - 8; + hw->fc_pause_time = E1000_FC_PAUSE_TIME; + hw->fc_send_xon = 1; + hw->fc = hw->original_fc; + + /* Allow time for pending master requests to run */ + e1000_reset_hw(hw); + if (hw->mac_type >= e1000_82544) + ew32(WUC, 0); + + if (e1000_init_hw(hw)) + e_dev_err("Hardware Error\n"); + e1000_update_mng_vlan(adapter); + + /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ + if (hw->mac_type >= e1000_82544 && + hw->autoneg == 1 && + hw->autoneg_advertised == ADVERTISE_1000_FULL) { + u32 ctrl = er32(CTRL); + /* clear phy power management bit if we are in gig only mode, + * which if enabled will attempt negotiation to 100Mb, which + * can cause a loss of link at power off or driver unload + */ + ctrl &= ~E1000_CTRL_SWDPIN3; + ew32(CTRL, ctrl); + } + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETHERNET_IEEE_VLAN_TYPE); + + e1000_reset_adaptive(hw); + e1000_phy_get_info(hw, &adapter->phy_info); + + e1000_release_manageability(adapter); +} + +/* Dump the eeprom for users having checksum issues */ +static void e1000_dump_eeprom(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_eeprom eeprom; + const struct ethtool_ops *ops = netdev->ethtool_ops; + u8 *data; + int i; + u16 csum_old, csum_new = 0; + + eeprom.len = ops->get_eeprom_len(netdev); + eeprom.offset = 0; + + data = kmalloc(eeprom.len, GFP_KERNEL); + if (!data) + return; + + ops->get_eeprom(netdev, &eeprom, data); + + csum_old = (data[EEPROM_CHECKSUM_REG * 2]) + + (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8); + for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2) + csum_new += data[i] + (data[i + 1] << 8); + csum_new = EEPROM_SUM - csum_new; + + pr_err("/*********************/\n"); + pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old); + pr_err("Calculated : 0x%04x\n", csum_new); + + pr_err("Offset Values\n"); + pr_err("======== ======\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); + + pr_err("Include this output when contacting your support provider.\n"); + pr_err("This is not a software error! Something bad happened to\n"); + pr_err("your hardware or EEPROM image. Ignoring this problem could\n"); + pr_err("result in further problems, possibly loss of data,\n"); + pr_err("corruption or system hangs!\n"); + pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n"); + pr_err("which is invalid and requires you to set the proper MAC\n"); + pr_err("address manually before continuing to enable this network\n"); + pr_err("device. 
Please inspect the EEPROM dump and report the\n"); + pr_err("issue to your hardware vendor or Intel Customer Support.\n"); + pr_err("/*********************/\n"); + + kfree(data); +} + +/** + * e1000_is_need_ioport - determine if an adapter needs ioport resources or not + * @pdev: PCI device information struct + * + * Return true if an adapter needs ioport resources + **/ +static int e1000_is_need_ioport(struct pci_dev *pdev) +{ + switch (pdev->device) { + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541ER_LOM: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + return true; + default: + return false; + } +} + +static netdev_features_t e1000_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int e1000_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + e1000_vlan_mode(netdev, features); + + if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + adapter->rx_csum = !!(features & NETIF_F_RXCSUM); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + + return 1; +} + +static const struct net_device_ops e1000_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_set_rx_mode = e1000_set_rx_mode, + .ndo_set_mac_address = e1000_set_mac, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_change_mtu = e1000_change_mtu, + .ndo_eth_ioctl = e1000_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif + .ndo_fix_features = e1000_fix_features, + .ndo_set_features = e1000_set_features, +}; + +/** + * e1000_init_hw_struct - initialize members of hw struct + * @adapter: board private struct + * @hw: structure used by e1000_hw.c + * + * Factors out initialization of the e1000_hw struct to its own function + * that can be called very early at init (just after struct allocation). + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + * Returns negative error codes if MAC type setup fails. 
+ */ +static int e1000_init_hw_struct(struct e1000_adapter *adapter, + struct e1000_hw *hw) +{ + struct pci_dev *pdev = adapter->pdev; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + hw->max_frame_size = adapter->netdev->mtu + + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; + + /* identify the MAC */ + if (e1000_set_mac_type(hw)) { + e_err(probe, "Unknown MAC Type\n"); + return -EIO; + } + + switch (hw->mac_type) { + default: + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->phy_init_script = 1; + break; + } + + e1000_set_media_type(hw); + e1000_get_bus_info(hw); + + hw->wait_autoneg_complete = false; + hw->tbi_compatibility_en = true; + hw->adaptive_ifs = true; + + /* Copper options */ + + if (hw->media_type == e1000_media_type_copper) { + hw->mdix = AUTO_ALL_MODES; + hw->disable_polarity_correction = false; + hw->master_slave = E1000_MASTER_SLAVE; + } + + return 0; +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter = NULL; + struct e1000_hw *hw; + + static int cards_found; + static int global_quad_port_a; /* global ksp3 port a indication */ + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 tmp = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + int bars, need_ioport; + bool disable_dev = false; + + /* do not allocate ioport bars when not needed */ + need_ioport = e1000_is_need_ioport(pdev); + if (need_ioport) { + bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); + err = pci_enable_device(pdev); + } else { + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_enable_device_mem(pdev); + } + if (err) + return err; + + err = pci_request_selected_regions(pdev, bars, e1000_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + adapter->bars = bars; + adapter->need_ioport = need_ioport; + + hw = &adapter->hw; + hw->back = adapter; + + err = -EIO; + hw->hw_addr = pci_ioremap_bar(pdev, BAR_0); + if (!hw->hw_addr) + goto err_ioremap; + + if (adapter->need_ioport) { + for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + hw->io_base = pci_resource_start(pdev, i); + break; + } + } + } + + /* make ready for any if (hw->...) 
below */ + err = e1000_init_hw_struct(adapter, hw); + if (err) + goto err_sw_init; + + /* there is a workaround being applied below that limits + * 64-bit DMA addresses to 64-bit hardware. There are some + * 32-bit adapters that Tx hang when given 64-bit DMA addresses + */ + pci_using_dac = 0; + if ((hw->bus_type == e1000_bus_type_pcix) && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + pr_err("No usable DMA config, aborting\n"); + goto err_dma; + } + } + + netdev->netdev_ops = &e1000_netdev_ops; + e1000_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000_clean); + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + + /* setup the private structure */ + + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + err = -EIO; + if (hw->mac_type == e1000_ce4100) { + hw->ce4100_gbe_mdio_base_virt = + ioremap(pci_resource_start(pdev, BAR_1), + pci_resource_len(pdev, BAR_1)); + + if (!hw->ce4100_gbe_mdio_base_virt) + goto err_mdio_ioremap; + } + + if (hw->mac_type >= e1000_82543) { + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER; + } + + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_82547)) + netdev->hw_features |= NETIF_F_TSO; + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->features |= netdev->hw_features; + netdev->hw_features |= (NETIF_F_RXCSUM | + NETIF_F_RXALL | + NETIF_F_RXFCS); + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + netdev->vlan_features |= (NETIF_F_TSO | + NETIF_F_HW_CSUM | + NETIF_F_SG); + + /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */ + if (hw->device_id != E1000_DEV_ID_82545EM_COPPER || + hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE) + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* MTU range: 46 - 16110 */ + netdev->min_mtu = ETH_ZLEN - ETH_HLEN; + netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); + + adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); + + /* initialize eeprom parameters */ + if (e1000_init_eeprom_params(hw)) { + e_err(probe, "EEPROM initialization failed\n"); + goto err_eeprom; + } + + /* before reading the EEPROM, reset the controller to + * put the device in a known good starting state + */ + + e1000_reset_hw(hw); + + /* make sure the EEPROM is good */ + if (e1000_validate_eeprom_checksum(hw) < 0) { + e_err(probe, "The EEPROM Checksum Is Not Valid\n"); + e1000_dump_eeprom(adapter); + /* set MAC address to all zeroes to invalidate and temporary + * disable this device for the user. 
This blocks regular + * traffic while still permitting ethtool ioctls from reaching + * the hardware as well as allowing the user to run the + * interface after manually setting a hw addr using + * `ip set address` + */ + memset(hw->mac_addr, 0, netdev->addr_len); + } else { + /* copy the MAC address out of the EEPROM */ + if (e1000_read_mac_addr(hw)) + e_err(probe, "EEPROM Read Error\n"); + } + /* don't block initialization here due to bad MAC address */ + eth_hw_addr_set(netdev, hw->mac_addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) + e_err(probe, "Invalid MAC Address\n"); + + + INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog); + INIT_DELAYED_WORK(&adapter->fifo_stall_task, + e1000_82547_tx_fifo_stall_task); + INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); + INIT_WORK(&adapter->reset_task, e1000_reset_task); + + e1000_check_options(adapter); + + /* Initial Wake on LAN setting + * If APM wake is enabled in the EEPROM, + * enable the ACPI Magic Packet filter + */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + break; + case e1000_82544: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_82544_APM; + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + break; + } + fallthrough; + default: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + break; + } + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82546GB_PCIE: + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->eeprom_wol = 0; + else + adapter->quad_port_a = true; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + } + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* Auto detect PHY address */ + if (hw->mac_type == e1000_ce4100) { + for (i = 0; i < 32; i++) { + hw->phy_addr = i; + e1000_read_phy_reg(hw, PHY_ID2, &tmp); + + if (tmp != 0 && tmp != 0xFF) + break; + } + + if (i >= 32) + goto err_eeprom; + } + + /* reset the hardware with the new settings */ + e1000_reset(adapter); + + // offer device to EtherCAT master module + adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); + if (adapter->ecdev) { + err = ecdev_open(adapter->ecdev); + if (err) { + ecdev_withdraw(adapter->ecdev); + goto err_register; + } + } else { + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + } + + e1000_vlan_filter_on_off(adapter, false); + + /* print bus type/speed/width info */ + e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", + ((hw->bus_type == e1000_bus_type_pcix) ? 
"-X" : ""), + ((hw->bus_speed == e1000_bus_speed_133) ? 133 : + (hw->bus_speed == e1000_bus_speed_120) ? 120 : + (hw->bus_speed == e1000_bus_speed_100) ? 100 : + (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33), + ((hw->bus_width == e1000_bus_width_64) ? 64 : 32), + netdev->dev_addr); + + if (!adapter->ecdev) { + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + } + + e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); + + cards_found++; + return 0; + +err_register: +err_eeprom: + e1000_phy_hw_reset(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_dma: +err_sw_init: +err_mdio_ioremap: + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); +err_ioremap: + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, bars); +err_pci_reg: + if (!adapter || disable_dev) + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. That could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool disable_dev; + + e1000_down_and_stop(adapter); + e1000_release_manageability(adapter); + + if (adapter->ecdev) { + ecdev_close(adapter->ecdev); + ecdev_withdraw(adapter->ecdev); + } else { + unregister_netdev(netdev); + } + + e1000_phy_hw_reset(hw); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + if (hw->mac_type == e1000_ce4100) + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_selected_regions(pdev, adapter->bars); + + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); + + if (disable_dev) + pci_disable_device(pdev); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * e1000_init_hw_struct MUST be called before this function + **/ +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + + if (e1000_alloc_queues(adapter)) { + e_err(probe, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + e1000_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__E1000_DOWN, &adapter->flags); + + return 0; +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. 
+ **/ +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct e1000_tx_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct e1000_rx_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog task is started, + * and the stack is notified that the interface is ready. + **/ +int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->flags)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + e1000_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { + e1000_update_mng_vlan(adapter); + } + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as e1000_up() */ + clear_bit(__E1000_DOWN, &adapter->flags); + + if (!adapter->ecdev) { + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_start_queue(netdev); + } + + /* fire a link status change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + + return E1000_SUCCESS; + +err_req_irq: + e1000_power_down_phy(adapter); + e1000_free_all_rx_resources(adapter); +err_setup_rx: + e1000_free_all_tx_resources(adapter); +err_setup_tx: + e1000_reset(adapter); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
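+ *
+ * If the device is attached to the EtherCAT master (adapter->ecdev is set),
+ * no IRQ, NAPI context or queue state was set up, so those teardown steps
+ * are skipped here and in e1000_down().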
+ **/ +int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int count = E1000_CHECK_RESET_COUNT; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(count < 0); + + /* signal that we're down so that the reset task will no longer run */ + set_bit(__E1000_DOWN, &adapter->flags); + clear_bit(__E1000_RESETTING, &adapter->flags); + + e1000_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); + + e1000_free_all_tx_resources(adapter); + e1000_free_all_rx_resources(adapter); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + } + + return 0; +} + +/** + * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary + * @adapter: address of board private structure + * @start: address of beginning of memory + * @len: length of memory + **/ +static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, + unsigned long len) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long begin = (unsigned long)start; + unsigned long end = begin + len; + + /* First rev 82545 and 82546 need to not allow any memory + * write location to cross 64k boundary due to errata 23 + */ + if (hw->mac_type == e1000_82545 || + hw->mac_type == e1000_ce4100 || + hw->mac_type == e1000_82546) { + return ((begin ^ (end - 1)) >> 16) == 0; + } + + return true; +} + +/** + * e1000_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @txdr: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct e1000_tx_buffer) * txdr->count; + txdr->buffer_info = vzalloc(size); + if (!txdr->buffer_info) + return -ENOMEM; + + /* round up to nearest 4K */ + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { +setup_tx_desc_die: + vfree(txdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + void *olddesc = txdr->desc; + dma_addr_t olddma = txdr->dma; + e_err(tx_err, "txdr align check failed: %u bytes at %p\n", + txdr->size, txdr->desc); + /* Try again, without freeing the previous */ + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, + &txdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + goto setup_tx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory " + "for the transmit descriptor ring\n"); + vfree(txdr->buffer_info); + return -ENOMEM; + } else { + /* Free old allocation, new allocation 
was successful */ + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + } + } + memset(txdr->desc, 0, txdr->size); + + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + + return 0; +} + +/** + * e1000_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_tx_resources(adapter, + &adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + u64 tdba; + struct e1000_hw *hw = &adapter->hw; + u32 tdlen, tctl, tipg; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + + switch (adapter->num_tx_queues) { + case 1: + default: + tdba = adapter->tx_ring[0].dma; + tdlen = adapter->tx_ring[0].count * + sizeof(struct e1000_tx_desc); + ew32(TDLEN, tdlen); + ew32(TDBAH, (tdba >> 32)); + ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); + ew32(TDT, 0); + ew32(TDH, 0); + adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? + E1000_TDH : E1000_82542_TDH); + adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? + E1000_TDT : E1000_82542_TDT); + break; + } + + /* Set the default values for the Tx Inter Packet Gap timer */ + if ((hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes)) + tipg = DEFAULT_82543_TIPG_IPGT_FIBER; + else + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + tipg = DEFAULT_82542_TIPG_IPGT; + ipgr1 = DEFAULT_82542_TIPG_IPGR1; + ipgr2 = DEFAULT_82542_TIPG_IPGR2; + break; + default: + ipgr1 = DEFAULT_82543_TIPG_IPGR1; + ipgr2 = DEFAULT_82543_TIPG_IPGR2; + break; + } + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + + ew32(TIDV, adapter->tx_int_delay); + if (hw->mac_type >= e1000_82540) + ew32(TADV, adapter->tx_abs_int_delay); + + /* Program the Transmit Control Register */ + + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + e1000_config_collision_dist(hw); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + if (hw->mac_type < e1000_82543) + adapter->txd_cmd |= E1000_TXD_CMD_RPS; + else + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + /* Cache if we're 82544 running in PCI-X because we'll + * need this to apply a workaround later in the send path. 
+ */ + if (hw->mac_type == e1000_82544 && + hw->bus_type == e1000_bus_type_pcix) + adapter->pcix_82544 = true; + + ew32(TCTL, tctl); + +} + +/** + * e1000_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rxdr: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct e1000_rx_buffer) * rxdr->count; + rxdr->buffer_info = vzalloc(size); + if (!rxdr->buffer_info) + return -ENOMEM; + + desc_len = sizeof(struct e1000_rx_desc); + + /* Round up to nearest 4K */ + + rxdr->size = rxdr->count * desc_len; + rxdr->size = ALIGN(rxdr->size, 4096); + + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { +setup_rx_desc_die: + vfree(rxdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + void *olddesc = rxdr->desc; + dma_addr_t olddma = rxdr->dma; + e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", + rxdr->size, rxdr->desc); + /* Try again, without freeing the previous */ + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, + &rxdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + goto setup_rx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory for " + "the Rx descriptor ring\n"); + goto setup_rx_desc_die; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + } + } + memset(rxdr->desc, 0, rxdr->size); + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + rxdr->rx_skb_top = NULL; + + return 0; +} + +/** + * e1000_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_rx_resources(adapter, + &adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = er32(RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + + rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + + if (hw->tbi_compatibility_on == 1) + rctl |= E1000_RCTL_SBP; + else + rctl &= ~E1000_RCTL_SBP; + + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case E1000_RXBUFFER_2048: + default: + 
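+		/* 2048 bytes is the only size encoded without the BSEX
+		 * (buffer size extension) bit, so it is cleared again here;
+		 * the larger sizes keep the BSEX bit that was set above.
+		 */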
rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + ew32(RCTL, rctl); +} + +/** + * e1000_configure_rx - Configure 8254x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + u64 rdba; + struct e1000_hw *hw = &adapter->hw; + u32 rdlen, rctl, rxcsum; + + if (!adapter->ecdev && adapter->netdev->mtu > ETH_DATA_LEN) { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + if (hw->mac_type >= e1000_82540) { + ew32(RADV, adapter->rx_abs_int_delay); + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + } + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + switch (adapter->num_rx_queues) { + case 1: + default: + rdba = adapter->rx_ring[0].dma; + ew32(RDLEN, rdlen); + ew32(RDBAH, (rdba >> 32)); + ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); + ew32(RDT, 0); + ew32(RDH, 0); + adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? + E1000_RDH : E1000_82542_RDH); + adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 
+ E1000_RDT : E1000_82542_RDT); + break; + } + + /* Enable 82543 Receive Checksum Offload for TCP and UDP */ + if (hw->mac_type >= e1000_82543) { + rxcsum = er32(RXCSUM); + if (adapter->rx_csum) + rxcsum |= E1000_RXCSUM_TUOFL; + else + /* don't need to clear IPPCSE as it defaults to 0 */ + rxcsum &= ~E1000_RXCSUM_TUOFL; + ew32(RXCSUM, rxcsum); + } + + /* Enable Receives */ + ew32(RCTL, rctl | E1000_RCTL_EN); +} + +/** + * e1000_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * e1000_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void e1000_free_all_tx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); +} + +static void +e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_tx_buffer *buffer_info, + int budget) +{ + if (adapter->ecdev) { + return; + } + + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + napi_consume_skb(buffer_info->skb, budget); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; + /* buffer_info must be completely set up in the transmit path */ +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0); + } + + netdev_reset_queue(adapter->netdev); + size = sizeof(struct e1000_tx_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->last_tx_tso = false; + + writel(0, hw->hw_addr + tx_ring->tdh); + writel(0, hw->hw_addr + tx_ring->tdt); +} + +/** + * e1000_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * e1000_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct pci_dev 
*pdev = adapter->pdev; + + e1000_clean_rx_ring(adapter, rx_ring); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * e1000_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +void e1000_free_all_rx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) +static unsigned int e1000_frag_len(const struct e1000_adapter *a) +{ + return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static void *e1000_alloc_frag(const struct e1000_adapter *a) +{ + unsigned int len = e1000_frag_len(a); + u8 *data = netdev_alloc_frag(len); + + if (likely(data)) + data += E1000_HEADROOM; + return data; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_rx_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx netfrags */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (adapter->clean_rx == e1000_clean_rx_irq) { + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.data) { + skb_free_frag(buffer_info->rxbuf.data); + buffer_info->rxbuf.data = NULL; + } + } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { + if (buffer_info->dma) + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.page) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + } + } + + buffer_info->dma = 0; + } + + /* there also may be some cached data from a chained receive */ + napi_free_frags(&adapter->napi); + rx_ring->rx_skb_top = NULL; + + size = sizeof(struct e1000_rx_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, hw->hw_addr + rx_ring->rdh); + writel(0, hw->hw_addr + rx_ring->rdt); +} + +/** + * e1000_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +/* The 82542 2.0 (revision 2) needs to have the receive unit in reset + * and memory write and invalidate disabled for certain operations + */ +static void e1000_enter_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + e1000_pci_clear_mwi(hw); + + rctl = er32(RCTL); + rctl |= E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (!adapter->ecdev && netif_running(netdev)) + e1000_clean_all_rx_rings(adapter); +} + +static void e1000_leave_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = 
&adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	u32 rctl;
+
+	rctl = er32(RCTL);
+	rctl &= ~E1000_RCTL_RST;
+	ew32(RCTL, rctl);
+	E1000_WRITE_FLUSH();
+	mdelay(5);
+
+	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+		e1000_pci_set_mwi(hw);
+
+	if (!adapter->ecdev && netif_running(netdev)) {
+		/* No need to loop, because 82542 supports only 1 queue */
+		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
+		e1000_configure_rx(adapter);
+		if (adapter->ecdev) {
+			/* fill rx ring completely! */
+			adapter->alloc_rx_buf(adapter, ring, ring->count);
+		} else {
+			/* this one leaves the last ring element unallocated! */
+			adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
+		}
+
+	}
+}
+
+/**
+ * e1000_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int e1000_set_mac(struct net_device *netdev, void *p)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	/* 82542 2.0 needs to be in reset to write receive address registers */
+
+	if (hw->mac_type == e1000_82542_rev2_0)
+		e1000_enter_82542_rst(adapter);
+
+	eth_hw_addr_set(netdev, addr->sa_data);
+	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
+
+	e1000_rar_set(hw, hw->mac_addr, 0);
+
+	if (hw->mac_type == e1000_82542_rev2_0)
+		e1000_leave_82542_rst(adapter);
+
+	return 0;
+}
+
+/**
+ * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void e1000_set_rx_mode(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct netdev_hw_addr *ha;
+	bool use_uc = false;
+	u32 rctl;
+	u32 hash_value;
+	int i, rar_entries = E1000_RAR_ENTRIES;
+	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
+	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
+
+	if (!mcarray)
+		return;
+
+	/* Check for Promiscuous and All Multicast modes */
+
+	rctl = er32(RCTL);
+
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+		rctl &= ~E1000_RCTL_VFE;
+	} else {
+		if (netdev->flags & IFF_ALLMULTI)
+			rctl |= E1000_RCTL_MPE;
+		else
+			rctl &= ~E1000_RCTL_MPE;
+		/* Enable VLAN filter if there is a VLAN */
+		if (e1000_vlan_used(adapter))
+			rctl |= E1000_RCTL_VFE;
+	}
+
+	if (netdev_uc_count(netdev) > rar_entries - 1) {
+		rctl |= E1000_RCTL_UPE;
+	} else if (!(netdev->flags & IFF_PROMISC)) {
+		rctl &= ~E1000_RCTL_UPE;
+		use_uc = true;
+	}
+
+	ew32(RCTL, rctl);
+
+	/* 82542 2.0 needs to be in reset to write receive address registers */
+
+	if (hw->mac_type == e1000_82542_rev2_0)
+		e1000_enter_82542_rst(adapter);
+
+	/* load the first 14 addresses into the exact filters 1-14. Unicast
+	 * addresses take precedence to avoid disabling unicast filtering
+	 * when possible.
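+	 * Multicast addresses that no longer fit are hashed into the
+	 * 4096-bit Multicast Table Array (MTA) further down.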
+ * + * RAR 0 is used for the station MAC address + * if there are not 14 addresses, go ahead and clear the filters + */ + i = 1; + if (use_uc) + netdev_for_each_uc_addr(ha, netdev) { + if (i == rar_entries) + break; + e1000_rar_set(hw, ha->addr, i++); + } + + netdev_for_each_mc_addr(ha, netdev) { + if (i == rar_entries) { + /* load any remaining addresses into the hash table */ + u32 hash_reg, hash_bit, mta; + hash_value = e1000_hash_mc_addr(hw, ha->addr); + hash_reg = (hash_value >> 5) & 0x7F; + hash_bit = hash_value & 0x1F; + mta = (1 << hash_bit); + mcarray[hash_reg] |= mta; + } else { + e1000_rar_set(hw, ha->addr, i++); + } + } + + for (; i < rar_entries; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); + E1000_WRITE_FLUSH(); + } + + /* write the hash table completely, write from bottom to avoid + * both stupid write combining chipsets, and flushing each write + */ + for (i = mta_reg_count - 1; i >= 0 ; i--) { + /* If we are on an 82544 has an errata where writing odd + * offsets overwrites the previous even offset, but writing + * backwards over the range solves the issue by always + * writing the odd offset first + */ + E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); + } + E1000_WRITE_FLUSH(); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + kfree(mcarray); +} + +/** + * e1000_update_phy_info_task - get phy info + * @work: work struct contained inside adapter struct + * + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void e1000_update_phy_info_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + phy_info_task.work); + + e1000_phy_get_info(&adapter->hw, &adapter->phy_info); +} + +/** + * e1000_82547_tx_fifo_stall_task - task to complete work + * @work: work struct contained inside adapter struct + **/ +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + fifo_stall_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 tctl; + + if (atomic_read(&adapter->tx_fifo_stall)) { + if ((er32(TDT) == er32(TDH)) && + (er32(TDFT) == er32(TDFH)) && + (er32(TDFTS) == er32(TDFHS))) { + tctl = er32(TCTL); + ew32(TCTL, tctl & ~E1000_TCTL_EN); + ew32(TDFT, adapter->tx_head_addr); + ew32(TDFH, adapter->tx_head_addr); + ew32(TDFTS, adapter->tx_head_addr); + ew32(TDFHS, adapter->tx_head_addr); + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); + + adapter->tx_fifo_head = 0; + atomic_set(&adapter->tx_fifo_stall, 0); + netif_wake_queue(netdev); + } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } + } +} + +bool e1000_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or rx + * sequence error interrupt (except on intel ce4100). 
+ * get_link_status will stay false until the + * e1000_check_for_link establishes link for copper adapters + * ONLY + */ + switch (hw->media_type) { + case e1000_media_type_copper: + if (hw->mac_type == e1000_ce4100) + hw->get_link_status = 1; + if (hw->get_link_status) { + e1000_check_for_link(hw); + link_active = !hw->get_link_status; + } else { + link_active = true; + } + break; + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_active = hw->serdes_has_link; + break; + default: + break; + } + + return link_active; +} + +/** + * e1000_watchdog - work function + * @work: work struct contained inside adapter struct + **/ +static void e1000_watchdog(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + watchdog_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_ring *txdr = adapter->tx_ring; + u32 link, tctl; + + link = e1000_has_link(adapter); + if (!adapter->ecdev && (netif_carrier_ok(netdev)) && link) + goto link_up; + + if (link) { + if ((adapter->ecdev && !ecdev_get_link(adapter->ecdev)) || + (!adapter->ecdev && !netif_carrier_ok(netdev))) { + u32 ctrl; + /* update snapshot of PHY registers on LSC */ + e1000_get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = er32(CTRL); + pr_info("%s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & + E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & + E1000_CTRL_RFCE) ? "RX" : ((ctrl & + E1000_CTRL_TFCE) ? "TX" : "None"))); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + /* maybe add some timeout factor ? 
*/ + break; + } + + /* enable transmits in the hardware */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + if (adapter->ecdev) { + ecdev_set_link(adapter->ecdev, 1); + } + else { + netif_carrier_on(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + } + adapter->smartspeed = 0; + } + } else { + if ((adapter->ecdev && ecdev_get_link(adapter->ecdev)) + || (!adapter->ecdev && netif_carrier_ok(netdev))) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + pr_info("%s NIC Link is Down\n", + netdev->name); + + if (adapter->ecdev) { + ecdev_set_link(adapter->ecdev, 0); + } else { + netif_carrier_off(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + } + } + + e1000_smartspeed(adapter); + } + +link_up: + e1000_update_stats(adapter); + + hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + hw->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; + adapter->gorcl_old = adapter->stats.gorcl; + adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; + adapter->gotcl_old = adapter->stats.gotcl; + + e1000_update_adaptive(hw); + + if (!adapter->ecdev && !netif_carrier_ok(netdev)) { + if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* exit immediately since reset is imminent */ + return; + } + } + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { + /* Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; + u32 dif = (adapter->gotcl > adapter->gorcl ? + adapter->gotcl - adapter->gorcl : + adapter->gorcl - adapter->gotcl) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + ew32(ITR, 1000000000 / (itr * 256)); + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + ew32(ICS, E1000_ICS_RXDMT0); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = true; + + /* Reschedule the task */ + if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
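The watchdog's simple ITR mode above converts a target interrupt rate into the ITR register value; the register holds the minimum interval between interrupts in 256 ns units, which is what the 1000000000 / (itr * 256) expression computes. A small standalone sketch of that conversion (the example rates are chosen arbitrarily):

#include <stdio.h>

/* Convert a target interrupt rate (interrupts per second) into the value
 * written to the ITR register above, i.e. the interval in 256 ns units. */
static unsigned int itr_to_register(unsigned int ints_per_sec)
{
	return 1000000000u / (ints_per_sec * 256u);
}

int main(void)
{
	printf("4000 ints/s  -> ITR = %u\n", itr_to_register(4000));	/* ~976 */
	printf("8000 ints/s  -> ITR = %u\n", itr_to_register(8000));	/* ~488 */
	printf("20000 ints/s -> ITR = %u\n", itr_to_register(20000));	/* ~195 */
	return 0;
}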
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see e1000_param.c) + **/ +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, int bytes) +{ + unsigned int retval = itr_setting; + struct e1000_hw *hw = &adapter->hw; + + if (unlikely(hw->mac_type < e1000_82540)) + goto update_itr_done; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* jumbo frames get bulk treatment*/ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* jumbo frames need bulk latency setting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) + retval = bulk_latency; + else if (packets <= 2 && bytes < 512) + retval = lowest_latency; + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + if (unlikely(hw->mac_type < e1000_82540)) + return; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (unlikely(adapter->link_speed != SPEED_1000)) { + new_itr = 4000; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + ew32(ITR, 1000000000 / (new_itr * 256)); + } +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_NO_FCS 0x00000010 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + + if (skb_is_gso(skb)) { + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + hdr_len = skb_tcp_all_headers(skb); + mss = skb_shinfo(skb)->gso_size; + if (protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_is_gso_v6(skb)) { + tcp_v6_gso_csum_prep(skb); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (++i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn(drv, "checksum_partial proto=%x!\n", + skb->protocol); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + 
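/* tucss is the offset at which the hardware starts checksumming, tucso the
 * offset at which it writes the result back, and tucse (set to 0 just below)
 * means the checksum runs to the end of the packet. */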
context_desc->upper_setup.tcp_fields.tucse = 0;
+	context_desc->tcp_seg_setup.data = 0;
+	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+	buffer_info->time_stamp = jiffies;
+	buffer_info->next_to_watch = i;
+
+	if (unlikely(++i == tx_ring->count))
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
+	return true;
+}
+
+#define E1000_MAX_TXD_PWR	12
+#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+			struct e1000_tx_ring *tx_ring,
+			struct sk_buff *skb, unsigned int first,
+			unsigned int max_per_txd, unsigned int nr_frags,
+			unsigned int mss)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_tx_buffer *buffer_info;
+	unsigned int len = skb_headlen(skb);
+	unsigned int offset = 0, size, count = 0, i;
+	unsigned int f, bytecount, segs;
+
+	i = tx_ring->next_to_use;
+
+	while (len) {
+		buffer_info = &tx_ring->buffer_info[i];
+		size = min(len, max_per_txd);
+		/* Workaround for Controller erratum --
+		 * descriptor for non-tso packet in a linear SKB that follows a
+		 * tso gets written back prematurely before the data is fully
+		 * DMA'd to the controller
+		 */
+		if (!skb->data_len && tx_ring->last_tx_tso &&
+		    !skb_is_gso(skb)) {
+			tx_ring->last_tx_tso = false;
+			size -= 4;
+		}
+
+		/* Workaround for premature desc write-backs
+		 * in TSO mode. Append 4-byte sentinel desc
+		 */
+		if (unlikely(mss && !nr_frags && size == len && size > 8))
+			size -= 4;
+		/* work-around for errata 10 and it applies
+		 * to all controllers in PCI-X mode
+		 * The fix is to make sure that the first descriptor of a
+		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
+		 */
+		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+			     (size > 2015) && count == 0))
+			size = 2015;
+
+		/* Workaround for potential 82544 hang in PCI-X. Avoid
+		 * terminating buffers within evenly-aligned dwords.
+		 */
+		if (unlikely(adapter->pcix_82544 &&
+			     !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+			     size > 4))
+			size -= 4;
+
+		buffer_info->length = size;
+		/* set time_stamp *before* dma to help avoid a possible race */
+		buffer_info->time_stamp = jiffies;
+		buffer_info->mapped_as_page = false;
+		buffer_info->dma = dma_map_single(&pdev->dev,
+						  skb->data + offset,
+						  size, DMA_TO_DEVICE);
+		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+			goto dma_error;
+		buffer_info->next_to_watch = i;
+
+		len -= size;
+		offset += size;
+		count++;
+		if (len) {
+			i++;
+			if (unlikely(i == tx_ring->count))
+				i = 0;
+		}
+	}
+
+	for (f = 0; f < nr_frags; f++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+		len = skb_frag_size(frag);
+		offset = 0;
+
+		while (len) {
+			unsigned long bufend;
+			i++;
+			if (unlikely(i == tx_ring->count))
+				i = 0;
+
+			buffer_info = &tx_ring->buffer_info[i];
+			size = min(len, max_per_txd);
+			/* Workaround for premature desc write-backs
+			 * in TSO mode. Append 4-byte sentinel desc
+			 */
+			if (unlikely(mss && f == (nr_frags-1) &&
+				     size == len && size > 8))
+				size -= 4;
+			/* Workaround for potential 82544 hang in PCI-X.
+			 * Avoid terminating buffers within evenly-aligned
+			 * dwords.
+ */ + bufend = (unsigned long) + page_to_phys(skb_frag_page(frag)); + bufend += offset + size - 1; + if (unlikely(adapter->pcix_82544 && + !(bufend & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = true; + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, + offset, size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + } + } + + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i == 0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0); + } + + return 0; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, int tx_flags, + int count) +{ + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_tx_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + txd_lower &= ~(E1000_TXD_CMD_IFCS); + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + if (unlikely(++i == tx_ring->count)) + i = 0; + } + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + + tx_ring->next_to_use = i; +} + +/* 82547 workaround to avoid controller hang in half-duplex environment. + * The workaround is to avoid queuing a large packet that would span + * the internal Tx FIFO ring boundary by notifying the stack to resend + * the packet at a later time. This gives the Tx FIFO an opportunity to + * flush all packets. When that occurs, we reset the Tx FIFO pointers + * to the beginning of the Tx FIFO. 
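The 82547 workaround described above defers any frame that, in half duplex, would not fit into the remaining Tx FIFO space plus a guard band; once the FIFO drains, the fifo_stall task resets the FIFO pointers and wakes the queue. A standalone sketch of the stall decision, mirroring e1000_82547_fifo_workaround() below (the FIFO size and fill level are invented example values; the constants match the defines that follow):

#include <stdio.h>

#define FIFO_HDR 0x10	/* per-frame FIFO header, as in E1000_FIFO_HDR */
#define PAD_LEN  0x3E0	/* guard band, as in E1000_82547_PAD_LEN */

/* Return 1 if a frame of skb_len bytes should be deferred because it could
 * wrap the internal Tx FIFO. */
static int would_stall(unsigned int fifo_size, unsigned int fifo_head,
		       unsigned int skb_len)
{
	unsigned int fifo_space = fifo_size - fifo_head;
	unsigned int skb_fifo_len =
		(skb_len + FIFO_HDR + FIFO_HDR - 1) & ~(unsigned int)(FIFO_HDR - 1);

	return skb_fifo_len >= PAD_LEN + fifo_space;
}

int main(void)
{
	/* hypothetical 8 KiB FIFO partition, 1514-byte frame */
	printf("head=4096: stall=%d\n", would_stall(8192, 4096, 1514));	/* 0 */
	printf("head=7680: stall=%d\n", would_stall(8192, 7680, 1514));	/* 1 */
	return 0;
}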
+ */ + +#define E1000_FIFO_HDR 0x10 +#define E1000_82547_PAD_LEN 0x3E0 + +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; + u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; + + skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); + + if (adapter->link_duplex != HALF_DUPLEX) + goto no_fifo_stall_required; + + if (atomic_read(&adapter->tx_fifo_stall)) + return 1; + + if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { + atomic_set(&adapter->tx_fifo_stall, 1); + return 1; + } + +no_fifo_stall_required: + adapter->tx_fifo_head += skb_fifo_len; + if (adapter->tx_fifo_head >= adapter->tx_fifo_size) + adapter->tx_fifo_head -= adapter->tx_fifo_size; + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + + if (adapter->ecdev) { + return -EBUSY; + } + + netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(E1000_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, + struct e1000_tx_ring *tx_ring, int size) +{ + if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __e1000_maybe_stop_tx(netdev, size); +} + +#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X)) +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *tx_ring; + unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + __be16 protocol = vlan_get_protocol(skb); + + /* This goes back to the question of how to logically map a Tx queue + * to a flow. Right now, performance is impacted slightly negatively + * if using multiple Tx queues. If the stack breaks away from a + * single qdisc implementation, we can look at this again. + */ + tx_ring = adapter->tx_ring; + + /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, + * packets may get corrupted during padding by HW. + * To WA this issue, pad all small packets manually. + */ + if (eth_skb_pad(skb)) + return NETDEV_TX_OK; + + mss = skb_shinfo(skb)->gso_size; + /* The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. 
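As the comment above explains, the hardware's FIFO-space check assumes ceil(buffer_len / mss) is at most 4, so e1000_xmit_frame() clamps the per-descriptor buffer length to four times the MSS before counting descriptors. A standalone sketch of that clamp (the MSS values are examples only):

#include <stdio.h>

#define MAX_DATA_PER_TXD (1u << 12)	/* 4096, as in E1000_MAX_DATA_PER_TXD */

/* Mirrors the "mss << 2" clamp applied below. */
static unsigned int max_per_txd_for_mss(unsigned int mss)
{
	unsigned int max_per_txd = MAX_DATA_PER_TXD;

	if (mss && (mss << 2) < max_per_txd)
		max_per_txd = mss << 2;
	return max_per_txd;
}

int main(void)
{
	printf("mss=1460 -> max_per_txd=%u\n", max_per_txd_for_mss(1460));	/* 4096 */
	printf("mss=536  -> max_per_txd=%u\n", max_per_txd_for_mss(536));	/* 2144 */
	return 0;
}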
+ */ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + hdr_len = skb_tcp_all_headers(skb); + if (skb->data_len && hdr_len == len) { + switch (hw->mac_type) { + case e1000_82544: { + unsigned int pull_size; + + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements + * NOTE: this is a TSO only workaround + * if end byte alignment not correct move us + * into the next dword + */ + if ((unsigned long)(skb_tail_pointer(skb) - 1) + & 4) + break; + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err(drv, "__pskb_pull_tail " + "failed.\n"); + if (!adapter->ecdev) { + dev_kfree_skb_any(skb); + } + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + break; + } + default: + /* do nothing */ + break; + } + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + /* Controller Erratum workaround */ + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) + count++; + + count += TXD_USE_COUNT(len, max_txd_pwr); + + if (adapter->pcix_82544) + count++; + + /* work-around for errata 10 and it applies to all controllers + * in PCI-X mode, so add one more descriptor to the count + */ + if (unlikely((hw->bus_type == e1000_bus_type_pcix) && + (len > 2015))) + count++; + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), + max_txd_pwr); + if (adapter->pcix_82544) + count += nr_frags; + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time + */ + if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) + return NETDEV_TX_BUSY; + + if (unlikely((hw->mac_type == e1000_82547) && + (e1000_82547_fifo_workaround(adapter, skb)))) { + if (!adapter->ecdev) { + netif_stop_queue(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } + return NETDEV_TX_BUSY; + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << + E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, tx_ring, skb, protocol); + if (tso < 0) { + if (!adapter->ecdev) { + dev_kfree_skb_any(skb); + } + return NETDEV_TX_OK; + } + + if (likely(tso)) { + if (likely(hw->mac_type != e1000_82544)) + tx_ring->last_tx_tso = true; + tx_flags |= E1000_TX_FLAGS_TSO; + } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) + tx_flags |= E1000_TX_FLAGS_CSUM; + + if (protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + if (unlikely(skb->no_fcs)) + tx_flags |= E1000_TX_FLAGS_NO_FCS; + + count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, + nr_frags, mss); + + if (count) { + /* The descriptors needed is higher than other Intel drivers + * due to a number of workarounds. 
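The TXD_USE_COUNT macro used above is a ceiling division: it converts a buffer length into the number of descriptors required when one descriptor can carry at most 2^max_txd_pwr bytes. A standalone illustration with the default 4096-byte limit (X = 12):

#include <stdio.h>

/* Same expression as the driver's TXD_USE_COUNT(S, X). */
#define TXD_USE_COUNT(S, X) (((S) + ((1u << (X)) - 1)) >> (X))

int main(void)
{
	printf("%u\n", TXD_USE_COUNT(1514u, 12));	/* 1 descriptor */
	printf("%u\n", TXD_USE_COUNT(4096u, 12));	/* 1 descriptor */
	printf("%u\n", TXD_USE_COUNT(9000u, 12));	/* 3 descriptors */
	return 0;
}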
The breakdown is below: + * Data descriptors: MAX_SKB_FRAGS + 1 + * Context Descriptor: 1 + * Keep head from touching tail: 2 + * Workarounds: 3 + */ + int desc_needed = MAX_SKB_FRAGS + 7; + + if (!adapter->ecdev) { + netdev_sent_queue(netdev, skb->len); + skb_tx_timestamp(skb); + } + + e1000_tx_queue(adapter, tx_ring, tx_flags, count); + + /* 82544 potentially requires twice as many data descriptors + * in order to guarantee buffers don't end on evenly-aligned + * dwords + */ + if (adapter->pcix_82544) + desc_needed += MAX_SKB_FRAGS + 1; + + /* Make sure there is space in the ring for the next send. */ + e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); + + if (adapter->ecdev || !netdev_xmit_more() || + netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { + writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); + } + } else { + if (!adapter->ecdev) { + dev_kfree_skb_any(skb); + } + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +#define NUM_REGS 38 /* 1 based count */ +static void e1000_regdump(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 regs[NUM_REGS]; + u32 *regs_buff = regs; + int i = 0; + + static const char * const reg_name[] = { + "CTRL", "STATUS", + "RCTL", "RDLEN", "RDH", "RDT", "RDTR", + "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", + "TIDV", "TXDCTL", "TADV", "TARC0", + "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", + "TXDCTL1", "TARC1", + "CTRL_EXT", "ERT", "RDBAL", "RDBAH", + "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", + "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" + }; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDBAL); + regs_buff[9] = er32(TDBAH); + regs_buff[10] = er32(TDLEN); + regs_buff[11] = er32(TDH); + regs_buff[12] = er32(TDT); + regs_buff[13] = er32(TIDV); + regs_buff[14] = er32(TXDCTL); + regs_buff[15] = er32(TADV); + regs_buff[16] = er32(TARC0); + + regs_buff[17] = er32(TDBAL1); + regs_buff[18] = er32(TDBAH1); + regs_buff[19] = er32(TDLEN1); + regs_buff[20] = er32(TDH1); + regs_buff[21] = er32(TDT1); + regs_buff[22] = er32(TXDCTL1); + regs_buff[23] = er32(TARC1); + regs_buff[24] = er32(CTRL_EXT); + regs_buff[25] = er32(ERT); + regs_buff[26] = er32(RDBAL0); + regs_buff[27] = er32(RDBAH0); + regs_buff[28] = er32(TDFH); + regs_buff[29] = er32(TDFT); + regs_buff[30] = er32(TDFHS); + regs_buff[31] = er32(TDFTS); + regs_buff[32] = er32(TDFPC); + regs_buff[33] = er32(RDFH); + regs_buff[34] = er32(RDFT); + regs_buff[35] = er32(RDFHS); + regs_buff[36] = er32(RDFTS); + regs_buff[37] = er32(RDFPC); + + pr_info("Register dump\n"); + for (i = 0; i < NUM_REGS; i++) + pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); +} + +/* + * e1000_dump: Print registers, tx ring and rx ring + */ +static void e1000_dump(struct e1000_adapter *adapter) +{ + /* this code doesn't handle multiple rings */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + + if (!netif_msg_hw(adapter)) + return; + + /* Print Registers */ + e1000_regdump(adapter); + + /* transmit dump */ + pr_info("TX Desc ring0 dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on 
Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * +----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); + pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); + + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)tx_desc; + const char *type; + + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + type = "NTC/U"; + else if (i == tx_ring->next_to_use) + type = "NTU"; + else if (i == tx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", + ((le64_to_cpu(u->b) & (1<<20)) ? 
'd' : 'c'), i, + le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->length, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, buffer_info->skb, type); + } + +rx_ring_summary: + /* receive dump */ + pr_info("\nRX Desc ring dump\n"); + + /* Legacy Receive Descriptor Format + * + * +-----------------------------------------------------+ + * | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * | VLAN Tag | Errors | Status 0 | Packet csum | Length | + * +-----------------------------------------------------+ + * 63 48 47 40 39 32 31 16 15 0 + */ + pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); + + if (!netif_msg_rx_status(adapter)) + goto exit; + + for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)rx_desc; + const char *type; + + if (i == rx_ring->next_to_use) + type = "NTU"; + else if (i == rx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", + i, le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->rxbuf.data, type); + } /* for */ + + /* dump the descriptor caches */ + /* rx */ + pr_info("Rx descriptor cache in 64bit format\n"); + for (i = 0x6000; i <= 0x63FF ; i += 0x10) { + pr_info("R%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } + /* tx */ + pr_info("Tx descriptor cache in 64bit format\n"); + for (i = 0x7000; i <= 0x73FF ; i += 0x10) { + pr_info("T%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } +exit: + return; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: number of the Tx queue that hung (unused) + **/ +static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = + container_of(work, struct e1000_adapter, reset_task); + + e_err(drv, "Reset adapter\n"); + e1000_reinit_locked(adapter); +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + if (adapter->ecdev) { + return -EBUSY; + } + + /* Adapter-specific max frame size limits. */ + switch (hw->mac_type) { + case e1000_undefined ... e1000_82542_rev2_1: + if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { + e_err(probe, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + break; + default: + /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. 
*/ + break; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + /* e1000_down has a dependency on max_frame_size */ + hw->max_frame_size = max_frame; + if (netif_running(netdev)) { + /* prevent buffers from being reallocated */ + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; + e1000_down(adapter); + } + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. RXBUFFER_2048 --> size-4096 slab + * however with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= E1000_RXBUFFER_2048) + adapter->rx_buffer_len = E1000_RXBUFFER_2048; + else +#if (PAGE_SIZE >= E1000_RXBUFFER_16384) + adapter->rx_buffer_len = E1000_RXBUFFER_16384; +#elif (PAGE_SIZE >= E1000_RXBUFFER_4096) + adapter->rx_buffer_len = PAGE_SIZE; +#endif + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if (!hw->tbi_compatibility_on && + ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || + (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + e1000_up(adapter); + else + e1000_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->flags); + + return 0; +} + +/** + * e1000_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void e1000_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned long flags = 0; + u16 phy_tmp; + +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
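e1000_change_mtu() above first derives the maximum on-wire frame size (MTU plus 14-byte Ethernet header plus 4-byte FCS) and then picks the receive buffer class: frames up to 2048 bytes use the 2 KiB buffer, larger frames switch to page-sized buffers and the jumbo receive path. A simplified standalone sketch of that selection (a 4 KiB page size is assumed here; the driver also handles the 16 KiB case):

#include <stdio.h>

#define ETH_HDR_LEN	14
#define ETH_FCS_LEN	4
#define RXBUFFER_2048	2048
#define PAGE_SZ		4096	/* assumed page size for this example */

static int rx_buffer_len(int max_frame)
{
	return max_frame <= RXBUFFER_2048 ? RXBUFFER_2048 : PAGE_SZ;
}

int main(void)
{
	int mtu = 1500;
	int max_frame = mtu + ETH_HDR_LEN + ETH_FCS_LEN;	/* 1518 */

	printf("MTU %d -> frame %d -> buffer %d\n", mtu, max_frame,
	       rx_buffer_len(max_frame));
	printf("MTU 9000 -> buffer %d\n", rx_buffer_len(9000 + ETH_HDR_LEN + ETH_FCS_LEN));
	return 0;
}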
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + if (!adapter->ecdev) { + spin_lock_irqsave(&adapter->stats_lock, flags); + } + + /* these counters are modified from e1000_tbi_adjust_stats, + * called from the interrupt context, so they must only + * be written while holding adapter->stats_lock + */ + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorcl += er32(GORCL); + adapter->stats.gorch += er32(GORCH); + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.prc64 += er32(PRC64); + adapter->stats.prc127 += er32(PRC127); + adapter->stats.prc255 += er32(PRC255); + adapter->stats.prc511 += er32(PRC511); + adapter->stats.prc1023 += er32(PRC1023); + adapter->stats.prc1522 += er32(PRC1522); + + adapter->stats.symerrs += er32(SYMERRS); + adapter->stats.mpc += er32(MPC); + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + adapter->stats.sec += er32(SEC); + adapter->stats.rlec += er32(RLEC); + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.fcruc += er32(FCRUC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotcl += er32(GOTCL); + adapter->stats.gotch += er32(GOTCH); + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + adapter->stats.rfc += er32(RFC); + adapter->stats.rjc += er32(RJC); + adapter->stats.torl += er32(TORL); + adapter->stats.torh += er32(TORH); + adapter->stats.totl += er32(TOTL); + adapter->stats.toth += er32(TOTH); + adapter->stats.tpr += er32(TPR); + + adapter->stats.ptc64 += er32(PTC64); + adapter->stats.ptc127 += er32(PTC127); + adapter->stats.ptc255 += er32(PTC255); + adapter->stats.ptc511 += er32(PTC511); + adapter->stats.ptc1023 += er32(PTC1023); + adapter->stats.ptc1522 += er32(PTC1522); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* used for adaptive IFS */ + + hw->tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->tx_packet_delta; + hw->collision_delta = er32(COLC); + adapter->stats.colc += hw->collision_delta; + + if (hw->mac_type >= e1000_82543) { + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.tncrs += er32(TNCRS); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + } + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; + netdev->stats.rx_length_errors = adapter->stats.rlerrc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; + netdev->stats.tx_errors = adapter->stats.txerrc; + 
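/* The per-cause breakdown below maps excessive collisions (ecol) to
 * tx_aborted_errors and late collisions (latecol) to tx_window_errors. */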
netdev->stats.tx_aborted_errors = adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + if (hw->bad_tx_carr_stats_fd && + adapter->link_duplex == FULL_DUPLEX) { + netdev->stats.tx_carrier_errors = 0; + adapter->stats.tncrs = 0; + } + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Phy Stats */ + if (hw->media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && + (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + + if ((hw->mac_type <= e1000_82546) && + (hw->phy_type == e1000_phy_m88) && + !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) + adapter->phy_stats.receive_errors += phy_tmp; + } + + /* Management Stats */ + if (hw->has_smbus) { + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + } + + if (!adapter->ecdev) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + } +} + +void ec_poll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) { + e1000_watchdog(&adapter->watchdog_task.work); + adapter->ec_watchdog_jiffies = jiffies; + } + + e1000_intr(0, netdev); +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (unlikely((!icr))) + return IRQ_NONE; /* Not our interrupt */ + + /* we might have caused the interrupt, but the above + * read cleared it, and just in case the driver is + * down there is nothing to do so return handled + */ + if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) + return IRQ_HANDLED; + + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { + hw->get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!adapter->ecdev && !test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 1); + } + + if (adapter->ecdev) { + int i, ec_work_done = 0; + for (i = 0; i < E1000_MAX_INTR; i++) { + if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring, + &ec_work_done, 100) && + !e1000_clean_tx_irq(adapter, adapter->tx_ring))) { + break; + } + } + } else { + /* disable interrupts, without the synchronize_irq bit */ + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + + if (likely(napi_schedule_prep(&adapter->napi))) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } else { + /* this really should not happen! 
if it does it is basically a + * bug, but not a hard error, so enable ints and continue + */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + } + + return IRQ_HANDLED; +} + +/** + * e1000_clean - NAPI Rx polling callback + * @napi: napi struct containing references to driver info + * @budget: budget given to driver for receive packets + **/ +static int e1000_clean(struct napi_struct *napi, int budget) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); + int tx_clean_complete = 0, work_done = 0; + + tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); + + adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); + + if (!tx_clean_complete || work_done == budget) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) { + if (likely(adapter->itr_setting & 3)) + e1000_set_itr(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return work_done; +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * @tx_ring: ring to clean + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; + unsigned int bytes_compl = 0, pkts_compl = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + dma_rmb(); /* read buffer_info after eop_desc */ + for ( ; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + + } + e1000_unmap_and_free_tx_resource(adapter, buffer_info, + 64); + tx_desc->upper.data = 0; + + if (unlikely(++i == tx_ring->count)) + i = 0; + } + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, + * which will reuse the cleaned buffers. + */ + smp_store_release(&tx_ring->next_to_clean, i); + + if (!adapter->ecdev) { + netdev_completed_queue(netdev, pkts_compl, bytes_compl); + } + +#define TX_WAKE_THRESHOLD 32 + if (!adapter->ecdev && unlikely(count && netif_carrier_ok(netdev) && + E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
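Both the TX_WAKE_THRESHOLD test above and e1000_maybe_stop_tx() rely on E1000_DESC_UNUSED(), the ring free-space count in which one slot is deliberately kept empty so that next_to_use == next_to_clean always means an empty ring. A standalone sketch of the arithmetic typically behind such a macro (barriers omitted; ring size and indices are example values, not taken from the driver):

#include <stdio.h>

static unsigned int desc_unused(unsigned int count, unsigned int use,
				unsigned int clean)
{
	return (clean > use ? 0 : count) + clean - use - 1;
}

int main(void)
{
	printf("%u\n", desc_unused(256, 0, 0));		/* empty ring: 255 free */
	printf("%u\n", desc_unused(256, 200, 10));	/* 65 free */
	printf("%u\n", desc_unused(256, 10, 200));	/* wrapped: 189 free */
	return 0;
}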
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->flags))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (!adapter->ecdev && adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[eop].time_stamp && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%lu>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + (unsigned long)(tx_ring - adapter->tx_ring), + readl(hw->hw_addr + tx_ring->tdh), + readl(hw->hw_addr + tx_ring->tdt), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); + e1000_dump(adapter); + netif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + netdev->stats.tx_bytes += total_tx_bytes; + netdev->stats.tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_rx_checksum - Receive Checksum Offload for 82543 + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @skb: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + skb_checksum_none_assert(skb); + + /* 82543 or newer only */ + if (unlikely(hw->mac_type < e1000_82543)) + return; + /* Ignore Checksum bit is set */ + if (unlikely(status & E1000_RXD_STAT_IXSM)) + return; + /* TCP/UDP checksum error bit is set */ + if (unlikely(errors & E1000_RXD_ERR_TCPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + /* TCP/UDP Checksum has not been calculated */ + if (!(status & E1000_RXD_STAT_TCPCS)) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + if (likely(status & E1000_RXD_STAT_TCPCS)) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + adapter->hw_csum_good++; +} + +/** + * e1000_consume_page - helper function for jumbo Rx path + * @bi: software descriptor shadow data + * @skb: skb being modified + * @length: length of data being added + **/ +static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->rxbuf.page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; +} + +/** + * e1000_receive_skb - helper function to handle rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + */ +static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, + __le16 vlan, struct sk_buff *skb) +{ + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (status & E1000_RXD_STAT_VP) { + u16 vid = le16_to_cpu(vlan) & 
E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + napi_gro_receive(&adapter->napi, skb); +} + +/** + * e1000_tbi_adjust_stats + * @hw: Struct containing variables accessed by shared code + * @stats: point to stats struct + * @frame_len: The length of the frame in question + * @mac_addr: The Ethernet destination address of the frame in question + * + * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT + */ +static void e1000_tbi_adjust_stats(struct e1000_hw *hw, + struct e1000_hw_stats *stats, + u32 frame_len, const u8 *mac_addr) +{ + u64 carry_bit; + + /* First adjust the frame length. */ + frame_len--; + /* We need to adjust the statistics counters, since the hardware + * counters overcount this packet as a CRC error and undercount + * the packet as a good packet + */ + /* This packet should not be counted as a CRC error. */ + stats->crcerrs--; + /* This packet does count as a Good Packet Received. */ + stats->gprc++; + + /* Adjust the Good Octets received counters */ + carry_bit = 0x80000000 & stats->gorcl; + stats->gorcl += frame_len; + /* If the high bit of Gorcl (the low 32 bits of the Good Octets + * Received Count) was one before the addition, + * AND it is zero after, then we lost the carry out, + * need to add one to Gorch (Good Octets Received Count High). + * This could be simplified if all environments supported + * 64-bit integers. + */ + if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) + stats->gorch++; + /* Is this a broadcast or multicast? Check broadcast first, + * since the test for a multicast frame will test positive on + * a broadcast frame. + */ + if (is_broadcast_ether_addr(mac_addr)) + stats->bprc++; + else if (is_multicast_ether_addr(mac_addr)) + stats->mprc++; + + if (frame_len == hw->max_frame_size) { + /* In this case, the hardware has overcounted the number of + * oversize frames. + */ + if (stats->roc > 0) + stats->roc--; + } + + /* Adjust the bin counters when the extra byte put the frame in the + * wrong bin. Remember that the frame_len was adjusted above. 
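e1000_tbi_adjust_stats() above keeps the Good Octets Received count as two 32-bit halves (gorcl/gorch) and detects a wrap of the low half by watching bit 31 across the addition. A standalone sketch of that carry check (the starting counter value is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* If bit 31 of the low word was set before the add and clear afterwards,
 * the add wrapped and the high word must be incremented. */
static void add_octets(uint32_t *lo, uint32_t *hi, uint32_t frame_len)
{
	uint32_t carry_bit = *lo & 0x80000000u;

	*lo += frame_len;
	if (carry_bit && !(*lo & 0x80000000u))
		(*hi)++;
}

int main(void)
{
	uint32_t lo = 0xFFFFFF00u, hi = 0;

	add_octets(&lo, &hi, 512);	/* wraps the low word */
	printf("lo=0x%08X hi=%u\n", (unsigned int)lo, (unsigned int)hi);
	return 0;
}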
+ */ + if (frame_len == 64) { + stats->prc64++; + stats->prc127--; + } else if (frame_len == 127) { + stats->prc127++; + stats->prc255--; + } else if (frame_len == 255) { + stats->prc255++; + stats->prc511--; + } else if (frame_len == 511) { + stats->prc511++; + stats->prc1023--; + } else if (frame_len == 1023) { + stats->prc1023++; + stats->prc1522--; + } else if (frame_len == 1522) { + stats->prc1522++; + } +} + +static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, + u8 status, u8 errors, + u32 length, const u8 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u8 last_byte = *(data + length - 1); + + if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { + unsigned long irq_flags; + + spin_lock_irqsave(&adapter->stats_lock, irq_flags); + e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); + + return true; + } + + return false; +} + +static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, + unsigned int bufsz) +{ + struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); + + if (unlikely(!skb)) + adapter->alloc_rx_buff_failed++; + return skb; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + * @work_to_do: max amount of work allowed for this call to do + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + */ +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + + if (++i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + u8 *mapped = page_address(buffer_info->rxbuf.page); + + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, mapped)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + /* an error means any chain goes out the window + * too + */ + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + } + +#define rxtop rx_ring->rx_skb_top +process_skb: + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = napi_get_frags(&adapter->napi); + if (!rxtop) + 
break; + + skb_fill_page_desc(rxtop, 0, + buffer_info->rxbuf.page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + struct page *p; + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page + */ + p = buffer_info->rxbuf.page; + if (length <= copybreak) { + u8 *vaddr; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + length -= 4; + skb = e1000_alloc_rx_skb(adapter, + length); + if (!skb) + break; + + vaddr = kmap_atomic(p); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr); + /* re-use the page, so don't erase + * buffer_info->rxbuf.page + */ + skb_put(skb, length); + e1000_rx_checksum(adapter, + status | rx_desc->errors << 24, + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_receive_skb(adapter, status, + rx_desc->special, skb); + goto next_desc; + } else { + skb = napi_get_frags(&adapter->napi); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + skb_fill_page_desc(skb, 0, p, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += (skb->len - 4); /* don't count FCS */ + if (likely(!(netdev->features & NETIF_F_RXFCS))) + pskb_trim(skb, skb->len - 4); + total_rx_packets++; + + if (status & E1000_RXD_STAT_VP) { + __le16 vlan = rx_desc->special; + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + napi_gro_frags(&adapter->napi); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/* this should improve performance for small packets with large amounts + * of reassembly being done in the stack + */ +static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, + struct e1000_rx_buffer *buffer_info, + u32 length, const void *data) +{ + struct sk_buff *skb; + + if (length > copybreak) + return NULL; + + skb = e1000_alloc_rx_skb(adapter, length); + if (!skb) + return NULL; + + dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, + length, DMA_FROM_DEVICE); + + skb_put_data(skb, data, length); + + return skb; +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + 
* @work_to_do: max amount of work allowed for this call to do + */ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 *data; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + length = le16_to_cpu(rx_desc->length); + + data = buffer_info->rxbuf.data; + if (!adapter->ecdev) { + prefetch(data); + skb = e1000_copybreak(adapter, buffer_info, length, data); + if (!skb) { + unsigned int frag_len = e1000_frag_len(adapter); + + skb = build_skb(data - E1000_HEADROOM, frag_len); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + skb_reserve(skb, E1000_HEADROOM); + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + buffer_info->rxbuf.data = NULL; + } + } + + if (++i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + + /* !EOP means multiple descriptors were used to store a single + * packet, if thats the case we need to toss it. In fact, we + * to toss every packet with the EOP bit clear and the next + * frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(status & E1000_RXD_STAT_EOP))) + adapter->discarding = true; + + if (adapter->discarding) { + /* All receives must fit into a single buffer */ + netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); + if (!adapter->ecdev) { + dev_kfree_skb(skb); + } + if (status & E1000_RXD_STAT_EOP) + adapter->discarding = false; + goto next_desc; + } + + if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, data)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + if (!adapter->ecdev) { + dev_kfree_skb(skb); + } + goto next_desc; + } + } + +process_skb: + total_rx_bytes += (length - 4); /* don't count FCS */ + total_rx_packets++; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + /* adjust length to remove Ethernet CRC, this must be + * done after the TBI_ACCEPT workaround above + */ + length -= 4; + + if (!adapter->ecdev) { + if (buffer_info->rxbuf.data == NULL) + skb_put(skb, length); + else /* copybreak skb */ + skb_trim(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + } + + if (adapter->ecdev) { + dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + ecdev_receive(adapter->ecdev, data, length); + + // No need to detect link status as + // long as frames are received: Reset watchdog. 
+ adapter->ec_watchdog_jiffies = jiffies; + + dma_sync_single_for_device(&adapter->pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + } else { + e1000_receive_skb(adapter, status, rx_desc->special, skb); + } + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @adapter: address of board private structure + * @rx_ring: pointer to receive ring structure + * @cleaned_count: number of buffers to allocate this pass + **/ +static void +e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, int cleaned_count) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + /* allocate a new page if necessary */ + if (!buffer_info->rxbuf.page) { + buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->rxbuf.page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) { + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->rxbuf.page, 0, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + dma_wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + * @rx_ring: pointer to ring struct + * @cleaned_count: number of new Rx buffers to try to allocate + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + void *data; + + if (buffer_info->rxbuf.data) + goto skip; + + data = e1000_alloc_frag(adapter); + if (!data) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + void *olddata = data; + e_err(rx_err, "skb align check failed: %u bytes at " + "%p\n", bufsz, data); + /* Try again, without freeing the previous */ + data = e1000_alloc_frag(adapter); + /* Failed allocation, critical failure */ + if (!data) { + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + /* give up */ + skb_free_frag(data); + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + /* Use new allocation */ + skb_free_frag(olddata); + } + buffer_info->dma = dma_map_single(&pdev->dev, + data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + skb_free_frag(data); + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + + /* XXX if it was allocated cleanly it will never map to a + * boundary crossing + */ + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, + (void *)(unsigned long)buffer_info->dma, + adapter->rx_buffer_len)) { + e_err(rx_err, "dma align check failed: %u bytes at " + "%p\n", adapter->rx_buffer_len, + (void *)(unsigned long)buffer_info->dma); + + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + + skb_free_frag(data); + buffer_info->rxbuf.data = NULL; + buffer_info->dma = 0; + + adapter->alloc_rx_buff_failed++; + break; + } + buffer_info->rxbuf.data = data; + skip: + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + writel(i, hw->hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 
+ * @adapter: address of board private structure + **/ +static void e1000_smartspeed(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_status; + u16 phy_ctrl; + + if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || + !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) + return; + + if (adapter->smartspeed == 0) { + /* If Master/Slave config fault is asserted twice, + * we assume back-to-back + */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + if (phy_ctrl & CR_1000T_MS_ENABLE) { + phy_ctrl &= ~CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, + phy_ctrl); + adapter->smartspeed++; + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, + &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, + phy_ctrl); + } + } + return; + } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { + /* If still no link, perhaps using 2/3 pair cable */ + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + phy_ctrl |= CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); + } + } + /* Restart process after E1000_SMARTSPEED_MAX iterations */ + if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) + adapter->smartspeed = 0; +} + +/** + * e1000_ioctl - handle ioctl calls + * @netdev: pointer to our netdev + * @ifr: pointer to interface request structure + * @cmd: ioctl data + **/ +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * e1000_mii_ioctl - + * @netdev: pointer to our netdev + * @ifr: pointer to interface request structure + * @cmd: ioctl data + **/ +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct mii_ioctl_data *data = if_mii(ifr); + int retval; + u16 mii_reg; + unsigned long flags; + + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + break; + case SIOCGMIIREG: + if (adapter->ecdev) { + return -EPERM; + } + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, + &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + break; + case SIOCSMIIREG: + if (adapter->ecdev) { + return -EPERM; + } + if (data->reg_num & ~(0x1F)) + return -EFAULT; + mii_reg = data->val_in; + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_write_phy_reg(hw, data->reg_num, + mii_reg)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + if (hw->media_type == e1000_media_type_copper) { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (mii_reg & MII_CR_AUTO_NEG_EN) { + 
hw->autoneg = 1; + hw->autoneg_advertised = 0x2F; + } else { + u32 speed; + if (mii_reg & 0x40) + speed = SPEED_1000; + else if (mii_reg & 0x2000) + speed = SPEED_100; + else + speed = SPEED_10; + retval = e1000_set_spd_dplx( + adapter, speed, + ((mii_reg & 0x100) + ? DUPLEX_FULL : + DUPLEX_HALF)); + if (retval) + return retval; + } + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + case M88E1000_PHY_SPEC_CTRL: + case M88E1000_EXT_PHY_SPEC_CTRL: + if (e1000_phy_reset(hw)) + return -EIO; + break; + } + } else { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + } + } + break; + default: + return -EOPNOTSUPP; + } + return E1000_SUCCESS; +} + +void e1000_pci_set_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + int ret_val = pci_set_mwi(adapter->pdev); + + if (ret_val) + e_err(probe, "Error in setting MWI\n"); +} + +void e1000_pci_clear_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + + pci_clear_mwi(adapter->pdev); +} + +int e1000_pcix_get_mmrbc(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return pcix_get_mmrbc(adapter->pdev); +} + +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) +{ + struct e1000_adapter *adapter = hw->back; + pcix_set_mmrbc(adapter->pdev, mmrbc); +} + +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) +{ + outl(value, port); +} + +static bool e1000_vlan_used(struct e1000_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + return true; + return false; +} + +static void __e1000_vlan_mode(struct e1000_adapter *adapter, + netdev_features_t features) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = er32(CTRL); + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + ctrl |= E1000_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~E1000_CTRL_VME; + } + ew32(CTRL, ctrl); +} +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, adapter->netdev->features); + if (filter_on) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_CFIEN; + if (!(adapter->netdev->flags & IFF_PROMISC)) + rctl |= E1000_RCTL_VFE; + ew32(RCTL, rctl); + e1000_update_mng_vlan(adapter); + } else { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_VFE; + ew32(RCTL, rctl); + } + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, features); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + (vid == adapter->mng_vlan_id)) + return 0; + + if 
(!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, true); + + /* add VID to filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta |= (1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + + /* remove VID from filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + clear_bit(vid, adapter->active_vlans); + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, false); + + return 0; +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + if (!e1000_vlan_used(adapter)) + return; + + e1000_vlan_filter_on_off(adapter, true); + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_hw *hw = &adapter->hw; + + hw->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 gbps Full duplex */ + if ((hw->media_type == e1000_media_type_fiber) && + spd != SPEED_1000 && + dplx != DUPLEX_FULL) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_10_half; + break; + case SPEED_10 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_10_full; + break; + case SPEED_100 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_100_half; + break; + case SPEED_100 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_100_full; + break; + case SPEED_1000 + DUPLEX_FULL: + hw->autoneg = 1; + hw->autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + hw->mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + e_err(probe, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + u32 wufc = adapter->wol; + + if (adapter->ecdev) { + return -EBUSY; + } + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + } + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_rx_mode(netdev); + + rctl = er32(RCTL); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) + rctl |= E1000_RCTL_MPE; + + /* enable receives in the hardware */ + ew32(RCTL, rctl | E1000_RCTL_EN); + + if (hw->mac_type >= 
e1000_82540) { + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC | + E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + } + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + ew32(WUC, E1000_WUC_PME_EN); + ew32(WUFC, wufc); + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + } + + e1000_release_manageability(adapter); + + *enable_wake = !!wufc; + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->en_mng_pt) + *enable_wake = true; + + if (netif_running(netdev)) + e1000_free_irq(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + return 0; +} + +static int __maybe_unused e1000_suspend(struct device *dev) +{ + int retval; + struct pci_dev *pdev = to_pci_dev(dev); + bool wake; + + retval = __e1000_shutdown(pdev, &wake); + device_set_wakeup_enable(dev, wake); + + return retval; +} + +static int __maybe_unused e1000_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + if (adapter->ecdev) { + return -EBUSY; + } + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot enable PCI device from suspend\n"); + return err; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + e1000_power_up_phy(adapter); + e1000_reset(adapter); + ew32(WUS, ~0); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) + e1000_up(adapter); + + if (!adapter->ecdev) { + netif_device_attach(netdev); + } + + return 0; +} + +static void e1000_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __e1000_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (disable_hardirq(adapter->pdev->irq)) + e1000_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
+ */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e1000_down(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000_reset(adapter); + ew32(WUS, ~0); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) { + if (e1000_up(adapter)) { + pr_info("can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +/* e1000_main.c */ diff --git a/devices/e1000/e1000_main-6.1-orig.c b/devices/e1000/e1000_main-6.1-orig.c new file mode 100644 index 00000000..61e60e4d --- /dev/null +++ b/devices/e1000/e1000_main-6.1-orig.c @@ -0,0 +1,5316 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +#include "e1000.h" +#include <net/ip6_checksum.h> +#include <linux/io.h> +#include <linux/prefetch.h> +#include <linux/bitops.h> +#include <linux/if_vlan.h> + +char e1000_driver_name[] = "e1000"; +static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; +static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; + +/* e1000_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * Macro expands to...
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + */ +static const struct pci_device_id e1000_pci_tbl[] = { + INTEL_E1000_ETHERNET_DEVICE(0x1000), + INTEL_E1000_ETHERNET_DEVICE(0x1001), + INTEL_E1000_ETHERNET_DEVICE(0x1004), + INTEL_E1000_ETHERNET_DEVICE(0x1008), + INTEL_E1000_ETHERNET_DEVICE(0x1009), + INTEL_E1000_ETHERNET_DEVICE(0x100C), + INTEL_E1000_ETHERNET_DEVICE(0x100D), + INTEL_E1000_ETHERNET_DEVICE(0x100E), + INTEL_E1000_ETHERNET_DEVICE(0x100F), + INTEL_E1000_ETHERNET_DEVICE(0x1010), + INTEL_E1000_ETHERNET_DEVICE(0x1011), + INTEL_E1000_ETHERNET_DEVICE(0x1012), + INTEL_E1000_ETHERNET_DEVICE(0x1013), + INTEL_E1000_ETHERNET_DEVICE(0x1014), + INTEL_E1000_ETHERNET_DEVICE(0x1015), + INTEL_E1000_ETHERNET_DEVICE(0x1016), + INTEL_E1000_ETHERNET_DEVICE(0x1017), + INTEL_E1000_ETHERNET_DEVICE(0x1018), + INTEL_E1000_ETHERNET_DEVICE(0x1019), + INTEL_E1000_ETHERNET_DEVICE(0x101A), + INTEL_E1000_ETHERNET_DEVICE(0x101D), + INTEL_E1000_ETHERNET_DEVICE(0x101E), + INTEL_E1000_ETHERNET_DEVICE(0x1026), + INTEL_E1000_ETHERNET_DEVICE(0x1027), + INTEL_E1000_ETHERNET_DEVICE(0x1028), + INTEL_E1000_ETHERNET_DEVICE(0x1075), + INTEL_E1000_ETHERNET_DEVICE(0x1076), + INTEL_E1000_ETHERNET_DEVICE(0x1077), + INTEL_E1000_ETHERNET_DEVICE(0x1078), + INTEL_E1000_ETHERNET_DEVICE(0x1079), + INTEL_E1000_ETHERNET_DEVICE(0x107A), + INTEL_E1000_ETHERNET_DEVICE(0x107B), + INTEL_E1000_ETHERNET_DEVICE(0x107C), + INTEL_E1000_ETHERNET_DEVICE(0x108A), + INTEL_E1000_ETHERNET_DEVICE(0x1099), + INTEL_E1000_ETHERNET_DEVICE(0x10B5), + INTEL_E1000_ETHERNET_DEVICE(0x2E6E), + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr); +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr); +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +void e1000_update_stats(struct e1000_adapter *adapter); + +static int e1000_init_module(void); +static void e1000_exit_module(void); +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void e1000_remove(struct pci_dev *pdev); +static int e1000_alloc_queues(struct e1000_adapter *adapter); +static int e1000_sw_init(struct e1000_adapter *adapter); +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); +static void e1000_configure_tx(struct e1000_adapter *adapter); +static void e1000_configure_rx(struct e1000_adapter *adapter); +static void e1000_setup_rctl(struct e1000_adapter *adapter); +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static void e1000_set_rx_mode(struct net_device 
*netdev); +static void e1000_update_phy_info_task(struct work_struct *work); +static void e1000_watchdog(struct work_struct *work); +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +static int e1000_change_mtu(struct net_device *netdev, int new_mtu); +static int e1000_set_mac(struct net_device *netdev, void *p); +static irqreturn_t e1000_intr(int irq, void *data); +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static int e1000_clean(struct napi_struct *napi, int budget); +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ +} +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); +static void e1000_enter_82542_rst(struct e1000_adapter *adapter); +static void e1000_leave_82542_rst(struct e1000_adapter *adapter); +static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue); +static void e1000_reset_task(struct work_struct *work); +static void e1000_smartspeed(struct e1000_adapter *adapter); +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb); + +static bool e1000_vlan_used(struct e1000_adapter *adapter); +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on); +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static void e1000_restore_vlan(struct e1000_adapter *adapter); + +static int __maybe_unused e1000_suspend(struct device *dev); +static int __maybe_unused e1000_resume(struct device *dev); +static void e1000_shutdown(struct pci_dev *pdev); + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void e1000_netpoll (struct net_device *netdev); +#endif + +#define COPYBREAK_DEFAULT 256 +static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state); +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); +static void e1000_io_resume(struct pci_dev *pdev); + +static const struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume); + +static struct pci_driver e1000_driver = { + .name = e1000_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = e1000_remove, + .driver = { + .pm = 
&e1000_pm_ops, + }, + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); +MODULE_LICENSE("GPL v2"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +/** + * e1000_get_hw_dev - helper function for getting netdev + * @hw: pointer to HW struct + * + * return device used by hardware layer to print debugging information + * + **/ +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init e1000_init_module(void) +{ + int ret; + pr_info("%s\n", e1000_driver_string); + + pr_info("%s\n", e1000_copyright); + + ret = pci_register_driver(&e1000_driver); + if (copybreak != COPYBREAK_DEFAULT) { + if (copybreak == 0) + pr_info("copybreak disabled\n"); + else + pr_info("copybreak enabled for " + "packets <= %u bytes\n", copybreak); + } + return ret; +} + +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} + +module_exit(e1000_exit_module); + +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + irq_handler_t handler = e1000_intr; + int irq_flags = IRQF_SHARED; + int err; + + err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (err) { + e_err(probe, "Unable to allocate interrupt Error: %d\n", err); + } + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + synchronize_irq(adapter->pdev->irq); +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMS, IMS_ENABLE_MASK); + E1000_WRITE_FLUSH(); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u16 vid = hw->mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (!e1000_vlan_used(adapter)) + return; + + if (!test_bit(vid, adapter->active_vlans)) { + if (hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + old_vid); + } else { + adapter->mng_vlan_id = vid; + } +} + +static void e1000_init_manageability(struct
e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + ew32(MANC, manc); + } +} + +static void e1000_release_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + + ew32(MANC, manc); + } +} + +/** + * e1000_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + e1000_set_rx_mode(netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + /* call E1000_DESC_UNUSED which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct e1000_rx_ring *ring = &adapter->rx_ring[i]; + adapter->alloc_rx_buf(adapter, ring, + E1000_DESC_UNUSED(ring)); + } +} + +int e1000_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->flags); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_wake_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + return 0; +} + +/** + * e1000_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000_reset *** + **/ +void e1000_power_up_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 mii_reg = 0; + + /* Just clear the power down bit to wake the phy back up */ + if (hw->media_type == e1000_media_type_copper) { + /* according to the manual, the phy will retain its + * settings across a power-down/up cycle + */ + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + } +} + +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is true * + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active + */ + if (!adapter->wol && hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + u16 mii_reg = 0; + + switch (hw->mac_type) { + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (er32(MANC) & E1000_MANC_SMBUS_EN) + goto out; + break; + default: + goto out; + } + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + msleep(1); + } +out: + return; +} + +static void e1000_down_and_stop(struct e1000_adapter *adapter) +{ + set_bit(__E1000_DOWN, &adapter->flags); + + 
cancel_delayed_work_sync(&adapter->watchdog_task); + + /* + * Since the watchdog task can reschedule other tasks, we should cancel + * it first, otherwise we can run into the situation when a work is + * still running after the adapter has been turned down. + */ + + cancel_delayed_work_sync(&adapter->phy_info_task); + cancel_delayed_work_sync(&adapter->fifo_stall_task); + + /* Only kill reset task if adapter is not resetting */ + if (!test_bit(__E1000_RESETTING, &adapter->flags)) + cancel_work_sync(&adapter->reset_task); +} + +void e1000_down(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl, tctl; + + /* disable receives in the hardware */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_tx_disable(netdev); + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + /* flush both disables and wait for them to finish */ + E1000_WRITE_FLUSH(); + msleep(10); + + /* Set the carrier off after transmits have been disabled in the + * hardware, to avoid race conditions with e1000_watchdog() (which + * may be running concurrently to us, checking for the carrier + * bit to decide whether it should enable transmits again). Such + * a race condition would result into transmission being disabled + * in the hardware until the next IFF_DOWN+IFF_UP cycle. + */ + netif_carrier_off(netdev); + + napi_disable(&adapter->napi); + + e1000_irq_disable(adapter); + + /* Setting DOWN must be after irq_disable to prevent + * a screaming interrupt. Setting DOWN also prevents + * tasks from rescheduling. + */ + e1000_down_and_stop(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + e1000_reset(adapter); + e1000_clean_all_tx_rings(adapter); + e1000_clean_all_rx_rings(adapter); +} + +void e1000_reinit_locked(struct e1000_adapter *adapter) +{ + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + /* only run the task if not already down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) { + e1000_down(adapter); + e1000_up(adapter); + } + + clear_bit(__E1000_RESETTING, &adapter->flags); +} + +void e1000_reset(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pba = 0, tx_space, min_tx_space, min_rx_space; + bool legacy_pba_adjust = false; + u16 hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. 
+ */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + case e1000_82540: + case e1000_82541: + case e1000_82541_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_48K; + break; + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + pba = E1000_PBA_48K; + break; + case e1000_82547: + case e1000_82547_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_30K; + break; + case e1000_undefined: + case e1000_num_macs: + break; + } + + if (legacy_pba_adjust) { + if (hw->max_frame_size > E1000_RXBUFFER_8192) + pba -= 8; /* allocate more FIFO for Tx */ + + if (hw->mac_type == e1000_82547) { + adapter->tx_fifo_head = 0; + adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; + adapter->tx_fifo_size = + (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; + atomic_set(&adapter->tx_fifo_stall, 0); + } + } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* adjust PBA for jumbo frames */ + ew32(PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (hw->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = hw->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + + /* PCI/PCIx hardware has PBA alignment constraints */ + switch (hw->mac_type) { + case e1000_82545 ... e1000_82546_rev_3: + pba &= ~(E1000_PBA_8K - 1); + break; + default: + break; + } + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment or use Early Receive if available + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + } + + ew32(PBA, pba); + + /* flow control settings: + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. 
+ * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame + */ + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - hw->max_frame_size)); + + hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ + hw->fc_low_water = hw->fc_high_water - 8; + hw->fc_pause_time = E1000_FC_PAUSE_TIME; + hw->fc_send_xon = 1; + hw->fc = hw->original_fc; + + /* Allow time for pending master requests to run */ + e1000_reset_hw(hw); + if (hw->mac_type >= e1000_82544) + ew32(WUC, 0); + + if (e1000_init_hw(hw)) + e_dev_err("Hardware Error\n"); + e1000_update_mng_vlan(adapter); + + /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ + if (hw->mac_type >= e1000_82544 && + hw->autoneg == 1 && + hw->autoneg_advertised == ADVERTISE_1000_FULL) { + u32 ctrl = er32(CTRL); + /* clear phy power management bit if we are in gig only mode, + * which if enabled will attempt negotiation to 100Mb, which + * can cause a loss of link at power off or driver unload + */ + ctrl &= ~E1000_CTRL_SWDPIN3; + ew32(CTRL, ctrl); + } + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETHERNET_IEEE_VLAN_TYPE); + + e1000_reset_adaptive(hw); + e1000_phy_get_info(hw, &adapter->phy_info); + + e1000_release_manageability(adapter); +} + +/* Dump the eeprom for users having checksum issues */ +static void e1000_dump_eeprom(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_eeprom eeprom; + const struct ethtool_ops *ops = netdev->ethtool_ops; + u8 *data; + int i; + u16 csum_old, csum_new = 0; + + eeprom.len = ops->get_eeprom_len(netdev); + eeprom.offset = 0; + + data = kmalloc(eeprom.len, GFP_KERNEL); + if (!data) + return; + + ops->get_eeprom(netdev, &eeprom, data); + + csum_old = (data[EEPROM_CHECKSUM_REG * 2]) + + (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8); + for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2) + csum_new += data[i] + (data[i + 1] << 8); + csum_new = EEPROM_SUM - csum_new; + + pr_err("/*********************/\n"); + pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old); + pr_err("Calculated : 0x%04x\n", csum_new); + + pr_err("Offset Values\n"); + pr_err("======== ======\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); + + pr_err("Include this output when contacting your support provider.\n"); + pr_err("This is not a software error! Something bad happened to\n"); + pr_err("your hardware or EEPROM image. Ignoring this problem could\n"); + pr_err("result in further problems, possibly loss of data,\n"); + pr_err("corruption or system hangs!\n"); + pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n"); + pr_err("which is invalid and requires you to set the proper MAC\n"); + pr_err("address manually before continuing to enable this network\n"); + pr_err("device. 
Please inspect the EEPROM dump and report the\n"); + pr_err("issue to your hardware vendor or Intel Customer Support.\n"); + pr_err("/*********************/\n"); + + kfree(data); +} + +/** + * e1000_is_need_ioport - determine if an adapter needs ioport resources or not + * @pdev: PCI device information struct + * + * Return true if an adapter needs ioport resources + **/ +static int e1000_is_need_ioport(struct pci_dev *pdev) +{ + switch (pdev->device) { + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541ER_LOM: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + return true; + default: + return false; + } +} + +static netdev_features_t e1000_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int e1000_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + e1000_vlan_mode(netdev, features); + + if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + adapter->rx_csum = !!(features & NETIF_F_RXCSUM); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + + return 1; +} + +static const struct net_device_ops e1000_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_set_rx_mode = e1000_set_rx_mode, + .ndo_set_mac_address = e1000_set_mac, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_change_mtu = e1000_change_mtu, + .ndo_eth_ioctl = e1000_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif + .ndo_fix_features = e1000_fix_features, + .ndo_set_features = e1000_set_features, +}; + +/** + * e1000_init_hw_struct - initialize members of hw struct + * @adapter: board private struct + * @hw: structure used by e1000_hw.c + * + * Factors out initialization of the e1000_hw struct to its own function + * that can be called very early at init (just after struct allocation). + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + * Returns negative error codes if MAC type setup fails. 
+ */ +static int e1000_init_hw_struct(struct e1000_adapter *adapter, + struct e1000_hw *hw) +{ + struct pci_dev *pdev = adapter->pdev; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + hw->max_frame_size = adapter->netdev->mtu + + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; + + /* identify the MAC */ + if (e1000_set_mac_type(hw)) { + e_err(probe, "Unknown MAC Type\n"); + return -EIO; + } + + switch (hw->mac_type) { + default: + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->phy_init_script = 1; + break; + } + + e1000_set_media_type(hw); + e1000_get_bus_info(hw); + + hw->wait_autoneg_complete = false; + hw->tbi_compatibility_en = true; + hw->adaptive_ifs = true; + + /* Copper options */ + + if (hw->media_type == e1000_media_type_copper) { + hw->mdix = AUTO_ALL_MODES; + hw->disable_polarity_correction = false; + hw->master_slave = E1000_MASTER_SLAVE; + } + + return 0; +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter = NULL; + struct e1000_hw *hw; + + static int cards_found; + static int global_quad_port_a; /* global ksp3 port a indication */ + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 tmp = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + int bars, need_ioport; + bool disable_dev = false; + + /* do not allocate ioport bars when not needed */ + need_ioport = e1000_is_need_ioport(pdev); + if (need_ioport) { + bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); + err = pci_enable_device(pdev); + } else { + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_enable_device_mem(pdev); + } + if (err) + return err; + + err = pci_request_selected_regions(pdev, bars, e1000_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + adapter->bars = bars; + adapter->need_ioport = need_ioport; + + hw = &adapter->hw; + hw->back = adapter; + + err = -EIO; + hw->hw_addr = pci_ioremap_bar(pdev, BAR_0); + if (!hw->hw_addr) + goto err_ioremap; + + if (adapter->need_ioport) { + for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + hw->io_base = pci_resource_start(pdev, i); + break; + } + } + } + + /* make ready for any if (hw->...) 
below */ + err = e1000_init_hw_struct(adapter, hw); + if (err) + goto err_sw_init; + + /* there is a workaround being applied below that limits + * 64-bit DMA addresses to 64-bit hardware. There are some + * 32-bit adapters that Tx hang when given 64-bit DMA addresses + */ + pci_using_dac = 0; + if ((hw->bus_type == e1000_bus_type_pcix) && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + pr_err("No usable DMA config, aborting\n"); + goto err_dma; + } + } + + netdev->netdev_ops = &e1000_netdev_ops; + e1000_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000_clean); + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + + /* setup the private structure */ + + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + err = -EIO; + if (hw->mac_type == e1000_ce4100) { + hw->ce4100_gbe_mdio_base_virt = + ioremap(pci_resource_start(pdev, BAR_1), + pci_resource_len(pdev, BAR_1)); + + if (!hw->ce4100_gbe_mdio_base_virt) + goto err_mdio_ioremap; + } + + if (hw->mac_type >= e1000_82543) { + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER; + } + + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_82547)) + netdev->hw_features |= NETIF_F_TSO; + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->features |= netdev->hw_features; + netdev->hw_features |= (NETIF_F_RXCSUM | + NETIF_F_RXALL | + NETIF_F_RXFCS); + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + netdev->vlan_features |= (NETIF_F_TSO | + NETIF_F_HW_CSUM | + NETIF_F_SG); + + /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */ + if (hw->device_id != E1000_DEV_ID_82545EM_COPPER || + hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE) + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* MTU range: 46 - 16110 */ + netdev->min_mtu = ETH_ZLEN - ETH_HLEN; + netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); + + adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); + + /* initialize eeprom parameters */ + if (e1000_init_eeprom_params(hw)) { + e_err(probe, "EEPROM initialization failed\n"); + goto err_eeprom; + } + + /* before reading the EEPROM, reset the controller to + * put the device in a known good starting state + */ + + e1000_reset_hw(hw); + + /* make sure the EEPROM is good */ + if (e1000_validate_eeprom_checksum(hw) < 0) { + e_err(probe, "The EEPROM Checksum Is Not Valid\n"); + e1000_dump_eeprom(adapter); + /* set MAC address to all zeroes to invalidate and temporary + * disable this device for the user. 
This blocks regular + * traffic while still permitting ethtool ioctls from reaching + * the hardware as well as allowing the user to run the + * interface after manually setting a hw addr using + * `ip set address` + */ + memset(hw->mac_addr, 0, netdev->addr_len); + } else { + /* copy the MAC address out of the EEPROM */ + if (e1000_read_mac_addr(hw)) + e_err(probe, "EEPROM Read Error\n"); + } + /* don't block initialization here due to bad MAC address */ + eth_hw_addr_set(netdev, hw->mac_addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) + e_err(probe, "Invalid MAC Address\n"); + + + INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog); + INIT_DELAYED_WORK(&adapter->fifo_stall_task, + e1000_82547_tx_fifo_stall_task); + INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); + INIT_WORK(&adapter->reset_task, e1000_reset_task); + + e1000_check_options(adapter); + + /* Initial Wake on LAN setting + * If APM wake is enabled in the EEPROM, + * enable the ACPI Magic Packet filter + */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + break; + case e1000_82544: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_82544_APM; + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + break; + } + fallthrough; + default: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + break; + } + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82546GB_PCIE: + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->eeprom_wol = 0; + else + adapter->quad_port_a = true; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + } + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* Auto detect PHY address */ + if (hw->mac_type == e1000_ce4100) { + for (i = 0; i < 32; i++) { + hw->phy_addr = i; + e1000_read_phy_reg(hw, PHY_ID2, &tmp); + + if (tmp != 0 && tmp != 0xFF) + break; + } + + if (i >= 32) + goto err_eeprom; + } + + /* reset the hardware with the new settings */ + e1000_reset(adapter); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + e1000_vlan_filter_on_off(adapter, false); + + /* print bus type/speed/width info */ + e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", + ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), + ((hw->bus_speed == e1000_bus_speed_133) ? 133 : + (hw->bus_speed == e1000_bus_speed_120) ? 120 : + (hw->bus_speed == e1000_bus_speed_100) ? 100 : + (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33), + ((hw->bus_width == e1000_bus_width_64) ? 
64 : 32), + netdev->dev_addr); + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); + + cards_found++; + return 0; + +err_register: +err_eeprom: + e1000_phy_hw_reset(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_dma: +err_sw_init: +err_mdio_ioremap: + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); +err_ioremap: + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, bars); +err_pci_reg: + if (!adapter || disable_dev) + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. That could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool disable_dev; + + e1000_down_and_stop(adapter); + e1000_release_manageability(adapter); + + unregister_netdev(netdev); + + e1000_phy_hw_reset(hw); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + if (hw->mac_type == e1000_ce4100) + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_selected_regions(pdev, adapter->bars); + + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); + + if (disable_dev) + pci_disable_device(pdev); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * e1000_init_hw_struct MUST be called before this function + **/ +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + + if (e1000_alloc_queues(adapter)) { + e_err(probe, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + e1000_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__E1000_DOWN, &adapter->flags); + + return 0; +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. + **/ +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct e1000_tx_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct e1000_rx_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). 
At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog task is started, + * and the stack is notified that the interface is ready. + **/ +int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->flags)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + e1000_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { + e1000_update_mng_vlan(adapter); + } + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as e1000_up() */ + clear_bit(__E1000_DOWN, &adapter->flags); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_start_queue(netdev); + + /* fire a link status change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + + return E1000_SUCCESS; + +err_req_irq: + e1000_power_down_phy(adapter); + e1000_free_all_rx_resources(adapter); +err_setup_rx: + e1000_free_all_tx_resources(adapter); +err_setup_tx: + e1000_reset(adapter); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ **/ +int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int count = E1000_CHECK_RESET_COUNT; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(count < 0); + + /* signal that we're down so that the reset task will no longer run */ + set_bit(__E1000_DOWN, &adapter->flags); + clear_bit(__E1000_RESETTING, &adapter->flags); + + e1000_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); + + e1000_free_all_tx_resources(adapter); + e1000_free_all_rx_resources(adapter); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + } + + return 0; +} + +/** + * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary + * @adapter: address of board private structure + * @start: address of beginning of memory + * @len: length of memory + **/ +static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, + unsigned long len) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long begin = (unsigned long)start; + unsigned long end = begin + len; + + /* First rev 82545 and 82546 need to not allow any memory + * write location to cross 64k boundary due to errata 23 + */ + if (hw->mac_type == e1000_82545 || + hw->mac_type == e1000_ce4100 || + hw->mac_type == e1000_82546) { + return ((begin ^ (end - 1)) >> 16) == 0; + } + + return true; +} + +/** + * e1000_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @txdr: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct e1000_tx_buffer) * txdr->count; + txdr->buffer_info = vzalloc(size); + if (!txdr->buffer_info) + return -ENOMEM; + + /* round up to nearest 4K */ + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { +setup_tx_desc_die: + vfree(txdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + void *olddesc = txdr->desc; + dma_addr_t olddma = txdr->dma; + e_err(tx_err, "txdr align check failed: %u bytes at %p\n", + txdr->size, txdr->desc); + /* Try again, without freeing the previous */ + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, + &txdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + goto setup_tx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory " + "for the transmit descriptor ring\n"); + vfree(txdr->buffer_info); + return -ENOMEM; + } else { + /* Free old allocation, new allocation 
was successful */ + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + } + } + memset(txdr->desc, 0, txdr->size); + + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + + return 0; +} + +/** + * e1000_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_tx_resources(adapter, + &adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + u64 tdba; + struct e1000_hw *hw = &adapter->hw; + u32 tdlen, tctl, tipg; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + + switch (adapter->num_tx_queues) { + case 1: + default: + tdba = adapter->tx_ring[0].dma; + tdlen = adapter->tx_ring[0].count * + sizeof(struct e1000_tx_desc); + ew32(TDLEN, tdlen); + ew32(TDBAH, (tdba >> 32)); + ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); + ew32(TDT, 0); + ew32(TDH, 0); + adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? + E1000_TDH : E1000_82542_TDH); + adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? + E1000_TDT : E1000_82542_TDT); + break; + } + + /* Set the default values for the Tx Inter Packet Gap timer */ + if ((hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes)) + tipg = DEFAULT_82543_TIPG_IPGT_FIBER; + else + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + tipg = DEFAULT_82542_TIPG_IPGT; + ipgr1 = DEFAULT_82542_TIPG_IPGR1; + ipgr2 = DEFAULT_82542_TIPG_IPGR2; + break; + default: + ipgr1 = DEFAULT_82543_TIPG_IPGR1; + ipgr2 = DEFAULT_82543_TIPG_IPGR2; + break; + } + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + + ew32(TIDV, adapter->tx_int_delay); + if (hw->mac_type >= e1000_82540) + ew32(TADV, adapter->tx_abs_int_delay); + + /* Program the Transmit Control Register */ + + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + e1000_config_collision_dist(hw); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + if (hw->mac_type < e1000_82543) + adapter->txd_cmd |= E1000_TXD_CMD_RPS; + else + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + /* Cache if we're 82544 running in PCI-X because we'll + * need this to apply a workaround later in the send path. 
+ */ + if (hw->mac_type == e1000_82544 && + hw->bus_type == e1000_bus_type_pcix) + adapter->pcix_82544 = true; + + ew32(TCTL, tctl); + +} + +/** + * e1000_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rxdr: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct e1000_rx_buffer) * rxdr->count; + rxdr->buffer_info = vzalloc(size); + if (!rxdr->buffer_info) + return -ENOMEM; + + desc_len = sizeof(struct e1000_rx_desc); + + /* Round up to nearest 4K */ + + rxdr->size = rxdr->count * desc_len; + rxdr->size = ALIGN(rxdr->size, 4096); + + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { +setup_rx_desc_die: + vfree(rxdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + void *olddesc = rxdr->desc; + dma_addr_t olddma = rxdr->dma; + e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", + rxdr->size, rxdr->desc); + /* Try again, without freeing the previous */ + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, + &rxdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + goto setup_rx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory for " + "the Rx descriptor ring\n"); + goto setup_rx_desc_die; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + } + } + memset(rxdr->desc, 0, rxdr->size); + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + rxdr->rx_skb_top = NULL; + + return 0; +} + +/** + * e1000_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_rx_resources(adapter, + &adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = er32(RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + + rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + + if (hw->tbi_compatibility_on == 1) + rctl |= E1000_RCTL_SBP; + else + rctl &= ~E1000_RCTL_SBP; + + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case E1000_RXBUFFER_2048: + default: + 
rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + ew32(RCTL, rctl); +} + +/** + * e1000_configure_rx - Configure 8254x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + u64 rdba; + struct e1000_hw *hw = &adapter->hw; + u32 rdlen, rctl, rxcsum; + + if (adapter->netdev->mtu > ETH_DATA_LEN) { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + if (hw->mac_type >= e1000_82540) { + ew32(RADV, adapter->rx_abs_int_delay); + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + } + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + switch (adapter->num_rx_queues) { + case 1: + default: + rdba = adapter->rx_ring[0].dma; + ew32(RDLEN, rdlen); + ew32(RDBAH, (rdba >> 32)); + ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); + ew32(RDT, 0); + ew32(RDH, 0); + adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? + E1000_RDH : E1000_82542_RDH); + adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 
+ E1000_RDT : E1000_82542_RDT); + break; + } + + /* Enable 82543 Receive Checksum Offload for TCP and UDP */ + if (hw->mac_type >= e1000_82543) { + rxcsum = er32(RXCSUM); + if (adapter->rx_csum) + rxcsum |= E1000_RXCSUM_TUOFL; + else + /* don't need to clear IPPCSE as it defaults to 0 */ + rxcsum &= ~E1000_RXCSUM_TUOFL; + ew32(RXCSUM, rxcsum); + } + + /* Enable Receives */ + ew32(RCTL, rctl | E1000_RCTL_EN); +} + +/** + * e1000_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * e1000_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void e1000_free_all_tx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); +} + +static void +e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_tx_buffer *buffer_info, + int budget) +{ + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + napi_consume_skb(buffer_info->skb, budget); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; + /* buffer_info must be completely set up in the transmit path */ +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0); + } + + netdev_reset_queue(adapter->netdev); + size = sizeof(struct e1000_tx_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->last_tx_tso = false; + + writel(0, hw->hw_addr + tx_ring->tdh); + writel(0, hw->hw_addr + tx_ring->tdt); +} + +/** + * e1000_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * e1000_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + 
e1000_clean_rx_ring(adapter, rx_ring); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * e1000_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +void e1000_free_all_rx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) +static unsigned int e1000_frag_len(const struct e1000_adapter *a) +{ + return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static void *e1000_alloc_frag(const struct e1000_adapter *a) +{ + unsigned int len = e1000_frag_len(a); + u8 *data = netdev_alloc_frag(len); + + if (likely(data)) + data += E1000_HEADROOM; + return data; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_rx_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx netfrags */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (adapter->clean_rx == e1000_clean_rx_irq) { + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.data) { + skb_free_frag(buffer_info->rxbuf.data); + buffer_info->rxbuf.data = NULL; + } + } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { + if (buffer_info->dma) + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.page) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + } + } + + buffer_info->dma = 0; + } + + /* there also may be some cached data from a chained receive */ + napi_free_frags(&adapter->napi); + rx_ring->rx_skb_top = NULL; + + size = sizeof(struct e1000_rx_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, hw->hw_addr + rx_ring->rdh); + writel(0, hw->hw_addr + rx_ring->rdt); +} + +/** + * e1000_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +/* The 82542 2.0 (revision 2) needs to have the receive unit in reset + * and memory write and invalidate disabled for certain operations + */ +static void e1000_enter_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + e1000_pci_clear_mwi(hw); + + rctl = er32(RCTL); + rctl |= E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (netif_running(netdev)) + e1000_clean_all_rx_rings(adapter); +} + +static void e1000_leave_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = 
adapter->netdev; + u32 rctl; + + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + + if (netif_running(netdev)) { + /* No need to loop, because 82542 supports only 1 queue */ + struct e1000_rx_ring *ring = &adapter->rx_ring[0]; + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); + } +} + +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); + + e1000_rar_set(hw, hw->mac_addr, 0); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + return 0; +} + +/** + * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void e1000_set_rx_mode(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + bool use_uc = false; + u32 rctl; + u32 hash_value; + int i, rar_entries = E1000_RAR_ENTRIES; + int mta_reg_count = E1000_NUM_MTA_REGISTERS; + u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); + + if (!mcarray) + return; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = er32(RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + rctl &= ~E1000_RCTL_VFE; + } else { + if (netdev->flags & IFF_ALLMULTI) + rctl |= E1000_RCTL_MPE; + else + rctl &= ~E1000_RCTL_MPE; + /* Enable VLAN filter if there is a VLAN */ + if (e1000_vlan_used(adapter)) + rctl |= E1000_RCTL_VFE; + } + + if (netdev_uc_count(netdev) > rar_entries - 1) { + rctl |= E1000_RCTL_UPE; + } else if (!(netdev->flags & IFF_PROMISC)) { + rctl &= ~E1000_RCTL_UPE; + use_uc = true; + } + + ew32(RCTL, rctl); + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + /* load the first 14 addresses into the exact filters 1-14. Unicast + * addresses take precedence to avoid disabling unicast filtering + * when possible. 
+ * + * RAR 0 is used for the station MAC address + * if there are not 14 addresses, go ahead and clear the filters + */ + i = 1; + if (use_uc) + netdev_for_each_uc_addr(ha, netdev) { + if (i == rar_entries) + break; + e1000_rar_set(hw, ha->addr, i++); + } + + netdev_for_each_mc_addr(ha, netdev) { + if (i == rar_entries) { + /* load any remaining addresses into the hash table */ + u32 hash_reg, hash_bit, mta; + hash_value = e1000_hash_mc_addr(hw, ha->addr); + hash_reg = (hash_value >> 5) & 0x7F; + hash_bit = hash_value & 0x1F; + mta = (1 << hash_bit); + mcarray[hash_reg] |= mta; + } else { + e1000_rar_set(hw, ha->addr, i++); + } + } + + for (; i < rar_entries; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); + E1000_WRITE_FLUSH(); + } + + /* write the hash table completely, write from bottom to avoid + * both stupid write combining chipsets, and flushing each write + */ + for (i = mta_reg_count - 1; i >= 0 ; i--) { + /* If we are on an 82544 has an errata where writing odd + * offsets overwrites the previous even offset, but writing + * backwards over the range solves the issue by always + * writing the odd offset first + */ + E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); + } + E1000_WRITE_FLUSH(); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + kfree(mcarray); +} + +/** + * e1000_update_phy_info_task - get phy info + * @work: work struct contained inside adapter struct + * + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void e1000_update_phy_info_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + phy_info_task.work); + + e1000_phy_get_info(&adapter->hw, &adapter->phy_info); +} + +/** + * e1000_82547_tx_fifo_stall_task - task to complete work + * @work: work struct contained inside adapter struct + **/ +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + fifo_stall_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 tctl; + + if (atomic_read(&adapter->tx_fifo_stall)) { + if ((er32(TDT) == er32(TDH)) && + (er32(TDFT) == er32(TDFH)) && + (er32(TDFTS) == er32(TDFHS))) { + tctl = er32(TCTL); + ew32(TCTL, tctl & ~E1000_TCTL_EN); + ew32(TDFT, adapter->tx_head_addr); + ew32(TDFH, adapter->tx_head_addr); + ew32(TDFTS, adapter->tx_head_addr); + ew32(TDFHS, adapter->tx_head_addr); + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); + + adapter->tx_fifo_head = 0; + atomic_set(&adapter->tx_fifo_stall, 0); + netif_wake_queue(netdev); + } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } + } +} + +bool e1000_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or rx + * sequence error interrupt (except on intel ce4100). 
+ * get_link_status will stay false until the + * e1000_check_for_link establishes link for copper adapters + * ONLY + */ + switch (hw->media_type) { + case e1000_media_type_copper: + if (hw->mac_type == e1000_ce4100) + hw->get_link_status = 1; + if (hw->get_link_status) { + e1000_check_for_link(hw); + link_active = !hw->get_link_status; + } else { + link_active = true; + } + break; + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_active = hw->serdes_has_link; + break; + default: + break; + } + + return link_active; +} + +/** + * e1000_watchdog - work function + * @work: work struct contained inside adapter struct + **/ +static void e1000_watchdog(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + watchdog_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_ring *txdr = adapter->tx_ring; + u32 link, tctl; + + link = e1000_has_link(adapter); + if ((netif_carrier_ok(netdev)) && link) + goto link_up; + + if (link) { + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + /* update snapshot of PHY registers on LSC */ + e1000_get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = er32(CTRL); + pr_info("%s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & + E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & + E1000_CTRL_RFCE) ? "RX" : ((ctrl & + E1000_CTRL_TFCE) ? "TX" : "None"))); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + /* maybe add some timeout factor ? */ + break; + } + + /* enable transmits in the hardware */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + netif_carrier_on(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + adapter->smartspeed = 0; + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + pr_info("%s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + } + + e1000_smartspeed(adapter); + } + +link_up: + e1000_update_stats(adapter); + + hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + hw->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; + adapter->gorcl_old = adapter->stats.gorcl; + adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; + adapter->gotcl_old = adapter->stats.gotcl; + + e1000_update_adaptive(hw); + + if (!netif_carrier_ok(netdev)) { + if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
+ */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* exit immediately since reset is imminent */ + return; + } + } + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { + /* Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; + u32 dif = (adapter->gotcl > adapter->gorcl ? + adapter->gotcl - adapter->gorcl : + adapter->gorcl - adapter->gotcl) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + ew32(ITR, 1000000000 / (itr * 256)); + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + ew32(ICS, E1000_ICS_RXDMT0); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = true; + + /* Reschedule the task */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see e1000_param.c) + **/ +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, int bytes) +{ + unsigned int retval = itr_setting; + struct e1000_hw *hw = &adapter->hw; + + if (unlikely(hw->mac_type < e1000_82540)) + goto update_itr_done; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* jumbo frames get bulk treatment*/ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* jumbo frames need bulk latency setting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) + retval = bulk_latency; + else if (packets <= 2 && bytes < 512) + retval = lowest_latency; + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + if (unlikely(hw->mac_type < e1000_82540)) + return; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (unlikely(adapter->link_speed != SPEED_1000)) { + new_itr = 4000; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + ew32(ITR, 1000000000 / (new_itr * 256)); + } +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_NO_FCS 0x00000010 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + + if (skb_is_gso(skb)) { + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + hdr_len = skb_tcp_all_headers(skb); + mss = skb_shinfo(skb)->gso_size; + if (protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_is_gso_v6(skb)) { + tcp_v6_gso_csum_prep(skb); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (++i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn(drv, "checksum_partial proto=%x!\n", + skb->protocol); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + 
context_desc->upper_setup.tcp_fields.tucse = 0;
+ context_desc->tcp_seg_setup.data = 0;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ if (unlikely(++i == tx_ring->count))
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ return true;
+}
+
+#define E1000_MAX_TXD_PWR 12
+#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring,
+ struct sk_buff *skb, unsigned int first,
+ unsigned int max_per_txd, unsigned int nr_frags,
+ unsigned int mss)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_tx_buffer *buffer_info;
+ unsigned int len = skb_headlen(skb);
+ unsigned int offset = 0, size, count = 0, i;
+ unsigned int f, bytecount, segs;
+
+ i = tx_ring->next_to_use;
+
+ while (len) {
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+ /* Workaround for Controller erratum --
+ * descriptor for non-tso packet in a linear SKB that follows a
+ * tso gets written back prematurely before the data is fully
+ * DMA'd to the controller
+ */
+ if (!skb->data_len && tx_ring->last_tx_tso &&
+ !skb_is_gso(skb)) {
+ tx_ring->last_tx_tso = false;
+ size -= 4;
+ }
+
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc
+ */
+ if (unlikely(mss && !nr_frags && size == len && size > 8))
+ size -= 4;
+ /* work-around for errata 10 and it applies
+ * to all controllers in PCI-X mode
+ * The fix is to make sure that the first descriptor of a
+ * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
+ */
+ if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+ (size > 2015) && count == 0))
+ size = 2015;
+
+ /* Workaround for potential 82544 hang in PCI-X. Avoid
+ * terminating buffers within evenly-aligned dwords.
+ */
+ if (unlikely(adapter->pcix_82544 &&
+ !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+ size > 4))
+ size -= 4;
+
+ buffer_info->length = size;
+ /* set time_stamp *before* dma to help avoid a possible race */
+ buffer_info->time_stamp = jiffies;
+ buffer_info->mapped_as_page = false;
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+ buffer_info->next_to_watch = i;
+
+ len -= size;
+ offset += size;
+ count++;
+ if (len) {
+ i++;
+ if (unlikely(i == tx_ring->count))
+ i = 0;
+ }
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+ len = skb_frag_size(frag);
+ offset = 0;
+
+ while (len) {
+ unsigned long bufend;
+ i++;
+ if (unlikely(i == tx_ring->count))
+ i = 0;
+
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc
+ */
+ if (unlikely(mss && f == (nr_frags-1) &&
+ size == len && size > 8))
+ size -= 4;
+ /* Workaround for potential 82544 hang in PCI-X.
+ * Avoid terminating buffers within evenly-aligned
+ * dwords.
+ */ + bufend = (unsigned long) + page_to_phys(skb_frag_page(frag)); + bufend += offset + size - 1; + if (unlikely(adapter->pcix_82544 && + !(bufend & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = true; + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, + offset, size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + } + } + + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i == 0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0); + } + + return 0; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, int tx_flags, + int count) +{ + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_tx_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + txd_lower &= ~(E1000_TXD_CMD_IFCS); + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + if (unlikely(++i == tx_ring->count)) + i = 0; + } + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + + tx_ring->next_to_use = i; +} + +/* 82547 workaround to avoid controller hang in half-duplex environment. + * The workaround is to avoid queuing a large packet that would span + * the internal Tx FIFO ring boundary by notifying the stack to resend + * the packet at a later time. This gives the Tx FIFO an opportunity to + * flush all packets. When that occurs, we reset the Tx FIFO pointers + * to the beginning of the Tx FIFO. 
+ */ + +#define E1000_FIFO_HDR 0x10 +#define E1000_82547_PAD_LEN 0x3E0 + +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; + u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; + + skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); + + if (adapter->link_duplex != HALF_DUPLEX) + goto no_fifo_stall_required; + + if (atomic_read(&adapter->tx_fifo_stall)) + return 1; + + if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { + atomic_set(&adapter->tx_fifo_stall, 1); + return 1; + } + +no_fifo_stall_required: + adapter->tx_fifo_head += skb_fifo_len; + if (adapter->tx_fifo_head >= adapter->tx_fifo_size) + adapter->tx_fifo_head -= adapter->tx_fifo_size; + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + + netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(E1000_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, + struct e1000_tx_ring *tx_ring, int size) +{ + if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __e1000_maybe_stop_tx(netdev, size); +} + +#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X)) +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *tx_ring; + unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + __be16 protocol = vlan_get_protocol(skb); + + /* This goes back to the question of how to logically map a Tx queue + * to a flow. Right now, performance is impacted slightly negatively + * if using multiple Tx queues. If the stack breaks away from a + * single qdisc implementation, we can look at this again. + */ + tx_ring = adapter->tx_ring; + + /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, + * packets may get corrupted during padding by HW. + * To WA this issue, pad all small packets manually. + */ + if (eth_skb_pad(skb)) + return NETDEV_TX_OK; + + mss = skb_shinfo(skb)->gso_size; + /* The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. 
+ */ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + hdr_len = skb_tcp_all_headers(skb); + if (skb->data_len && hdr_len == len) { + switch (hw->mac_type) { + case e1000_82544: { + unsigned int pull_size; + + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements + * NOTE: this is a TSO only workaround + * if end byte alignment not correct move us + * into the next dword + */ + if ((unsigned long)(skb_tail_pointer(skb) - 1) + & 4) + break; + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err(drv, "__pskb_pull_tail " + "failed.\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + break; + } + default: + /* do nothing */ + break; + } + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + /* Controller Erratum workaround */ + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) + count++; + + count += TXD_USE_COUNT(len, max_txd_pwr); + + if (adapter->pcix_82544) + count++; + + /* work-around for errata 10 and it applies to all controllers + * in PCI-X mode, so add one more descriptor to the count + */ + if (unlikely((hw->bus_type == e1000_bus_type_pcix) && + (len > 2015))) + count++; + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), + max_txd_pwr); + if (adapter->pcix_82544) + count += nr_frags; + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time + */ + if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) + return NETDEV_TX_BUSY; + + if (unlikely((hw->mac_type == e1000_82547) && + (e1000_82547_fifo_workaround(adapter, skb)))) { + netif_stop_queue(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->fifo_stall_task, 1); + return NETDEV_TX_BUSY; + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << + E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, tx_ring, skb, protocol); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (likely(tso)) { + if (likely(hw->mac_type != e1000_82544)) + tx_ring->last_tx_tso = true; + tx_flags |= E1000_TX_FLAGS_TSO; + } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) + tx_flags |= E1000_TX_FLAGS_CSUM; + + if (protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + if (unlikely(skb->no_fcs)) + tx_flags |= E1000_TX_FLAGS_NO_FCS; + + count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, + nr_frags, mss); + + if (count) { + /* The descriptors needed is higher than other Intel drivers + * due to a number of workarounds. The breakdown is below: + * Data descriptors: MAX_SKB_FRAGS + 1 + * Context Descriptor: 1 + * Keep head from touching tail: 2 + * Workarounds: 3 + */ + int desc_needed = MAX_SKB_FRAGS + 7; + + netdev_sent_queue(netdev, skb->len); + skb_tx_timestamp(skb); + + e1000_tx_queue(adapter, tx_ring, tx_flags, count); + + /* 82544 potentially requires twice as many data descriptors + * in order to guarantee buffers don't end on evenly-aligned + * dwords + */ + if (adapter->pcix_82544) + desc_needed += MAX_SKB_FRAGS + 1; + + /* Make sure there is space in the ring for the next send. 
*/ + e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); + + if (!netdev_xmit_more() || + netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { + writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); + } + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +#define NUM_REGS 38 /* 1 based count */ +static void e1000_regdump(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 regs[NUM_REGS]; + u32 *regs_buff = regs; + int i = 0; + + static const char * const reg_name[] = { + "CTRL", "STATUS", + "RCTL", "RDLEN", "RDH", "RDT", "RDTR", + "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", + "TIDV", "TXDCTL", "TADV", "TARC0", + "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", + "TXDCTL1", "TARC1", + "CTRL_EXT", "ERT", "RDBAL", "RDBAH", + "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", + "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" + }; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDBAL); + regs_buff[9] = er32(TDBAH); + regs_buff[10] = er32(TDLEN); + regs_buff[11] = er32(TDH); + regs_buff[12] = er32(TDT); + regs_buff[13] = er32(TIDV); + regs_buff[14] = er32(TXDCTL); + regs_buff[15] = er32(TADV); + regs_buff[16] = er32(TARC0); + + regs_buff[17] = er32(TDBAL1); + regs_buff[18] = er32(TDBAH1); + regs_buff[19] = er32(TDLEN1); + regs_buff[20] = er32(TDH1); + regs_buff[21] = er32(TDT1); + regs_buff[22] = er32(TXDCTL1); + regs_buff[23] = er32(TARC1); + regs_buff[24] = er32(CTRL_EXT); + regs_buff[25] = er32(ERT); + regs_buff[26] = er32(RDBAL0); + regs_buff[27] = er32(RDBAH0); + regs_buff[28] = er32(TDFH); + regs_buff[29] = er32(TDFT); + regs_buff[30] = er32(TDFHS); + regs_buff[31] = er32(TDFTS); + regs_buff[32] = er32(TDFPC); + regs_buff[33] = er32(RDFH); + regs_buff[34] = er32(RDFT); + regs_buff[35] = er32(RDFHS); + regs_buff[36] = er32(RDFTS); + regs_buff[37] = er32(RDFPC); + + pr_info("Register dump\n"); + for (i = 0; i < NUM_REGS; i++) + pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); +} + +/* + * e1000_dump: Print registers, tx ring and rx ring + */ +static void e1000_dump(struct e1000_adapter *adapter) +{ + /* this code doesn't handle multiple rings */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + + if (!netif_msg_hw(adapter)) + return; + + /* Print Registers */ + e1000_regdump(adapter); + + /* transmit dump */ + pr_info("TX Desc ring0 dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * +----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * 
+----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); + pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); + + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)tx_desc; + const char *type; + + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + type = "NTC/U"; + else if (i == tx_ring->next_to_use) + type = "NTU"; + else if (i == tx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", + ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i, + le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->length, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, buffer_info->skb, type); + } + +rx_ring_summary: + /* receive dump */ + pr_info("\nRX Desc ring dump\n"); + + /* Legacy Receive Descriptor Format + * + * +-----------------------------------------------------+ + * | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * | VLAN Tag | Errors | Status 0 | Packet csum | Length | + * +-----------------------------------------------------+ + * 63 48 47 40 39 32 31 16 15 0 + */ + pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); + + if (!netif_msg_rx_status(adapter)) + goto exit; + + for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)rx_desc; + const char *type; + + if (i == rx_ring->next_to_use) + type = "NTU"; + else if (i == rx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", + i, le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->rxbuf.data, type); + } /* for */ + + /* dump the descriptor caches */ + /* rx */ + pr_info("Rx descriptor cache in 64bit format\n"); + for (i = 0x6000; i <= 0x63FF ; i += 0x10) { + pr_info("R%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } + /* tx */ + pr_info("Tx descriptor cache in 64bit format\n"); + for (i = 0x7000; i <= 0x73FF ; i += 0x10) { + pr_info("T%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } +exit: + return; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: number of the Tx queue that hung (unused) + 
**/ +static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = + container_of(work, struct e1000_adapter, reset_task); + + e_err(drv, "Reset adapter\n"); + e1000_reinit_locked(adapter); +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Adapter-specific max frame size limits. */ + switch (hw->mac_type) { + case e1000_undefined ... e1000_82542_rev2_1: + if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { + e_err(probe, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + break; + default: + /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ + break; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + /* e1000_down has a dependency on max_frame_size */ + hw->max_frame_size = max_frame; + if (netif_running(netdev)) { + /* prevent buffers from being reallocated */ + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; + e1000_down(adapter); + } + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. RXBUFFER_2048 --> size-4096 slab + * however with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= E1000_RXBUFFER_2048) + adapter->rx_buffer_len = E1000_RXBUFFER_2048; + else +#if (PAGE_SIZE >= E1000_RXBUFFER_16384) + adapter->rx_buffer_len = E1000_RXBUFFER_16384; +#elif (PAGE_SIZE >= E1000_RXBUFFER_4096) + adapter->rx_buffer_len = PAGE_SIZE; +#endif + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if (!hw->tbi_compatibility_on && + ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || + (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + e1000_up(adapter); + else + e1000_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->flags); + + return 0; +} + +/** + * e1000_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void e1000_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned long flags; + u16 phy_tmp; + +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + spin_lock_irqsave(&adapter->stats_lock, flags); + + /* these counters are modified from e1000_tbi_adjust_stats, + * called from the interrupt context, so they must only + * be written while holding adapter->stats_lock + */ + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorcl += er32(GORCL); + adapter->stats.gorch += er32(GORCH); + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.prc64 += er32(PRC64); + adapter->stats.prc127 += er32(PRC127); + adapter->stats.prc255 += er32(PRC255); + adapter->stats.prc511 += er32(PRC511); + adapter->stats.prc1023 += er32(PRC1023); + adapter->stats.prc1522 += er32(PRC1522); + + adapter->stats.symerrs += er32(SYMERRS); + adapter->stats.mpc += er32(MPC); + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + adapter->stats.sec += er32(SEC); + adapter->stats.rlec += er32(RLEC); + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.fcruc += er32(FCRUC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotcl += er32(GOTCL); + adapter->stats.gotch += er32(GOTCH); + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + adapter->stats.rfc += er32(RFC); + adapter->stats.rjc += er32(RJC); + adapter->stats.torl += er32(TORL); + adapter->stats.torh += er32(TORH); + adapter->stats.totl += er32(TOTL); + adapter->stats.toth += er32(TOTH); + adapter->stats.tpr += er32(TPR); + + adapter->stats.ptc64 += er32(PTC64); + adapter->stats.ptc127 += er32(PTC127); + adapter->stats.ptc255 += er32(PTC255); + adapter->stats.ptc511 += er32(PTC511); + adapter->stats.ptc1023 += er32(PTC1023); + adapter->stats.ptc1522 += er32(PTC1522); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* used for adaptive IFS */ + + hw->tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->tx_packet_delta; + hw->collision_delta = er32(COLC); + adapter->stats.colc += hw->collision_delta; + + if (hw->mac_type >= e1000_82543) { + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.tncrs += er32(TNCRS); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + } + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; + netdev->stats.rx_length_errors = adapter->stats.rlerrc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; + netdev->stats.tx_errors = adapter->stats.txerrc; + netdev->stats.tx_aborted_errors = 
adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + if (hw->bad_tx_carr_stats_fd && + adapter->link_duplex == FULL_DUPLEX) { + netdev->stats.tx_carrier_errors = 0; + adapter->stats.tncrs = 0; + } + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Phy Stats */ + if (hw->media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && + (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + + if ((hw->mac_type <= e1000_82546) && + (hw->phy_type == e1000_phy_m88) && + !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) + adapter->phy_stats.receive_errors += phy_tmp; + } + + /* Management Stats */ + if (hw->has_smbus) { + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + } + + spin_unlock_irqrestore(&adapter->stats_lock, flags); +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (unlikely((!icr))) + return IRQ_NONE; /* Not our interrupt */ + + /* we might have caused the interrupt, but the above + * read cleared it, and just in case the driver is + * down there is nothing to do so return handled + */ + if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) + return IRQ_HANDLED; + + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { + hw->get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 1); + } + + /* disable interrupts, without the synchronize_irq bit */ + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + + if (likely(napi_schedule_prep(&adapter->napi))) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } else { + /* this really should not happen! 
if it does it is basically a + * bug, but not a hard error, so enable ints and continue + */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return IRQ_HANDLED; +} + +/** + * e1000_clean - NAPI Rx polling callback + * @napi: napi struct containing references to driver info + * @budget: budget given to driver for receive packets + **/ +static int e1000_clean(struct napi_struct *napi, int budget) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); + int tx_clean_complete = 0, work_done = 0; + + tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); + + adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); + + if (!tx_clean_complete || work_done == budget) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) { + if (likely(adapter->itr_setting & 3)) + e1000_set_itr(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return work_done; +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * @tx_ring: ring to clean + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; + unsigned int bytes_compl = 0, pkts_compl = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + dma_rmb(); /* read buffer_info after eop_desc */ + for ( ; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + + } + e1000_unmap_and_free_tx_resource(adapter, buffer_info, + 64); + tx_desc->upper.data = 0; + + if (unlikely(++i == tx_ring->count)) + i = 0; + } + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, + * which will reuse the cleaned buffers. + */ + smp_store_release(&tx_ring->next_to_clean, i); + + netdev_completed_queue(netdev, pkts_compl, bytes_compl); + +#define TX_WAKE_THRESHOLD 32 + if (unlikely(count && netif_carrier_ok(netdev) && + E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->flags))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[eop].time_stamp && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%lu>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + (unsigned long)(tx_ring - adapter->tx_ring), + readl(hw->hw_addr + tx_ring->tdh), + readl(hw->hw_addr + tx_ring->tdt), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); + e1000_dump(adapter); + netif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + netdev->stats.tx_bytes += total_tx_bytes; + netdev->stats.tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_rx_checksum - Receive Checksum Offload for 82543 + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @skb: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + skb_checksum_none_assert(skb); + + /* 82543 or newer only */ + if (unlikely(hw->mac_type < e1000_82543)) + return; + /* Ignore Checksum bit is set */ + if (unlikely(status & E1000_RXD_STAT_IXSM)) + return; + /* TCP/UDP checksum error bit is set */ + if (unlikely(errors & E1000_RXD_ERR_TCPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + /* TCP/UDP Checksum has not been calculated */ + if (!(status & E1000_RXD_STAT_TCPCS)) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + if (likely(status & E1000_RXD_STAT_TCPCS)) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + adapter->hw_csum_good++; +} + +/** + * e1000_consume_page - helper function for jumbo Rx path + * @bi: software descriptor shadow data + * @skb: skb being modified + * @length: length of data being added + **/ +static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->rxbuf.page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; +} + +/** + * e1000_receive_skb - helper function to handle rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + */ +static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, + __le16 vlan, struct sk_buff *skb) +{ + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (status & E1000_RXD_STAT_VP) { + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + 
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + napi_gro_receive(&adapter->napi, skb); +} + +/** + * e1000_tbi_adjust_stats + * @hw: Struct containing variables accessed by shared code + * @stats: point to stats struct + * @frame_len: The length of the frame in question + * @mac_addr: The Ethernet destination address of the frame in question + * + * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT + */ +static void e1000_tbi_adjust_stats(struct e1000_hw *hw, + struct e1000_hw_stats *stats, + u32 frame_len, const u8 *mac_addr) +{ + u64 carry_bit; + + /* First adjust the frame length. */ + frame_len--; + /* We need to adjust the statistics counters, since the hardware + * counters overcount this packet as a CRC error and undercount + * the packet as a good packet + */ + /* This packet should not be counted as a CRC error. */ + stats->crcerrs--; + /* This packet does count as a Good Packet Received. */ + stats->gprc++; + + /* Adjust the Good Octets received counters */ + carry_bit = 0x80000000 & stats->gorcl; + stats->gorcl += frame_len; + /* If the high bit of Gorcl (the low 32 bits of the Good Octets + * Received Count) was one before the addition, + * AND it is zero after, then we lost the carry out, + * need to add one to Gorch (Good Octets Received Count High). + * This could be simplified if all environments supported + * 64-bit integers. + */ + if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) + stats->gorch++; + /* Is this a broadcast or multicast? Check broadcast first, + * since the test for a multicast frame will test positive on + * a broadcast frame. + */ + if (is_broadcast_ether_addr(mac_addr)) + stats->bprc++; + else if (is_multicast_ether_addr(mac_addr)) + stats->mprc++; + + if (frame_len == hw->max_frame_size) { + /* In this case, the hardware has overcounted the number of + * oversize frames. + */ + if (stats->roc > 0) + stats->roc--; + } + + /* Adjust the bin counters when the extra byte put the frame in the + * wrong bin. Remember that the frame_len was adjusted above. 
+ */ + if (frame_len == 64) { + stats->prc64++; + stats->prc127--; + } else if (frame_len == 127) { + stats->prc127++; + stats->prc255--; + } else if (frame_len == 255) { + stats->prc255++; + stats->prc511--; + } else if (frame_len == 511) { + stats->prc511++; + stats->prc1023--; + } else if (frame_len == 1023) { + stats->prc1023++; + stats->prc1522--; + } else if (frame_len == 1522) { + stats->prc1522++; + } +} + +static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, + u8 status, u8 errors, + u32 length, const u8 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u8 last_byte = *(data + length - 1); + + if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { + unsigned long irq_flags; + + spin_lock_irqsave(&adapter->stats_lock, irq_flags); + e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); + + return true; + } + + return false; +} + +static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, + unsigned int bufsz) +{ + struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); + + if (unlikely(!skb)) + adapter->alloc_rx_buff_failed++; + return skb; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + * @work_to_do: max amount of work allowed for this call to do + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + */ +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + + if (++i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + u8 *mapped = page_address(buffer_info->rxbuf.page); + + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, mapped)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + /* an error means any chain goes out the window + * too + */ + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + } + +#define rxtop rx_ring->rx_skb_top +process_skb: + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = napi_get_frags(&adapter->napi); + if (!rxtop) + 
break; + + skb_fill_page_desc(rxtop, 0, + buffer_info->rxbuf.page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + struct page *p; + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page + */ + p = buffer_info->rxbuf.page; + if (length <= copybreak) { + u8 *vaddr; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + length -= 4; + skb = e1000_alloc_rx_skb(adapter, + length); + if (!skb) + break; + + vaddr = kmap_atomic(p); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr); + /* re-use the page, so don't erase + * buffer_info->rxbuf.page + */ + skb_put(skb, length); + e1000_rx_checksum(adapter, + status | rx_desc->errors << 24, + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_receive_skb(adapter, status, + rx_desc->special, skb); + goto next_desc; + } else { + skb = napi_get_frags(&adapter->napi); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + skb_fill_page_desc(skb, 0, p, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += (skb->len - 4); /* don't count FCS */ + if (likely(!(netdev->features & NETIF_F_RXFCS))) + pskb_trim(skb, skb->len - 4); + total_rx_packets++; + + if (status & E1000_RXD_STAT_VP) { + __le16 vlan = rx_desc->special; + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + napi_gro_frags(&adapter->napi); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/* this should improve performance for small packets with large amounts + * of reassembly being done in the stack + */ +static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, + struct e1000_rx_buffer *buffer_info, + u32 length, const void *data) +{ + struct sk_buff *skb; + + if (length > copybreak) + return NULL; + + skb = e1000_alloc_rx_skb(adapter, length); + if (!skb) + return NULL; + + dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, + length, DMA_FROM_DEVICE); + + skb_put_data(skb, data, length); + + return skb; +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + 
* @work_to_do: max amount of work allowed for this call to do + */ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 *data; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + length = le16_to_cpu(rx_desc->length); + + data = buffer_info->rxbuf.data; + prefetch(data); + skb = e1000_copybreak(adapter, buffer_info, length, data); + if (!skb) { + unsigned int frag_len = e1000_frag_len(adapter); + + skb = napi_build_skb(data - E1000_HEADROOM, frag_len); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + skb_reserve(skb, E1000_HEADROOM); + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + buffer_info->rxbuf.data = NULL; + } + + if (++i == rx_ring->count) + i = 0; + + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + + /* !EOP means multiple descriptors were used to store a single + * packet, if thats the case we need to toss it. In fact, we + * to toss every packet with the EOP bit clear and the next + * frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(status & E1000_RXD_STAT_EOP))) + adapter->discarding = true; + + if (adapter->discarding) { + /* All receives must fit into a single buffer */ + netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); + dev_kfree_skb(skb); + if (status & E1000_RXD_STAT_EOP) + adapter->discarding = false; + goto next_desc; + } + + if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, data)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + dev_kfree_skb(skb); + goto next_desc; + } + } + +process_skb: + total_rx_bytes += (length - 4); /* don't count FCS */ + total_rx_packets++; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + /* adjust length to remove Ethernet CRC, this must be + * done after the TBI_ACCEPT workaround above + */ + length -= 4; + + if (buffer_info->rxbuf.data == NULL) + skb_put(skb, length); + else /* copybreak skb */ + skb_trim(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + e1000_receive_skb(adapter, status, rx_desc->special, skb); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + 
if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @adapter: address of board private structure + * @rx_ring: pointer to receive ring structure + * @cleaned_count: number of buffers to allocate this pass + **/ +static void +e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, int cleaned_count) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + /* allocate a new page if necessary */ + if (!buffer_info->rxbuf.page) { + buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->rxbuf.page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) { + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->rxbuf.page, 0, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + dma_wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + * @rx_ring: pointer to ring struct + * @cleaned_count: number of new Rx buffers to try to allocate + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + void *data; + + if (buffer_info->rxbuf.data) + goto skip; + + data = e1000_alloc_frag(adapter); + if (!data) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + void *olddata = data; + e_err(rx_err, "skb align check failed: %u bytes at " + "%p\n", bufsz, data); + /* Try again, without freeing the previous */ + data = e1000_alloc_frag(adapter); + /* Failed allocation, critical failure */ + if (!data) { + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + /* give up */ + skb_free_frag(data); + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + /* Use new allocation */ + skb_free_frag(olddata); + } + buffer_info->dma = dma_map_single(&pdev->dev, + data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + skb_free_frag(data); + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + + /* XXX if it was allocated cleanly it will never map to a + * boundary crossing + */ + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, + (void *)(unsigned long)buffer_info->dma, + adapter->rx_buffer_len)) { + e_err(rx_err, "dma align check failed: %u bytes at " + "%p\n", adapter->rx_buffer_len, + (void *)(unsigned long)buffer_info->dma); + + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + + skb_free_frag(data); + buffer_info->rxbuf.data = NULL; + buffer_info->dma = 0; + + adapter->alloc_rx_buff_failed++; + break; + } + buffer_info->rxbuf.data = data; + skip: + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + writel(i, hw->hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 
+ * @adapter: address of board private structure + **/ +static void e1000_smartspeed(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_status; + u16 phy_ctrl; + + if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || + !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) + return; + + if (adapter->smartspeed == 0) { + /* If Master/Slave config fault is asserted twice, + * we assume back-to-back + */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) + return; + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + if (phy_ctrl & CR_1000T_MS_ENABLE) { + phy_ctrl &= ~CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, + phy_ctrl); + adapter->smartspeed++; + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, + &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, + phy_ctrl); + } + } + return; + } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { + /* If still no link, perhaps using 2/3 pair cable */ + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + phy_ctrl |= CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); + } + } + /* Restart process after E1000_SMARTSPEED_MAX iterations */ + if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) + adapter->smartspeed = 0; +} + +/** + * e1000_ioctl - handle ioctl calls + * @netdev: pointer to our netdev + * @ifr: pointer to interface request structure + * @cmd: ioctl data + **/ +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * e1000_mii_ioctl - + * @netdev: pointer to our netdev + * @ifr: pointer to interface request structure + * @cmd: ioctl data + **/ +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct mii_ioctl_data *data = if_mii(ifr); + int retval; + u16 mii_reg; + unsigned long flags; + + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + break; + case SIOCGMIIREG: + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, + &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + break; + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) + return -EFAULT; + mii_reg = data->val_in; + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_write_phy_reg(hw, data->reg_num, + mii_reg)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + if (hw->media_type == e1000_media_type_copper) { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (mii_reg & MII_CR_AUTO_NEG_EN) { + hw->autoneg = 1; + hw->autoneg_advertised = 0x2F; + } else { + u32 speed; + if (mii_reg & 
0x40) + speed = SPEED_1000; + else if (mii_reg & 0x2000) + speed = SPEED_100; + else + speed = SPEED_10; + retval = e1000_set_spd_dplx( + adapter, speed, + ((mii_reg & 0x100) + ? DUPLEX_FULL : + DUPLEX_HALF)); + if (retval) + return retval; + } + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + case M88E1000_PHY_SPEC_CTRL: + case M88E1000_EXT_PHY_SPEC_CTRL: + if (e1000_phy_reset(hw)) + return -EIO; + break; + } + } else { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + } + } + break; + default: + return -EOPNOTSUPP; + } + return E1000_SUCCESS; +} + +void e1000_pci_set_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + int ret_val = pci_set_mwi(adapter->pdev); + + if (ret_val) + e_err(probe, "Error in setting MWI\n"); +} + +void e1000_pci_clear_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + + pci_clear_mwi(adapter->pdev); +} + +int e1000_pcix_get_mmrbc(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return pcix_get_mmrbc(adapter->pdev); +} + +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) +{ + struct e1000_adapter *adapter = hw->back; + pcix_set_mmrbc(adapter->pdev, mmrbc); +} + +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) +{ + outl(value, port); +} + +static bool e1000_vlan_used(struct e1000_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + return true; + return false; +} + +static void __e1000_vlan_mode(struct e1000_adapter *adapter, + netdev_features_t features) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = er32(CTRL); + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + ctrl |= E1000_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~E1000_CTRL_VME; + } + ew32(CTRL, ctrl); +} +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, adapter->netdev->features); + if (filter_on) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_CFIEN; + if (!(adapter->netdev->flags & IFF_PROMISC)) + rctl |= E1000_RCTL_VFE; + ew32(RCTL, rctl); + e1000_update_mng_vlan(adapter); + } else { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_VFE; + ew32(RCTL, rctl); + } + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, features); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + (vid == adapter->mng_vlan_id)) + return 0; + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, true); + + /* add VID to filter table */ + 
index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta |= (1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + + /* remove VID from filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + clear_bit(vid, adapter->active_vlans); + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, false); + + return 0; +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + if (!e1000_vlan_used(adapter)) + return; + + e1000_vlan_filter_on_off(adapter, true); + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_hw *hw = &adapter->hw; + + hw->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 gbps Full duplex */ + if ((hw->media_type == e1000_media_type_fiber) && + spd != SPEED_1000 && + dplx != DUPLEX_FULL) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_10_half; + break; + case SPEED_10 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_10_full; + break; + case SPEED_100 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_100_half; + break; + case SPEED_100 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_100_full; + break; + case SPEED_1000 + DUPLEX_FULL: + hw->autoneg = 1; + hw->autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + hw->mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + e_err(probe, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + u32 wufc = adapter->wol; + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + } + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_rx_mode(netdev); + + rctl = er32(RCTL); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) + rctl |= E1000_RCTL_MPE; + + /* enable receives in the hardware */ + ew32(RCTL, rctl | E1000_RCTL_EN); + + if (hw->mac_type >= e1000_82540) { + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define 
E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC | + E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + } + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + ew32(WUC, E1000_WUC_PME_EN); + ew32(WUFC, wufc); + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + } + + e1000_release_manageability(adapter); + + *enable_wake = !!wufc; + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->en_mng_pt) + *enable_wake = true; + + if (netif_running(netdev)) + e1000_free_irq(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + return 0; +} + +static int __maybe_unused e1000_suspend(struct device *dev) +{ + int retval; + struct pci_dev *pdev = to_pci_dev(dev); + bool wake; + + retval = __e1000_shutdown(pdev, &wake); + device_set_wakeup_enable(dev, wake); + + return retval; +} + +static int __maybe_unused e1000_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot enable PCI device from suspend\n"); + return err; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + e1000_power_up_phy(adapter); + e1000_reset(adapter); + ew32(WUS, ~0); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) + e1000_up(adapter); + + netif_device_attach(netdev); + + return 0; +} + +static void e1000_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __e1000_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (disable_hardirq(adapter->pdev->irq)) + e1000_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e1000_down(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + /* Request a slot reset. 
*/ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000_reset(adapter); + ew32(WUS, ~0); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) { + if (e1000_up(adapter)) { + pr_info("can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +/* e1000_main.c */ diff --git a/devices/e1000/e1000_osdep-5.10-ethercat.h b/devices/e1000/e1000_osdep-5.10-ethercat.h new file mode 100644 index 00000000..e966bb29 --- /dev/null +++ b/devices/e1000/e1000_osdep-5.10-ethercat.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* glue for the OS independent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include + +#define CONFIG_RAM_BASE 0x60000 +#define GBE_CONFIG_OFFSET 0x0 + +#define GBE_CONFIG_RAM_BASE \ + ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) + +#define GBE_CONFIG_BASE_VIRT \ + ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) + +#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ + (iowrite16_rep(base + offset, data, count)) + +#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \ + (ioread16_rep(base + (offset << 1), data, count)) + +#define er32(reg) \ + (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg))) + +#define ew32(reg, value) \ + (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg)))) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? 
E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset))) + +#define E1000_WRITE_FLUSH() er32(STATUS) + +#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG(a, reg) ( \ + readl((a)->flash_address + reg)) + +#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \ + readw((a)->flash_address + reg)) + +#endif /* _E1000_OSDEP_H_ */ diff --git a/devices/e1000/e1000_osdep-5.10-orig.h b/devices/e1000/e1000_osdep-5.10-orig.h new file mode 100644 index 00000000..e966bb29 --- /dev/null +++ b/devices/e1000/e1000_osdep-5.10-orig.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* glue for the OS independent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include + +#define CONFIG_RAM_BASE 0x60000 +#define GBE_CONFIG_OFFSET 0x0 + +#define GBE_CONFIG_RAM_BASE \ + ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) + +#define GBE_CONFIG_BASE_VIRT \ + ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) + +#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ + (iowrite16_rep(base + offset, data, count)) + +#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \ + (ioread16_rep(base + (offset << 1), data, count)) + +#define er32(reg) \ + (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg))) + +#define ew32(reg, value) \ + (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg)))) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? 
E1000_##reg : E1000_82542_##reg) + \ + (offset))) + +#define E1000_WRITE_FLUSH() er32(STATUS) + +#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG(a, reg) ( \ + readl((a)->flash_address + reg)) + +#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \ + readw((a)->flash_address + reg)) + +#endif /* _E1000_OSDEP_H_ */ diff --git a/devices/e1000/e1000_osdep-5.14-ethercat.h b/devices/e1000/e1000_osdep-5.14-ethercat.h new file mode 100644 index 00000000..e966bb29 --- /dev/null +++ b/devices/e1000/e1000_osdep-5.14-ethercat.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* glue for the OS independent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include + +#define CONFIG_RAM_BASE 0x60000 +#define GBE_CONFIG_OFFSET 0x0 + +#define GBE_CONFIG_RAM_BASE \ + ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) + +#define GBE_CONFIG_BASE_VIRT \ + ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) + +#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ + (iowrite16_rep(base + offset, data, count)) + +#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \ + (ioread16_rep(base + (offset << 1), data, count)) + +#define er32(reg) \ + (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg))) + +#define ew32(reg, value) \ + (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg)))) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? 
E1000_##reg : E1000_82542_##reg) + \ + (offset))) + +#define E1000_WRITE_FLUSH() er32(STATUS) + +#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG(a, reg) ( \ + readl((a)->flash_address + reg)) + +#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \ + readw((a)->flash_address + reg)) + +#endif /* _E1000_OSDEP_H_ */ diff --git a/devices/e1000/e1000_osdep-5.14-orig.h b/devices/e1000/e1000_osdep-5.14-orig.h new file mode 100644 index 00000000..e966bb29 --- /dev/null +++ b/devices/e1000/e1000_osdep-5.14-orig.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* glue for the OS independent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include + +#define CONFIG_RAM_BASE 0x60000 +#define GBE_CONFIG_OFFSET 0x0 + +#define GBE_CONFIG_RAM_BASE \ + ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) + +#define GBE_CONFIG_BASE_VIRT \ + ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) + +#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ + (iowrite16_rep(base + offset, data, count)) + +#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \ + (ioread16_rep(base + (offset << 1), data, count)) + +#define er32(reg) \ + (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg))) + +#define ew32(reg, value) \ + (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg)))) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? 
E1000_##reg : E1000_82542_##reg) + \ + (offset))) + +#define E1000_WRITE_FLUSH() er32(STATUS) + +#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG(a, reg) ( \ + readl((a)->flash_address + reg)) + +#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \ + readw((a)->flash_address + reg)) + +#endif /* _E1000_OSDEP_H_ */ diff --git a/devices/e1000/e1000_osdep-5.15-ethercat.h b/devices/e1000/e1000_osdep-5.15-ethercat.h new file mode 100644 index 00000000..e966bb29 --- /dev/null +++ b/devices/e1000/e1000_osdep-5.15-ethercat.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* glue for the OS independent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include + +#define CONFIG_RAM_BASE 0x60000 +#define GBE_CONFIG_OFFSET 0x0 + +#define GBE_CONFIG_RAM_BASE \ + ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) + +#define GBE_CONFIG_BASE_VIRT \ + ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) + +#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ + (iowrite16_rep(base + offset, data, count)) + +#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \ + (ioread16_rep(base + (offset << 1), data, count)) + +#define er32(reg) \ + (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg))) + +#define ew32(reg, value) \ + (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg)))) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? 
E1000_##reg : E1000_82542_##reg) + \ + (offset))) + +#define E1000_WRITE_FLUSH() er32(STATUS) + +#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG(a, reg) ( \ + readl((a)->flash_address + reg)) + +#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \ + readw((a)->flash_address + reg)) + +#endif /* _E1000_OSDEP_H_ */ diff --git a/devices/e1000/e1000_osdep-5.15-orig.h b/devices/e1000/e1000_osdep-5.15-orig.h new file mode 100644 index 00000000..e966bb29 --- /dev/null +++ b/devices/e1000/e1000_osdep-5.15-orig.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* glue for the OS independent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include + +#define CONFIG_RAM_BASE 0x60000 +#define GBE_CONFIG_OFFSET 0x0 + +#define GBE_CONFIG_RAM_BASE \ + ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) + +#define GBE_CONFIG_BASE_VIRT \ + ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) + +#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ + (iowrite16_rep(base + offset, data, count)) + +#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \ + (ioread16_rep(base + (offset << 1), data, count)) + +#define er32(reg) \ + (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg))) + +#define ew32(reg, value) \ + (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg)))) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? 
E1000_##reg : E1000_82542_##reg) + \ + (offset))) + +#define E1000_WRITE_FLUSH() er32(STATUS) + +#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG(a, reg) ( \ + readl((a)->flash_address + reg)) + +#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \ + readw((a)->flash_address + reg)) + +#endif /* _E1000_OSDEP_H_ */ diff --git a/devices/e1000/e1000_osdep-6.1-ethercat.h b/devices/e1000/e1000_osdep-6.1-ethercat.h new file mode 100644 index 00000000..e966bb29 --- /dev/null +++ b/devices/e1000/e1000_osdep-6.1-ethercat.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* glue for the OS independent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include + +#define CONFIG_RAM_BASE 0x60000 +#define GBE_CONFIG_OFFSET 0x0 + +#define GBE_CONFIG_RAM_BASE \ + ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) + +#define GBE_CONFIG_BASE_VIRT \ + ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) + +#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ + (iowrite16_rep(base + offset, data, count)) + +#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \ + (ioread16_rep(base + (offset << 1), data, count)) + +#define er32(reg) \ + (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg))) + +#define ew32(reg, value) \ + (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg)))) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? 
E1000_##reg : E1000_82542_##reg) + \ + (offset))) + +#define E1000_WRITE_FLUSH() er32(STATUS) + +#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG(a, reg) ( \ + readl((a)->flash_address + reg)) + +#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \ + readw((a)->flash_address + reg)) + +#endif /* _E1000_OSDEP_H_ */ diff --git a/devices/e1000/e1000_osdep-6.1-orig.h b/devices/e1000/e1000_osdep-6.1-orig.h new file mode 100644 index 00000000..e966bb29 --- /dev/null +++ b/devices/e1000/e1000_osdep-6.1-orig.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +/* glue for the OS independent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include + +#define CONFIG_RAM_BASE 0x60000 +#define GBE_CONFIG_OFFSET 0x0 + +#define GBE_CONFIG_RAM_BASE \ + ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) + +#define GBE_CONFIG_BASE_VIRT \ + ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) + +#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ + (iowrite16_rep(base + offset, data, count)) + +#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \ + (ioread16_rep(base + (offset << 1), data, count)) + +#define er32(reg) \ + (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg))) + +#define ew32(reg, value) \ + (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg)))) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? 
E1000_##reg : E1000_82542_##reg) + \ + (offset))) + +#define E1000_WRITE_FLUSH() er32(STATUS) + +#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG(a, reg) ( \ + readl((a)->flash_address + reg)) + +#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \ + readw((a)->flash_address + reg)) + +#endif /* _E1000_OSDEP_H_ */ diff --git a/devices/e1000/e1000_param-5.10-ethercat.c b/devices/e1000/e1000_param-5.10-ethercat.c new file mode 100644 index 00000000..07a5106b --- /dev/null +++ b/devices/e1000/e1000_param-5.10-ethercat.c @@ -0,0 +1,729 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +#include "e1000-5.10-ethercat.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); + +/* Receive Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(RxDescriptors, "Number of receive descriptors"); + +/* User Specified Speed Override + * + * Valid Range: 0, 10, 100, 1000 + * - 0 - auto-negotiate at all supported speeds + * - 10 - only link at 10 Mbps + * - 100 - only link at 100 Mbps + * - 1000 - only link at 1000 Mbps + * + * Default Value: 0 + */ +E1000_PARAM(Speed, "Speed setting"); + +/* User Specified Duplex Override + * + * Valid Range: 0-2 + * - 0 - auto-negotiate for duplex + * - 1 - only link at half duplex + * - 2 - only link at full duplex + * + * Default Value: 0 + */ +E1000_PARAM(Duplex, "Duplex setting"); + +/* Auto-negotiation Advertisement Override + * + * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber) + * + * The AutoNeg value is a bit mask describing which speed and duplex + * combinations should be advertised during auto-negotiation. 
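+ * For example, the copper default of 0x2F sets bits 0-3 and bit 5, advertising 10 and 100 Mbps at both half and full duplex plus 1000 Mbps full duplex (see the bit layout below).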
+ * The supported speed and duplex modes are listed below + * + * Bit 7 6 5 4 3 2 1 0 + * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 + * Duplex Full Full Half Full Half + * + * Default Value: 0x2F (copper); 0x20 (fiber) + */ +E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F + +/* User Specified Flow Control Override + * + * Valid Range: 0-3 + * - 0 - No Flow Control + * - 1 - Rx only, respond to PAUSE frames but do not generate them + * - 2 - Tx only, generate PAUSE frames but ignore them on receive + * - 3 - Full Flow Control Support + * + * Default Value: Read flow control settings from the EEPROM + */ +E1000_PARAM(FlowControl, "Flow Control setting"); +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +/* XsumRX - Receive Checksum Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables all checksum offload + * - 1 - enables receive IP/TCP/UDP checksum offload + * on 82543 and newer -based NICs + * + * Default Value: 1 + */ +E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define DEFAULT_RDTR 0 +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define DEFAULT_RADV 8 +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + e_dev_info("%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + e_dev_info("%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + e_dev_info("%s set to %i\n", opt->name, *value); + return 0; + } + break; + case 
list_option: { + int i; + const struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + e_dev_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + e_dev_info("Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +static void e1000_check_fiber_options(struct e1000_adapter *adapter); +static void e1000_check_copper_options(struct e1000_adapter *adapter); + +/** + * e1000_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +void e1000_check_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + e_dev_warn("Warning: no configuration for board #%i " + "using defaults for all values\n", bd); + } + + { /* Transmit Descriptor Count */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_TXD), + .def = E1000_DEFAULT_TXD, + .arg = { .r = { + .min = E1000_MIN_TXD, + .max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD + }} + }; + + if (num_TxDescriptors > bd) { + tx_ring->count = TxDescriptors[bd]; + e1000_validate_option(&tx_ring->count, &opt, adapter); + tx_ring->count = ALIGN(tx_ring->count, + REQ_TX_DESCRIPTOR_MULTIPLE); + } else { + tx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_tx_queues; i++) + tx_ring[i].count = tx_ring->count; + } + { /* Receive Descriptor Count */ + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_RXD), + .def = E1000_DEFAULT_RXD, + .arg = { .r = { + .min = E1000_MIN_RXD, + .max = mac_type < e1000_82544 ? 
E1000_MAX_RXD : + E1000_MAX_82544_RXD + }} + }; + + if (num_RxDescriptors > bd) { + rx_ring->count = RxDescriptors[bd]; + e1000_validate_option(&rx_ring->count, &opt, adapter); + rx_ring->count = ALIGN(rx_ring->count, + REQ_RX_DESCRIPTOR_MULTIPLE); + } else { + rx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_rx_queues; i++) + rx_ring[i].count = rx_ring->count; + } + { /* Checksum Offload Enable/Disable */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "Checksum Offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_XsumRX > bd) { + unsigned int rx_csum = XsumRX[bd]; + e1000_validate_option(&rx_csum, &opt, adapter); + adapter->rx_csum = rx_csum; + } else { + adapter->rx_csum = opt.def; + } + } + { /* Flow Control */ + + static const struct e1000_opt_list fc_list[] = { + { E1000_FC_NONE, "Flow Control Disabled" }, + { E1000_FC_RX_PAUSE, "Flow Control Receive Only" }, + { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" }, + { E1000_FC_FULL, "Flow Control Enabled" }, + { E1000_FC_DEFAULT, "Flow Control Hardware Default" } + }; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = E1000_FC_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(fc_list), + .p = fc_list }} + }; + + if (num_FlowControl > bd) { + unsigned int fc = FlowControl[bd]; + e1000_validate_option(&fc, &opt, adapter); + adapter->hw.fc = adapter->hw.original_fc = fc; + } else { + adapter->hw.fc = adapter->hw.original_fc = opt.def; + } + } + { /* Transmit Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY }} + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + { /* Transmit Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY }} + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + { /* Receive Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg = { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY }} + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Receive Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY }} + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + { /* Interrupt 
Throttling Rate */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR }} + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + e_dev_info("%s turned off\n", opt.name); + break; + case 1: + e_dev_info("%s set to dynamic mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + e_dev_info("%s set to dynamic conservative " + "mode\n", opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 4: + e_dev_info("%s set to simplified " + "(2000-8000) ints mode\n", opt.name); + adapter->itr_setting = adapter->itr; + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + /* save the setting, because the dynamic bits + * change itr. + * clear the lower two bits because they are + * used as control + */ + adapter->itr_setting = adapter->itr & ~3; + break; + } + } else { + adapter->itr_setting = opt.def; + adapter->itr = 20000; + } + } + { /* Smart Power Down */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + adapter->smart_power_down = spd; + } else { + adapter->smart_power_down = opt.def; + } + } + + switch (adapter->hw.media_type) { + case e1000_media_type_fiber: + case e1000_media_type_internal_serdes: + e1000_check_fiber_options(adapter); + break; + case e1000_media_type_copper: + e1000_check_copper_options(adapter); + break; + default: + BUG(); + } +} + +/** + * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version + * @adapter: board private structure + * + * Handles speed and duplex options on fiber adapters + **/ +static void e1000_check_fiber_options(struct e1000_adapter *adapter) +{ + int bd = adapter->bd_number; + if (num_Speed > bd) { + e_dev_info("Speed not valid for fiber adapters, parameter " + "ignored\n"); + } + + if (num_Duplex > bd) { + e_dev_info("Duplex not valid for fiber adapters, parameter " + "ignored\n"); + } + + if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { + e_dev_info("AutoNeg other than 1000/Full is not valid for fiber" + "adapters, parameter ignored\n"); + } +} + +/** + * e1000_check_copper_options - Range Checking for Link Options, Copper Version + * @adapter: board private structure + * + * Handles speed and duplex options on copper adapters + **/ +static void e1000_check_copper_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + unsigned int speed, dplx, an; + int bd = adapter->bd_number; + + { /* Speed */ + static const struct e1000_opt_list speed_list[] = { + { 0, "" }, + { SPEED_10, "" }, + { SPEED_100, "" }, + { SPEED_1000, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Speed", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(speed_list), + .p = speed_list }} + }; + + if (num_Speed > bd) { + speed = Speed[bd]; + e1000_validate_option(&speed, &opt, adapter); + } else { + speed = opt.def; + } + } + { /* Duplex */ + static const struct e1000_opt_list dplx_list[] = { + { 0, "" }, + { HALF_DUPLEX, "" }, + { FULL_DUPLEX, "" }}; + + opt = (struct e1000_option) { + .type = 
list_option, + .name = "Duplex", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(dplx_list), + .p = dplx_list }} + }; + + if (num_Duplex > bd) { + dplx = Duplex[bd]; + e1000_validate_option(&dplx, &opt, adapter); + } else { + dplx = opt.def; + } + } + + if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { + e_dev_info("AutoNeg specified along with Speed or Duplex, " + "parameter ignored\n"); + adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; + } else { /* Autoneg */ + static const struct e1000_opt_list an_list[] = + #define AA "AutoNeg advertising " + {{ 0x01, AA "10/HD" }, + { 0x02, AA "10/FD" }, + { 0x03, AA "10/FD, 10/HD" }, + { 0x04, AA "100/HD" }, + { 0x05, AA "100/HD, 10/HD" }, + { 0x06, AA "100/HD, 10/FD" }, + { 0x07, AA "100/HD, 10/FD, 10/HD" }, + { 0x08, AA "100/FD" }, + { 0x09, AA "100/FD, 10/HD" }, + { 0x0a, AA "100/FD, 10/FD" }, + { 0x0b, AA "100/FD, 10/FD, 10/HD" }, + { 0x0c, AA "100/FD, 100/HD" }, + { 0x0d, AA "100/FD, 100/HD, 10/HD" }, + { 0x0e, AA "100/FD, 100/HD, 10/FD" }, + { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" }, + { 0x20, AA "1000/FD" }, + { 0x21, AA "1000/FD, 10/HD" }, + { 0x22, AA "1000/FD, 10/FD" }, + { 0x23, AA "1000/FD, 10/FD, 10/HD" }, + { 0x24, AA "1000/FD, 100/HD" }, + { 0x25, AA "1000/FD, 100/HD, 10/HD" }, + { 0x26, AA "1000/FD, 100/HD, 10/FD" }, + { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" }, + { 0x28, AA "1000/FD, 100/FD" }, + { 0x29, AA "1000/FD, 100/FD, 10/HD" }, + { 0x2a, AA "1000/FD, 100/FD, 10/FD" }, + { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" }, + { 0x2c, AA "1000/FD, 100/FD, 100/HD" }, + { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" }, + { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, + { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "AutoNeg", + .err = "parameter ignored", + .def = AUTONEG_ADV_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(an_list), + .p = an_list }} + }; + + if (num_AutoNeg > bd) { + an = AutoNeg[bd]; + e1000_validate_option(&an, &opt, adapter); + } else { + an = opt.def; + } + adapter->hw.autoneg_advertised = an; + } + + switch (speed + dplx) { + case 0: + adapter->hw.autoneg = adapter->fc_autoneg = 1; + if ((num_Speed > bd) && (speed != 0 || dplx != 0)) + e_dev_info("Speed and duplex autonegotiation " + "enabled\n"); + break; + case HALF_DUPLEX: + e_dev_info("Half Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Half Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_100_HALF; + break; + case FULL_DUPLEX: + e_dev_info("Full Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Full Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | + ADVERTISE_100_FULL | + ADVERTISE_1000_FULL; + break; + case SPEED_10: + e_dev_info("10 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 10 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_10_FULL; + break; + case SPEED_10 + HALF_DUPLEX: + e_dev_info("Forcing to 10 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_10 + FULL_DUPLEX: + e_dev_info("Forcing to 10 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + 
adapter->hw.forced_speed_duplex = e1000_10_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100: + e_dev_info("100 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 100 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | + ADVERTISE_100_FULL; + break; + case SPEED_100 + HALF_DUPLEX: + e_dev_info("Forcing to 100 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100 + FULL_DUPLEX: + e_dev_info("Forcing to 100 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_1000: + e_dev_info("1000 Mbps Speed specified without Duplex\n"); + goto full_duplex_only; + case SPEED_1000 + HALF_DUPLEX: + e_dev_info("Half Duplex is not supported at 1000 Mbps\n"); + fallthrough; + case SPEED_1000 + FULL_DUPLEX: +full_duplex_only: + e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex " + "only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; + break; + default: + BUG(); + } + + /* Speed, AutoNeg and MDI/MDI-X must all play nice */ + if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { + e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. " + "Setting MDI-X to a compatible value.\n"); + } +} + diff --git a/devices/e1000/e1000_param-5.10-orig.c b/devices/e1000/e1000_param-5.10-orig.c new file mode 100644 index 00000000..4d4f5bf1 --- /dev/null +++ b/devices/e1000/e1000_param-5.10-orig.c @@ -0,0 +1,729 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +#include "e1000.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... 
E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); + +/* Receive Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(RxDescriptors, "Number of receive descriptors"); + +/* User Specified Speed Override + * + * Valid Range: 0, 10, 100, 1000 + * - 0 - auto-negotiate at all supported speeds + * - 10 - only link at 10 Mbps + * - 100 - only link at 100 Mbps + * - 1000 - only link at 1000 Mbps + * + * Default Value: 0 + */ +E1000_PARAM(Speed, "Speed setting"); + +/* User Specified Duplex Override + * + * Valid Range: 0-2 + * - 0 - auto-negotiate for duplex + * - 1 - only link at half duplex + * - 2 - only link at full duplex + * + * Default Value: 0 + */ +E1000_PARAM(Duplex, "Duplex setting"); + +/* Auto-negotiation Advertisement Override + * + * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber) + * + * The AutoNeg value is a bit mask describing which speed and duplex + * combinations should be advertised during auto-negotiation. + * The supported speed and duplex modes are listed below + * + * Bit 7 6 5 4 3 2 1 0 + * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 + * Duplex Full Full Half Full Half + * + * Default Value: 0x2F (copper); 0x20 (fiber) + */ +E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F + +/* User Specified Flow Control Override + * + * Valid Range: 0-3 + * - 0 - No Flow Control + * - 1 - Rx only, respond to PAUSE frames but do not generate them + * - 2 - Tx only, generate PAUSE frames but ignore them on receive + * - 3 - Full Flow Control Support + * + * Default Value: Read flow control settings from the EEPROM + */ +E1000_PARAM(FlowControl, "Flow Control setting"); +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +/* XsumRX - Receive Checksum Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables all checksum offload + * - 1 - enables receive IP/TCP/UDP checksum offload + * on 82543 and newer -based NICs + * + * Default Value: 1 + */ +E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. 
+ * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define DEFAULT_RDTR 0 +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define DEFAULT_RADV 8 +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + e_dev_info("%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + e_dev_info("%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + e_dev_info("%s set to %i\n", opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + const struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + e_dev_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + e_dev_info("Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +static void e1000_check_fiber_options(struct e1000_adapter *adapter); +static void e1000_check_copper_options(struct e1000_adapter *adapter); + +/** + * e1000_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +void e1000_check_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + e_dev_warn("Warning: no configuration for board #%i " + "using defaults for all values\n", bd); + } + + { /* Transmit Descriptor Count */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_TXD), + .def = E1000_DEFAULT_TXD, + .arg = { .r = { + .min = E1000_MIN_TXD, + .max = mac_type < e1000_82544 ? 
E1000_MAX_TXD : E1000_MAX_82544_TXD + }} + }; + + if (num_TxDescriptors > bd) { + tx_ring->count = TxDescriptors[bd]; + e1000_validate_option(&tx_ring->count, &opt, adapter); + tx_ring->count = ALIGN(tx_ring->count, + REQ_TX_DESCRIPTOR_MULTIPLE); + } else { + tx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_tx_queues; i++) + tx_ring[i].count = tx_ring->count; + } + { /* Receive Descriptor Count */ + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_RXD), + .def = E1000_DEFAULT_RXD, + .arg = { .r = { + .min = E1000_MIN_RXD, + .max = mac_type < e1000_82544 ? E1000_MAX_RXD : + E1000_MAX_82544_RXD + }} + }; + + if (num_RxDescriptors > bd) { + rx_ring->count = RxDescriptors[bd]; + e1000_validate_option(&rx_ring->count, &opt, adapter); + rx_ring->count = ALIGN(rx_ring->count, + REQ_RX_DESCRIPTOR_MULTIPLE); + } else { + rx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_rx_queues; i++) + rx_ring[i].count = rx_ring->count; + } + { /* Checksum Offload Enable/Disable */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "Checksum Offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_XsumRX > bd) { + unsigned int rx_csum = XsumRX[bd]; + e1000_validate_option(&rx_csum, &opt, adapter); + adapter->rx_csum = rx_csum; + } else { + adapter->rx_csum = opt.def; + } + } + { /* Flow Control */ + + static const struct e1000_opt_list fc_list[] = { + { E1000_FC_NONE, "Flow Control Disabled" }, + { E1000_FC_RX_PAUSE, "Flow Control Receive Only" }, + { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" }, + { E1000_FC_FULL, "Flow Control Enabled" }, + { E1000_FC_DEFAULT, "Flow Control Hardware Default" } + }; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = E1000_FC_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(fc_list), + .p = fc_list }} + }; + + if (num_FlowControl > bd) { + unsigned int fc = FlowControl[bd]; + e1000_validate_option(&fc, &opt, adapter); + adapter->hw.fc = adapter->hw.original_fc = fc; + } else { + adapter->hw.fc = adapter->hw.original_fc = opt.def; + } + } + { /* Transmit Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY }} + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + { /* Transmit Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY }} + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + { /* Receive Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg 
= { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY }} + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Receive Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY }} + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + { /* Interrupt Throttling Rate */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR }} + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + e_dev_info("%s turned off\n", opt.name); + break; + case 1: + e_dev_info("%s set to dynamic mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + e_dev_info("%s set to dynamic conservative " + "mode\n", opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 4: + e_dev_info("%s set to simplified " + "(2000-8000) ints mode\n", opt.name); + adapter->itr_setting = adapter->itr; + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + /* save the setting, because the dynamic bits + * change itr. + * clear the lower two bits because they are + * used as control + */ + adapter->itr_setting = adapter->itr & ~3; + break; + } + } else { + adapter->itr_setting = opt.def; + adapter->itr = 20000; + } + } + { /* Smart Power Down */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + adapter->smart_power_down = spd; + } else { + adapter->smart_power_down = opt.def; + } + } + + switch (adapter->hw.media_type) { + case e1000_media_type_fiber: + case e1000_media_type_internal_serdes: + e1000_check_fiber_options(adapter); + break; + case e1000_media_type_copper: + e1000_check_copper_options(adapter); + break; + default: + BUG(); + } +} + +/** + * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version + * @adapter: board private structure + * + * Handles speed and duplex options on fiber adapters + **/ +static void e1000_check_fiber_options(struct e1000_adapter *adapter) +{ + int bd = adapter->bd_number; + if (num_Speed > bd) { + e_dev_info("Speed not valid for fiber adapters, parameter " + "ignored\n"); + } + + if (num_Duplex > bd) { + e_dev_info("Duplex not valid for fiber adapters, parameter " + "ignored\n"); + } + + if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { + e_dev_info("AutoNeg other than 1000/Full is not valid for fiber" + "adapters, parameter ignored\n"); + } +} + +/** + * e1000_check_copper_options - Range Checking for Link Options, Copper Version + * @adapter: board private structure + * + * Handles speed and duplex options on copper adapters + **/ +static void 
e1000_check_copper_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + unsigned int speed, dplx, an; + int bd = adapter->bd_number; + + { /* Speed */ + static const struct e1000_opt_list speed_list[] = { + { 0, "" }, + { SPEED_10, "" }, + { SPEED_100, "" }, + { SPEED_1000, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Speed", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(speed_list), + .p = speed_list }} + }; + + if (num_Speed > bd) { + speed = Speed[bd]; + e1000_validate_option(&speed, &opt, adapter); + } else { + speed = opt.def; + } + } + { /* Duplex */ + static const struct e1000_opt_list dplx_list[] = { + { 0, "" }, + { HALF_DUPLEX, "" }, + { FULL_DUPLEX, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Duplex", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(dplx_list), + .p = dplx_list }} + }; + + if (num_Duplex > bd) { + dplx = Duplex[bd]; + e1000_validate_option(&dplx, &opt, adapter); + } else { + dplx = opt.def; + } + } + + if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { + e_dev_info("AutoNeg specified along with Speed or Duplex, " + "parameter ignored\n"); + adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; + } else { /* Autoneg */ + static const struct e1000_opt_list an_list[] = + #define AA "AutoNeg advertising " + {{ 0x01, AA "10/HD" }, + { 0x02, AA "10/FD" }, + { 0x03, AA "10/FD, 10/HD" }, + { 0x04, AA "100/HD" }, + { 0x05, AA "100/HD, 10/HD" }, + { 0x06, AA "100/HD, 10/FD" }, + { 0x07, AA "100/HD, 10/FD, 10/HD" }, + { 0x08, AA "100/FD" }, + { 0x09, AA "100/FD, 10/HD" }, + { 0x0a, AA "100/FD, 10/FD" }, + { 0x0b, AA "100/FD, 10/FD, 10/HD" }, + { 0x0c, AA "100/FD, 100/HD" }, + { 0x0d, AA "100/FD, 100/HD, 10/HD" }, + { 0x0e, AA "100/FD, 100/HD, 10/FD" }, + { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" }, + { 0x20, AA "1000/FD" }, + { 0x21, AA "1000/FD, 10/HD" }, + { 0x22, AA "1000/FD, 10/FD" }, + { 0x23, AA "1000/FD, 10/FD, 10/HD" }, + { 0x24, AA "1000/FD, 100/HD" }, + { 0x25, AA "1000/FD, 100/HD, 10/HD" }, + { 0x26, AA "1000/FD, 100/HD, 10/FD" }, + { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" }, + { 0x28, AA "1000/FD, 100/FD" }, + { 0x29, AA "1000/FD, 100/FD, 10/HD" }, + { 0x2a, AA "1000/FD, 100/FD, 10/FD" }, + { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" }, + { 0x2c, AA "1000/FD, 100/FD, 100/HD" }, + { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" }, + { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, + { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "AutoNeg", + .err = "parameter ignored", + .def = AUTONEG_ADV_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(an_list), + .p = an_list }} + }; + + if (num_AutoNeg > bd) { + an = AutoNeg[bd]; + e1000_validate_option(&an, &opt, adapter); + } else { + an = opt.def; + } + adapter->hw.autoneg_advertised = an; + } + + switch (speed + dplx) { + case 0: + adapter->hw.autoneg = adapter->fc_autoneg = 1; + if ((num_Speed > bd) && (speed != 0 || dplx != 0)) + e_dev_info("Speed and duplex autonegotiation " + "enabled\n"); + break; + case HALF_DUPLEX: + e_dev_info("Half Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Half Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_100_HALF; + break; + case FULL_DUPLEX: + e_dev_info("Full Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Full Duplex only\n"); + 
adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | + ADVERTISE_100_FULL | + ADVERTISE_1000_FULL; + break; + case SPEED_10: + e_dev_info("10 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 10 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_10_FULL; + break; + case SPEED_10 + HALF_DUPLEX: + e_dev_info("Forcing to 10 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_10 + FULL_DUPLEX: + e_dev_info("Forcing to 10 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100: + e_dev_info("100 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 100 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | + ADVERTISE_100_FULL; + break; + case SPEED_100 + HALF_DUPLEX: + e_dev_info("Forcing to 100 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100 + FULL_DUPLEX: + e_dev_info("Forcing to 100 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_1000: + e_dev_info("1000 Mbps Speed specified without Duplex\n"); + goto full_duplex_only; + case SPEED_1000 + HALF_DUPLEX: + e_dev_info("Half Duplex is not supported at 1000 Mbps\n"); + fallthrough; + case SPEED_1000 + FULL_DUPLEX: +full_duplex_only: + e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex " + "only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; + break; + default: + BUG(); + } + + /* Speed, AutoNeg and MDI/MDI-X must all play nice */ + if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { + e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. " + "Setting MDI-X to a compatible value.\n"); + } +} + diff --git a/devices/e1000/e1000_param-5.14-ethercat.c b/devices/e1000/e1000_param-5.14-ethercat.c new file mode 100644 index 00000000..2800a30a --- /dev/null +++ b/devices/e1000/e1000_param-5.14-ethercat.c @@ -0,0 +1,729 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +#include "e1000-5.14-ethercat.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... 
E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); + +/* Receive Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(RxDescriptors, "Number of receive descriptors"); + +/* User Specified Speed Override + * + * Valid Range: 0, 10, 100, 1000 + * - 0 - auto-negotiate at all supported speeds + * - 10 - only link at 10 Mbps + * - 100 - only link at 100 Mbps + * - 1000 - only link at 1000 Mbps + * + * Default Value: 0 + */ +E1000_PARAM(Speed, "Speed setting"); + +/* User Specified Duplex Override + * + * Valid Range: 0-2 + * - 0 - auto-negotiate for duplex + * - 1 - only link at half duplex + * - 2 - only link at full duplex + * + * Default Value: 0 + */ +E1000_PARAM(Duplex, "Duplex setting"); + +/* Auto-negotiation Advertisement Override + * + * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber) + * + * The AutoNeg value is a bit mask describing which speed and duplex + * combinations should be advertised during auto-negotiation. + * The supported speed and duplex modes are listed below + * + * Bit 7 6 5 4 3 2 1 0 + * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 + * Duplex Full Full Half Full Half + * + * Default Value: 0x2F (copper); 0x20 (fiber) + */ +E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F + +/* User Specified Flow Control Override + * + * Valid Range: 0-3 + * - 0 - No Flow Control + * - 1 - Rx only, respond to PAUSE frames but do not generate them + * - 2 - Tx only, generate PAUSE frames but ignore them on receive + * - 3 - Full Flow Control Support + * + * Default Value: Read flow control settings from the EEPROM + */ +E1000_PARAM(FlowControl, "Flow Control setting"); +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +/* XsumRX - Receive Checksum Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables all checksum offload + * - 1 - enables receive IP/TCP/UDP checksum offload + * on 82543 and newer -based NICs + * + * Default Value: 1 + */ +E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. 
+ * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define DEFAULT_RDTR 0 +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define DEFAULT_RADV 8 +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + e_dev_info("%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + e_dev_info("%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + e_dev_info("%s set to %i\n", opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + const struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + e_dev_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + e_dev_info("Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +static void e1000_check_fiber_options(struct e1000_adapter *adapter); +static void e1000_check_copper_options(struct e1000_adapter *adapter); + +/** + * e1000_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +void e1000_check_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + e_dev_warn("Warning: no configuration for board #%i " + "using defaults for all values\n", bd); + } + + { /* Transmit Descriptor Count */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_TXD), + .def = E1000_DEFAULT_TXD, + .arg = { .r = { + .min = E1000_MIN_TXD, + .max = mac_type < e1000_82544 ? 
E1000_MAX_TXD : E1000_MAX_82544_TXD + }} + }; + + if (num_TxDescriptors > bd) { + tx_ring->count = TxDescriptors[bd]; + e1000_validate_option(&tx_ring->count, &opt, adapter); + tx_ring->count = ALIGN(tx_ring->count, + REQ_TX_DESCRIPTOR_MULTIPLE); + } else { + tx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_tx_queues; i++) + tx_ring[i].count = tx_ring->count; + } + { /* Receive Descriptor Count */ + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_RXD), + .def = E1000_DEFAULT_RXD, + .arg = { .r = { + .min = E1000_MIN_RXD, + .max = mac_type < e1000_82544 ? E1000_MAX_RXD : + E1000_MAX_82544_RXD + }} + }; + + if (num_RxDescriptors > bd) { + rx_ring->count = RxDescriptors[bd]; + e1000_validate_option(&rx_ring->count, &opt, adapter); + rx_ring->count = ALIGN(rx_ring->count, + REQ_RX_DESCRIPTOR_MULTIPLE); + } else { + rx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_rx_queues; i++) + rx_ring[i].count = rx_ring->count; + } + { /* Checksum Offload Enable/Disable */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "Checksum Offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_XsumRX > bd) { + unsigned int rx_csum = XsumRX[bd]; + e1000_validate_option(&rx_csum, &opt, adapter); + adapter->rx_csum = rx_csum; + } else { + adapter->rx_csum = opt.def; + } + } + { /* Flow Control */ + + static const struct e1000_opt_list fc_list[] = { + { E1000_FC_NONE, "Flow Control Disabled" }, + { E1000_FC_RX_PAUSE, "Flow Control Receive Only" }, + { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" }, + { E1000_FC_FULL, "Flow Control Enabled" }, + { E1000_FC_DEFAULT, "Flow Control Hardware Default" } + }; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = E1000_FC_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(fc_list), + .p = fc_list }} + }; + + if (num_FlowControl > bd) { + unsigned int fc = FlowControl[bd]; + e1000_validate_option(&fc, &opt, adapter); + adapter->hw.fc = adapter->hw.original_fc = fc; + } else { + adapter->hw.fc = adapter->hw.original_fc = opt.def; + } + } + { /* Transmit Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY }} + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + { /* Transmit Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY }} + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + { /* Receive Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg 
= { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY }} + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Receive Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY }} + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + { /* Interrupt Throttling Rate */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR }} + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + e_dev_info("%s turned off\n", opt.name); + break; + case 1: + e_dev_info("%s set to dynamic mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + e_dev_info("%s set to dynamic conservative " + "mode\n", opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 4: + e_dev_info("%s set to simplified " + "(2000-8000) ints mode\n", opt.name); + adapter->itr_setting = adapter->itr; + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + /* save the setting, because the dynamic bits + * change itr. + * clear the lower two bits because they are + * used as control + */ + adapter->itr_setting = adapter->itr & ~3; + break; + } + } else { + adapter->itr_setting = opt.def; + adapter->itr = 20000; + } + } + { /* Smart Power Down */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + adapter->smart_power_down = spd; + } else { + adapter->smart_power_down = opt.def; + } + } + + switch (adapter->hw.media_type) { + case e1000_media_type_fiber: + case e1000_media_type_internal_serdes: + e1000_check_fiber_options(adapter); + break; + case e1000_media_type_copper: + e1000_check_copper_options(adapter); + break; + default: + BUG(); + } +} + +/** + * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version + * @adapter: board private structure + * + * Handles speed and duplex options on fiber adapters + **/ +static void e1000_check_fiber_options(struct e1000_adapter *adapter) +{ + int bd = adapter->bd_number; + if (num_Speed > bd) { + e_dev_info("Speed not valid for fiber adapters, parameter " + "ignored\n"); + } + + if (num_Duplex > bd) { + e_dev_info("Duplex not valid for fiber adapters, parameter " + "ignored\n"); + } + + if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { + e_dev_info("AutoNeg other than 1000/Full is not valid for fiber" + "adapters, parameter ignored\n"); + } +} + +/** + * e1000_check_copper_options - Range Checking for Link Options, Copper Version + * @adapter: board private structure + * + * Handles speed and duplex options on copper adapters + **/ +static void 
e1000_check_copper_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + unsigned int speed, dplx, an; + int bd = adapter->bd_number; + + { /* Speed */ + static const struct e1000_opt_list speed_list[] = { + { 0, "" }, + { SPEED_10, "" }, + { SPEED_100, "" }, + { SPEED_1000, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Speed", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(speed_list), + .p = speed_list }} + }; + + if (num_Speed > bd) { + speed = Speed[bd]; + e1000_validate_option(&speed, &opt, adapter); + } else { + speed = opt.def; + } + } + { /* Duplex */ + static const struct e1000_opt_list dplx_list[] = { + { 0, "" }, + { HALF_DUPLEX, "" }, + { FULL_DUPLEX, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Duplex", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(dplx_list), + .p = dplx_list }} + }; + + if (num_Duplex > bd) { + dplx = Duplex[bd]; + e1000_validate_option(&dplx, &opt, adapter); + } else { + dplx = opt.def; + } + } + + if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { + e_dev_info("AutoNeg specified along with Speed or Duplex, " + "parameter ignored\n"); + adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; + } else { /* Autoneg */ + static const struct e1000_opt_list an_list[] = + #define AA "AutoNeg advertising " + {{ 0x01, AA "10/HD" }, + { 0x02, AA "10/FD" }, + { 0x03, AA "10/FD, 10/HD" }, + { 0x04, AA "100/HD" }, + { 0x05, AA "100/HD, 10/HD" }, + { 0x06, AA "100/HD, 10/FD" }, + { 0x07, AA "100/HD, 10/FD, 10/HD" }, + { 0x08, AA "100/FD" }, + { 0x09, AA "100/FD, 10/HD" }, + { 0x0a, AA "100/FD, 10/FD" }, + { 0x0b, AA "100/FD, 10/FD, 10/HD" }, + { 0x0c, AA "100/FD, 100/HD" }, + { 0x0d, AA "100/FD, 100/HD, 10/HD" }, + { 0x0e, AA "100/FD, 100/HD, 10/FD" }, + { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" }, + { 0x20, AA "1000/FD" }, + { 0x21, AA "1000/FD, 10/HD" }, + { 0x22, AA "1000/FD, 10/FD" }, + { 0x23, AA "1000/FD, 10/FD, 10/HD" }, + { 0x24, AA "1000/FD, 100/HD" }, + { 0x25, AA "1000/FD, 100/HD, 10/HD" }, + { 0x26, AA "1000/FD, 100/HD, 10/FD" }, + { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" }, + { 0x28, AA "1000/FD, 100/FD" }, + { 0x29, AA "1000/FD, 100/FD, 10/HD" }, + { 0x2a, AA "1000/FD, 100/FD, 10/FD" }, + { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" }, + { 0x2c, AA "1000/FD, 100/FD, 100/HD" }, + { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" }, + { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, + { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "AutoNeg", + .err = "parameter ignored", + .def = AUTONEG_ADV_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(an_list), + .p = an_list }} + }; + + if (num_AutoNeg > bd) { + an = AutoNeg[bd]; + e1000_validate_option(&an, &opt, adapter); + } else { + an = opt.def; + } + adapter->hw.autoneg_advertised = an; + } + + switch (speed + dplx) { + case 0: + adapter->hw.autoneg = adapter->fc_autoneg = 1; + if ((num_Speed > bd) && (speed != 0 || dplx != 0)) + e_dev_info("Speed and duplex autonegotiation " + "enabled\n"); + break; + case HALF_DUPLEX: + e_dev_info("Half Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Half Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_100_HALF; + break; + case FULL_DUPLEX: + e_dev_info("Full Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Full Duplex only\n"); + 
adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | + ADVERTISE_100_FULL | + ADVERTISE_1000_FULL; + break; + case SPEED_10: + e_dev_info("10 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 10 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_10_FULL; + break; + case SPEED_10 + HALF_DUPLEX: + e_dev_info("Forcing to 10 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_10 + FULL_DUPLEX: + e_dev_info("Forcing to 10 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100: + e_dev_info("100 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 100 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | + ADVERTISE_100_FULL; + break; + case SPEED_100 + HALF_DUPLEX: + e_dev_info("Forcing to 100 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100 + FULL_DUPLEX: + e_dev_info("Forcing to 100 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_1000: + e_dev_info("1000 Mbps Speed specified without Duplex\n"); + goto full_duplex_only; + case SPEED_1000 + HALF_DUPLEX: + e_dev_info("Half Duplex is not supported at 1000 Mbps\n"); + fallthrough; + case SPEED_1000 + FULL_DUPLEX: +full_duplex_only: + e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex " + "only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; + break; + default: + BUG(); + } + + /* Speed, AutoNeg and MDI/MDI-X must all play nice */ + if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { + e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. " + "Setting MDI-X to a compatible value.\n"); + } +} + diff --git a/devices/e1000/e1000_param-5.14-orig.c b/devices/e1000/e1000_param-5.14-orig.c new file mode 100644 index 00000000..4d4f5bf1 --- /dev/null +++ b/devices/e1000/e1000_param-5.14-orig.c @@ -0,0 +1,729 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +#include "e1000.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... 
E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); + +/* Receive Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(RxDescriptors, "Number of receive descriptors"); + +/* User Specified Speed Override + * + * Valid Range: 0, 10, 100, 1000 + * - 0 - auto-negotiate at all supported speeds + * - 10 - only link at 10 Mbps + * - 100 - only link at 100 Mbps + * - 1000 - only link at 1000 Mbps + * + * Default Value: 0 + */ +E1000_PARAM(Speed, "Speed setting"); + +/* User Specified Duplex Override + * + * Valid Range: 0-2 + * - 0 - auto-negotiate for duplex + * - 1 - only link at half duplex + * - 2 - only link at full duplex + * + * Default Value: 0 + */ +E1000_PARAM(Duplex, "Duplex setting"); + +/* Auto-negotiation Advertisement Override + * + * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber) + * + * The AutoNeg value is a bit mask describing which speed and duplex + * combinations should be advertised during auto-negotiation. + * The supported speed and duplex modes are listed below + * + * Bit 7 6 5 4 3 2 1 0 + * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 + * Duplex Full Full Half Full Half + * + * Default Value: 0x2F (copper); 0x20 (fiber) + */ +E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F + +/* User Specified Flow Control Override + * + * Valid Range: 0-3 + * - 0 - No Flow Control + * - 1 - Rx only, respond to PAUSE frames but do not generate them + * - 2 - Tx only, generate PAUSE frames but ignore them on receive + * - 3 - Full Flow Control Support + * + * Default Value: Read flow control settings from the EEPROM + */ +E1000_PARAM(FlowControl, "Flow Control setting"); +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +/* XsumRX - Receive Checksum Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables all checksum offload + * - 1 - enables receive IP/TCP/UDP checksum offload + * on 82543 and newer -based NICs + * + * Default Value: 1 + */ +E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. 
+ * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define DEFAULT_RDTR 0 +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define DEFAULT_RADV 8 +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + e_dev_info("%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + e_dev_info("%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + e_dev_info("%s set to %i\n", opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + const struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + e_dev_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + e_dev_info("Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +static void e1000_check_fiber_options(struct e1000_adapter *adapter); +static void e1000_check_copper_options(struct e1000_adapter *adapter); + +/** + * e1000_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +void e1000_check_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + e_dev_warn("Warning: no configuration for board #%i " + "using defaults for all values\n", bd); + } + + { /* Transmit Descriptor Count */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_TXD), + .def = E1000_DEFAULT_TXD, + .arg = { .r = { + .min = E1000_MIN_TXD, + .max = mac_type < e1000_82544 ? 
E1000_MAX_TXD : E1000_MAX_82544_TXD + }} + }; + + if (num_TxDescriptors > bd) { + tx_ring->count = TxDescriptors[bd]; + e1000_validate_option(&tx_ring->count, &opt, adapter); + tx_ring->count = ALIGN(tx_ring->count, + REQ_TX_DESCRIPTOR_MULTIPLE); + } else { + tx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_tx_queues; i++) + tx_ring[i].count = tx_ring->count; + } + { /* Receive Descriptor Count */ + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_RXD), + .def = E1000_DEFAULT_RXD, + .arg = { .r = { + .min = E1000_MIN_RXD, + .max = mac_type < e1000_82544 ? E1000_MAX_RXD : + E1000_MAX_82544_RXD + }} + }; + + if (num_RxDescriptors > bd) { + rx_ring->count = RxDescriptors[bd]; + e1000_validate_option(&rx_ring->count, &opt, adapter); + rx_ring->count = ALIGN(rx_ring->count, + REQ_RX_DESCRIPTOR_MULTIPLE); + } else { + rx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_rx_queues; i++) + rx_ring[i].count = rx_ring->count; + } + { /* Checksum Offload Enable/Disable */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "Checksum Offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_XsumRX > bd) { + unsigned int rx_csum = XsumRX[bd]; + e1000_validate_option(&rx_csum, &opt, adapter); + adapter->rx_csum = rx_csum; + } else { + adapter->rx_csum = opt.def; + } + } + { /* Flow Control */ + + static const struct e1000_opt_list fc_list[] = { + { E1000_FC_NONE, "Flow Control Disabled" }, + { E1000_FC_RX_PAUSE, "Flow Control Receive Only" }, + { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" }, + { E1000_FC_FULL, "Flow Control Enabled" }, + { E1000_FC_DEFAULT, "Flow Control Hardware Default" } + }; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = E1000_FC_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(fc_list), + .p = fc_list }} + }; + + if (num_FlowControl > bd) { + unsigned int fc = FlowControl[bd]; + e1000_validate_option(&fc, &opt, adapter); + adapter->hw.fc = adapter->hw.original_fc = fc; + } else { + adapter->hw.fc = adapter->hw.original_fc = opt.def; + } + } + { /* Transmit Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY }} + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + { /* Transmit Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY }} + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + { /* Receive Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg 
= { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY }} + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Receive Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY }} + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + { /* Interrupt Throttling Rate */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR }} + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + e_dev_info("%s turned off\n", opt.name); + break; + case 1: + e_dev_info("%s set to dynamic mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + e_dev_info("%s set to dynamic conservative " + "mode\n", opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 4: + e_dev_info("%s set to simplified " + "(2000-8000) ints mode\n", opt.name); + adapter->itr_setting = adapter->itr; + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + /* save the setting, because the dynamic bits + * change itr. + * clear the lower two bits because they are + * used as control + */ + adapter->itr_setting = adapter->itr & ~3; + break; + } + } else { + adapter->itr_setting = opt.def; + adapter->itr = 20000; + } + } + { /* Smart Power Down */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + adapter->smart_power_down = spd; + } else { + adapter->smart_power_down = opt.def; + } + } + + switch (adapter->hw.media_type) { + case e1000_media_type_fiber: + case e1000_media_type_internal_serdes: + e1000_check_fiber_options(adapter); + break; + case e1000_media_type_copper: + e1000_check_copper_options(adapter); + break; + default: + BUG(); + } +} + +/** + * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version + * @adapter: board private structure + * + * Handles speed and duplex options on fiber adapters + **/ +static void e1000_check_fiber_options(struct e1000_adapter *adapter) +{ + int bd = adapter->bd_number; + if (num_Speed > bd) { + e_dev_info("Speed not valid for fiber adapters, parameter " + "ignored\n"); + } + + if (num_Duplex > bd) { + e_dev_info("Duplex not valid for fiber adapters, parameter " + "ignored\n"); + } + + if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { + e_dev_info("AutoNeg other than 1000/Full is not valid for fiber" + "adapters, parameter ignored\n"); + } +} + +/** + * e1000_check_copper_options - Range Checking for Link Options, Copper Version + * @adapter: board private structure + * + * Handles speed and duplex options on copper adapters + **/ +static void 
e1000_check_copper_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + unsigned int speed, dplx, an; + int bd = adapter->bd_number; + + { /* Speed */ + static const struct e1000_opt_list speed_list[] = { + { 0, "" }, + { SPEED_10, "" }, + { SPEED_100, "" }, + { SPEED_1000, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Speed", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(speed_list), + .p = speed_list }} + }; + + if (num_Speed > bd) { + speed = Speed[bd]; + e1000_validate_option(&speed, &opt, adapter); + } else { + speed = opt.def; + } + } + { /* Duplex */ + static const struct e1000_opt_list dplx_list[] = { + { 0, "" }, + { HALF_DUPLEX, "" }, + { FULL_DUPLEX, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Duplex", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(dplx_list), + .p = dplx_list }} + }; + + if (num_Duplex > bd) { + dplx = Duplex[bd]; + e1000_validate_option(&dplx, &opt, adapter); + } else { + dplx = opt.def; + } + } + + if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { + e_dev_info("AutoNeg specified along with Speed or Duplex, " + "parameter ignored\n"); + adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; + } else { /* Autoneg */ + static const struct e1000_opt_list an_list[] = + #define AA "AutoNeg advertising " + {{ 0x01, AA "10/HD" }, + { 0x02, AA "10/FD" }, + { 0x03, AA "10/FD, 10/HD" }, + { 0x04, AA "100/HD" }, + { 0x05, AA "100/HD, 10/HD" }, + { 0x06, AA "100/HD, 10/FD" }, + { 0x07, AA "100/HD, 10/FD, 10/HD" }, + { 0x08, AA "100/FD" }, + { 0x09, AA "100/FD, 10/HD" }, + { 0x0a, AA "100/FD, 10/FD" }, + { 0x0b, AA "100/FD, 10/FD, 10/HD" }, + { 0x0c, AA "100/FD, 100/HD" }, + { 0x0d, AA "100/FD, 100/HD, 10/HD" }, + { 0x0e, AA "100/FD, 100/HD, 10/FD" }, + { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" }, + { 0x20, AA "1000/FD" }, + { 0x21, AA "1000/FD, 10/HD" }, + { 0x22, AA "1000/FD, 10/FD" }, + { 0x23, AA "1000/FD, 10/FD, 10/HD" }, + { 0x24, AA "1000/FD, 100/HD" }, + { 0x25, AA "1000/FD, 100/HD, 10/HD" }, + { 0x26, AA "1000/FD, 100/HD, 10/FD" }, + { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" }, + { 0x28, AA "1000/FD, 100/FD" }, + { 0x29, AA "1000/FD, 100/FD, 10/HD" }, + { 0x2a, AA "1000/FD, 100/FD, 10/FD" }, + { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" }, + { 0x2c, AA "1000/FD, 100/FD, 100/HD" }, + { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" }, + { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, + { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "AutoNeg", + .err = "parameter ignored", + .def = AUTONEG_ADV_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(an_list), + .p = an_list }} + }; + + if (num_AutoNeg > bd) { + an = AutoNeg[bd]; + e1000_validate_option(&an, &opt, adapter); + } else { + an = opt.def; + } + adapter->hw.autoneg_advertised = an; + } + + switch (speed + dplx) { + case 0: + adapter->hw.autoneg = adapter->fc_autoneg = 1; + if ((num_Speed > bd) && (speed != 0 || dplx != 0)) + e_dev_info("Speed and duplex autonegotiation " + "enabled\n"); + break; + case HALF_DUPLEX: + e_dev_info("Half Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Half Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_100_HALF; + break; + case FULL_DUPLEX: + e_dev_info("Full Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Full Duplex only\n"); + 
adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | + ADVERTISE_100_FULL | + ADVERTISE_1000_FULL; + break; + case SPEED_10: + e_dev_info("10 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 10 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_10_FULL; + break; + case SPEED_10 + HALF_DUPLEX: + e_dev_info("Forcing to 10 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_10 + FULL_DUPLEX: + e_dev_info("Forcing to 10 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100: + e_dev_info("100 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 100 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | + ADVERTISE_100_FULL; + break; + case SPEED_100 + HALF_DUPLEX: + e_dev_info("Forcing to 100 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100 + FULL_DUPLEX: + e_dev_info("Forcing to 100 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_1000: + e_dev_info("1000 Mbps Speed specified without Duplex\n"); + goto full_duplex_only; + case SPEED_1000 + HALF_DUPLEX: + e_dev_info("Half Duplex is not supported at 1000 Mbps\n"); + fallthrough; + case SPEED_1000 + FULL_DUPLEX: +full_duplex_only: + e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex " + "only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; + break; + default: + BUG(); + } + + /* Speed, AutoNeg and MDI/MDI-X must all play nice */ + if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { + e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. " + "Setting MDI-X to a compatible value.\n"); + } +} + diff --git a/devices/e1000/e1000_param-5.15-ethercat.c b/devices/e1000/e1000_param-5.15-ethercat.c new file mode 100644 index 00000000..197853fc --- /dev/null +++ b/devices/e1000/e1000_param-5.15-ethercat.c @@ -0,0 +1,729 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +#include "e1000-5.15-ethercat.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... 
E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); + +/* Receive Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(RxDescriptors, "Number of receive descriptors"); + +/* User Specified Speed Override + * + * Valid Range: 0, 10, 100, 1000 + * - 0 - auto-negotiate at all supported speeds + * - 10 - only link at 10 Mbps + * - 100 - only link at 100 Mbps + * - 1000 - only link at 1000 Mbps + * + * Default Value: 0 + */ +E1000_PARAM(Speed, "Speed setting"); + +/* User Specified Duplex Override + * + * Valid Range: 0-2 + * - 0 - auto-negotiate for duplex + * - 1 - only link at half duplex + * - 2 - only link at full duplex + * + * Default Value: 0 + */ +E1000_PARAM(Duplex, "Duplex setting"); + +/* Auto-negotiation Advertisement Override + * + * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber) + * + * The AutoNeg value is a bit mask describing which speed and duplex + * combinations should be advertised during auto-negotiation. + * The supported speed and duplex modes are listed below + * + * Bit 7 6 5 4 3 2 1 0 + * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 + * Duplex Full Full Half Full Half + * + * Default Value: 0x2F (copper); 0x20 (fiber) + */ +E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F + +/* User Specified Flow Control Override + * + * Valid Range: 0-3 + * - 0 - No Flow Control + * - 1 - Rx only, respond to PAUSE frames but do not generate them + * - 2 - Tx only, generate PAUSE frames but ignore them on receive + * - 3 - Full Flow Control Support + * + * Default Value: Read flow control settings from the EEPROM + */ +E1000_PARAM(FlowControl, "Flow Control setting"); +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +/* XsumRX - Receive Checksum Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables all checksum offload + * - 1 - enables receive IP/TCP/UDP checksum offload + * on 82543 and newer -based NICs + * + * Default Value: 1 + */ +E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. 
+ * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define DEFAULT_RDTR 0 +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define DEFAULT_RADV 8 +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + e_dev_info("%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + e_dev_info("%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + e_dev_info("%s set to %i\n", opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + const struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + e_dev_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + e_dev_info("Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +static void e1000_check_fiber_options(struct e1000_adapter *adapter); +static void e1000_check_copper_options(struct e1000_adapter *adapter); + +/** + * e1000_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +void e1000_check_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + e_dev_warn("Warning: no configuration for board #%i " + "using defaults for all values\n", bd); + } + + { /* Transmit Descriptor Count */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_TXD), + .def = E1000_DEFAULT_TXD, + .arg = { .r = { + .min = E1000_MIN_TXD, + .max = mac_type < e1000_82544 ? 
E1000_MAX_TXD : E1000_MAX_82544_TXD + }} + }; + + if (num_TxDescriptors > bd) { + tx_ring->count = TxDescriptors[bd]; + e1000_validate_option(&tx_ring->count, &opt, adapter); + tx_ring->count = ALIGN(tx_ring->count, + REQ_TX_DESCRIPTOR_MULTIPLE); + } else { + tx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_tx_queues; i++) + tx_ring[i].count = tx_ring->count; + } + { /* Receive Descriptor Count */ + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_RXD), + .def = E1000_DEFAULT_RXD, + .arg = { .r = { + .min = E1000_MIN_RXD, + .max = mac_type < e1000_82544 ? E1000_MAX_RXD : + E1000_MAX_82544_RXD + }} + }; + + if (num_RxDescriptors > bd) { + rx_ring->count = RxDescriptors[bd]; + e1000_validate_option(&rx_ring->count, &opt, adapter); + rx_ring->count = ALIGN(rx_ring->count, + REQ_RX_DESCRIPTOR_MULTIPLE); + } else { + rx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_rx_queues; i++) + rx_ring[i].count = rx_ring->count; + } + { /* Checksum Offload Enable/Disable */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "Checksum Offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_XsumRX > bd) { + unsigned int rx_csum = XsumRX[bd]; + e1000_validate_option(&rx_csum, &opt, adapter); + adapter->rx_csum = rx_csum; + } else { + adapter->rx_csum = opt.def; + } + } + { /* Flow Control */ + + static const struct e1000_opt_list fc_list[] = { + { E1000_FC_NONE, "Flow Control Disabled" }, + { E1000_FC_RX_PAUSE, "Flow Control Receive Only" }, + { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" }, + { E1000_FC_FULL, "Flow Control Enabled" }, + { E1000_FC_DEFAULT, "Flow Control Hardware Default" } + }; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = E1000_FC_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(fc_list), + .p = fc_list }} + }; + + if (num_FlowControl > bd) { + unsigned int fc = FlowControl[bd]; + e1000_validate_option(&fc, &opt, adapter); + adapter->hw.fc = adapter->hw.original_fc = fc; + } else { + adapter->hw.fc = adapter->hw.original_fc = opt.def; + } + } + { /* Transmit Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY }} + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + { /* Transmit Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY }} + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + { /* Receive Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg 
= { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY }} + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Receive Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY }} + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + { /* Interrupt Throttling Rate */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR }} + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + e_dev_info("%s turned off\n", opt.name); + break; + case 1: + e_dev_info("%s set to dynamic mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + e_dev_info("%s set to dynamic conservative " + "mode\n", opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 4: + e_dev_info("%s set to simplified " + "(2000-8000) ints mode\n", opt.name); + adapter->itr_setting = adapter->itr; + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + /* save the setting, because the dynamic bits + * change itr. + * clear the lower two bits because they are + * used as control + */ + adapter->itr_setting = adapter->itr & ~3; + break; + } + } else { + adapter->itr_setting = opt.def; + adapter->itr = 20000; + } + } + { /* Smart Power Down */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + adapter->smart_power_down = spd; + } else { + adapter->smart_power_down = opt.def; + } + } + + switch (adapter->hw.media_type) { + case e1000_media_type_fiber: + case e1000_media_type_internal_serdes: + e1000_check_fiber_options(adapter); + break; + case e1000_media_type_copper: + e1000_check_copper_options(adapter); + break; + default: + BUG(); + } +} + +/** + * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version + * @adapter: board private structure + * + * Handles speed and duplex options on fiber adapters + **/ +static void e1000_check_fiber_options(struct e1000_adapter *adapter) +{ + int bd = adapter->bd_number; + if (num_Speed > bd) { + e_dev_info("Speed not valid for fiber adapters, parameter " + "ignored\n"); + } + + if (num_Duplex > bd) { + e_dev_info("Duplex not valid for fiber adapters, parameter " + "ignored\n"); + } + + if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { + e_dev_info("AutoNeg other than 1000/Full is not valid for fiber" + "adapters, parameter ignored\n"); + } +} + +/** + * e1000_check_copper_options - Range Checking for Link Options, Copper Version + * @adapter: board private structure + * + * Handles speed and duplex options on copper adapters + **/ +static void 
e1000_check_copper_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + unsigned int speed, dplx, an; + int bd = adapter->bd_number; + + { /* Speed */ + static const struct e1000_opt_list speed_list[] = { + { 0, "" }, + { SPEED_10, "" }, + { SPEED_100, "" }, + { SPEED_1000, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Speed", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(speed_list), + .p = speed_list }} + }; + + if (num_Speed > bd) { + speed = Speed[bd]; + e1000_validate_option(&speed, &opt, adapter); + } else { + speed = opt.def; + } + } + { /* Duplex */ + static const struct e1000_opt_list dplx_list[] = { + { 0, "" }, + { HALF_DUPLEX, "" }, + { FULL_DUPLEX, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Duplex", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(dplx_list), + .p = dplx_list }} + }; + + if (num_Duplex > bd) { + dplx = Duplex[bd]; + e1000_validate_option(&dplx, &opt, adapter); + } else { + dplx = opt.def; + } + } + + if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { + e_dev_info("AutoNeg specified along with Speed or Duplex, " + "parameter ignored\n"); + adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; + } else { /* Autoneg */ + static const struct e1000_opt_list an_list[] = + #define AA "AutoNeg advertising " + {{ 0x01, AA "10/HD" }, + { 0x02, AA "10/FD" }, + { 0x03, AA "10/FD, 10/HD" }, + { 0x04, AA "100/HD" }, + { 0x05, AA "100/HD, 10/HD" }, + { 0x06, AA "100/HD, 10/FD" }, + { 0x07, AA "100/HD, 10/FD, 10/HD" }, + { 0x08, AA "100/FD" }, + { 0x09, AA "100/FD, 10/HD" }, + { 0x0a, AA "100/FD, 10/FD" }, + { 0x0b, AA "100/FD, 10/FD, 10/HD" }, + { 0x0c, AA "100/FD, 100/HD" }, + { 0x0d, AA "100/FD, 100/HD, 10/HD" }, + { 0x0e, AA "100/FD, 100/HD, 10/FD" }, + { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" }, + { 0x20, AA "1000/FD" }, + { 0x21, AA "1000/FD, 10/HD" }, + { 0x22, AA "1000/FD, 10/FD" }, + { 0x23, AA "1000/FD, 10/FD, 10/HD" }, + { 0x24, AA "1000/FD, 100/HD" }, + { 0x25, AA "1000/FD, 100/HD, 10/HD" }, + { 0x26, AA "1000/FD, 100/HD, 10/FD" }, + { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" }, + { 0x28, AA "1000/FD, 100/FD" }, + { 0x29, AA "1000/FD, 100/FD, 10/HD" }, + { 0x2a, AA "1000/FD, 100/FD, 10/FD" }, + { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" }, + { 0x2c, AA "1000/FD, 100/FD, 100/HD" }, + { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" }, + { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, + { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "AutoNeg", + .err = "parameter ignored", + .def = AUTONEG_ADV_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(an_list), + .p = an_list }} + }; + + if (num_AutoNeg > bd) { + an = AutoNeg[bd]; + e1000_validate_option(&an, &opt, adapter); + } else { + an = opt.def; + } + adapter->hw.autoneg_advertised = an; + } + + switch (speed + dplx) { + case 0: + adapter->hw.autoneg = adapter->fc_autoneg = 1; + if ((num_Speed > bd) && (speed != 0 || dplx != 0)) + e_dev_info("Speed and duplex autonegotiation " + "enabled\n"); + break; + case HALF_DUPLEX: + e_dev_info("Half Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Half Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_100_HALF; + break; + case FULL_DUPLEX: + e_dev_info("Full Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Full Duplex only\n"); + 
adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | + ADVERTISE_100_FULL | + ADVERTISE_1000_FULL; + break; + case SPEED_10: + e_dev_info("10 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 10 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_10_FULL; + break; + case SPEED_10 + HALF_DUPLEX: + e_dev_info("Forcing to 10 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_10 + FULL_DUPLEX: + e_dev_info("Forcing to 10 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100: + e_dev_info("100 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 100 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | + ADVERTISE_100_FULL; + break; + case SPEED_100 + HALF_DUPLEX: + e_dev_info("Forcing to 100 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100 + FULL_DUPLEX: + e_dev_info("Forcing to 100 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_1000: + e_dev_info("1000 Mbps Speed specified without Duplex\n"); + goto full_duplex_only; + case SPEED_1000 + HALF_DUPLEX: + e_dev_info("Half Duplex is not supported at 1000 Mbps\n"); + fallthrough; + case SPEED_1000 + FULL_DUPLEX: +full_duplex_only: + e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex " + "only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; + break; + default: + BUG(); + } + + /* Speed, AutoNeg and MDI/MDI-X must all play nice */ + if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { + e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. " + "Setting MDI-X to a compatible value.\n"); + } +} + diff --git a/devices/e1000/e1000_param-5.15-orig.c b/devices/e1000/e1000_param-5.15-orig.c new file mode 100644 index 00000000..4d4f5bf1 --- /dev/null +++ b/devices/e1000/e1000_param-5.15-orig.c @@ -0,0 +1,729 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +#include "e1000.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... 
E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); + +/* Receive Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(RxDescriptors, "Number of receive descriptors"); + +/* User Specified Speed Override + * + * Valid Range: 0, 10, 100, 1000 + * - 0 - auto-negotiate at all supported speeds + * - 10 - only link at 10 Mbps + * - 100 - only link at 100 Mbps + * - 1000 - only link at 1000 Mbps + * + * Default Value: 0 + */ +E1000_PARAM(Speed, "Speed setting"); + +/* User Specified Duplex Override + * + * Valid Range: 0-2 + * - 0 - auto-negotiate for duplex + * - 1 - only link at half duplex + * - 2 - only link at full duplex + * + * Default Value: 0 + */ +E1000_PARAM(Duplex, "Duplex setting"); + +/* Auto-negotiation Advertisement Override + * + * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber) + * + * The AutoNeg value is a bit mask describing which speed and duplex + * combinations should be advertised during auto-negotiation. + * The supported speed and duplex modes are listed below + * + * Bit 7 6 5 4 3 2 1 0 + * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 + * Duplex Full Full Half Full Half + * + * Default Value: 0x2F (copper); 0x20 (fiber) + */ +E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F + +/* User Specified Flow Control Override + * + * Valid Range: 0-3 + * - 0 - No Flow Control + * - 1 - Rx only, respond to PAUSE frames but do not generate them + * - 2 - Tx only, generate PAUSE frames but ignore them on receive + * - 3 - Full Flow Control Support + * + * Default Value: Read flow control settings from the EEPROM + */ +E1000_PARAM(FlowControl, "Flow Control setting"); +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +/* XsumRX - Receive Checksum Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables all checksum offload + * - 1 - enables receive IP/TCP/UDP checksum offload + * on 82543 and newer -based NICs + * + * Default Value: 1 + */ +E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. 
+ * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define DEFAULT_RDTR 0 +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define DEFAULT_RADV 8 +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + e_dev_info("%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + e_dev_info("%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + e_dev_info("%s set to %i\n", opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + const struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + e_dev_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + e_dev_info("Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +static void e1000_check_fiber_options(struct e1000_adapter *adapter); +static void e1000_check_copper_options(struct e1000_adapter *adapter); + +/** + * e1000_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +void e1000_check_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + e_dev_warn("Warning: no configuration for board #%i " + "using defaults for all values\n", bd); + } + + { /* Transmit Descriptor Count */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_TXD), + .def = E1000_DEFAULT_TXD, + .arg = { .r = { + .min = E1000_MIN_TXD, + .max = mac_type < e1000_82544 ? 
E1000_MAX_TXD : E1000_MAX_82544_TXD + }} + }; + + if (num_TxDescriptors > bd) { + tx_ring->count = TxDescriptors[bd]; + e1000_validate_option(&tx_ring->count, &opt, adapter); + tx_ring->count = ALIGN(tx_ring->count, + REQ_TX_DESCRIPTOR_MULTIPLE); + } else { + tx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_tx_queues; i++) + tx_ring[i].count = tx_ring->count; + } + { /* Receive Descriptor Count */ + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_RXD), + .def = E1000_DEFAULT_RXD, + .arg = { .r = { + .min = E1000_MIN_RXD, + .max = mac_type < e1000_82544 ? E1000_MAX_RXD : + E1000_MAX_82544_RXD + }} + }; + + if (num_RxDescriptors > bd) { + rx_ring->count = RxDescriptors[bd]; + e1000_validate_option(&rx_ring->count, &opt, adapter); + rx_ring->count = ALIGN(rx_ring->count, + REQ_RX_DESCRIPTOR_MULTIPLE); + } else { + rx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_rx_queues; i++) + rx_ring[i].count = rx_ring->count; + } + { /* Checksum Offload Enable/Disable */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "Checksum Offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_XsumRX > bd) { + unsigned int rx_csum = XsumRX[bd]; + e1000_validate_option(&rx_csum, &opt, adapter); + adapter->rx_csum = rx_csum; + } else { + adapter->rx_csum = opt.def; + } + } + { /* Flow Control */ + + static const struct e1000_opt_list fc_list[] = { + { E1000_FC_NONE, "Flow Control Disabled" }, + { E1000_FC_RX_PAUSE, "Flow Control Receive Only" }, + { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" }, + { E1000_FC_FULL, "Flow Control Enabled" }, + { E1000_FC_DEFAULT, "Flow Control Hardware Default" } + }; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = E1000_FC_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(fc_list), + .p = fc_list }} + }; + + if (num_FlowControl > bd) { + unsigned int fc = FlowControl[bd]; + e1000_validate_option(&fc, &opt, adapter); + adapter->hw.fc = adapter->hw.original_fc = fc; + } else { + adapter->hw.fc = adapter->hw.original_fc = opt.def; + } + } + { /* Transmit Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY }} + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + { /* Transmit Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY }} + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + { /* Receive Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg 
= { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY }} + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Receive Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY }} + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + { /* Interrupt Throttling Rate */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR }} + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + e_dev_info("%s turned off\n", opt.name); + break; + case 1: + e_dev_info("%s set to dynamic mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + e_dev_info("%s set to dynamic conservative " + "mode\n", opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 4: + e_dev_info("%s set to simplified " + "(2000-8000) ints mode\n", opt.name); + adapter->itr_setting = adapter->itr; + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + /* save the setting, because the dynamic bits + * change itr. + * clear the lower two bits because they are + * used as control + */ + adapter->itr_setting = adapter->itr & ~3; + break; + } + } else { + adapter->itr_setting = opt.def; + adapter->itr = 20000; + } + } + { /* Smart Power Down */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + adapter->smart_power_down = spd; + } else { + adapter->smart_power_down = opt.def; + } + } + + switch (adapter->hw.media_type) { + case e1000_media_type_fiber: + case e1000_media_type_internal_serdes: + e1000_check_fiber_options(adapter); + break; + case e1000_media_type_copper: + e1000_check_copper_options(adapter); + break; + default: + BUG(); + } +} + +/** + * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version + * @adapter: board private structure + * + * Handles speed and duplex options on fiber adapters + **/ +static void e1000_check_fiber_options(struct e1000_adapter *adapter) +{ + int bd = adapter->bd_number; + if (num_Speed > bd) { + e_dev_info("Speed not valid for fiber adapters, parameter " + "ignored\n"); + } + + if (num_Duplex > bd) { + e_dev_info("Duplex not valid for fiber adapters, parameter " + "ignored\n"); + } + + if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { + e_dev_info("AutoNeg other than 1000/Full is not valid for fiber" + "adapters, parameter ignored\n"); + } +} + +/** + * e1000_check_copper_options - Range Checking for Link Options, Copper Version + * @adapter: board private structure + * + * Handles speed and duplex options on copper adapters + **/ +static void 
e1000_check_copper_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + unsigned int speed, dplx, an; + int bd = adapter->bd_number; + + { /* Speed */ + static const struct e1000_opt_list speed_list[] = { + { 0, "" }, + { SPEED_10, "" }, + { SPEED_100, "" }, + { SPEED_1000, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Speed", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(speed_list), + .p = speed_list }} + }; + + if (num_Speed > bd) { + speed = Speed[bd]; + e1000_validate_option(&speed, &opt, adapter); + } else { + speed = opt.def; + } + } + { /* Duplex */ + static const struct e1000_opt_list dplx_list[] = { + { 0, "" }, + { HALF_DUPLEX, "" }, + { FULL_DUPLEX, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Duplex", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(dplx_list), + .p = dplx_list }} + }; + + if (num_Duplex > bd) { + dplx = Duplex[bd]; + e1000_validate_option(&dplx, &opt, adapter); + } else { + dplx = opt.def; + } + } + + if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { + e_dev_info("AutoNeg specified along with Speed or Duplex, " + "parameter ignored\n"); + adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; + } else { /* Autoneg */ + static const struct e1000_opt_list an_list[] = + #define AA "AutoNeg advertising " + {{ 0x01, AA "10/HD" }, + { 0x02, AA "10/FD" }, + { 0x03, AA "10/FD, 10/HD" }, + { 0x04, AA "100/HD" }, + { 0x05, AA "100/HD, 10/HD" }, + { 0x06, AA "100/HD, 10/FD" }, + { 0x07, AA "100/HD, 10/FD, 10/HD" }, + { 0x08, AA "100/FD" }, + { 0x09, AA "100/FD, 10/HD" }, + { 0x0a, AA "100/FD, 10/FD" }, + { 0x0b, AA "100/FD, 10/FD, 10/HD" }, + { 0x0c, AA "100/FD, 100/HD" }, + { 0x0d, AA "100/FD, 100/HD, 10/HD" }, + { 0x0e, AA "100/FD, 100/HD, 10/FD" }, + { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" }, + { 0x20, AA "1000/FD" }, + { 0x21, AA "1000/FD, 10/HD" }, + { 0x22, AA "1000/FD, 10/FD" }, + { 0x23, AA "1000/FD, 10/FD, 10/HD" }, + { 0x24, AA "1000/FD, 100/HD" }, + { 0x25, AA "1000/FD, 100/HD, 10/HD" }, + { 0x26, AA "1000/FD, 100/HD, 10/FD" }, + { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" }, + { 0x28, AA "1000/FD, 100/FD" }, + { 0x29, AA "1000/FD, 100/FD, 10/HD" }, + { 0x2a, AA "1000/FD, 100/FD, 10/FD" }, + { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" }, + { 0x2c, AA "1000/FD, 100/FD, 100/HD" }, + { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" }, + { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, + { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "AutoNeg", + .err = "parameter ignored", + .def = AUTONEG_ADV_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(an_list), + .p = an_list }} + }; + + if (num_AutoNeg > bd) { + an = AutoNeg[bd]; + e1000_validate_option(&an, &opt, adapter); + } else { + an = opt.def; + } + adapter->hw.autoneg_advertised = an; + } + + switch (speed + dplx) { + case 0: + adapter->hw.autoneg = adapter->fc_autoneg = 1; + if ((num_Speed > bd) && (speed != 0 || dplx != 0)) + e_dev_info("Speed and duplex autonegotiation " + "enabled\n"); + break; + case HALF_DUPLEX: + e_dev_info("Half Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Half Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_100_HALF; + break; + case FULL_DUPLEX: + e_dev_info("Full Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Full Duplex only\n"); + 
adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | + ADVERTISE_100_FULL | + ADVERTISE_1000_FULL; + break; + case SPEED_10: + e_dev_info("10 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 10 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_10_FULL; + break; + case SPEED_10 + HALF_DUPLEX: + e_dev_info("Forcing to 10 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_10 + FULL_DUPLEX: + e_dev_info("Forcing to 10 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100: + e_dev_info("100 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 100 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | + ADVERTISE_100_FULL; + break; + case SPEED_100 + HALF_DUPLEX: + e_dev_info("Forcing to 100 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100 + FULL_DUPLEX: + e_dev_info("Forcing to 100 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_1000: + e_dev_info("1000 Mbps Speed specified without Duplex\n"); + goto full_duplex_only; + case SPEED_1000 + HALF_DUPLEX: + e_dev_info("Half Duplex is not supported at 1000 Mbps\n"); + fallthrough; + case SPEED_1000 + FULL_DUPLEX: +full_duplex_only: + e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex " + "only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; + break; + default: + BUG(); + } + + /* Speed, AutoNeg and MDI/MDI-X must all play nice */ + if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { + e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. " + "Setting MDI-X to a compatible value.\n"); + } +} + diff --git a/devices/e1000/e1000_param-6.1-ethercat.c b/devices/e1000/e1000_param-6.1-ethercat.c new file mode 100644 index 00000000..e884f985 --- /dev/null +++ b/devices/e1000/e1000_param-6.1-ethercat.c @@ -0,0 +1,727 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +#include "e1000-6.1-ethercat.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... 
E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); + +/* Receive Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(RxDescriptors, "Number of receive descriptors"); + +/* User Specified Speed Override + * + * Valid Range: 0, 10, 100, 1000 + * - 0 - auto-negotiate at all supported speeds + * - 10 - only link at 10 Mbps + * - 100 - only link at 100 Mbps + * - 1000 - only link at 1000 Mbps + * + * Default Value: 0 + */ +E1000_PARAM(Speed, "Speed setting"); + +/* User Specified Duplex Override + * + * Valid Range: 0-2 + * - 0 - auto-negotiate for duplex + * - 1 - only link at half duplex + * - 2 - only link at full duplex + * + * Default Value: 0 + */ +E1000_PARAM(Duplex, "Duplex setting"); + +/* Auto-negotiation Advertisement Override + * + * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber) + * + * The AutoNeg value is a bit mask describing which speed and duplex + * combinations should be advertised during auto-negotiation. + * The supported speed and duplex modes are listed below + * + * Bit 7 6 5 4 3 2 1 0 + * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 + * Duplex Full Full Half Full Half + * + * Default Value: 0x2F (copper); 0x20 (fiber) + */ +E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); +#define AUTONEG_ADV_DEFAULT 0x2F + +/* User Specified Flow Control Override + * + * Valid Range: 0-3 + * - 0 - No Flow Control + * - 1 - Rx only, respond to PAUSE frames but do not generate them + * - 2 - Tx only, generate PAUSE frames but ignore them on receive + * - 3 - Full Flow Control Support + * + * Default Value: Read flow control settings from the EEPROM + */ +E1000_PARAM(FlowControl, "Flow Control setting"); + +/* XsumRX - Receive Checksum Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables all checksum offload + * - 1 - enables receive IP/TCP/UDP checksum offload + * on 82543 and newer -based NICs + * + * Default Value: 1 + */ +E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. 
+ * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define DEFAULT_RDTR 0 +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define DEFAULT_RADV 8 +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + e_dev_info("%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + e_dev_info("%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + e_dev_info("%s set to %i\n", opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + const struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + e_dev_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + e_dev_info("Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +static void e1000_check_fiber_options(struct e1000_adapter *adapter); +static void e1000_check_copper_options(struct e1000_adapter *adapter); + +/** + * e1000_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +void e1000_check_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + e_dev_warn("Warning: no configuration for board #%i " + "using defaults for all values\n", bd); + } + + { /* Transmit Descriptor Count */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_TXD), + .def = E1000_DEFAULT_TXD, + .arg = { .r = { + .min = E1000_MIN_TXD, + .max = mac_type < e1000_82544 ? 
E1000_MAX_TXD : E1000_MAX_82544_TXD + }} + }; + + if (num_TxDescriptors > bd) { + tx_ring->count = TxDescriptors[bd]; + e1000_validate_option(&tx_ring->count, &opt, adapter); + tx_ring->count = ALIGN(tx_ring->count, + REQ_TX_DESCRIPTOR_MULTIPLE); + } else { + tx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_tx_queues; i++) + tx_ring[i].count = tx_ring->count; + } + { /* Receive Descriptor Count */ + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_RXD), + .def = E1000_DEFAULT_RXD, + .arg = { .r = { + .min = E1000_MIN_RXD, + .max = mac_type < e1000_82544 ? E1000_MAX_RXD : + E1000_MAX_82544_RXD + }} + }; + + if (num_RxDescriptors > bd) { + rx_ring->count = RxDescriptors[bd]; + e1000_validate_option(&rx_ring->count, &opt, adapter); + rx_ring->count = ALIGN(rx_ring->count, + REQ_RX_DESCRIPTOR_MULTIPLE); + } else { + rx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_rx_queues; i++) + rx_ring[i].count = rx_ring->count; + } + { /* Checksum Offload Enable/Disable */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "Checksum Offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_XsumRX > bd) { + unsigned int rx_csum = XsumRX[bd]; + e1000_validate_option(&rx_csum, &opt, adapter); + adapter->rx_csum = rx_csum; + } else { + adapter->rx_csum = opt.def; + } + } + { /* Flow Control */ + + static const struct e1000_opt_list fc_list[] = { + { E1000_FC_NONE, "Flow Control Disabled" }, + { E1000_FC_RX_PAUSE, "Flow Control Receive Only" }, + { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" }, + { E1000_FC_FULL, "Flow Control Enabled" }, + { E1000_FC_DEFAULT, "Flow Control Hardware Default" } + }; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = E1000_FC_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(fc_list), + .p = fc_list }} + }; + + if (num_FlowControl > bd) { + unsigned int fc = FlowControl[bd]; + e1000_validate_option(&fc, &opt, adapter); + adapter->hw.fc = adapter->hw.original_fc = fc; + } else { + adapter->hw.fc = adapter->hw.original_fc = opt.def; + } + } + { /* Transmit Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY }} + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + { /* Transmit Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY }} + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + { /* Receive Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg 
= { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY }} + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Receive Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY }} + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + { /* Interrupt Throttling Rate */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR }} + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + e_dev_info("%s turned off\n", opt.name); + break; + case 1: + e_dev_info("%s set to dynamic mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + e_dev_info("%s set to dynamic conservative " + "mode\n", opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 4: + e_dev_info("%s set to simplified " + "(2000-8000) ints mode\n", opt.name); + adapter->itr_setting = adapter->itr; + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + /* save the setting, because the dynamic bits + * change itr. + * clear the lower two bits because they are + * used as control + */ + adapter->itr_setting = adapter->itr & ~3; + break; + } + } else { + adapter->itr_setting = opt.def; + adapter->itr = 20000; + } + } + { /* Smart Power Down */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + adapter->smart_power_down = spd; + } else { + adapter->smart_power_down = opt.def; + } + } + + switch (adapter->hw.media_type) { + case e1000_media_type_fiber: + case e1000_media_type_internal_serdes: + e1000_check_fiber_options(adapter); + break; + case e1000_media_type_copper: + e1000_check_copper_options(adapter); + break; + default: + BUG(); + } +} + +/** + * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version + * @adapter: board private structure + * + * Handles speed and duplex options on fiber adapters + **/ +static void e1000_check_fiber_options(struct e1000_adapter *adapter) +{ + int bd = adapter->bd_number; + if (num_Speed > bd) { + e_dev_info("Speed not valid for fiber adapters, parameter " + "ignored\n"); + } + + if (num_Duplex > bd) { + e_dev_info("Duplex not valid for fiber adapters, parameter " + "ignored\n"); + } + + if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { + e_dev_info("AutoNeg other than 1000/Full is not valid for fiber" + "adapters, parameter ignored\n"); + } +} + +/** + * e1000_check_copper_options - Range Checking for Link Options, Copper Version + * @adapter: board private structure + * + * Handles speed and duplex options on copper adapters + **/ +static void 
e1000_check_copper_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + unsigned int speed, dplx, an; + int bd = adapter->bd_number; + + { /* Speed */ + static const struct e1000_opt_list speed_list[] = { + { 0, "" }, + { SPEED_10, "" }, + { SPEED_100, "" }, + { SPEED_1000, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Speed", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(speed_list), + .p = speed_list }} + }; + + if (num_Speed > bd) { + speed = Speed[bd]; + e1000_validate_option(&speed, &opt, adapter); + } else { + speed = opt.def; + } + } + { /* Duplex */ + static const struct e1000_opt_list dplx_list[] = { + { 0, "" }, + { HALF_DUPLEX, "" }, + { FULL_DUPLEX, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Duplex", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(dplx_list), + .p = dplx_list }} + }; + + if (num_Duplex > bd) { + dplx = Duplex[bd]; + e1000_validate_option(&dplx, &opt, adapter); + } else { + dplx = opt.def; + } + } + + if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { + e_dev_info("AutoNeg specified along with Speed or Duplex, " + "parameter ignored\n"); + adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; + } else { /* Autoneg */ + static const struct e1000_opt_list an_list[] = + #define AA "AutoNeg advertising " + {{ 0x01, AA "10/HD" }, + { 0x02, AA "10/FD" }, + { 0x03, AA "10/FD, 10/HD" }, + { 0x04, AA "100/HD" }, + { 0x05, AA "100/HD, 10/HD" }, + { 0x06, AA "100/HD, 10/FD" }, + { 0x07, AA "100/HD, 10/FD, 10/HD" }, + { 0x08, AA "100/FD" }, + { 0x09, AA "100/FD, 10/HD" }, + { 0x0a, AA "100/FD, 10/FD" }, + { 0x0b, AA "100/FD, 10/FD, 10/HD" }, + { 0x0c, AA "100/FD, 100/HD" }, + { 0x0d, AA "100/FD, 100/HD, 10/HD" }, + { 0x0e, AA "100/FD, 100/HD, 10/FD" }, + { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" }, + { 0x20, AA "1000/FD" }, + { 0x21, AA "1000/FD, 10/HD" }, + { 0x22, AA "1000/FD, 10/FD" }, + { 0x23, AA "1000/FD, 10/FD, 10/HD" }, + { 0x24, AA "1000/FD, 100/HD" }, + { 0x25, AA "1000/FD, 100/HD, 10/HD" }, + { 0x26, AA "1000/FD, 100/HD, 10/FD" }, + { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" }, + { 0x28, AA "1000/FD, 100/FD" }, + { 0x29, AA "1000/FD, 100/FD, 10/HD" }, + { 0x2a, AA "1000/FD, 100/FD, 10/FD" }, + { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" }, + { 0x2c, AA "1000/FD, 100/FD, 100/HD" }, + { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" }, + { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, + { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "AutoNeg", + .err = "parameter ignored", + .def = AUTONEG_ADV_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(an_list), + .p = an_list }} + }; + + if (num_AutoNeg > bd) { + an = AutoNeg[bd]; + e1000_validate_option(&an, &opt, adapter); + } else { + an = opt.def; + } + adapter->hw.autoneg_advertised = an; + } + + switch (speed + dplx) { + case 0: + adapter->hw.autoneg = adapter->fc_autoneg = 1; + if ((num_Speed > bd) && (speed != 0 || dplx != 0)) + e_dev_info("Speed and duplex autonegotiation " + "enabled\n"); + break; + case HALF_DUPLEX: + e_dev_info("Half Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Half Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_100_HALF; + break; + case FULL_DUPLEX: + e_dev_info("Full Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Full Duplex only\n"); + 
adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | + ADVERTISE_100_FULL | + ADVERTISE_1000_FULL; + break; + case SPEED_10: + e_dev_info("10 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 10 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_10_FULL; + break; + case SPEED_10 + HALF_DUPLEX: + e_dev_info("Forcing to 10 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_10 + FULL_DUPLEX: + e_dev_info("Forcing to 10 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100: + e_dev_info("100 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 100 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | + ADVERTISE_100_FULL; + break; + case SPEED_100 + HALF_DUPLEX: + e_dev_info("Forcing to 100 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100 + FULL_DUPLEX: + e_dev_info("Forcing to 100 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_1000: + e_dev_info("1000 Mbps Speed specified without Duplex\n"); + goto full_duplex_only; + case SPEED_1000 + HALF_DUPLEX: + e_dev_info("Half Duplex is not supported at 1000 Mbps\n"); + fallthrough; + case SPEED_1000 + FULL_DUPLEX: +full_duplex_only: + e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex " + "only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; + break; + default: + BUG(); + } + + /* Speed, AutoNeg and MDI/MDI-X must all play nice */ + if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { + e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. " + "Setting MDI-X to a compatible value.\n"); + } +} + diff --git a/devices/e1000/e1000_param-6.1-orig.c b/devices/e1000/e1000_param-6.1-orig.c new file mode 100644 index 00000000..f4154ca7 --- /dev/null +++ b/devices/e1000/e1000_param-6.1-orig.c @@ -0,0 +1,727 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2006 Intel Corporation. */ + +#include "e1000.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... 
E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); + +/* Receive Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(RxDescriptors, "Number of receive descriptors"); + +/* User Specified Speed Override + * + * Valid Range: 0, 10, 100, 1000 + * - 0 - auto-negotiate at all supported speeds + * - 10 - only link at 10 Mbps + * - 100 - only link at 100 Mbps + * - 1000 - only link at 1000 Mbps + * + * Default Value: 0 + */ +E1000_PARAM(Speed, "Speed setting"); + +/* User Specified Duplex Override + * + * Valid Range: 0-2 + * - 0 - auto-negotiate for duplex + * - 1 - only link at half duplex + * - 2 - only link at full duplex + * + * Default Value: 0 + */ +E1000_PARAM(Duplex, "Duplex setting"); + +/* Auto-negotiation Advertisement Override + * + * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber) + * + * The AutoNeg value is a bit mask describing which speed and duplex + * combinations should be advertised during auto-negotiation. + * The supported speed and duplex modes are listed below + * + * Bit 7 6 5 4 3 2 1 0 + * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 + * Duplex Full Full Half Full Half + * + * Default Value: 0x2F (copper); 0x20 (fiber) + */ +E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); +#define AUTONEG_ADV_DEFAULT 0x2F + +/* User Specified Flow Control Override + * + * Valid Range: 0-3 + * - 0 - No Flow Control + * - 1 - Rx only, respond to PAUSE frames but do not generate them + * - 2 - Tx only, generate PAUSE frames but ignore them on receive + * - 3 - Full Flow Control Support + * + * Default Value: Read flow control settings from the EEPROM + */ +E1000_PARAM(FlowControl, "Flow Control setting"); + +/* XsumRX - Receive Checksum Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables all checksum offload + * - 1 - enables receive IP/TCP/UDP checksum offload + * on 82543 and newer -based NICs + * + * Default Value: 1 + */ +E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. 
+ * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define DEFAULT_RDTR 0 +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define DEFAULT_RADV 8 +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + e_dev_info("%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + e_dev_info("%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + e_dev_info("%s set to %i\n", opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + const struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + e_dev_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + e_dev_info("Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +static void e1000_check_fiber_options(struct e1000_adapter *adapter); +static void e1000_check_copper_options(struct e1000_adapter *adapter); + +/** + * e1000_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +void e1000_check_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + e_dev_warn("Warning: no configuration for board #%i " + "using defaults for all values\n", bd); + } + + { /* Transmit Descriptor Count */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_TXD), + .def = E1000_DEFAULT_TXD, + .arg = { .r = { + .min = E1000_MIN_TXD, + .max = mac_type < e1000_82544 ? 
E1000_MAX_TXD : E1000_MAX_82544_TXD + }} + }; + + if (num_TxDescriptors > bd) { + tx_ring->count = TxDescriptors[bd]; + e1000_validate_option(&tx_ring->count, &opt, adapter); + tx_ring->count = ALIGN(tx_ring->count, + REQ_TX_DESCRIPTOR_MULTIPLE); + } else { + tx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_tx_queues; i++) + tx_ring[i].count = tx_ring->count; + } + { /* Receive Descriptor Count */ + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_RXD), + .def = E1000_DEFAULT_RXD, + .arg = { .r = { + .min = E1000_MIN_RXD, + .max = mac_type < e1000_82544 ? E1000_MAX_RXD : + E1000_MAX_82544_RXD + }} + }; + + if (num_RxDescriptors > bd) { + rx_ring->count = RxDescriptors[bd]; + e1000_validate_option(&rx_ring->count, &opt, adapter); + rx_ring->count = ALIGN(rx_ring->count, + REQ_RX_DESCRIPTOR_MULTIPLE); + } else { + rx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_rx_queues; i++) + rx_ring[i].count = rx_ring->count; + } + { /* Checksum Offload Enable/Disable */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "Checksum Offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_XsumRX > bd) { + unsigned int rx_csum = XsumRX[bd]; + e1000_validate_option(&rx_csum, &opt, adapter); + adapter->rx_csum = rx_csum; + } else { + adapter->rx_csum = opt.def; + } + } + { /* Flow Control */ + + static const struct e1000_opt_list fc_list[] = { + { E1000_FC_NONE, "Flow Control Disabled" }, + { E1000_FC_RX_PAUSE, "Flow Control Receive Only" }, + { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" }, + { E1000_FC_FULL, "Flow Control Enabled" }, + { E1000_FC_DEFAULT, "Flow Control Hardware Default" } + }; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = E1000_FC_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(fc_list), + .p = fc_list }} + }; + + if (num_FlowControl > bd) { + unsigned int fc = FlowControl[bd]; + e1000_validate_option(&fc, &opt, adapter); + adapter->hw.fc = adapter->hw.original_fc = fc; + } else { + adapter->hw.fc = adapter->hw.original_fc = opt.def; + } + } + { /* Transmit Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY }} + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + { /* Transmit Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY }} + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + { /* Receive Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg 
= { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY }} + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Receive Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY }} + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + { /* Interrupt Throttling Rate */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR }} + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + e_dev_info("%s turned off\n", opt.name); + break; + case 1: + e_dev_info("%s set to dynamic mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + e_dev_info("%s set to dynamic conservative " + "mode\n", opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 4: + e_dev_info("%s set to simplified " + "(2000-8000) ints mode\n", opt.name); + adapter->itr_setting = adapter->itr; + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + /* save the setting, because the dynamic bits + * change itr. + * clear the lower two bits because they are + * used as control + */ + adapter->itr_setting = adapter->itr & ~3; + break; + } + } else { + adapter->itr_setting = opt.def; + adapter->itr = 20000; + } + } + { /* Smart Power Down */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + adapter->smart_power_down = spd; + } else { + adapter->smart_power_down = opt.def; + } + } + + switch (adapter->hw.media_type) { + case e1000_media_type_fiber: + case e1000_media_type_internal_serdes: + e1000_check_fiber_options(adapter); + break; + case e1000_media_type_copper: + e1000_check_copper_options(adapter); + break; + default: + BUG(); + } +} + +/** + * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version + * @adapter: board private structure + * + * Handles speed and duplex options on fiber adapters + **/ +static void e1000_check_fiber_options(struct e1000_adapter *adapter) +{ + int bd = adapter->bd_number; + if (num_Speed > bd) { + e_dev_info("Speed not valid for fiber adapters, parameter " + "ignored\n"); + } + + if (num_Duplex > bd) { + e_dev_info("Duplex not valid for fiber adapters, parameter " + "ignored\n"); + } + + if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { + e_dev_info("AutoNeg other than 1000/Full is not valid for fiber" + "adapters, parameter ignored\n"); + } +} + +/** + * e1000_check_copper_options - Range Checking for Link Options, Copper Version + * @adapter: board private structure + * + * Handles speed and duplex options on copper adapters + **/ +static void 
e1000_check_copper_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + unsigned int speed, dplx, an; + int bd = adapter->bd_number; + + { /* Speed */ + static const struct e1000_opt_list speed_list[] = { + { 0, "" }, + { SPEED_10, "" }, + { SPEED_100, "" }, + { SPEED_1000, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Speed", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(speed_list), + .p = speed_list }} + }; + + if (num_Speed > bd) { + speed = Speed[bd]; + e1000_validate_option(&speed, &opt, adapter); + } else { + speed = opt.def; + } + } + { /* Duplex */ + static const struct e1000_opt_list dplx_list[] = { + { 0, "" }, + { HALF_DUPLEX, "" }, + { FULL_DUPLEX, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Duplex", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(dplx_list), + .p = dplx_list }} + }; + + if (num_Duplex > bd) { + dplx = Duplex[bd]; + e1000_validate_option(&dplx, &opt, adapter); + } else { + dplx = opt.def; + } + } + + if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { + e_dev_info("AutoNeg specified along with Speed or Duplex, " + "parameter ignored\n"); + adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; + } else { /* Autoneg */ + static const struct e1000_opt_list an_list[] = + #define AA "AutoNeg advertising " + {{ 0x01, AA "10/HD" }, + { 0x02, AA "10/FD" }, + { 0x03, AA "10/FD, 10/HD" }, + { 0x04, AA "100/HD" }, + { 0x05, AA "100/HD, 10/HD" }, + { 0x06, AA "100/HD, 10/FD" }, + { 0x07, AA "100/HD, 10/FD, 10/HD" }, + { 0x08, AA "100/FD" }, + { 0x09, AA "100/FD, 10/HD" }, + { 0x0a, AA "100/FD, 10/FD" }, + { 0x0b, AA "100/FD, 10/FD, 10/HD" }, + { 0x0c, AA "100/FD, 100/HD" }, + { 0x0d, AA "100/FD, 100/HD, 10/HD" }, + { 0x0e, AA "100/FD, 100/HD, 10/FD" }, + { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" }, + { 0x20, AA "1000/FD" }, + { 0x21, AA "1000/FD, 10/HD" }, + { 0x22, AA "1000/FD, 10/FD" }, + { 0x23, AA "1000/FD, 10/FD, 10/HD" }, + { 0x24, AA "1000/FD, 100/HD" }, + { 0x25, AA "1000/FD, 100/HD, 10/HD" }, + { 0x26, AA "1000/FD, 100/HD, 10/FD" }, + { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" }, + { 0x28, AA "1000/FD, 100/FD" }, + { 0x29, AA "1000/FD, 100/FD, 10/HD" }, + { 0x2a, AA "1000/FD, 100/FD, 10/FD" }, + { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" }, + { 0x2c, AA "1000/FD, 100/FD, 100/HD" }, + { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" }, + { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, + { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "AutoNeg", + .err = "parameter ignored", + .def = AUTONEG_ADV_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(an_list), + .p = an_list }} + }; + + if (num_AutoNeg > bd) { + an = AutoNeg[bd]; + e1000_validate_option(&an, &opt, adapter); + } else { + an = opt.def; + } + adapter->hw.autoneg_advertised = an; + } + + switch (speed + dplx) { + case 0: + adapter->hw.autoneg = adapter->fc_autoneg = 1; + if ((num_Speed > bd) && (speed != 0 || dplx != 0)) + e_dev_info("Speed and duplex autonegotiation " + "enabled\n"); + break; + case HALF_DUPLEX: + e_dev_info("Half Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Half Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_100_HALF; + break; + case FULL_DUPLEX: + e_dev_info("Full Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Full Duplex only\n"); + 
adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | + ADVERTISE_100_FULL | + ADVERTISE_1000_FULL; + break; + case SPEED_10: + e_dev_info("10 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 10 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_10_FULL; + break; + case SPEED_10 + HALF_DUPLEX: + e_dev_info("Forcing to 10 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_10 + FULL_DUPLEX: + e_dev_info("Forcing to 10 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100: + e_dev_info("100 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 100 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | + ADVERTISE_100_FULL; + break; + case SPEED_100 + HALF_DUPLEX: + e_dev_info("Forcing to 100 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100 + FULL_DUPLEX: + e_dev_info("Forcing to 100 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_1000: + e_dev_info("1000 Mbps Speed specified without Duplex\n"); + goto full_duplex_only; + case SPEED_1000 + HALF_DUPLEX: + e_dev_info("Half Duplex is not supported at 1000 Mbps\n"); + fallthrough; + case SPEED_1000 + FULL_DUPLEX: +full_duplex_only: + e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex " + "only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; + break; + default: + BUG(); + } + + /* Speed, AutoNeg and MDI/MDI-X must all play nice */ + if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { + e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. " + "Setting MDI-X to a compatible value.\n"); + } +} + diff --git a/devices/e1000/update.sh b/devices/e1000/update.sh index b4c28d49..f691be2a 100755 --- a/devices/e1000/update.sh +++ b/devices/e1000/update.sh @@ -21,7 +21,9 @@ for f in $KERNELDIR/drivers/net/ethernet/intel/e1000/*.[ch]; do cp -v $o $e op=${b/\./-$PREVER-orig.} ep=${b/\./-$PREVER-ethercat.} - diff -u $op $ep | patch -p1 $e + diff -p -u $op $ep | patch -p1 $e sed -i s/$PREVER-ethercat.h/$KERNELVER-ethercat.h/ $e git add $o $e + echo -e "\t$e \\\\\n\t$o \\\\" >> Makefile.am done +echo "Remember to update Makefile.am!" diff --git a/devices/genet/Kbuild.in b/devices/genet/Kbuild.in new file mode 100644 index 00000000..c3a79122 --- /dev/null +++ b/devices/genet/Kbuild.in @@ -0,0 +1,56 @@ +#------------------------------------------------------------------------------ +# +# $Id$ +# +# Copyright (C) 2006-2017 Florian Pose, Ingenieurgemeinschaft IgH +# +# This file is part of the IgH EtherCAT Master. +# +# The IgH EtherCAT Master is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License version 2, as +# published by the Free Software Foundation. 
+# +# The IgH EtherCAT Master is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with the IgH EtherCAT Master; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# +# --- +# +# The license mentioned above concerns the source code only. Using the +# EtherCAT technology and brand is only permitted in compliance with the +# industrial property and similar rights of Beckhoff Automation GmbH. +# +# --- +# +# vim: syntax=make +# +#------------------------------------------------------------------------------ + +TOPDIR := $(src)/../.. + +REV := $(shell if test -s $(TOPDIR)/revision; then \ + cat $(TOPDIR)/revision; \ + else \ + git -C $(TOPDIR) describe 2>/dev/null || echo "unknown"; \ + fi) + +ifeq (@ENABLE_GENET@,1) + obj-m += ec_genet.o + ec_genet-objs := \ + bcmgenet-@KERNEL_GENET@-ethercat.o \ + bcmmii-@KERNEL_GENET@-ethercat.o \ + bcmgenet_wol-@KERNEL_GENET@-ethercat.o + + CFLAGS_bcmgenet-@KERNEL_GENET@-ethercat.o = -DREV="\"$(REV)\"" +endif + +KBUILD_EXTRA_SYMBOLS := \ + @abs_top_builddir@/$(LINUX_SYMVERS) \ + @abs_top_builddir@/master/$(LINUX_SYMVERS) + +#------------------------------------------------------------------------------ diff --git a/devices/genet/Makefile.am b/devices/genet/Makefile.am new file mode 100644 index 00000000..90f4a2e1 --- /dev/null +++ b/devices/genet/Makefile.am @@ -0,0 +1,40 @@ +#------------------------------------------------------------------------------ +# +# Copyright (C) 2006-2021 Florian Pose, Ingenieurgemeinschaft IgH +# +# This file is part of the IgH EtherCAT Master. +# +# The IgH EtherCAT Master is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License version 2, as +# published by the Free Software Foundation. +# +# The IgH EtherCAT Master is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with the IgH EtherCAT Master; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# +# --- +# +# The license mentioned above concerns the source code only. Using the +# EtherCAT technology and brand is only permitted in compliance with the +# industrial property and similar rights of Beckhoff Automation GmbH. 
+# +#------------------------------------------------------------------------------ + +include $(top_srcdir)/Makefile.kbuild + +EXTRA_DIST = \ + bcmgenet-6.1-ethercat.c \ + bcmgenet-6.1-ethercat.h \ + bcmgenet-6.1-orig.c \ + bcmgenet-6.1-orig.h \ + bcmgenet_wol-6.1-ethercat.c \ + bcmgenet_wol-6.1-orig.c \ + bcmmii-6.1-ethercat.c \ + bcmmii-6.1-orig.c + +#------------------------------------------------------------------------------ diff --git a/devices/genet/bcmgenet-6.1-ethercat.c b/devices/genet/bcmgenet-6.1-ethercat.c new file mode 100644 index 00000000..15d61dcb --- /dev/null +++ b/devices/genet/bcmgenet-6.1-ethercat.c @@ -0,0 +1,4492 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET (Gigabit Ethernet) controller driver + * + * Copyright (c) 2014-2020 Broadcom + */ + +#define pr_fmt(fmt) "bcmgenet: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "bcmgenet-6.1-ethercat.h" + +/* Maximum number of hardware queues, downsized if needed */ +#define GENET_MAX_MQ_CNT 4 + +/* Default highest priority queue for multi queue support */ +#define GENET_Q0_PRIORITY 0 + +#define GENET_Q16_RX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q) +#define GENET_Q16_TX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q) + +#define RX_BUF_LENGTH 2048 +#define SKB_ALIGNMENT 32 + +/* Tx/Rx DMA register offset, skip 256 descriptors */ +#define WORDS_PER_BD(p) (p->hw_params->words_per_bd) +#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32)) + +#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + +#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + +/* Forward declarations */ +static void bcmgenet_set_rx_mode(struct net_device *dev); + +static inline void bcmgenet_writel(u32 value, void __iomem *offset) +{ + /* MIPS chips strapped for BE will automagically configure the + * peripheral registers for CPU-native byte order. + */ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + __raw_writel(value, offset); + else + writel_relaxed(value, offset); +} + +static inline u32 bcmgenet_readl(void __iomem *offset) +{ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + return __raw_readl(offset); + else + return readl_relaxed(offset); +} + +static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, + void __iomem *d, u32 value) +{ + bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS); +} + +static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, + void __iomem *d, + dma_addr_t addr) +{ + bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); + + /* Register writes to GISB bus can take couple hundred nanoseconds + * and are done for each packet, save these expensive writes unless + * the platform is explicitly configured for 64-bits/LPAE. 
+ */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (priv->hw_params->flags & GENET_HAS_40BITS) + bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); +#endif +} + +/* Combined address + length/status setter */ +static inline void dmadesc_set(struct bcmgenet_priv *priv, + void __iomem *d, dma_addr_t addr, u32 val) +{ + dmadesc_set_addr(priv, d, addr); + dmadesc_set_length_status(priv, d, val); +} + +static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, + void __iomem *d) +{ + dma_addr_t addr; + + addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO); + + /* Register writes to GISB bus can take couple hundred nanoseconds + * and are done for each packet, save these expensive writes unless + * the platform is explicitly configured for 64-bits/LPAE. + */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (priv->hw_params->flags & GENET_HAS_40BITS) + addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32; +#endif + return addr; +} + +#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x" + +#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + +static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1); + else + return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL); +} + +static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1); + else + bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL); +} + +/* These macros are defined to deal with register map change + * between GENET1.1 and GENET2. Only those currently being used + * by driver are defined. + */ +static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); + else + return bcmgenet_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); + else + bcmgenet_writel(val, priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); + else + return bcmgenet_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); + else + bcmgenet_writel(val, priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +/* RX/TX DMA register accessors */ +enum dma_reg { + DMA_RING_CFG = 0, + DMA_CTRL, + DMA_STATUS, + DMA_SCB_BURST_SIZE, + DMA_ARB_CTRL, + DMA_PRIORITY_0, + DMA_PRIORITY_1, + DMA_PRIORITY_2, + DMA_INDEX2RING_0, + DMA_INDEX2RING_1, + DMA_INDEX2RING_2, + DMA_INDEX2RING_3, + DMA_INDEX2RING_4, + DMA_INDEX2RING_5, + DMA_INDEX2RING_6, + DMA_INDEX2RING_7, + DMA_RING0_TIMEOUT, + DMA_RING1_TIMEOUT, + DMA_RING2_TIMEOUT, + DMA_RING3_TIMEOUT, + DMA_RING4_TIMEOUT, + DMA_RING5_TIMEOUT, + DMA_RING6_TIMEOUT, + DMA_RING7_TIMEOUT, + DMA_RING8_TIMEOUT, + DMA_RING9_TIMEOUT, + DMA_RING10_TIMEOUT, + DMA_RING11_TIMEOUT, + DMA_RING12_TIMEOUT, + DMA_RING13_TIMEOUT, + DMA_RING14_TIMEOUT, + DMA_RING15_TIMEOUT, + DMA_RING16_TIMEOUT, +}; + +static const u8 bcmgenet_dma_regs_v3plus[] = { + [DMA_RING_CFG] = 0x00, + [DMA_CTRL] = 0x04, + [DMA_STATUS] = 0x08, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 
0x2C, + [DMA_PRIORITY_0] = 0x30, + [DMA_PRIORITY_1] = 0x34, + [DMA_PRIORITY_2] = 0x38, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, + [DMA_INDEX2RING_0] = 0x70, + [DMA_INDEX2RING_1] = 0x74, + [DMA_INDEX2RING_2] = 0x78, + [DMA_INDEX2RING_3] = 0x7C, + [DMA_INDEX2RING_4] = 0x80, + [DMA_INDEX2RING_5] = 0x84, + [DMA_INDEX2RING_6] = 0x88, + [DMA_INDEX2RING_7] = 0x8C, +}; + +static const u8 bcmgenet_dma_regs_v2[] = { + [DMA_RING_CFG] = 0x00, + [DMA_CTRL] = 0x04, + [DMA_STATUS] = 0x08, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x30, + [DMA_PRIORITY_0] = 0x34, + [DMA_PRIORITY_1] = 0x38, + [DMA_PRIORITY_2] = 0x3C, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, +}; + +static const u8 bcmgenet_dma_regs_v1[] = { + [DMA_CTRL] = 0x00, + [DMA_STATUS] = 0x04, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x30, + [DMA_PRIORITY_0] = 0x34, + [DMA_PRIORITY_1] = 0x38, + [DMA_PRIORITY_2] = 0x3C, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, +}; + +/* Set at runtime once bcmgenet version is known */ +static const u8 *bcmgenet_dma_regs; + +static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) +{ + return netdev_priv(dev_get_drvdata(dev)); +} + +static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, + enum dma_reg r) +{ + return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, + u32 val, enum dma_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, + enum dma_reg r) +{ + return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, + u32 val, enum dma_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +/* RDMA/TDMA ring registers and accessors + * we merge the common fields and just prefix with T/D the registers + * having different meaning depending on the direction + */ +enum 
dma_ring_reg { + TDMA_READ_PTR = 0, + RDMA_WRITE_PTR = TDMA_READ_PTR, + TDMA_READ_PTR_HI, + RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI, + TDMA_CONS_INDEX, + RDMA_PROD_INDEX = TDMA_CONS_INDEX, + TDMA_PROD_INDEX, + RDMA_CONS_INDEX = TDMA_PROD_INDEX, + DMA_RING_BUF_SIZE, + DMA_START_ADDR, + DMA_START_ADDR_HI, + DMA_END_ADDR, + DMA_END_ADDR_HI, + DMA_MBUF_DONE_THRESH, + TDMA_FLOW_PERIOD, + RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD, + TDMA_WRITE_PTR, + RDMA_READ_PTR = TDMA_WRITE_PTR, + TDMA_WRITE_PTR_HI, + RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI +}; + +/* GENET v4 supports 40-bits pointer addressing + * for obvious reasons the LO and HI word parts + * are contiguous, but this offsets the other + * registers. + */ +static const u8 genet_dma_ring_regs_v4[] = { + [TDMA_READ_PTR] = 0x00, + [TDMA_READ_PTR_HI] = 0x04, + [TDMA_CONS_INDEX] = 0x08, + [TDMA_PROD_INDEX] = 0x0C, + [DMA_RING_BUF_SIZE] = 0x10, + [DMA_START_ADDR] = 0x14, + [DMA_START_ADDR_HI] = 0x18, + [DMA_END_ADDR] = 0x1C, + [DMA_END_ADDR_HI] = 0x20, + [DMA_MBUF_DONE_THRESH] = 0x24, + [TDMA_FLOW_PERIOD] = 0x28, + [TDMA_WRITE_PTR] = 0x2C, + [TDMA_WRITE_PTR_HI] = 0x30, +}; + +static const u8 genet_dma_ring_regs_v123[] = { + [TDMA_READ_PTR] = 0x00, + [TDMA_CONS_INDEX] = 0x04, + [TDMA_PROD_INDEX] = 0x08, + [DMA_RING_BUF_SIZE] = 0x0C, + [DMA_START_ADDR] = 0x10, + [DMA_END_ADDR] = 0x14, + [DMA_MBUF_DONE_THRESH] = 0x18, + [TDMA_FLOW_PERIOD] = 0x1C, + [TDMA_WRITE_PTR] = 0x20, +}; + +/* Set at runtime once GENET version is known */ +static const u8 *genet_dma_ring_regs; + +static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, + unsigned int ring, + enum dma_ring_reg r) +{ + return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, + unsigned int ring, u32 val, + enum dma_ring_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, + unsigned int ring, + enum dma_ring_reg r) +{ + return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, + unsigned int ring, u32 val, + enum dma_ring_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg |= (1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + reg |= RBUF_HFB_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); +} + +static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 offset, reg, reg1; + + offset = HFB_FLT_ENABLE_V3PLUS; + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32)); + if (f_index < 32) { + reg1 &= ~(1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32)); + } else { + reg &= ~(1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); + } + if (!reg && !reg1) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + reg &= ~RBUF_HFB_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } +} + +static void 
bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv, + u32 f_index, u32 rx_queue) +{ + u32 offset; + u32 reg; + + offset = f_index / 8; + reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset); + reg &= ~(0xF << (4 * (f_index % 8))); + reg |= ((rx_queue & 0xF) << (4 * (f_index % 8))); + bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset); +} + +static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv, + u32 f_index, u32 f_length) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_LEN_V3PLUS + + ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) * + sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg &= ~(0xFF << (8 * (f_index % 4))); + reg |= ((f_length & 0xFF) << (8 * (f_index % 4))); + bcmgenet_hfb_reg_writel(priv, reg, offset); +} + +static int bcmgenet_hfb_validate_mask(void *mask, size_t size) +{ + while (size) { + switch (*(unsigned char *)mask++) { + case 0x00: + case 0x0f: + case 0xf0: + case 0xff: + size--; + continue; + default: + return -EINVAL; + } + } + + return 0; +} + +#define VALIDATE_MASK(x) \ + bcmgenet_hfb_validate_mask(&(x), sizeof(x)) + +static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index, + u32 offset, void *val, void *mask, + size_t size) +{ + u32 index, tmp; + + index = f_index * priv->hw_params->hfb_filter_size + offset / 2; + tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32)); + + while (size--) { + if (offset++ & 1) { + tmp &= ~0x300FF; + tmp |= (*(unsigned char *)val++); + switch ((*(unsigned char *)mask++)) { + case 0xFF: + tmp |= 0x30000; + break; + case 0xF0: + tmp |= 0x20000; + break; + case 0x0F: + tmp |= 0x10000; + break; + } + bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32)); + if (size) + tmp = bcmgenet_hfb_readl(priv, + index * sizeof(u32)); + } else { + tmp &= ~0xCFF00; + tmp |= (*(unsigned char *)val++) << 8; + switch ((*(unsigned char *)mask++)) { + case 0xFF: + tmp |= 0xC0000; + break; + case 0xF0: + tmp |= 0x80000; + break; + case 0x0F: + tmp |= 0x40000; + break; + } + if (!size) + bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32)); + } + } + + return 0; +} + +static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, + struct bcmgenet_rxnfc_rule *rule) +{ + struct ethtool_rx_flow_spec *fs = &rule->fs; + u32 offset = 0, f_length = 0, f; + u8 val_8, mask_8; + __be16 val_16; + u16 mask_16; + size_t size; + + f = fs->location; + if (fs->flow_type & FLOW_MAC_EXT) { + bcmgenet_hfb_insert_data(priv, f, 0, + &fs->h_ext.h_dest, &fs->m_ext.h_dest, + sizeof(fs->h_ext.h_dest)); + } + + if (fs->flow_type & FLOW_EXT) { + if (fs->m_ext.vlan_etype || + fs->m_ext.vlan_tci) { + bcmgenet_hfb_insert_data(priv, f, 12, + &fs->h_ext.vlan_etype, + &fs->m_ext.vlan_etype, + sizeof(fs->h_ext.vlan_etype)); + bcmgenet_hfb_insert_data(priv, f, 14, + &fs->h_ext.vlan_tci, + &fs->m_ext.vlan_tci, + sizeof(fs->h_ext.vlan_tci)); + offset += VLAN_HLEN; + f_length += DIV_ROUND_UP(VLAN_HLEN, 2); + } + } + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case ETHER_FLOW: + f_length += DIV_ROUND_UP(ETH_HLEN, 2); + bcmgenet_hfb_insert_data(priv, f, 0, + &fs->h_u.ether_spec.h_dest, + &fs->m_u.ether_spec.h_dest, + sizeof(fs->h_u.ether_spec.h_dest)); + bcmgenet_hfb_insert_data(priv, f, ETH_ALEN, + &fs->h_u.ether_spec.h_source, + &fs->m_u.ether_spec.h_source, + sizeof(fs->h_u.ether_spec.h_source)); + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, + &fs->h_u.ether_spec.h_proto, + &fs->m_u.ether_spec.h_proto, + sizeof(fs->h_u.ether_spec.h_proto)); + break; + case IP_USER_FLOW: 
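+		/* Byte offsets below are relative to the start of an untagged
+		 * Ethernet frame (ETH_HLEN = 14): ToS at 15, protocol at 23,
+		 * source IP at 26, destination IP at 30; 'offset' adds
+		 * VLAN_HLEN when a VLAN tag was matched above.
+		 */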
+ f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2); + /* Specify IP Ether Type */ + val_16 = htons(ETH_P_IP); + mask_16 = 0xFFFF; + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, + &val_16, &mask_16, sizeof(val_16)); + bcmgenet_hfb_insert_data(priv, f, 15 + offset, + &fs->h_u.usr_ip4_spec.tos, + &fs->m_u.usr_ip4_spec.tos, + sizeof(fs->h_u.usr_ip4_spec.tos)); + bcmgenet_hfb_insert_data(priv, f, 23 + offset, + &fs->h_u.usr_ip4_spec.proto, + &fs->m_u.usr_ip4_spec.proto, + sizeof(fs->h_u.usr_ip4_spec.proto)); + bcmgenet_hfb_insert_data(priv, f, 26 + offset, + &fs->h_u.usr_ip4_spec.ip4src, + &fs->m_u.usr_ip4_spec.ip4src, + sizeof(fs->h_u.usr_ip4_spec.ip4src)); + bcmgenet_hfb_insert_data(priv, f, 30 + offset, + &fs->h_u.usr_ip4_spec.ip4dst, + &fs->m_u.usr_ip4_spec.ip4dst, + sizeof(fs->h_u.usr_ip4_spec.ip4dst)); + if (!fs->m_u.usr_ip4_spec.l4_4_bytes) + break; + + /* Only supports 20 byte IPv4 header */ + val_8 = 0x45; + mask_8 = 0xFF; + bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset, + &val_8, &mask_8, + sizeof(val_8)); + size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes); + bcmgenet_hfb_insert_data(priv, f, + ETH_HLEN + 20 + offset, + &fs->h_u.usr_ip4_spec.l4_4_bytes, + &fs->m_u.usr_ip4_spec.l4_4_bytes, + size); + f_length += DIV_ROUND_UP(size, 2); + break; + } + + bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length); + if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) { + /* Ring 0 flows can be handled by the default Descriptor Ring + * We'll map them to ring 0, but don't enable the filter + */ + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0); + rule->state = BCMGENET_RXNFC_STATE_DISABLED; + } else { + /* Other Rx rings are direct mapped here */ + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, + fs->ring_cookie); + bcmgenet_hfb_enable_filter(priv, f); + rule->state = BCMGENET_RXNFC_STATE_ENABLED; + } +} + +/* bcmgenet_hfb_clear + * + * Clear Hardware Filter Block and disable all filtering. 
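+ *
+ * bcmgenet_hfb_clear_filter() zeroes the data words of a single filter;
+ * bcmgenet_hfb_clear() resets the whole block, including the filter enables
+ * and the index-to-ring mappings.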
+ */ +static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 base, i; + + base = f_index * priv->hw_params->hfb_filter_size; + for (i = 0; i < priv->hw_params->hfb_filter_size; i++) + bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32)); +} + +static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) +{ + u32 i; + + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); + + for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++) + bcmgenet_rdma_writel(priv, 0x0, i); + + for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++) + bcmgenet_hfb_reg_writel(priv, 0x0, + HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); + + for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++) + bcmgenet_hfb_clear_filter(priv, i); +} + +static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) +{ + int i; + + INIT_LIST_HEAD(&priv->rxnfc_list); + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + + for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { + INIT_LIST_HEAD(&priv->rxnfc_rules[i].list); + priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED; + } + + bcmgenet_hfb_clear(priv); +} + +static int bcmgenet_begin(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Turn on the clock */ + return clk_prepare_enable(priv->clk); +} + +static void bcmgenet_complete(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Turn off the clock */ + clk_disable_unprepare(priv->clk); +} + +static int bcmgenet_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -ENODEV; + + phy_ethtool_ksettings_get(dev->phydev, cmd); + + return 0; +} + +static int bcmgenet_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -ENODEV; + + return phy_ethtool_ksettings_set(dev->phydev, cmd); +} + +static int bcmgenet_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg; + int ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + /* Make sure we reflect the value of CRC_CMD_FWD */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); + + clk_disable_unprepare(priv->clk); + + return ret; +} + +static u32 bcmgenet_get_msglevel(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +static void bcmgenet_set_msglevel(struct net_device *dev, u32 level) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + priv->msg_enable = level; +} + +static int bcmgenet_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rx_ring *ring; + unsigned int i; + + ec->tx_max_coalesced_frames = + bcmgenet_tdma_ring_readl(priv, DESC_INDEX, + DMA_MBUF_DONE_THRESH); + ec->rx_max_coalesced_frames = + bcmgenet_rdma_ring_readl(priv, DESC_INDEX, + DMA_MBUF_DONE_THRESH); + ec->rx_coalesce_usecs = + bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000; + + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ring = &priv->rx_rings[i]; + 
ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; + } + ring = &priv->rx_rings[DESC_INDEX]; + ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; + + return 0; +} + +static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring, + u32 usecs, u32 pkts) +{ + struct bcmgenet_priv *priv = ring->priv; + unsigned int i = ring->index; + u32 reg; + + bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH); + + reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i); + reg &= ~DMA_TIMEOUT_MASK; + reg |= DIV_ROUND_UP(usecs * 1000, 8192); + bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i); +} + +static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring, + struct ethtool_coalesce *ec) +{ + struct dim_cq_moder moder; + u32 usecs, pkts; + + ring->rx_coalesce_usecs = ec->rx_coalesce_usecs; + ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames; + usecs = ring->rx_coalesce_usecs; + pkts = ring->rx_max_coalesced_frames; + + if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) { + moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + ring->dim.use_dim = ec->use_adaptive_rx_coalesce; + bcmgenet_set_rx_coalesce(ring, usecs, pkts); +} + +static int bcmgenet_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned int i; + + /* Base system clock is 125Mhz, DMA timeout is this reference clock + * divided by 1024, which yields roughly 8.192us, our maximum value + * has to fit in the DMA_TIMEOUT_MASK (16 bits) + */ + if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK || + ec->tx_max_coalesced_frames == 0 || + ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK || + ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) + return -EINVAL; + + /* GENET TDMA hardware does not support a configurable timeout, but will + * always generate an interrupt either after MBDONE packets have been + * transmitted, or when the ring is empty. 
+ */ + + /* Program all TX queues with the same values, as there is no + * ethtool knob to do coalescing on a per-queue basis + */ + for (i = 0; i < priv->hw_params->tx_queues; i++) + bcmgenet_tdma_ring_writel(priv, i, + ec->tx_max_coalesced_frames, + DMA_MBUF_DONE_THRESH); + bcmgenet_tdma_ring_writel(priv, DESC_INDEX, + ec->tx_max_coalesced_frames, + DMA_MBUF_DONE_THRESH); + + for (i = 0; i < priv->hw_params->rx_queues; i++) + bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec); + bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec); + + return 0; +} + +static void bcmgenet_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bcmgenet_priv *priv; + u32 umac_cmd; + + priv = netdev_priv(dev); + + epause->autoneg = priv->autoneg_pause; + + if (netif_carrier_ok(dev)) { + /* report active state when link is up */ + umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD); + epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE); + epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE); + } else { + /* otherwise report stored settings */ + epause->tx_pause = priv->tx_pause; + epause->rx_pause = priv->rx_pause; + } +} + +static int bcmgenet_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!dev->phydev) + return -ENODEV; + + if (!phy_validate_pause(dev->phydev, epause)) + return -EINVAL; + + priv->autoneg_pause = !!epause->autoneg; + priv->tx_pause = !!epause->tx_pause; + priv->rx_pause = !!epause->rx_pause; + + bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause); + + return 0; +} + +/* standard ethtool support functions. */ +enum bcmgenet_stat_type { + BCMGENET_STAT_NETDEV = -1, + BCMGENET_STAT_MIB_RX, + BCMGENET_STAT_MIB_TX, + BCMGENET_STAT_RUNT, + BCMGENET_STAT_MISC, + BCMGENET_STAT_SOFT, +}; + +struct bcmgenet_stats { + char stat_string[ETH_GSTRING_LEN]; + int stat_sizeof; + int stat_offset; + enum bcmgenet_stat_type type; + /* reg offset from UMAC base for misc counters */ + u16 reg_offset; +}; + +#define STAT_NETDEV(m) { \ + .stat_string = __stringify(m), \ + .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ + .stat_offset = offsetof(struct net_device_stats, m), \ + .type = BCMGENET_STAT_NETDEV, \ +} + +#define STAT_GENET_MIB(str, m, _type) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ + .stat_offset = offsetof(struct bcmgenet_priv, m), \ + .type = _type, \ +} + +#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) +#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) +#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) +#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT) + +#define STAT_GENET_MISC(str, m, offset) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ + .stat_offset = offsetof(struct bcmgenet_priv, m), \ + .type = BCMGENET_STAT_MISC, \ + .reg_offset = offset, \ +} + +#define STAT_GENET_Q(num) \ + STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \ + tx_rings[num].packets), \ + STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \ + tx_rings[num].bytes), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \ + rx_rings[num].bytes), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \ + rx_rings[num].packets), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \ + rx_rings[num].errors), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", 
\ + rx_rings[num].dropped) + +/* There is a 0xC gap between the end of RX and beginning of TX stats and then + * between the end of TX stats and the beginning of the RX RUNT + */ +#define BCMGENET_STAT_OFFSET 0xc + +/* Hardware counters must be kept in sync because the order/offset + * is important here (order in structure declaration = order in hardware) + */ +static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { + /* general stats */ + STAT_NETDEV(rx_packets), + STAT_NETDEV(tx_packets), + STAT_NETDEV(rx_bytes), + STAT_NETDEV(tx_bytes), + STAT_NETDEV(rx_errors), + STAT_NETDEV(tx_errors), + STAT_NETDEV(rx_dropped), + STAT_NETDEV(tx_dropped), + STAT_NETDEV(multicast), + /* UniMAC RSV counters */ + STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), + STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), + STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), + STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), + STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), + STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), + STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), + STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), + STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), + STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), + STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt), + STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes), + STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca), + STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca), + STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs), + STAT_GENET_MIB_RX("rx_control", mib.rx.cf), + STAT_GENET_MIB_RX("rx_pause", mib.rx.pf), + STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo), + STAT_GENET_MIB_RX("rx_align", mib.rx.aln), + STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr), + STAT_GENET_MIB_RX("rx_code", mib.rx.cde), + STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr), + STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr), + STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr), + STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue), + STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok), + STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc), + STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp), + STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc), + /* UniMAC TSV counters */ + STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), + STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), + STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), + STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), + STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), + STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), + STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), + STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), + STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), + STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), + STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts), + STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca), + STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca), + STAT_GENET_MIB_TX("tx_pause", mib.tx.pf), + STAT_GENET_MIB_TX("tx_control", mib.tx.cf), + STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs), + STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr), + STAT_GENET_MIB_TX("tx_defer", mib.tx.drf), + STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf), + STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl), + STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl), + STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl), + STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl), + 
STAT_GENET_MIB_TX("tx_frags", mib.tx.frg), + STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl), + STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr), + STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes), + STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok), + STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc), + /* UniMAC RUNT counters */ + STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt), + STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), + STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), + STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), + /* Misc UniMAC counters */ + STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, + UMAC_RBUF_OVFL_CNT_V1), + STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, + UMAC_RBUF_ERR_CNT_V1), + STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), + STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), + STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed), + STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb), + STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed", + mib.tx_realloc_tsb_failed), + /* Per TX queues */ + STAT_GENET_Q(0), + STAT_GENET_Q(1), + STAT_GENET_Q(2), + STAT_GENET_Q(3), + STAT_GENET_Q(16), +}; + +#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) + +static void bcmgenet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, "bcmgenet", sizeof(info->driver)); +} + +static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCMGENET_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void bcmgenet_get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcmgenet_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset) +{ + u16 new_offset; + u32 val; + + switch (offset) { + case UMAC_RBUF_OVFL_CNT_V1: + if (GENET_IS_V2(priv)) + new_offset = RBUF_OVFL_CNT_V2; + else + new_offset = RBUF_OVFL_CNT_V3PLUS; + + val = bcmgenet_rbuf_readl(priv, new_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_rbuf_writel(priv, 0, new_offset); + break; + case UMAC_RBUF_ERR_CNT_V1: + if (GENET_IS_V2(priv)) + new_offset = RBUF_ERR_CNT_V2; + else + new_offset = RBUF_ERR_CNT_V3PLUS; + + val = bcmgenet_rbuf_readl(priv, new_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_rbuf_writel(priv, 0, new_offset); + break; + default: + val = bcmgenet_umac_readl(priv, offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_umac_writel(priv, 0, offset); + break; + } + + return val; +} + +static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) +{ + int i, j = 0; + + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + const struct bcmgenet_stats *s; + u8 offset = 0; + u32 val = 0; + char *p; + + s = &bcmgenet_gstrings_stats[i]; + switch (s->type) { + case BCMGENET_STAT_NETDEV: + case BCMGENET_STAT_SOFT: + continue; + case BCMGENET_STAT_RUNT: + offset += BCMGENET_STAT_OFFSET; + fallthrough; + case BCMGENET_STAT_MIB_TX: + offset += BCMGENET_STAT_OFFSET; + fallthrough; + case BCMGENET_STAT_MIB_RX: + val = bcmgenet_umac_readl(priv, + UMAC_MIB_START + j + offset); + offset = 0; /* Reset Offset */ + break; + case BCMGENET_STAT_MISC: + if (GENET_IS_V1(priv)) { + val = 
bcmgenet_umac_readl(priv, s->reg_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_umac_writel(priv, 0, + s->reg_offset); + } else { + val = bcmgenet_update_stat_misc(priv, + s->reg_offset); + } + break; + } + + j += s->stat_sizeof; + p = (char *)priv + s->stat_offset; + *(u32 *)p = val; + } +} + +static void bcmgenet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int i; + + if (netif_running(dev)) + bcmgenet_update_mib_counters(priv); + + dev->netdev_ops->ndo_get_stats(dev); + + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + const struct bcmgenet_stats *s; + char *p; + + s = &bcmgenet_gstrings_stats[i]; + if (s->type == BCMGENET_STAT_NETDEV) + p = (char *)&dev->stats; + else + p = (char *)priv; + p += s->stat_offset; + if (sizeof(unsigned long) != sizeof(u32) && + s->stat_sizeof == sizeof(unsigned long)) + data[i] = *(unsigned long *)p; + else + data[i] = *(u32 *)p; + } +} + +void bcmgenet_eee_enable_set(struct net_device *dev, bool enable, + bool tx_lpi_enabled) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; + u32 reg; + + if (enable && !priv->clk_eee_enabled) { + clk_prepare_enable(priv->clk_eee); + priv->clk_eee_enabled = true; + } + + reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL); + if (enable) + reg |= EEE_EN; + else + reg &= ~EEE_EN; + bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL); + + /* Enable EEE and switch to a 27Mhz clock automatically */ + reg = bcmgenet_readl(priv->base + off); + if (tx_lpi_enabled) + reg |= TBUF_EEE_EN | TBUF_PM_EN; + else + reg &= ~(TBUF_EEE_EN | TBUF_PM_EN); + bcmgenet_writel(reg, priv->base + off); + + /* Do the same for thing for RBUF */ + reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL); + if (enable) + reg |= RBUF_EEE_EN | RBUF_PM_EN; + else + reg &= ~(RBUF_EEE_EN | RBUF_PM_EN); + bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL); + + if (!enable && priv->clk_eee_enabled) { + clk_disable_unprepare(priv->clk_eee); + priv->clk_eee_enabled = false; + } + + priv->eee.eee_enabled = enable; + priv->eee.eee_active = enable; + priv->eee.tx_lpi_enabled = tx_lpi_enabled; +} + +static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; + + if (!dev->phydev) + return -ENODEV; + + e->eee_enabled = p->eee_enabled; + e->eee_active = p->eee_active; + e->tx_lpi_enabled = p->tx_lpi_enabled; + e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); + + return phy_ethtool_get_eee(dev->phydev, e); +} + +static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; + + if (!dev->phydev) + return -ENODEV; + + p->eee_enabled = e->eee_enabled; + + if (!p->eee_enabled) { + bcmgenet_eee_enable_set(dev, false, false); + } else { + p->eee_active = phy_init_eee(dev->phydev, false) >= 0; + bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); + bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled); + } + + return phy_ethtool_set_eee(dev->phydev, e); +} + +static int bcmgenet_validate_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_usrip4_spec *l4_mask; + struct ethhdr *eth_mask; + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) { + netdev_err(dev, 
"rxnfc: Invalid location (%d)\n", + cmd->fs.location); + return -EINVAL; + } + + switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case IP_USER_FLOW: + l4_mask = &cmd->fs.m_u.usr_ip4_spec; + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(l4_mask->ip4src) || + VALIDATE_MASK(l4_mask->ip4dst) || + VALIDATE_MASK(l4_mask->l4_4_bytes) || + VALIDATE_MASK(l4_mask->proto) || + VALIDATE_MASK(l4_mask->ip_ver) || + VALIDATE_MASK(l4_mask->tos)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + break; + case ETHER_FLOW: + eth_mask = &cmd->fs.m_u.ether_spec; + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(eth_mask->h_dest) || + VALIDATE_MASK(eth_mask->h_source) || + VALIDATE_MASK(eth_mask->h_proto)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + break; + default: + netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n", + cmd->fs.flow_type); + return -EINVAL; + } + + if ((cmd->fs.flow_type & FLOW_EXT)) { + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) || + VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) { + netdev_err(dev, "rxnfc: user-def not supported\n"); + return -EINVAL; + } + } + + if ((cmd->fs.flow_type & FLOW_MAC_EXT)) { + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + } + + return 0; +} + +static int bcmgenet_insert_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *loc_rule; + int err; + + if (priv->hw_params->hfb_filter_size < 128) { + netdev_err(dev, "rxnfc: Not supported by this device\n"); + return -EINVAL; + } + + if (cmd->fs.ring_cookie > priv->hw_params->rx_queues && + cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) { + netdev_err(dev, "rxnfc: Unsupported action (%llu)\n", + cmd->fs.ring_cookie); + return -EINVAL; + } + + err = bcmgenet_validate_flow(dev, cmd); + if (err) + return err; + + loc_rule = &priv->rxnfc_rules[cmd->fs.location]; + if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED) + bcmgenet_hfb_disable_filter(priv, cmd->fs.location); + if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) { + list_del(&loc_rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } + loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED; + memcpy(&loc_rule->fs, &cmd->fs, + sizeof(struct ethtool_rx_flow_spec)); + + bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule); + + list_add_tail(&loc_rule->list, &priv->rxnfc_list); + + return 0; +} + +static int bcmgenet_delete_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->rxnfc_rules[cmd->fs.location]; + if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) { + err = -ENOENT; + goto out; + } + + if (rule->state == BCMGENET_RXNFC_STATE_ENABLED) + bcmgenet_hfb_disable_filter(priv, cmd->fs.location); + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) { + list_del(&rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } + rule->state = BCMGENET_RXNFC_STATE_UNUSED; + memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec)); + +out: + return err; +} + +static int bcmgenet_set_rxnfc(struct net_device *dev, struct 
ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = bcmgenet_insert_flow(dev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + err = bcmgenet_delete_flow(dev, cmd); + break; + default: + netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n", + cmd->cmd); + return -EINVAL; + } + + return err; +} + +static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd, + int loc) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + + if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->rxnfc_rules[loc]; + if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) + err = -ENOENT; + else + memcpy(&cmd->fs, &rule->fs, + sizeof(struct ethtool_rx_flow_spec)); + + return err; +} + +static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv) +{ + struct list_head *pos; + int res = 0; + + list_for_each(pos, &priv->rxnfc_list) + res++; + + return res; +} + +static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + int i = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = priv->hw_params->rx_queues ?: 1; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = bcmgenet_get_num_flows(priv); + cmd->data = MAX_NUM_OF_FS_RULES; + break; + case ETHTOOL_GRXCLSRULE: + err = bcmgenet_get_flow(dev, cmd, cmd->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (i < cmd->rule_cnt) + rule_locs[i++] = rule->fs.location; + cmd->rule_cnt = i; + cmd->data = MAX_NUM_OF_FS_RULES; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +/* standard ethtool support functions. */ +static const struct ethtool_ops bcmgenet_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + .begin = bcmgenet_begin, + .complete = bcmgenet_complete, + .get_strings = bcmgenet_get_strings, + .get_sset_count = bcmgenet_get_sset_count, + .get_ethtool_stats = bcmgenet_get_ethtool_stats, + .get_drvinfo = bcmgenet_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = bcmgenet_get_msglevel, + .set_msglevel = bcmgenet_set_msglevel, + .get_wol = bcmgenet_get_wol, + .set_wol = bcmgenet_set_wol, + .get_eee = bcmgenet_get_eee, + .set_eee = bcmgenet_set_eee, + .nway_reset = phy_ethtool_nway_reset, + .get_coalesce = bcmgenet_get_coalesce, + .set_coalesce = bcmgenet_set_coalesce, + .get_link_ksettings = bcmgenet_get_link_ksettings, + .set_link_ksettings = bcmgenet_set_link_ksettings, + .get_ts_info = ethtool_op_get_ts_info, + .get_rxnfc = bcmgenet_get_rxnfc, + .set_rxnfc = bcmgenet_set_rxnfc, + .get_pauseparam = bcmgenet_get_pauseparam, + .set_pauseparam = bcmgenet_set_pauseparam, +}; + +/* Power down the unimac, based on mode. 
*/ +static int bcmgenet_power_down(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + int ret = 0; + u32 reg; + + switch (mode) { + case GENET_POWER_CABLE_SENSE: + phy_detach(priv->dev->phydev); + break; + + case GENET_POWER_WOL_MAGIC: + ret = bcmgenet_wol_power_down_cfg(priv, mode); + break; + + case GENET_POWER_PASSIVE: + /* Power down LED */ + if (priv->hw_params->flags & GENET_HAS_EXT) { + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + if (GENET_IS_V5(priv) && !priv->ephy_16nm) + reg |= EXT_PWR_DOWN_PHY_EN | + EXT_PWR_DOWN_PHY_RD | + EXT_PWR_DOWN_PHY_SD | + EXT_PWR_DOWN_PHY_RX | + EXT_PWR_DOWN_PHY_TX | + EXT_IDDQ_GLBL_PWR; + else + reg |= EXT_PWR_DOWN_PHY; + + reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + + bcmgenet_phy_power_set(priv->dev, false); + } + break; + default: + break; + } + + return ret; +} + +static void bcmgenet_power_up(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + u32 reg; + + if (!(priv->hw_params->flags & GENET_HAS_EXT)) + return; + + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + + switch (mode) { + case GENET_POWER_PASSIVE: + reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS | + EXT_ENERGY_DET_MASK); + if (GENET_IS_V5(priv) && !priv->ephy_16nm) { + reg &= ~(EXT_PWR_DOWN_PHY_EN | + EXT_PWR_DOWN_PHY_RD | + EXT_PWR_DOWN_PHY_SD | + EXT_PWR_DOWN_PHY_RX | + EXT_PWR_DOWN_PHY_TX | + EXT_IDDQ_GLBL_PWR); + reg |= EXT_PHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + mdelay(1); + + reg &= ~EXT_PHY_RESET; + } else { + reg &= ~EXT_PWR_DOWN_PHY; + reg |= EXT_PWR_DN_EN_LD; + } + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + bcmgenet_phy_power_set(priv->dev, true); + break; + + case GENET_POWER_CABLE_SENSE: + /* enable APD */ + if (!GENET_IS_V5(priv)) { + reg |= EXT_PWR_DN_EN_LD; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + } + break; + case GENET_POWER_WOL_MAGIC: + bcmgenet_wol_power_up_cfg(priv, mode); + return; + default: + break; + } +} + +static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + struct enet_cb *tx_cb_ptr; + + tx_cb_ptr = ring->cbs; + tx_cb_ptr += ring->write_ptr - ring->cb_ptr; + + /* Advancing local write pointer */ + if (ring->write_ptr == ring->end_ptr) + ring->write_ptr = ring->cb_ptr; + else + ring->write_ptr++; + + return tx_cb_ptr; +} + +static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + struct enet_cb *tx_cb_ptr; + + tx_cb_ptr = ring->cbs; + tx_cb_ptr += ring->write_ptr - ring->cb_ptr; + + /* Rewinding local write pointer */ + if (ring->write_ptr == ring->cb_ptr) + ring->write_ptr = ring->end_ptr; + else + ring->write_ptr--; + + return tx_cb_ptr; +} + +static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, + 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, + 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), + INTRL2_CPU_MASK_CLEAR); +} + +static inline void 
bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, + INTRL2_CPU_MASK_SET); +} + +/* Simple helper to free a transmit control block's resources + * Returns an skb when the last transmit control block associated with the + * skb is freed. The skb should be freed by the caller if necessary. + */ +static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev, + struct enet_cb *cb) +{ + struct sk_buff *skb; + + skb = cb->skb; + + if (skb) { + cb->skb = NULL; + if (cb == GENET_CB(skb)->first_cb) + dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + else + dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + + if (cb == GENET_CB(skb)->last_cb) + return skb; + + } else if (dma_unmap_addr(cb, dma_addr)) { + dma_unmap_page(dev, + dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } + + return NULL; +} + +/* Simple helper to free a receive control block's resources */ +static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev, + struct enet_cb *cb) +{ + struct sk_buff *skb; + + skb = cb->skb; + cb->skb = NULL; + + if (dma_unmap_addr(cb, dma_addr)) { + dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } + + return skb; +} + +/* Unlocked version of the reclaim routine */ +static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned int txbds_processed = 0; + unsigned int bytes_compl = 0; + unsigned int pkts_compl = 0; + unsigned int txbds_ready; + unsigned int c_index; + struct sk_buff *skb; + + /* Clear status before servicing to reduce spurious interrupts */ + if (ring->index == DESC_INDEX) + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_CLEAR); + else + bcmgenet_intrl2_1_writel(priv, (1 << ring->index), + INTRL2_CPU_CLEAR); + + /* Compute how many buffers are transmitted since last xmit call */ + c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX) + & DMA_C_INDEX_MASK; + txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK; + + netif_dbg(priv, tx_done, dev, + "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", + __func__, ring->index, ring->c_index, c_index, txbds_ready); + + /* Reclaim transmitted buffers */ + while (txbds_processed < txbds_ready) { + skb = bcmgenet_free_tx_cb(&priv->pdev->dev, + &priv->tx_cbs[ring->clean_ptr]); + if (skb) { + pkts_compl++; + bytes_compl += GENET_CB(skb)->bytes_sent; + if (!priv->ecdev) + dev_consume_skb_any(skb); + } + + txbds_processed++; + if (likely(ring->clean_ptr < ring->end_ptr)) + ring->clean_ptr++; + else + ring->clean_ptr = ring->cb_ptr; + } + + ring->free_bds += txbds_processed; + ring->c_index = c_index; + + ring->packets += pkts_compl; 
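+ /* Update software reclaim counters; in EtherCAT mode (priv->ecdev set) the
+ * netdev_tx_completed_queue() bookkeeping below is skipped, since the
+ * EtherCAT master bypasses the kernel's TX queue management. */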
+ ring->bytes += bytes_compl; + + if (!priv->ecdev) + netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue), + pkts_compl, bytes_compl); + + return txbds_processed; +} + +static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) +{ + unsigned int released; + + spin_lock_bh(&ring->lock); + released = __bcmgenet_tx_reclaim(dev, ring); + spin_unlock_bh(&ring->lock); + + return released; +} + +static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_tx_ring *ring = + container_of(napi, struct bcmgenet_tx_ring, napi); + unsigned int work_done = 0; + struct netdev_queue *txq; + + if (ring->priv->ecdev) + return 0; + + spin_lock(&ring->lock); + work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring); + if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { + txq = netdev_get_tx_queue(ring->priv->dev, ring->queue); + netif_tx_wake_queue(txq); + } + spin_unlock(&ring->lock); + + if (work_done == 0) { + napi_complete(napi); + ring->int_enable(ring); + + return 0; + } + + return budget; +} + +static void bcmgenet_tx_reclaim_all(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int i; + + if (netif_is_multiqueue(dev)) { + for (i = 0; i < priv->hw_params->tx_queues; i++) + bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); + } + + bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); +} + +/* Reallocate the SKB to put enough headroom in front of it and insert + * the transmit checksum offsets in the descriptors + */ +static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, + struct sk_buff *skb) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct status_64 *status = NULL; + struct sk_buff *new_skb; + u16 offset; + u8 ip_proto; + __be16 ip_ver; + u32 tx_csum_info; + + if (unlikely(skb_headroom(skb) < sizeof(*status))) { + /* If 64 byte status block enabled, must make sure skb has + * enough headroom for us to insert 64B status block. 
+ */ + new_skb = skb_realloc_headroom(skb, sizeof(*status)); + if (!new_skb) { + dev_kfree_skb_any(skb); + priv->mib.tx_realloc_tsb_failed++; + dev->stats.tx_dropped++; + return NULL; + } + dev_consume_skb_any(skb); + skb = new_skb; + priv->mib.tx_realloc_tsb++; + } + + skb_push(skb, sizeof(*status)); + status = (struct status_64 *)skb->data; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ip_ver = skb->protocol; + switch (ip_ver) { + case htons(ETH_P_IP): + ip_proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + ip_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + /* don't use UDP flag */ + ip_proto = 0; + break; + } + + offset = skb_checksum_start_offset(skb) - sizeof(*status); + tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | + (offset + skb->csum_offset) | + STATUS_TX_CSUM_LV; + + /* Set the special UDP flag for UDP */ + if (ip_proto == IPPROTO_UDP) + tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; + + status->tx_csum_info = tx_csum_info; + } + + return skb; +} + +static void bcmgenet_hide_tsb(struct sk_buff *skb) +{ + __skb_pull(skb, sizeof(struct status_64)); +} + +static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_tx_ring *ring = NULL; + struct enet_cb *tx_cb_ptr; + struct netdev_queue *txq; + int nr_frags, index; + dma_addr_t mapping; + unsigned int size; + skb_frag_t *frag; + u32 len_stat; + int ret; + int i; + + index = skb_get_queue_mapping(skb); + /* Mapping strategy: + * queue_mapping = 0, unclassified, packet xmited through ring16 + * queue_mapping = 1, goes to ring 0. (highest priority queue + * queue_mapping = 2, goes to ring 1. + * queue_mapping = 3, goes to ring 2. + * queue_mapping = 4, goes to ring 3. 
+ */ + if (index == 0) + index = DESC_INDEX; + else + index -= 1; + + ring = &priv->tx_rings[index]; + txq = netdev_get_tx_queue(dev, ring->queue); + + nr_frags = skb_shinfo(skb)->nr_frags; + + spin_lock(&ring->lock); + if (ring->free_bds <= (nr_frags + 1)) { + if (!priv->ecdev && !netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + netdev_err(dev, + "%s: tx ring %d full when queue %d awake\n", + __func__, index, ring->queue); + } + ret = NETDEV_TX_BUSY; + goto out; + } + + /* Retain how many bytes will be sent on the wire, without TSB inserted + * by transmit checksum offload + */ + GENET_CB(skb)->bytes_sent = skb->len; + + /* add the Transmit Status Block */ + skb = bcmgenet_add_tsb(dev, skb); + if (!skb) { + ret = NETDEV_TX_OK; + goto out; + } + + for (i = 0; i <= nr_frags; i++) { + tx_cb_ptr = bcmgenet_get_txcb(priv, ring); + + BUG_ON(!tx_cb_ptr); + + if (!i) { + /* Transmit single SKB or head of fragment list */ + GENET_CB(skb)->first_cb = tx_cb_ptr; + size = skb_headlen(skb); + mapping = dma_map_single(kdev, skb->data, size, + DMA_TO_DEVICE); + } else { + /* xmit fragment */ + frag = &skb_shinfo(skb)->frags[i - 1]; + size = skb_frag_size(frag); + mapping = skb_frag_dma_map(kdev, frag, 0, size, + DMA_TO_DEVICE); + } + + ret = dma_mapping_error(kdev, mapping); + if (ret) { + priv->mib.tx_dma_failed++; + netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); + ret = NETDEV_TX_OK; + goto out_unmap_frags; + } + dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); + dma_unmap_len_set(tx_cb_ptr, dma_len, size); + + tx_cb_ptr->skb = skb; + + len_stat = (size << DMA_BUFLENGTH_SHIFT) | + (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); + + /* Note: if we ever change from DMA_TX_APPEND_CRC below we + * will need to restore software padding of "runt" packets + */ + if (!i) { + len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; + if (skb->ip_summed == CHECKSUM_PARTIAL) + len_stat |= DMA_TX_DO_CSUM; + } + if (i == nr_frags) + len_stat |= DMA_EOP; + + dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat); + } + + GENET_CB(skb)->last_cb = tx_cb_ptr; + + bcmgenet_hide_tsb(skb); + skb_tx_timestamp(skb); + + /* Decrement total BD count and advance our write pointer */ + ring->free_bds -= nr_frags + 1; + ring->prod_index += nr_frags + 1; + ring->prod_index &= DMA_P_INDEX_MASK; + + if (!priv->ecdev) { + netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent); + + if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) + netif_tx_stop_queue(txq); + } + + if (priv->ecdev || !netdev_xmit_more() || netif_xmit_stopped(txq)) + /* Packets are ready, update producer index */ + bcmgenet_tdma_ring_writel(priv, ring->index, + ring->prod_index, TDMA_PROD_INDEX); +out: + spin_unlock(&ring->lock); + + return ret; + +out_unmap_frags: + /* Back up for failed control block mapping */ + bcmgenet_put_txcb(priv, ring); + + /* Unmap successfully mapped control blocks */ + while (i-- > 0) { + tx_cb_ptr = bcmgenet_put_txcb(priv, ring); + bcmgenet_free_tx_cb(kdev, tx_cb_ptr); + } + + if (!priv->ecdev) + dev_kfree_skb(skb); + goto out; +} + +static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, + struct enet_cb *cb) +{ + struct device *kdev = &priv->pdev->dev; + struct sk_buff *skb; + struct sk_buff *rx_skb; + dma_addr_t mapping; + + /* Allocate a new Rx skb */ + skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT, + GFP_ATOMIC | __GFP_NOWARN); + if (!skb) { + priv->mib.alloc_rx_buff_failed++; + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb allocation failed\n", __func__); + return NULL; + } + + /* DMA-map the 
new Rx skb */ + mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(kdev, mapping)) { + priv->mib.rx_dma_failed++; + dev_kfree_skb_any(skb); + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb DMA mapping failed\n", __func__); + return NULL; + } + + /* Grab the current Rx skb from the ring and DMA-unmap it */ + rx_skb = bcmgenet_free_rx_cb(kdev, cb); + + /* Put the new Rx skb on the ring */ + cb->skb = skb; + dma_unmap_addr_set(cb, dma_addr, mapping); + dma_unmap_len_set(cb, dma_len, priv->rx_buf_len); + dmadesc_set_addr(priv, cb->bd_addr, mapping); + + /* Return the current Rx skb to caller */ + return rx_skb; +} + +/* bcmgenet_desc_rx - descriptor based rx process. + * this could be called from bottom half, or from NAPI polling method. + */ +static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, + unsigned int budget) +{ + struct bcmgenet_priv *priv = ring->priv; + struct net_device *dev = priv->dev; + struct device *kdev = &priv->pdev->dev; + struct enet_cb *cb; + struct sk_buff *skb; + u32 dma_length_status; + unsigned long dma_flag; + int len; + unsigned int rxpktprocessed = 0, rxpkttoprocess; + unsigned int bytes_processed = 0; + unsigned int p_index, mask; + unsigned int discards; + + if (!priv->ecdev) { + /* Clear status before servicing to reduce spurious interrupts */ + if (ring->index == DESC_INDEX) { + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_CLEAR); + } else { + mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index); + bcmgenet_intrl2_1_writel(priv, + mask, + INTRL2_CPU_CLEAR); + } + } + + p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); + + discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) & + DMA_P_INDEX_DISCARD_CNT_MASK; + if (discards > ring->old_discards) { + discards = discards - ring->old_discards; + ring->errors += discards; + ring->old_discards += discards; + + /* Clear HW register when we reach 75% of maximum 0xFFFF */ + if (ring->old_discards >= 0xC000) { + ring->old_discards = 0; + bcmgenet_rdma_ring_writel(priv, ring->index, 0, + RDMA_PROD_INDEX); + } + } + + p_index &= DMA_P_INDEX_MASK; + rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK; + + netif_dbg(priv, rx_status, dev, + "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); + + while ((rxpktprocessed < rxpkttoprocess) && + (rxpktprocessed < budget)) { + struct status_64 *status; + __be16 rx_csum; + + cb = &priv->rx_cbs[ring->read_ptr]; + if (priv->ecdev) + /* DMA unmap current skb */ + skb = bcmgenet_free_rx_cb(kdev, cb); + else + skb = bcmgenet_rx_refill(priv, cb); + + if (unlikely(!skb)) { + ring->dropped++; + goto next; + } + + status = (struct status_64 *)skb->data; + dma_length_status = status->length_status; + if (dev->features & NETIF_F_RXCSUM) { + rx_csum = (__force __be16)(status->rx_csum & 0xffff); + if (rx_csum) { + skb->csum = (__force __wsum)ntohs(rx_csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } + } + + /* DMA flags and length are still valid no matter how + * we got the Receive Status Vector (64B RSB or register) + */ + dma_flag = dma_length_status & 0xffff; + len = dma_length_status >> DMA_BUFLENGTH_SHIFT; + + netif_dbg(priv, rx_status, dev, + "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", + __func__, p_index, ring->c_index, + ring->read_ptr, dma_length_status); + + if (unlikely(len > RX_BUF_LENGTH)) { + netif_err(priv, rx_status, dev, "oversized packet\n"); + dev->stats.rx_length_errors++; + dev->stats.rx_errors++; + if (!priv->ecdev) + dev_kfree_skb_any(skb); + goto 
next; + } + + if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { + netif_err(priv, rx_status, dev, + "dropping fragmented packet!\n"); + ring->errors++; + if (!priv->ecdev) + dev_kfree_skb_any(skb); + goto next; + } + + /* report errors */ + if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | + DMA_RX_OV | + DMA_RX_NO | + DMA_RX_LG | + DMA_RX_RXER))) { + netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", + (unsigned int)dma_flag); + if (dma_flag & DMA_RX_CRC_ERROR) + dev->stats.rx_crc_errors++; + if (dma_flag & DMA_RX_OV) + dev->stats.rx_over_errors++; + if (dma_flag & DMA_RX_NO) + dev->stats.rx_frame_errors++; + if (dma_flag & DMA_RX_LG) + dev->stats.rx_length_errors++; + dev->stats.rx_errors++; + if (!priv->ecdev) + dev_kfree_skb_any(skb); + goto next; + } /* error packet */ + + if (!priv->ecdev) { + skb_put(skb, len); + + /* remove RSB and hardware 2bytes added for IP alignment */ + skb_pull(skb, 66); + } + len -= 66; + + if (priv->crc_fwd_en) { + if (!priv->ecdev) + skb_trim(skb, len - ETH_FCS_LEN); + len -= ETH_FCS_LEN; + } + + bytes_processed += len; + + /*Finish setting up the received SKB and send it to the kernel*/ + skb->protocol = eth_type_trans(skb, priv->dev); + ring->packets++; + ring->bytes += len; + if (dma_flag & DMA_RX_MULT) + dev->stats.multicast++; + + if (priv->ecdev) { + dma_addr_t mapping; + /* skip status block and padding in skb */ + const unsigned char *data = skb->data + 66; + + ecdev_receive(priv->ecdev, data, len); + + /* remap skb */ + mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(kdev, mapping)) { + priv->mib.rx_dma_failed++; + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb DMA mapping failed\n", __func__); + dev_kfree_skb_any(skb); + } else { + cb->skb = skb; + dma_unmap_addr_set(cb, dma_addr, mapping); + dma_unmap_len_set(cb, dma_len, priv->rx_buf_len); + dmadesc_set_addr(priv, cb->bd_addr, mapping); + } + } else { + /* Notify kernel */ + napi_gro_receive(&ring->napi, skb); + } + netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); + +next: + rxpktprocessed++; + if (likely(ring->read_ptr < ring->end_ptr)) + ring->read_ptr++; + else + ring->read_ptr = ring->cb_ptr; + + ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; + bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); + } + + ring->dim.bytes = bytes_processed; + ring->dim.packets = rxpktprocessed; + + return rxpktprocessed; +} + +/* Rx NAPI polling method */ +static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_rx_ring *ring = container_of(napi, + struct bcmgenet_rx_ring, napi); + struct dim_sample dim_sample = {}; + unsigned int work_done; + + if (ring->priv->ecdev) + return 0; + + work_done = bcmgenet_desc_rx(ring, budget); + + if (work_done < budget) { + napi_complete_done(napi, work_done); + ring->int_enable(ring); + } + + if (ring->dim.use_dim) { + dim_update_sample(ring->dim.event_ctr, ring->dim.packets, + ring->dim.bytes, &dim_sample); + net_dim(&ring->dim.dim, dim_sample); + } + + return work_done; +} + + +/** +* ec_poll - EtherCAT poll routine +* @netdev: net device structure +* +* This function can never fail. 
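+* It reclaims transmitted frames on all TX rings and polls every RX ring,
+* passing received frames to the EtherCAT master via ecdev_receive().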
+* +**/ +static void ec_poll(struct net_device *netdev) +{ + struct bcmgenet_priv *priv = netdev_priv(netdev); + unsigned i; + int budget = 64; + + bcmgenet_tx_reclaim_all(netdev); + for (i = 0; i < priv->hw_params->rx_queues; i++) { + bcmgenet_desc_rx(&priv->rx_rings[i], budget); + } + + bcmgenet_desc_rx(&priv->rx_rings[DESC_INDEX], budget); +} + +static void bcmgenet_dim_work(struct work_struct *work) +{ + struct dim *dim = container_of(work, struct dim, work); + struct bcmgenet_net_dim *ndim = + container_of(dim, struct bcmgenet_net_dim, dim); + struct bcmgenet_rx_ring *ring = + container_of(ndim, struct bcmgenet_rx_ring, dim); + struct dim_cq_moder cur_profile = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + + bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts); + dim->state = DIM_START_MEASURE; +} + +/* Assign skb to RX DMA descriptor. */ +static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, + struct bcmgenet_rx_ring *ring) +{ + struct enet_cb *cb; + struct sk_buff *skb; + int i; + + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); + + /* loop here for each buffer needing assign */ + for (i = 0; i < ring->size; i++) { + cb = ring->cbs + i; + skb = bcmgenet_rx_refill(priv, cb); + if (skb) + dev_consume_skb_any(skb); + if (!cb->skb) + return -ENOMEM; + } + + return 0; +} + +static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) +{ + struct sk_buff *skb; + struct enet_cb *cb; + int i; + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = &priv->rx_cbs[i]; + + skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb); + if (skb) + dev_consume_skb_any(skb); + } +} + +static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) +{ + u32 reg; + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) + return; + if (enable) + reg |= mask; + else + reg &= ~mask; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + /* UniMAC stops on a packet boundary, wait for a full-size packet + * to be processed + */ + if (enable == 0) + usleep_range(1000, 2000); +} + +static void reset_umac(struct bcmgenet_priv *priv) +{ + /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ + bcmgenet_rbuf_ctrl_set(priv, 0); + udelay(10); + + /* issue soft reset and disable MAC while updating its registers */ + bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); + udelay(2); +} + +static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) +{ + /* Mask all interrupts.*/ + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); + bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); + bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); +} + +static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) +{ + u32 int0_enable = 0; + + /* Monitor cable plug/unplugged event for internal PHY, external PHY + * and MoCA PHY + */ + if (priv->internal_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + int0_enable |= UMAC_IRQ_PHY_DET_R; + } else if (priv->ext_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + int0_enable |= UMAC_IRQ_LINK_EVENT; + } + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); +} + +static void init_umac(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + u32 reg; + u32 int0_enable = 0; + + dev_dbg(&priv->pdev->dev, 
"bcmgenet: init_umac\n"); + + reset_umac(priv); + + /* clear tx/rx counter */ + bcmgenet_umac_writel(priv, + MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, + UMAC_MIB_CTRL); + bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); + + bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + + /* init tx registers, enable TSB */ + reg = bcmgenet_tbuf_ctrl_get(priv); + reg |= TBUF_64B_EN; + bcmgenet_tbuf_ctrl_set(priv, reg); + + /* init rx registers, enable ip header optimization and RSB */ + reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); + reg |= RBUF_ALIGN_2B | RBUF_64B_EN; + bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); + + /* enable rx checksumming */ + reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); + reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS; + /* If UniMAC forwards CRC, we need to skip over it to get + * a valid CHK bit to be set in the per-packet status word + */ + if (priv->crc_fwd_en) + reg |= RBUF_SKIP_FCS; + else + reg &= ~RBUF_SKIP_FCS; + bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL); + + if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) + bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); + + bcmgenet_intr_disable(priv); + + /* Configure backpressure vectors for MoCA */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + reg = bcmgenet_bp_mc_get(priv); + reg |= BIT(priv->hw_params->bp_in_en_shift); + + /* bp_mask: back pressure mask */ + if (netif_is_multiqueue(priv->dev)) + reg |= priv->hw_params->bp_in_mask; + else + reg &= ~priv->hw_params->bp_in_mask; + bcmgenet_bp_mc_set(priv, reg); + } + + /* Enable MDIO interrupts on GENET v3+ */ + if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) { + int0_enable |= UMAC_IRQ_MDIO_ERROR; + if (!priv->ecdev) + int0_enable |= UMAC_IRQ_MDIO_DONE; + } + + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); + + dev_dbg(kdev, "done init umac\n"); +} + +static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring, + void (*cb)(struct work_struct *work)) +{ + struct bcmgenet_net_dim *dim = &ring->dim; + + INIT_WORK(&dim->dim.work, cb); + dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + dim->event_ctr = 0; + dim->packets = 0; + dim->bytes = 0; +} + +static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring) +{ + struct bcmgenet_net_dim *dim = &ring->dim; + struct dim_cq_moder moder; + u32 usecs, pkts; + + usecs = ring->rx_coalesce_usecs; + pkts = ring->rx_max_coalesced_frames; + + if (ring->priv->ecdev) + return; + + /* If DIM was enabled, re-apply default parameters */ + if (dim->use_dim) { + moder = net_dim_get_def_rx_moderation(dim->dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + bcmgenet_set_rx_coalesce(ring, usecs, pkts); +} + +/* Initialize a Tx ring along with corresponding hardware registers */ +static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, + unsigned int index, unsigned int size, + unsigned int start_ptr, unsigned int end_ptr) +{ + struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; + u32 words_per_bd = WORDS_PER_BD(priv); + u32 flow_period_val = 0; + + spin_lock_init(&ring->lock); + ring->priv = priv; + ring->index = index; + if (index == DESC_INDEX) { + ring->queue = 0; + ring->int_enable = bcmgenet_tx_ring16_int_enable; + ring->int_disable = bcmgenet_tx_ring16_int_disable; + } else { + ring->queue = index + 1; + ring->int_enable = bcmgenet_tx_ring_int_enable; + ring->int_disable = bcmgenet_tx_ring_int_disable; + } + ring->cbs = priv->tx_cbs + start_ptr; + ring->size = size; + ring->clean_ptr = start_ptr; + ring->c_index = 0; + ring->free_bds = size; + ring->write_ptr = 
start_ptr; + ring->cb_ptr = start_ptr; + ring->end_ptr = end_ptr - 1; + ring->prod_index = 0; + + /* Set flow period for ring != 16 */ + if (index != DESC_INDEX) + flow_period_val = ENET_MAX_MTU_SIZE << 16; + + bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); + bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); + bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); + /* Disable rate control for now */ + bcmgenet_tdma_ring_writel(priv, index, flow_period_val, + TDMA_FLOW_PERIOD); + bcmgenet_tdma_ring_writel(priv, index, + ((size << DMA_RING_SIZE_SHIFT) | + RX_BUF_LENGTH), DMA_RING_BUF_SIZE); + + /* Set start and end address, read and write pointers */ + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + DMA_START_ADDR); + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + TDMA_READ_PTR); + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + TDMA_WRITE_PTR); + bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); + + /* Initialize Tx NAPI */ + if (!priv->ecdev) + netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll); +} + +/* Initialize a RDMA ring */ +static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, + unsigned int index, unsigned int size, + unsigned int start_ptr, unsigned int end_ptr) +{ + struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; + u32 words_per_bd = WORDS_PER_BD(priv); + int ret; + + ring->priv = priv; + ring->index = index; + if (index == DESC_INDEX) { + ring->int_enable = bcmgenet_rx_ring16_int_enable; + ring->int_disable = bcmgenet_rx_ring16_int_disable; + } else { + ring->int_enable = bcmgenet_rx_ring_int_enable; + ring->int_disable = bcmgenet_rx_ring_int_disable; + } + ring->cbs = priv->rx_cbs + start_ptr; + ring->size = size; + ring->c_index = 0; + ring->read_ptr = start_ptr; + ring->cb_ptr = start_ptr; + ring->end_ptr = end_ptr - 1; + + ret = bcmgenet_alloc_rx_buffers(priv, ring); + if (ret) + return ret; + + bcmgenet_init_dim(ring, bcmgenet_dim_work); + bcmgenet_init_rx_coalesce(ring); + + /* Initialize Rx NAPI */ + if (!priv->ecdev) + netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll); + + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); + bcmgenet_rdma_ring_writel(priv, index, + ((size << DMA_RING_SIZE_SHIFT) | + RX_BUF_LENGTH), DMA_RING_BUF_SIZE); + bcmgenet_rdma_ring_writel(priv, index, + (DMA_FC_THRESH_LO << + DMA_XOFF_THRESHOLD_SHIFT) | + DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); + + /* Set start and end address, read and write pointers */ + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + DMA_START_ADDR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_READ_PTR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_WRITE_PTR); + bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); + + return ret; +} + +static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + if (priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + napi_enable(&ring->napi); + ring->int_enable(ring); + } + + ring = &priv->tx_rings[DESC_INDEX]; + napi_enable(&ring->napi); + ring->int_enable(ring); +} + +static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + if (priv->ecdev) + return; + + for (i = 0; i < 
priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + napi_disable(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + napi_disable(&ring->napi); +} + +static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + if (priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + netif_napi_del(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + netif_napi_del(&ring->napi); +} + +/* Initialize Tx queues + * + * Queues 0-3 are priority-based, each one has 32 descriptors, + * with queue 0 being the highest priority queue. + * + * Queue 16 is the default Tx queue with + * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. + * + * The transmit control block pool is then partitioned as follows: + * - Tx queue 0 uses tx_cbs[0..31] + * - Tx queue 1 uses tx_cbs[32..63] + * - Tx queue 2 uses tx_cbs[64..95] + * - Tx queue 3 uses tx_cbs[96..127] + * - Tx queue 16 uses tx_cbs[128..255] + */ +static void bcmgenet_init_tx_queues(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 i, dma_enable; + u32 dma_ctrl, ring_cfg; + u32 dma_priority[3] = {0, 0, 0}; + + dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); + + dma_ctrl = 0; + ring_cfg = 0; + + /* Enable strict priority arbiter mode */ + bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); + + /* Initialize Tx priority queues */ + for (i = 0; i < priv->hw_params->tx_queues; i++) { + bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, + i * priv->hw_params->tx_bds_per_q, + (i + 1) * priv->hw_params->tx_bds_per_q); + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + dma_priority[DMA_PRIO_REG_INDEX(i)] |= + ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); + } + + /* Initialize Tx default queue 16 */ + bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, + priv->hw_params->tx_queues * + priv->hw_params->tx_bds_per_q, + TOTAL_DESC); + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); + dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= + ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << + DMA_PRIO_REG_SHIFT(DESC_INDEX)); + + /* Set Tx queue priorities */ + bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); + bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); + bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); + + /* Enable Tx queues */ + bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); + + /* Enable Tx DMA */ + if (dma_enable) + dma_ctrl |= DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); +} + +static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + if (priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + napi_enable(&ring->napi); + ring->int_enable(ring); + } + + ring = &priv->rx_rings[DESC_INDEX]; + napi_enable(&ring->napi); + ring->int_enable(ring); +} + +static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + if (priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + napi_disable(&ring->napi); + cancel_work_sync(&ring->dim.dim.work); + } + + ring = &priv->rx_rings[DESC_INDEX]; + napi_disable(&ring->napi); + 
cancel_work_sync(&ring->dim.dim.work); +} + +static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + if (priv->ecdev) + return; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + netif_napi_del(&ring->napi); + } + + ring = &priv->rx_rings[DESC_INDEX]; + netif_napi_del(&ring->napi); +} + +/* Initialize Rx queues + * + * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be + * used to direct traffic to these queues. + * + * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors. + */ +static int bcmgenet_init_rx_queues(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 i; + u32 dma_enable; + u32 dma_ctrl; + u32 ring_cfg; + int ret; + + dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + dma_ctrl = 0; + ring_cfg = 0; + + /* Initialize Rx priority queues */ + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ret = bcmgenet_init_rx_ring(priv, i, + priv->hw_params->rx_bds_per_q, + i * priv->hw_params->rx_bds_per_q, + (i + 1) * + priv->hw_params->rx_bds_per_q); + if (ret) + return ret; + + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + } + + /* Initialize Rx default queue 16 */ + ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT, + priv->hw_params->rx_queues * + priv->hw_params->rx_bds_per_q, + TOTAL_DESC); + if (ret) + return ret; + + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); + + /* Enable rings */ + bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); + + /* Configure ring as descriptor ring and re-enable DMA if enabled */ + if (dma_enable) + dma_ctrl |= DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + return 0; +} + +static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) +{ + int ret = 0; + int timeout = 0; + u32 reg; + u32 dma_ctrl; + int i; + + /* Disable TDMA to stop add more frames in TX DMA */ + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + /* Check TDMA status register to confirm TDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_tdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); + ret = -ETIMEDOUT; + } + + /* Wait 10ms for packet drain in both tx and rx dma */ + usleep_range(10000, 20000); + + /* Disable RDMA */ + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + timeout = 0; + /* Check RDMA status register to confirm RDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_rdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); + ret = -ETIMEDOUT; + } + + dma_ctrl = 0; + for (i = 0; i < priv->hw_params->rx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + dma_ctrl = 0; + for (i = 0; i < priv->hw_params->tx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_tdma_writel(priv, 
reg, DMA_CTRL); + + return ret; +} + +static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) +{ + struct netdev_queue *txq; + int i; + + bcmgenet_fini_rx_napi(priv); + bcmgenet_fini_tx_napi(priv); + + for (i = 0; i < priv->num_tx_bds; i++) + dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev, + priv->tx_cbs + i)); + + for (i = 0; i < priv->hw_params->tx_queues; i++) { + txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue); + netdev_tx_reset_queue(txq); + } + + txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue); + netdev_tx_reset_queue(txq); + + bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); + kfree(priv->tx_cbs); +} + +/* init_edma: Initialize DMA control register */ +static int bcmgenet_init_dma(struct bcmgenet_priv *priv) +{ + int ret; + unsigned int i; + struct enet_cb *cb; + + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); + + /* Initialize common Rx ring structures */ + priv->rx_bds = priv->base + priv->hw_params->rdma_offset; + priv->num_rx_bds = TOTAL_DESC; + priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->rx_cbs) + return -ENOMEM; + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = priv->rx_cbs + i; + cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; + } + + /* Initialize common TX ring structures */ + priv->tx_bds = priv->base + priv->hw_params->tdma_offset; + priv->num_tx_bds = TOTAL_DESC; + priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->tx_cbs) { + kfree(priv->rx_cbs); + return -ENOMEM; + } + + for (i = 0; i < priv->num_tx_bds; i++) { + cb = priv->tx_cbs + i; + cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; + } + + /* Init rDma */ + bcmgenet_rdma_writel(priv, priv->dma_max_burst_length, + DMA_SCB_BURST_SIZE); + + /* Initialize Rx queues */ + ret = bcmgenet_init_rx_queues(priv->dev); + if (ret) { + netdev_err(priv->dev, "failed to initialize Rx queues\n"); + bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); + kfree(priv->tx_cbs); + return ret; + } + + /* Init tDma */ + bcmgenet_tdma_writel(priv, priv->dma_max_burst_length, + DMA_SCB_BURST_SIZE); + + /* Initialize Tx queues */ + bcmgenet_init_tx_queues(priv->dev); + + return 0; +} + +/* Interrupt bottom half */ +static void bcmgenet_irq_task(struct work_struct *work) +{ + unsigned int status; + struct bcmgenet_priv *priv = container_of( + work, struct bcmgenet_priv, bcmgenet_irq_work); + + netif_dbg(priv, intr, priv->dev, "%s\n", __func__); + + spin_lock_irq(&priv->lock); + status = priv->irq0_stat; + priv->irq0_stat = 0; + spin_unlock_irq(&priv->lock); + + if (status & UMAC_IRQ_PHY_DET_R && + priv->dev->phydev->autoneg != AUTONEG_ENABLE) { + phy_init_hw(priv->dev->phydev); + genphy_config_aneg(priv->dev->phydev); + } + + /* Link UP/DOWN event */ + if (status & UMAC_IRQ_LINK_EVENT) + phy_mac_interrupt(priv->dev->phydev); + +} + +/* bcmgenet_isr1: handle Rx and Tx priority queues */ +static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) +{ + struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_ring *tx_ring; + unsigned int index, status; + + /* Read irq status */ + status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & + ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + + /* clear interrupts */ + bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR); + + netif_dbg(priv, intr, priv->dev, + "%s: IRQ=0x%x\n", __func__, status); + + /* Check Rx priority queue interrupts */ + for (index = 0; index < priv->hw_params->rx_queues; index++) { + if 
(!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) + continue; + + rx_ring = &priv->rx_rings[index]; + rx_ring->dim.event_ctr++; + + if (likely(napi_schedule_prep(&rx_ring->napi))) { + rx_ring->int_disable(rx_ring); + __napi_schedule_irqoff(&rx_ring->napi); + } + } + + /* Check Tx priority queue interrupts */ + for (index = 0; index < priv->hw_params->tx_queues; index++) { + if (!(status & BIT(index))) + continue; + + tx_ring = &priv->tx_rings[index]; + + if (likely(napi_schedule_prep(&tx_ring->napi))) { + tx_ring->int_disable(tx_ring); + __napi_schedule_irqoff(&tx_ring->napi); + } + } + + return IRQ_HANDLED; +} + +/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */ +static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) +{ + struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_ring *tx_ring; + unsigned int status; + unsigned long flags; + + /* Read irq status */ + status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & + ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + + /* clear interrupts */ + bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR); + + netif_dbg(priv, intr, priv->dev, + "IRQ=0x%x\n", status); + + if (status & UMAC_IRQ_RXDMA_DONE) { + rx_ring = &priv->rx_rings[DESC_INDEX]; + rx_ring->dim.event_ctr++; + + if (likely(napi_schedule_prep(&rx_ring->napi))) { + rx_ring->int_disable(rx_ring); + __napi_schedule_irqoff(&rx_ring->napi); + } + } + + if (status & UMAC_IRQ_TXDMA_DONE) { + tx_ring = &priv->tx_rings[DESC_INDEX]; + + if (likely(napi_schedule_prep(&tx_ring->napi))) { + tx_ring->int_disable(tx_ring); + __napi_schedule_irqoff(&tx_ring->napi); + } + } + + if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && + status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { + wake_up(&priv->wq); + } + + /* all other interested interrupts handled in bottom half */ + status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R); + if (status) { + /* Save irq status for bottom-half processing. 
*/ + spin_lock_irqsave(&priv->lock, flags); + priv->irq0_stat |= status; + spin_unlock_irqrestore(&priv->lock, flags); + + schedule_work(&priv->bcmgenet_irq_work); + } + + return IRQ_HANDLED; +} + +static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) +{ + /* Acknowledge the interrupt */ + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void bcmgenet_poll_controller(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Invoke the main RX/TX interrupt handler */ + disable_irq(priv->irq0); + bcmgenet_isr0(priv->irq0, priv); + enable_irq(priv->irq0); + + /* And the interrupt handler for RX/TX priority queues */ + disable_irq(priv->irq1); + bcmgenet_isr1(priv->irq1, priv); + enable_irq(priv->irq1); +} +#endif + +static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) +{ + u32 reg; + + reg = bcmgenet_rbuf_ctrl_get(priv); + reg |= BIT(1); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); + + reg &= ~BIT(1); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); +} + +static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, + const unsigned char *addr) +{ + bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0); + bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1); +} + +static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv, + unsigned char *addr) +{ + u32 addr_tmp; + + addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0); + put_unaligned_be32(addr_tmp, &addr[0]); + addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1); + put_unaligned_be16(addr_tmp, &addr[4]); +} + +/* Returns a reusable dma control register value */ +static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) +{ + unsigned int i; + u32 reg; + u32 dma_ctrl; + + /* disable DMA */ + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->tx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->rx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); + udelay(10); + bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); + + return dma_ctrl; +} + +static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) +{ + u32 reg; + + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); +} + +static void bcmgenet_netif_start(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Start the network engine */ + bcmgenet_set_rx_mode(dev); + bcmgenet_enable_rx_napi(priv); + + umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); + + bcmgenet_enable_tx_napi(priv); + + /* Monitor link interrupts now */ + bcmgenet_link_intr_enable(priv); + + phy_start(dev->phydev); +} + +static int bcmgenet_open(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned long dma_ctrl; + int ret; + + netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); + + /* Turn on the clock */ + clk_prepare_enable(priv->clk); + + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is 
allowed + */ + if (priv->internal_phy) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + /* take MAC out of reset */ + bcmgenet_umac_reset(priv); + + init_umac(priv); + + /* Apply features again in case we changed them while interface was + * down + */ + bcmgenet_set_features(dev, dev->features); + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + + /* Disable RX/TX DMA and flush TX queues */ + dma_ctrl = bcmgenet_dma_disable(priv); + + /* Reinitialize TDMA and RDMA and SW housekeeping */ + ret = bcmgenet_init_dma(priv); + if (ret) { + netdev_err(dev, "failed to initialize DMA\n"); + goto err_clk_disable; + } + + /* Always enable ring 16 - descriptor ring */ + bcmgenet_enable_dma(priv, dma_ctrl); + + /* HFB init */ + bcmgenet_hfb_init(priv); + + ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, + dev->name, priv); + if (ret < 0) { + netdev_err(dev, "can't request IRQ %d\n", priv->irq0); + goto err_fini_dma; + } + + ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, + dev->name, priv); + if (ret < 0) { + netdev_err(dev, "can't request IRQ %d\n", priv->irq1); + goto err_irq0; + } + + ret = bcmgenet_mii_probe(dev); + if (ret) { + netdev_err(dev, "failed to connect to PHY\n"); + goto err_irq1; + } + + bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause); + + bcmgenet_netif_start(dev); + + if (!priv->ecdev) + netif_tx_start_all_queues(dev); + + return 0; + +err_irq1: + free_irq(priv->irq1, priv); +err_irq0: + free_irq(priv->irq0, priv); +err_fini_dma: + bcmgenet_dma_teardown(priv); + bcmgenet_fini_dma(priv); +err_clk_disable: + if (priv->internal_phy) + bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + clk_disable_unprepare(priv->clk); + return ret; +} + +static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + bcmgenet_disable_tx_napi(priv); + netif_tx_disable(dev); + + /* Disable MAC receive */ + umac_enable_set(priv, CMD_RX_EN, false); + + bcmgenet_dma_teardown(priv); + + /* Disable MAC transmit. TX DMA disabled must be done before this */ + umac_enable_set(priv, CMD_TX_EN, false); + + if (stop_phy) + phy_stop(dev->phydev); + bcmgenet_disable_rx_napi(priv); + bcmgenet_intr_disable(priv); + + /* Wait for pending work items to complete. Since interrupts are + * disabled no new work will be scheduled. 
+ */ + cancel_work_sync(&priv->bcmgenet_irq_work); + + /* tx reclaim */ + bcmgenet_tx_reclaim_all(dev); + bcmgenet_fini_dma(priv); +} + +static int bcmgenet_close(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret = 0; + + netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); + + bcmgenet_netif_stop(dev, false); + + /* Really kill the PHY state machine and disconnect from it */ + phy_disconnect(dev->phydev); + + free_irq(priv->irq0, priv); + free_irq(priv->irq1, priv); + + if (priv->internal_phy) + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + + clk_disable_unprepare(priv->clk); + + return ret; +} + +static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = ring->priv; + u32 p_index, c_index, intsts, intmsk; + struct netdev_queue *txq; + unsigned int free_bds; + bool txq_stopped; + + if (!netif_msg_tx_err(priv)) + return; + + txq = netdev_get_tx_queue(priv->dev, ring->queue); + + spin_lock(&ring->lock); + if (ring->index == DESC_INDEX) { + intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE; + } else { + intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = 1 << ring->index; + } + c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); + p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); + txq_stopped = netif_tx_queue_stopped(txq); + free_bds = ring->free_bds; + spin_unlock(&ring->lock); + + netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" + "TX queue status: %s, interrupts: %s\n" + "(sw)free_bds: %d (sw)size: %d\n" + "(sw)p_index: %d (hw)p_index: %d\n" + "(sw)c_index: %d (hw)c_index: %d\n" + "(sw)clean_p: %d (sw)write_p: %d\n" + "(sw)cb_ptr: %d (sw)end_ptr: %d\n", + ring->index, ring->queue, + txq_stopped ? "stopped" : "active", + intsts & intmsk ? 
"enabled" : "disabled", + free_bds, ring->size, + ring->prod_index, p_index & DMA_P_INDEX_MASK, + ring->c_index, c_index & DMA_C_INDEX_MASK, + ring->clean_ptr, ring->write_ptr, + ring->cb_ptr, ring->end_ptr); +} + +static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 int0_enable = 0; + u32 int1_enable = 0; + unsigned int q; + + netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); + + for (q = 0; q < priv->hw_params->tx_queues; q++) + bcmgenet_dump_tx_queue(&priv->tx_rings[q]); + bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]); + + bcmgenet_tx_reclaim_all(dev); + + for (q = 0; q < priv->hw_params->tx_queues; q++) + int1_enable |= (1 << q); + + int0_enable = UMAC_IRQ_TXDMA_DONE; + + /* Re-enable TX interrupts if disabled */ + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); + bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); + + netif_trans_update(dev); + + dev->stats.tx_errors++; + + netif_tx_wake_all_queues(dev); +} + +#define MAX_MDF_FILTER 17 + +static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, + const unsigned char *addr, + int *i) +{ + bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], + UMAC_MDF_ADDR + (*i * 4)); + bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 | + addr[4] << 8 | addr[5], + UMAC_MDF_ADDR + ((*i + 1) * 4)); + *i += 2; +} + +static void bcmgenet_set_rx_mode(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct netdev_hw_addr *ha; + int i, nfilter; + u32 reg; + + netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); + + /* Number of filters needed */ + nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2; + + /* + * Turn on promicuous mode for three scenarios + * 1. IFF_PROMISC flag is set + * 2. IFF_ALLMULTI flag is set + * 3. The number of filters needed exceeds the number filters + * supported by the hardware. + */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || + (nfilter > MAX_MDF_FILTER)) { + reg |= CMD_PROMISC; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); + return; + } else { + reg &= ~CMD_PROMISC; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + } + + /* update MDF filter */ + i = 0; + /* Broadcast */ + bcmgenet_set_mdf_addr(priv, dev->broadcast, &i); + /* my own address.*/ + bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i); + + /* Unicast */ + netdev_for_each_uc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i); + + /* Multicast */ + netdev_for_each_mc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i); + + /* Enable filters */ + reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter); + bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); +} + +/* Set the hardware MAC address. */ +static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + /* Setting the MAC address at the hardware level is not possible + * without disabling the UniMAC RX/TX enable bits. 
+ */ + if (netif_running(dev)) + return -EBUSY; + + eth_hw_addr_set(dev, addr->sa_data); + + return 0; +} + +static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned long tx_bytes = 0, tx_packets = 0; + unsigned long rx_bytes = 0, rx_packets = 0; + unsigned long rx_errors = 0, rx_dropped = 0; + struct bcmgenet_tx_ring *tx_ring; + struct bcmgenet_rx_ring *rx_ring; + unsigned int q; + + for (q = 0; q < priv->hw_params->tx_queues; q++) { + tx_ring = &priv->tx_rings[q]; + tx_bytes += tx_ring->bytes; + tx_packets += tx_ring->packets; + } + tx_ring = &priv->tx_rings[DESC_INDEX]; + tx_bytes += tx_ring->bytes; + tx_packets += tx_ring->packets; + + for (q = 0; q < priv->hw_params->rx_queues; q++) { + rx_ring = &priv->rx_rings[q]; + + rx_bytes += rx_ring->bytes; + rx_packets += rx_ring->packets; + rx_errors += rx_ring->errors; + rx_dropped += rx_ring->dropped; + } + rx_ring = &priv->rx_rings[DESC_INDEX]; + rx_bytes += rx_ring->bytes; + rx_packets += rx_ring->packets; + rx_errors += rx_ring->errors; + rx_dropped += rx_ring->dropped; + + dev->stats.tx_bytes = tx_bytes; + dev->stats.tx_packets = tx_packets; + dev->stats.rx_bytes = rx_bytes; + dev->stats.rx_packets = rx_packets; + dev->stats.rx_errors = rx_errors; + dev->stats.rx_missed_errors = rx_errors; + dev->stats.rx_dropped = rx_dropped; + return &dev->stats; +} + +static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) || + priv->phy_interface != PHY_INTERFACE_MODE_MOCA) + return -EOPNOTSUPP; + + if (new_carrier) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + return 0; +} + +static const struct net_device_ops bcmgenet_netdev_ops = { + .ndo_open = bcmgenet_open, + .ndo_stop = bcmgenet_close, + .ndo_start_xmit = bcmgenet_xmit, + .ndo_tx_timeout = bcmgenet_timeout, + .ndo_set_rx_mode = bcmgenet_set_rx_mode, + .ndo_set_mac_address = bcmgenet_set_mac_addr, + .ndo_eth_ioctl = phy_do_ioctl_running, + .ndo_set_features = bcmgenet_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bcmgenet_poll_controller, +#endif + .ndo_get_stats = bcmgenet_get_stats, + .ndo_change_carrier = bcmgenet_change_carrier, +}; + +/* Array of GENET hardware parameters/characteristics */ +static struct bcmgenet_hw_params bcmgenet_hw_params[] = { + [GENET_V1] = { + .tx_queues = 0, + .tx_bds_per_q = 0, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .qtag_mask = 0x1F, + .hfb_offset = 0x1000, + .rdma_offset = 0x2000, + .tdma_offset = 0x3000, + .words_per_bd = 2, + }, + [GENET_V2] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .qtag_mask = 0x1F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x1000, + .hfb_reg_offset = 0x2000, + .rdma_offset = 0x3000, + .tdma_offset = 0x4000, + .words_per_bd = 2, + .flags = GENET_HAS_EXT, + }, + [GENET_V3] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x10000, + .tdma_offset = 0x11000, + .words_per_bd = 2, + .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR | + GENET_HAS_MOCA_LINK_DET, + }, + 
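/* GENET v4 and v5 below differ from v3 mainly in using three words per
+ * buffer descriptor and in advertising GENET_HAS_40BITS (40-bit DMA
+ * addressing); their RDMA/TDMA register blocks also sit at lower offsets.
+ */
+ 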
[GENET_V4] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x2000, + .tdma_offset = 0x4000, + .words_per_bd = 3, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, + }, + [GENET_V5] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x2000, + .tdma_offset = 0x4000, + .words_per_bd = 3, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, + }, +}; + +/* Infer hardware parameters from the detected GENET version */ +static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) +{ + struct bcmgenet_hw_params *params; + u32 reg; + u8 major; + u16 gphy_rev; + + if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v4; + } else if (GENET_IS_V3(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } else if (GENET_IS_V2(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v2; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } else if (GENET_IS_V1(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v1; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } + + /* enum genet_version starts at 1 */ + priv->hw_params = &bcmgenet_hw_params[priv->version]; + params = priv->hw_params; + + /* Read GENET HW version */ + reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); + major = (reg >> 24 & 0x0f); + if (major == 6) + major = 5; + else if (major == 5) + major = 4; + else if (major == 0) + major = 1; + if (major != priv->version) { + dev_err(&priv->pdev->dev, + "GENET version mismatch, got: %d, configured for: %d\n", + major, priv->version); + } + + /* Print the GENET core version */ + dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, + major, (reg >> 16) & 0x0f, reg & 0xffff); + + /* Store the integrated PHY revision for the MDIO probing function + * to pass this information to the PHY driver. The PHY driver expects + * to find the PHY major revision in bits 15:8 while the GENET register + * stores that information in bits 7:0, account for that. + * + * On newer chips, starting with PHY revision G0, a new scheme is + * deployed similar to the Starfighter 2 switch with GPHY major + * revision in bits 15:8 and patch level in bits 7:0. Major revision 0 + * is reserved as well as special value 0x01ff, we have a small + * heuristic to check for the new GPHY revision and re-arrange things + * so the GPHY driver is happy. + */ + gphy_rev = reg & 0xffff; + + if (GENET_IS_V5(priv)) { + /* The EPHY revision should come from the MDIO registers of + * the PHY not from GENET. 
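+ * A non-zero value read here is therefore only reported with a warning
+ * and is not used to set priv->gphy_rev.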
+ */ + if (gphy_rev != 0) { + pr_warn("GENET is reporting EPHY revision: 0x%04x\n", + gphy_rev); + } + /* This is reserved so should require special treatment */ + } else if (gphy_rev == 0 || gphy_rev == 0x01ff) { + pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev); + return; + /* This is the good old scheme, just GPHY major, no minor nor patch */ + } else if ((gphy_rev & 0xf0) != 0) { + priv->gphy_rev = gphy_rev << 8; + /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */ + } else if ((gphy_rev & 0xff00) != 0) { + priv->gphy_rev = gphy_rev; + } + +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (!(params->flags & GENET_HAS_40BITS)) + pr_warn("GENET does not support 40-bits PA\n"); +#endif + + pr_debug("Configuration for version: %d\n" + "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n" + "BP << en: %2d, BP msk: 0x%05x\n" + "HFB count: %2d, QTAQ msk: 0x%05x\n" + "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" + "RDMA: 0x%05x, TDMA: 0x%05x\n" + "Words/BD: %d\n", + priv->version, + params->tx_queues, params->tx_bds_per_q, + params->rx_queues, params->rx_bds_per_q, + params->bp_in_en_shift, params->bp_in_mask, + params->hfb_filter_cnt, params->qtag_mask, + params->tbuf_offset, params->hfb_offset, + params->hfb_reg_offset, + params->rdma_offset, params->tdma_offset, + params->words_per_bd); +} + +struct bcmgenet_plat_data { + enum bcmgenet_version version; + u32 dma_max_burst_length; + bool ephy_16nm; +}; + +static const struct bcmgenet_plat_data v1_plat_data = { + .version = GENET_V1, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v2_plat_data = { + .version = GENET_V2, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v3_plat_data = { + .version = GENET_V3, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v4_plat_data = { + .version = GENET_V4, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v5_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data bcm2711_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = 0x08, +}; + +static const struct bcmgenet_plat_data bcm7712_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, + .ephy_16nm = true, +}; + +static const struct of_device_id bcmgenet_match[] = { + { .compatible = "brcm,genet-v1", .data = &v1_plat_data }, + { .compatible = "brcm,genet-v2", .data = &v2_plat_data }, + { .compatible = "brcm,genet-v3", .data = &v3_plat_data }, + { .compatible = "brcm,genet-v4", .data = &v4_plat_data }, + { .compatible = "brcm,genet-v5", .data = &v5_plat_data }, + { .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data }, + { .compatible = "brcm,bcm7712-genet-v5", .data = &bcm7712_plat_data }, + { }, +}; +MODULE_DEVICE_TABLE(of, bcmgenet_match); + +static int bcmgenet_probe(struct platform_device *pdev) +{ + struct bcmgenet_platform_data *pd = pdev->dev.platform_data; + const struct bcmgenet_plat_data *pdata; + struct bcmgenet_priv *priv; + struct net_device *dev; + unsigned int i; + int err = -EIO; + + /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ + dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, + GENET_MAX_MQ_CNT + 1); + if (!dev) { + dev_err(&pdev->dev, "can't allocate net device\n"); + return -ENOMEM; + } + + priv = netdev_priv(dev); + priv->irq0 = platform_get_irq(pdev, 0); + if (priv->irq0 < 0) { + err = 
priv->irq0; + goto err; + } + priv->irq1 = platform_get_irq(pdev, 1); + if (priv->irq1 < 0) { + err = priv->irq1; + goto err; + } + priv->wol_irq = platform_get_irq_optional(pdev, 2); + if (priv->wol_irq == -EPROBE_DEFER) { + err = priv->wol_irq; + goto err; + } + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) { + err = PTR_ERR(priv->base); + goto err; + } + + spin_lock_init(&priv->lock); + + /* Set default pause parameters */ + priv->autoneg_pause = 1; + priv->tx_pause = 1; + priv->rx_pause = 1; + + SET_NETDEV_DEV(dev, &pdev->dev); + dev_set_drvdata(&pdev->dev, dev); + dev->watchdog_timeo = 2 * HZ; + dev->ethtool_ops = &bcmgenet_ethtool_ops; + dev->netdev_ops = &bcmgenet_netdev_ops; + + priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); + + /* Set default features */ + dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | + NETIF_F_RXCSUM; + dev->hw_features |= dev->features; + dev->vlan_features |= dev->features; + + /* Request the WOL interrupt and advertise suspend if available */ + priv->wol_irq_disabled = true; + if (priv->wol_irq > 0) { + err = devm_request_irq(&pdev->dev, priv->wol_irq, + bcmgenet_wol_isr, 0, dev->name, priv); + if (!err) + device_set_wakeup_capable(&pdev->dev, 1); + } + + /* Set the needed headroom to account for any possible + * features enabling/disabling at runtime + */ + dev->needed_headroom += 64; + + priv->dev = dev; + priv->pdev = pdev; + + pdata = device_get_match_data(&pdev->dev); + if (pdata) { + priv->version = pdata->version; + priv->dma_max_burst_length = pdata->dma_max_burst_length; + priv->ephy_16nm = pdata->ephy_16nm; + } else { + priv->version = pd->genet_version; + priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH; + } + + priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet"); + if (IS_ERR(priv->clk)) { + dev_dbg(&priv->pdev->dev, "failed to get enet clock\n"); + err = PTR_ERR(priv->clk); + goto err; + } + + err = clk_prepare_enable(priv->clk); + if (err) + goto err; + + bcmgenet_set_hw_params(priv); + + err = -EIO; + if (priv->hw_params->flags & GENET_HAS_40BITS) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (err) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + goto err_clk_disable; + + /* Mii wait queue */ + init_waitqueue_head(&priv->wq); + /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ + priv->rx_buf_len = RX_BUF_LENGTH; + INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); + + priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol"); + if (IS_ERR(priv->clk_wol)) { + dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n"); + err = PTR_ERR(priv->clk_wol); + goto err_clk_disable; + } + + priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee"); + if (IS_ERR(priv->clk_eee)) { + dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n"); + err = PTR_ERR(priv->clk_eee); + goto err_clk_disable; + } + + /* If this is an internal GPHY, power it on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + if (pd && !IS_ERR_OR_NULL(pd->mac_address)) + eth_hw_addr_set(dev, pd->mac_address); + else + if (device_get_ethdev_address(&pdev->dev, dev)) + if (has_acpi_companion(&pdev->dev)) { + u8 addr[ETH_ALEN]; + + bcmgenet_get_hw_addr(priv, addr); + eth_hw_addr_set(dev, addr); + } + + if (!is_valid_ether_addr(dev->dev_addr)) { + 
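/* Neither platform data, DT/ACPI nor the hardware registers provided
+ * a valid address, so fall back to a random MAC.
+ */
+ 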
dev_warn(&pdev->dev, "using random Ethernet MAC\n"); + eth_hw_addr_random(dev); + } + + reset_umac(priv); + + err = bcmgenet_mii_init(dev); + if (err) + goto err_clk_disable; + + /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues + * just the ring 16 descriptor based TX + */ + netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); + netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); + + /* Set default coalescing parameters */ + for (i = 0; i < priv->hw_params->rx_queues; i++) + priv->rx_rings[i].rx_max_coalesced_frames = 1; + priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1; + + /* libphy will determine the link state */ + netif_carrier_off(dev); + + /* Turn off the main clock, WOL clock is handled separately */ + clk_disable_unprepare(priv->clk); + + priv->ecdev = ecdev_offer(dev, ec_poll, THIS_MODULE); + if (priv->ecdev) { + err = ecdev_open(priv->ecdev); + if (err) { + ecdev_withdraw(priv->ecdev); + priv->ecdev = NULL; + bcmgenet_mii_exit(dev); + goto err; + } + } else { + err = register_netdev(dev); + if (err) { + bcmgenet_mii_exit(dev); + goto err; + } + } + + return err; + +err_clk_disable: + clk_disable_unprepare(priv->clk); +err: + free_netdev(dev); + return err; +} + +static int bcmgenet_remove(struct platform_device *pdev) +{ + struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); + + dev_set_drvdata(&pdev->dev, NULL); + if (priv->ecdev) { + ecdev_close(priv->ecdev); + ecdev_withdraw(priv->ecdev); + priv->ecdev = NULL; + } else + unregister_netdev(priv->dev); + bcmgenet_mii_exit(priv->dev); + free_netdev(priv->dev); + + return 0; +} + +static void bcmgenet_shutdown(struct platform_device *pdev) +{ + bcmgenet_remove(pdev); +} + +#ifdef CONFIG_PM_SLEEP +static int bcmgenet_resume_noirq(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + u32 reg; + + if (priv->ecdev) + ecdev_open(priv->ecdev); + else if (!netif_running(dev)) + return 0; + + /* Turn on the clock */ + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + if (device_may_wakeup(d) && priv->wolopts) { + /* Account for Wake-on-LAN events and clear those events + * (Some devices need more time between enabling the clocks + * and the interrupt register reflecting the wake event so + * read the register twice) + */ + reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT); + reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT); + if (reg & UMAC_IRQ_WAKE_EVENT) + pm_wakeup_event(&priv->pdev->dev, 0); + } + + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR); + + return 0; +} + +static int bcmgenet_resume(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + unsigned long dma_ctrl; + int ret; + + if (!netif_running(dev)) + return 0; + + /* From WOL-enabled suspend, switch to regular clock */ + if (device_may_wakeup(d) && priv->wolopts) + bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); + + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (priv->internal_phy) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + bcmgenet_umac_reset(priv); + + init_umac(priv); + + phy_init_hw(dev->phydev); + + /* Speed settings must be restored */ + genphy_config_aneg(dev->phydev); + bcmgenet_mii_config(priv->dev, false); + + /* Restore enabled features */ + bcmgenet_set_features(dev, 
dev->features); + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + + /* Restore hardware filters */ + bcmgenet_hfb_clear(priv); + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) + bcmgenet_hfb_create_rxnfc_filter(priv, rule); + + /* Disable RX/TX DMA and flush TX queues */ + dma_ctrl = bcmgenet_dma_disable(priv); + + /* Reinitialize TDMA and RDMA and SW housekeeping */ + ret = bcmgenet_init_dma(priv); + if (ret) { + netdev_err(dev, "failed to initialize DMA\n"); + goto out_clk_disable; + } + + /* Always enable ring 16 - descriptor ring */ + bcmgenet_enable_dma(priv, dma_ctrl); + + if (!device_may_wakeup(d)) + phy_resume(dev->phydev); + + bcmgenet_netif_start(dev); + + if (priv->ecdev) + ecdev_open(priv->ecdev); + else + netif_device_attach(dev); + + return 0; + +out_clk_disable: + if (priv->internal_phy) + bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + clk_disable_unprepare(priv->clk); + return ret; +} + +static int bcmgenet_suspend(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (priv->ecdev) { + ecdev_close(priv->ecdev); + } else { + if (!netif_running(dev)) + return 0; + + netif_device_detach(dev); + } + + bcmgenet_netif_stop(dev, true); + + if (!device_may_wakeup(d)) + phy_suspend(dev->phydev); + + /* Disable filtering */ + bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL); + + return 0; +} + +static int bcmgenet_suspend_noirq(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret = 0; + + if (priv->ecdev) { + ecdev_close(priv->ecdev); + } else if (!netif_running(dev)) + return 0; + + /* Prepare the device for Wake-on-LAN and switch to the slow clock */ + if (device_may_wakeup(d) && priv->wolopts) + ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); + else if (priv->internal_phy) + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + + /* Let the framework handle resumption and leave the clocks on */ + if (ret) + return ret; + + /* Turn off the clocks */ + clk_disable_unprepare(priv->clk); + + return 0; +} +#else +#define bcmgenet_suspend NULL +#define bcmgenet_suspend_noirq NULL +#define bcmgenet_resume NULL +#define bcmgenet_resume_noirq NULL +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops bcmgenet_pm_ops = { + .suspend = bcmgenet_suspend, + .suspend_noirq = bcmgenet_suspend_noirq, + .resume = bcmgenet_resume, + .resume_noirq = bcmgenet_resume_noirq, +}; + +static const struct acpi_device_id genet_acpi_match[] = { + { "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, genet_acpi_match); + +static struct platform_driver bcmgenet_driver = { + .probe = bcmgenet_probe, + .remove = bcmgenet_remove, + .shutdown = bcmgenet_shutdown, + .driver = { + .name = "ec_bcmgenet", + .of_match_table = bcmgenet_match, + .pm = &bcmgenet_pm_ops, + .acpi_match_table = genet_acpi_match, + }, +}; +module_platform_driver(bcmgenet_driver); + +MODULE_AUTHOR("Broadcom Corporation"); +MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver " + "(EtherCAT enabled, Version " REV ")"); +MODULE_LICENSE("GPL"); +MODULE_SOFTDEP("pre: mdio-bcm-unimac"); diff --git a/devices/genet/bcmgenet-6.1-ethercat.h b/devices/genet/bcmgenet-6.1-ethercat.h new file mode 100644 index 00000000..e624e4cf --- /dev/null +++ b/devices/genet/bcmgenet-6.1-ethercat.h @@ -0,0 +1,714 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014-2020 Broadcom + */ + +#ifndef 
__BCMGENET_H__ +#define __BCMGENET_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "unimac-6.1-ethercat.h" + +/* EtherCAT header file */ +#include "../ecdev.h" + +/* total number of Buffer Descriptors, same for Rx/Tx */ +#define TOTAL_DESC 256 + +/* which ring is descriptor based */ +#define DESC_INDEX 16 + +/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528. + * 1536 is multiple of 256 bytes + */ +#define ENET_BRCM_TAG_LEN 6 +#define ENET_PAD 8 +#define ENET_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \ + ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD) +#define DMA_MAX_BURST_LENGTH 0x10 + +/* misc. configuration */ +#define MAX_NUM_OF_FS_RULES 16 +#define CLEAR_ALL_HFB 0xFF +#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4) +#define DMA_FC_THRESH_LO 5 + +/* 64B receive/transmit status block */ +struct status_64 { + u32 length_status; /* length and peripheral status */ + u32 ext_status; /* Extended status*/ + u32 rx_csum; /* partial rx checksum */ + u32 unused1[9]; /* unused */ + u32 tx_csum_info; /* Tx checksum info. */ + u32 unused2[3]; /* unused */ +}; + +/* Rx status bits */ +#define STATUS_RX_EXT_MASK 0x1FFFFF +#define STATUS_RX_CSUM_MASK 0xFFFF +#define STATUS_RX_CSUM_OK 0x10000 +#define STATUS_RX_CSUM_FR 0x20000 +#define STATUS_RX_PROTO_TCP 0 +#define STATUS_RX_PROTO_UDP 1 +#define STATUS_RX_PROTO_ICMP 2 +#define STATUS_RX_PROTO_OTHER 3 +#define STATUS_RX_PROTO_MASK 3 +#define STATUS_RX_PROTO_SHIFT 18 +#define STATUS_FILTER_INDEX_MASK 0xFFFF +/* Tx status bits */ +#define STATUS_TX_CSUM_START_MASK 0X7FFF +#define STATUS_TX_CSUM_START_SHIFT 16 +#define STATUS_TX_CSUM_PROTO_UDP 0x8000 +#define STATUS_TX_CSUM_OFFSET_MASK 0x7FFF +#define STATUS_TX_CSUM_LV 0x80000000 + +/* DMA Descriptor */ +#define DMA_DESC_LENGTH_STATUS 0x00 /* in bytes of data in buffer */ +#define DMA_DESC_ADDRESS_LO 0x04 /* lower bits of PA */ +#define DMA_DESC_ADDRESS_HI 0x08 /* upper 32 bits of PA, GENETv4+ */ + +/* Rx/Tx common counter group */ +struct bcmgenet_pkt_counters { + u32 cnt_64; /* RO Received/Transmited 64 bytes packet */ + u32 cnt_127; /* RO Rx/Tx 127 bytes packet */ + u32 cnt_255; /* RO Rx/Tx 65-255 bytes packet */ + u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */ + u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */ + u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */ + u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */ + u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet*/ + u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet*/ + u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet*/ +}; + +/* RSV, Receive Status Vector */ +struct bcmgenet_rx_counters { + struct bcmgenet_pkt_counters pkt_cnt; + u32 pkt; /* RO (0x428) Received pkt count*/ + u32 bytes; /* RO Received byte count */ + u32 mca; /* RO # of Received multicast pkt */ + u32 bca; /* RO # of Receive broadcast pkt */ + u32 fcs; /* RO # of Received FCS error */ + u32 cf; /* RO # of Received control frame pkt*/ + u32 pf; /* RO # of Received pause frame pkt */ + u32 uo; /* RO # of unknown op code pkt */ + u32 aln; /* RO # of alignment error count */ + u32 flr; /* RO # of frame length out of range count */ + u32 cde; /* RO # of code error pkt */ + u32 fcr; /* RO # of carrier sense error pkt */ + u32 ovr; /* RO # of oversize pkt*/ + u32 jbr; /* RO # of jabber count */ + u32 mtue; /* RO # of MTU error pkt*/ + u32 pok; /* RO # of Received good pkt */ + u32 uc; /* RO # of unicast pkt */ + u32 ppp; /* RO # of PPP pkt */ + u32 rcrc; /* RO (0x470),# of CRC match pkt */ +}; + +/* 
TSV, Transmit Status Vector */ +struct bcmgenet_tx_counters { + struct bcmgenet_pkt_counters pkt_cnt; + u32 pkts; /* RO (0x4a8) Transmited pkt */ + u32 mca; /* RO # of xmited multicast pkt */ + u32 bca; /* RO # of xmited broadcast pkt */ + u32 pf; /* RO # of xmited pause frame count */ + u32 cf; /* RO # of xmited control frame count */ + u32 fcs; /* RO # of xmited FCS error count */ + u32 ovr; /* RO # of xmited oversize pkt */ + u32 drf; /* RO # of xmited deferral pkt */ + u32 edf; /* RO # of xmited Excessive deferral pkt*/ + u32 scl; /* RO # of xmited single collision pkt */ + u32 mcl; /* RO # of xmited multiple collision pkt*/ + u32 lcl; /* RO # of xmited late collision pkt */ + u32 ecl; /* RO # of xmited excessive collision pkt*/ + u32 frg; /* RO # of xmited fragments pkt*/ + u32 ncl; /* RO # of xmited total collision count */ + u32 jbr; /* RO # of xmited jabber count*/ + u32 bytes; /* RO # of xmited byte count */ + u32 pok; /* RO # of xmited good pkt */ + u32 uc; /* RO (0x0x4f0)# of xmited unitcast pkt */ +}; + +struct bcmgenet_mib_counters { + struct bcmgenet_rx_counters rx; + struct bcmgenet_tx_counters tx; + u32 rx_runt_cnt; + u32 rx_runt_fcs; + u32 rx_runt_fcs_align; + u32 rx_runt_bytes; + u32 rbuf_ovflow_cnt; + u32 rbuf_err_cnt; + u32 mdf_err_cnt; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 tx_dma_failed; + u32 tx_realloc_tsb; + u32 tx_realloc_tsb_failed; +}; + +#define UMAC_MIB_START 0x400 + +#define UMAC_MDIO_CMD 0x614 +#define MDIO_START_BUSY (1 << 29) +#define MDIO_READ_FAIL (1 << 28) +#define MDIO_RD (2 << 26) +#define MDIO_WR (1 << 26) +#define MDIO_PMD_SHIFT 21 +#define MDIO_PMD_MASK 0x1F +#define MDIO_REG_SHIFT 16 +#define MDIO_REG_MASK 0x1F + +#define UMAC_RBUF_OVFL_CNT_V1 0x61C +#define RBUF_OVFL_CNT_V2 0x80 +#define RBUF_OVFL_CNT_V3PLUS 0x94 + +#define UMAC_MPD_CTRL 0x620 +#define MPD_EN (1 << 0) +#define MPD_PW_EN (1 << 27) +#define MPD_MSEQ_LEN_SHIFT 16 +#define MPD_MSEQ_LEN_MASK 0xFF + +#define UMAC_MPD_PW_MS 0x624 +#define UMAC_MPD_PW_LS 0x628 +#define UMAC_RBUF_ERR_CNT_V1 0x634 +#define RBUF_ERR_CNT_V2 0x84 +#define RBUF_ERR_CNT_V3PLUS 0x98 +#define UMAC_MDF_ERR_CNT 0x638 +#define UMAC_MDF_CTRL 0x650 +#define UMAC_MDF_ADDR 0x654 +#define UMAC_MIB_CTRL 0x580 +#define MIB_RESET_RX (1 << 0) +#define MIB_RESET_RUNT (1 << 1) +#define MIB_RESET_TX (1 << 2) + +#define RBUF_CTRL 0x00 +#define RBUF_64B_EN (1 << 0) +#define RBUF_ALIGN_2B (1 << 1) +#define RBUF_BAD_DIS (1 << 2) + +#define RBUF_STATUS 0x0C +#define RBUF_STATUS_WOL (1 << 0) +#define RBUF_STATUS_MPD_INTR_ACTIVE (1 << 1) +#define RBUF_STATUS_ACPI_INTR_ACTIVE (1 << 2) + +#define RBUF_CHK_CTRL 0x14 +#define RBUF_RXCHK_EN (1 << 0) +#define RBUF_SKIP_FCS (1 << 4) +#define RBUF_L3_PARSE_DIS (1 << 5) + +#define RBUF_ENERGY_CTRL 0x9c +#define RBUF_EEE_EN (1 << 0) +#define RBUF_PM_EN (1 << 1) + +#define RBUF_TBUF_SIZE_CTRL 0xb4 + +#define RBUF_HFB_CTRL_V1 0x38 +#define RBUF_HFB_FILTER_EN_SHIFT 16 +#define RBUF_HFB_FILTER_EN_MASK 0xffff0000 +#define RBUF_HFB_EN (1 << 0) +#define RBUF_HFB_256B (1 << 1) +#define RBUF_ACPI_EN (1 << 2) + +#define RBUF_HFB_LEN_V1 0x3C +#define RBUF_FLTR_LEN_MASK 0xFF +#define RBUF_FLTR_LEN_SHIFT 8 + +#define TBUF_CTRL 0x00 +#define TBUF_64B_EN (1 << 0) +#define TBUF_BP_MC 0x0C +#define TBUF_ENERGY_CTRL 0x14 +#define TBUF_EEE_EN (1 << 0) +#define TBUF_PM_EN (1 << 1) + +#define TBUF_CTRL_V1 0x80 +#define TBUF_BP_MC_V1 0xA0 + +#define HFB_CTRL 0x00 +#define HFB_FLT_ENABLE_V3PLUS 0x04 +#define HFB_FLT_LEN_V2 0x04 +#define HFB_FLT_LEN_V3PLUS 0x1C + +/* uniMac intrl2 registers */ +#define 
INTRL2_CPU_STAT 0x00 +#define INTRL2_CPU_SET 0x04 +#define INTRL2_CPU_CLEAR 0x08 +#define INTRL2_CPU_MASK_STATUS 0x0C +#define INTRL2_CPU_MASK_SET 0x10 +#define INTRL2_CPU_MASK_CLEAR 0x14 + +/* INTRL2 instance 0 definitions */ +#define UMAC_IRQ_SCB (1 << 0) +#define UMAC_IRQ_EPHY (1 << 1) +#define UMAC_IRQ_PHY_DET_R (1 << 2) +#define UMAC_IRQ_PHY_DET_F (1 << 3) +#define UMAC_IRQ_LINK_UP (1 << 4) +#define UMAC_IRQ_LINK_DOWN (1 << 5) +#define UMAC_IRQ_LINK_EVENT (UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN) +#define UMAC_IRQ_UMAC (1 << 6) +#define UMAC_IRQ_UMAC_TSV (1 << 7) +#define UMAC_IRQ_TBUF_UNDERRUN (1 << 8) +#define UMAC_IRQ_RBUF_OVERFLOW (1 << 9) +#define UMAC_IRQ_HFB_SM (1 << 10) +#define UMAC_IRQ_HFB_MM (1 << 11) +#define UMAC_IRQ_MPD_R (1 << 12) +#define UMAC_IRQ_WAKE_EVENT (UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM | \ + UMAC_IRQ_MPD_R) +#define UMAC_IRQ_RXDMA_MBDONE (1 << 13) +#define UMAC_IRQ_RXDMA_PDONE (1 << 14) +#define UMAC_IRQ_RXDMA_BDONE (1 << 15) +#define UMAC_IRQ_RXDMA_DONE UMAC_IRQ_RXDMA_MBDONE +#define UMAC_IRQ_TXDMA_MBDONE (1 << 16) +#define UMAC_IRQ_TXDMA_PDONE (1 << 17) +#define UMAC_IRQ_TXDMA_BDONE (1 << 18) +#define UMAC_IRQ_TXDMA_DONE UMAC_IRQ_TXDMA_MBDONE + +/* Only valid for GENETv3+ */ +#define UMAC_IRQ_MDIO_DONE (1 << 23) +#define UMAC_IRQ_MDIO_ERROR (1 << 24) + +/* INTRL2 instance 1 definitions */ +#define UMAC_IRQ1_TX_INTR_MASK 0xFFFF +#define UMAC_IRQ1_RX_INTR_MASK 0xFFFF +#define UMAC_IRQ1_RX_INTR_SHIFT 16 + +/* Register block offsets */ +#define GENET_SYS_OFF 0x0000 +#define GENET_GR_BRIDGE_OFF 0x0040 +#define GENET_EXT_OFF 0x0080 +#define GENET_INTRL2_0_OFF 0x0200 +#define GENET_INTRL2_1_OFF 0x0240 +#define GENET_RBUF_OFF 0x0300 +#define GENET_UMAC_OFF 0x0800 + +/* SYS block offsets and register definitions */ +#define SYS_REV_CTRL 0x00 +#define SYS_PORT_CTRL 0x04 +#define PORT_MODE_INT_EPHY 0 +#define PORT_MODE_INT_GPHY 1 +#define PORT_MODE_EXT_EPHY 2 +#define PORT_MODE_EXT_GPHY 3 +#define PORT_MODE_EXT_RVMII_25 (4 | BIT(4)) +#define PORT_MODE_EXT_RVMII_50 4 +#define LED_ACT_SOURCE_MAC (1 << 9) + +#define SYS_RBUF_FLUSH_CTRL 0x08 +#define SYS_TBUF_FLUSH_CTRL 0x0C +#define RBUF_FLUSH_CTRL_V1 0x04 + +/* Ext block register offsets and definitions */ +#define EXT_EXT_PWR_MGMT 0x00 +#define EXT_PWR_DOWN_BIAS (1 << 0) +#define EXT_PWR_DOWN_DLL (1 << 1) +#define EXT_PWR_DOWN_PHY (1 << 2) +#define EXT_PWR_DN_EN_LD (1 << 3) +#define EXT_ENERGY_DET (1 << 4) +#define EXT_IDDQ_FROM_PHY (1 << 5) +#define EXT_IDDQ_GLBL_PWR (1 << 7) +#define EXT_PHY_RESET (1 << 8) +#define EXT_ENERGY_DET_MASK (1 << 12) +#define EXT_PWR_DOWN_PHY_TX (1 << 16) +#define EXT_PWR_DOWN_PHY_RX (1 << 17) +#define EXT_PWR_DOWN_PHY_SD (1 << 18) +#define EXT_PWR_DOWN_PHY_RD (1 << 19) +#define EXT_PWR_DOWN_PHY_EN (1 << 20) + +#define EXT_RGMII_OOB_CTRL 0x0C +#define RGMII_MODE_EN_V123 (1 << 0) +#define RGMII_LINK (1 << 4) +#define OOB_DISABLE (1 << 5) +#define RGMII_MODE_EN (1 << 6) +#define ID_MODE_DIS (1 << 16) + +#define EXT_GPHY_CTRL 0x1C +#define EXT_CFG_IDDQ_BIAS (1 << 0) +#define EXT_CFG_PWR_DOWN (1 << 1) +#define EXT_CK25_DIS (1 << 4) +#define EXT_CFG_IDDQ_GLOBAL_PWR (1 << 3) +#define EXT_GPHY_RESET (1 << 5) + +/* DMA rings size */ +#define DMA_RING_SIZE (0x40) +#define DMA_RINGS_SIZE (DMA_RING_SIZE * (DESC_INDEX + 1)) + +/* DMA registers common definitions */ +#define DMA_RW_POINTER_MASK 0x1FF +#define DMA_P_INDEX_DISCARD_CNT_MASK 0xFFFF +#define DMA_P_INDEX_DISCARD_CNT_SHIFT 16 +#define DMA_BUFFER_DONE_CNT_MASK 0xFFFF +#define DMA_BUFFER_DONE_CNT_SHIFT 16 +#define DMA_P_INDEX_MASK 0xFFFF +#define 
DMA_C_INDEX_MASK 0xFFFF + +/* DMA ring size register */ +#define DMA_RING_SIZE_MASK 0xFFFF +#define DMA_RING_SIZE_SHIFT 16 +#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF + +/* DMA interrupt threshold register */ +#define DMA_INTR_THRESHOLD_MASK 0x01FF + +/* DMA XON/XOFF register */ +#define DMA_XON_THREHOLD_MASK 0xFFFF +#define DMA_XOFF_THRESHOLD_MASK 0xFFFF +#define DMA_XOFF_THRESHOLD_SHIFT 16 + +/* DMA flow period register */ +#define DMA_FLOW_PERIOD_MASK 0xFFFF +#define DMA_MAX_PKT_SIZE_MASK 0xFFFF +#define DMA_MAX_PKT_SIZE_SHIFT 16 + + +/* DMA control register */ +#define DMA_EN (1 << 0) +#define DMA_RING_BUF_EN_SHIFT 0x01 +#define DMA_RING_BUF_EN_MASK 0xFFFF +#define DMA_TSB_SWAP_EN (1 << 20) + +/* DMA status register */ +#define DMA_DISABLED (1 << 0) +#define DMA_DESC_RAM_INIT_BUSY (1 << 1) + +/* DMA SCB burst size register */ +#define DMA_SCB_BURST_SIZE_MASK 0x1F + +/* DMA activity vector register */ +#define DMA_ACTIVITY_VECTOR_MASK 0x1FFFF + +/* DMA backpressure mask register */ +#define DMA_BACKPRESSURE_MASK 0x1FFFF +#define DMA_PFC_ENABLE (1 << 31) + +/* DMA backpressure status register */ +#define DMA_BACKPRESSURE_STATUS_MASK 0x1FFFF + +/* DMA override register */ +#define DMA_LITTLE_ENDIAN_MODE (1 << 0) +#define DMA_REGISTER_MODE (1 << 1) + +/* DMA timeout register */ +#define DMA_TIMEOUT_MASK 0xFFFF +#define DMA_TIMEOUT_VAL 5000 /* micro seconds */ + +/* TDMA rate limiting control register */ +#define DMA_RATE_LIMIT_EN_MASK 0xFFFF + +/* TDMA arbitration control register */ +#define DMA_ARBITER_MODE_MASK 0x03 +#define DMA_RING_BUF_PRIORITY_MASK 0x1F +#define DMA_RING_BUF_PRIORITY_SHIFT 5 +#define DMA_PRIO_REG_INDEX(q) ((q) / 6) +#define DMA_PRIO_REG_SHIFT(q) (((q) % 6) * DMA_RING_BUF_PRIORITY_SHIFT) +#define DMA_RATE_ADJ_MASK 0xFF + +/* Tx/Rx Dma Descriptor common bits*/ +#define DMA_BUFLENGTH_MASK 0x0fff +#define DMA_BUFLENGTH_SHIFT 16 +#define DMA_OWN 0x8000 +#define DMA_EOP 0x4000 +#define DMA_SOP 0x2000 +#define DMA_WRAP 0x1000 +/* Tx specific Dma descriptor bits */ +#define DMA_TX_UNDERRUN 0x0200 +#define DMA_TX_APPEND_CRC 0x0040 +#define DMA_TX_OW_CRC 0x0020 +#define DMA_TX_DO_CSUM 0x0010 +#define DMA_TX_QTAG_SHIFT 7 + +/* Rx Specific Dma descriptor bits */ +#define DMA_RX_CHK_V3PLUS 0x8000 +#define DMA_RX_CHK_V12 0x1000 +#define DMA_RX_BRDCAST 0x0040 +#define DMA_RX_MULT 0x0020 +#define DMA_RX_LG 0x0010 +#define DMA_RX_NO 0x0008 +#define DMA_RX_RXER 0x0004 +#define DMA_RX_CRC_ERROR 0x0002 +#define DMA_RX_OV 0x0001 +#define DMA_RX_FI_MASK 0x001F +#define DMA_RX_FI_SHIFT 0x0007 +#define DMA_DESC_ALLOC_MASK 0x00FF + +#define DMA_ARBITER_RR 0x00 +#define DMA_ARBITER_WRR 0x01 +#define DMA_ARBITER_SP 0x02 + +struct enet_cb { + struct sk_buff *skb; + void __iomem *bd_addr; + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +/* power management mode */ +enum bcmgenet_power_mode { + GENET_POWER_CABLE_SENSE = 0, + GENET_POWER_PASSIVE, + GENET_POWER_WOL_MAGIC, +}; + +struct bcmgenet_priv; + +/* We support both runtime GENET detection and compile-time + * to optimize code-paths for a given hardware + */ +enum bcmgenet_version { + GENET_V1 = 1, + GENET_V2, + GENET_V3, + GENET_V4, + GENET_V5 +}; + +#define GENET_IS_V1(p) ((p)->version == GENET_V1) +#define GENET_IS_V2(p) ((p)->version == GENET_V2) +#define GENET_IS_V3(p) ((p)->version == GENET_V3) +#define GENET_IS_V4(p) ((p)->version == GENET_V4) +#define GENET_IS_V5(p) ((p)->version == GENET_V5) + +/* Hardware flags */ +#define GENET_HAS_40BITS (1 << 0) +#define GENET_HAS_EXT (1 << 1) +#define GENET_HAS_MDIO_INTR 
(1 << 2) +#define GENET_HAS_MOCA_LINK_DET (1 << 3) + +/* BCMGENET hardware parameters, keep this structure nicely aligned + * since it is going to be used in hot paths + */ +struct bcmgenet_hw_params { + u8 tx_queues; + u8 tx_bds_per_q; + u8 rx_queues; + u8 rx_bds_per_q; + u8 bp_in_en_shift; + u32 bp_in_mask; + u8 hfb_filter_cnt; + u8 hfb_filter_size; + u8 qtag_mask; + u16 tbuf_offset; + u32 hfb_offset; + u32 hfb_reg_offset; + u32 rdma_offset; + u32 tdma_offset; + u32 words_per_bd; + u32 flags; +}; + +struct bcmgenet_skb_cb { + struct enet_cb *first_cb; /* First control block of SKB */ + struct enet_cb *last_cb; /* Last control block of SKB */ + unsigned int bytes_sent; /* bytes on the wire (no TSB) */ +}; + +#define GENET_CB(skb) ((struct bcmgenet_skb_cb *)((skb)->cb)) + +struct bcmgenet_tx_ring { + spinlock_t lock; /* ring lock */ + struct napi_struct napi; /* NAPI per tx queue */ + unsigned long packets; + unsigned long bytes; + unsigned int index; /* ring index */ + unsigned int queue; /* queue index */ + struct enet_cb *cbs; /* tx ring buffer control block*/ + unsigned int size; /* size of each tx ring */ + unsigned int clean_ptr; /* Tx ring clean pointer */ + unsigned int c_index; /* last consumer index of each ring*/ + unsigned int free_bds; /* # of free bds for each ring */ + unsigned int write_ptr; /* Tx ring write pointer SW copy */ + unsigned int prod_index; /* Tx ring producer index SW copy */ + unsigned int cb_ptr; /* Tx ring initial CB ptr */ + unsigned int end_ptr; /* Tx ring end CB ptr */ + void (*int_enable)(struct bcmgenet_tx_ring *); + void (*int_disable)(struct bcmgenet_tx_ring *); + struct bcmgenet_priv *priv; +}; + +struct bcmgenet_net_dim { + u16 use_dim; + u16 event_ctr; + unsigned long packets; + unsigned long bytes; + struct dim dim; +}; + +struct bcmgenet_rx_ring { + struct napi_struct napi; /* Rx NAPI struct */ + unsigned long bytes; + unsigned long packets; + unsigned long errors; + unsigned long dropped; + unsigned int index; /* Rx ring index */ + struct enet_cb *cbs; /* Rx ring buffer control block */ + unsigned int size; /* Rx ring size */ + unsigned int c_index; /* Rx last consumer index */ + unsigned int read_ptr; /* Rx ring read pointer */ + unsigned int cb_ptr; /* Rx ring initial CB ptr */ + unsigned int end_ptr; /* Rx ring end CB ptr */ + unsigned int old_discards; + struct bcmgenet_net_dim dim; + u32 rx_max_coalesced_frames; + u32 rx_coalesce_usecs; + void (*int_enable)(struct bcmgenet_rx_ring *); + void (*int_disable)(struct bcmgenet_rx_ring *); + struct bcmgenet_priv *priv; +}; + +enum bcmgenet_rxnfc_state { + BCMGENET_RXNFC_STATE_UNUSED = 0, + BCMGENET_RXNFC_STATE_DISABLED, + BCMGENET_RXNFC_STATE_ENABLED +}; + +struct bcmgenet_rxnfc_rule { + struct list_head list; + struct ethtool_rx_flow_spec fs; + enum bcmgenet_rxnfc_state state; +}; + +/* device context */ +struct bcmgenet_priv { + void __iomem *base; + enum bcmgenet_version version; + struct net_device *dev; + + /* transmit variables */ + void __iomem *tx_bds; + struct enet_cb *tx_cbs; + unsigned int num_tx_bds; + + struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1]; + + /* receive variables */ + void __iomem *rx_bds; + struct enet_cb *rx_cbs; + unsigned int num_rx_bds; + unsigned int rx_buf_len; + struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES]; + struct list_head rxnfc_list; + + struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1]; + + /* other misc variables */ + struct bcmgenet_hw_params *hw_params; + unsigned autoneg_pause:1; + unsigned tx_pause:1; + unsigned rx_pause:1; + + /* MDIO 
bus variables */ + wait_queue_head_t wq; + bool internal_phy; + struct device_node *phy_dn; + struct device_node *mdio_dn; + struct mii_bus *mii_bus; + u16 gphy_rev; + struct clk *clk_eee; + bool clk_eee_enabled; + + /* PHY device variables */ + phy_interface_t phy_interface; + int phy_addr; + int ext_phy; + bool ephy_16nm; + + /* Interrupt variables */ + struct work_struct bcmgenet_irq_work; + int irq0; + int irq1; + int wol_irq; + bool wol_irq_disabled; + + /* shared status */ + spinlock_t lock; + unsigned int irq0_stat; + + /* HW descriptors/checksum variables */ + bool crc_fwd_en; + + u32 dma_max_burst_length; + + u32 msg_enable; + + struct clk *clk; + struct platform_device *pdev; + struct platform_device *mii_pdev; + + /* WOL */ + struct clk *clk_wol; + u32 wolopts; + u8 sopass[SOPASS_MAX]; + bool wol_active; + + struct bcmgenet_mib_counters mib; + + struct ethtool_eee eee; + /* EtherCAT device variables */ + ec_device_t *ecdev; +}; + +#define GENET_IO_MACRO(name, offset) \ +static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \ + u32 off) \ +{ \ + /* MIPS chips strapped for BE will automagically configure the \ + * peripheral registers for CPU-native byte order. \ + */ \ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \ + return __raw_readl(priv->base + offset + off); \ + else \ + return readl_relaxed(priv->base + offset + off); \ +} \ +static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \ + u32 val, u32 off) \ +{ \ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \ + __raw_writel(val, priv->base + offset + off); \ + else \ + writel_relaxed(val, priv->base + offset + off); \ +} + +GENET_IO_MACRO(ext, GENET_EXT_OFF); +GENET_IO_MACRO(umac, GENET_UMAC_OFF); +GENET_IO_MACRO(sys, GENET_SYS_OFF); + +/* interrupt l2 registers accessors */ +GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF); +GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF); + +/* HFB register accessors */ +GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset); + +/* GENET v2+ HFB control and filter len helpers */ +GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset); + +/* RBUF register accessors */ +GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); + +/* MDIO routines */ +int bcmgenet_mii_init(struct net_device *dev); +int bcmgenet_mii_config(struct net_device *dev, bool init); +int bcmgenet_mii_probe(struct net_device *dev); +void bcmgenet_mii_exit(struct net_device *dev); +void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx); +void bcmgenet_phy_power_set(struct net_device *dev, bool enable); +void bcmgenet_mii_setup(struct net_device *dev); + +/* Wake-on-LAN routines */ +void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol); +int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol); +int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode); +void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode); + +void bcmgenet_eee_enable_set(struct net_device *dev, bool enable, + bool tx_lpi_enabled); + +#endif /* __BCMGENET_H__ */ diff --git a/devices/genet/bcmgenet-6.1-orig.c b/devices/genet/bcmgenet-6.1-orig.c new file mode 100644 index 00000000..1ae082eb --- /dev/null +++ b/devices/genet/bcmgenet-6.1-orig.c @@ -0,0 +1,4369 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET (Gigabit Ethernet) controller driver + * + * Copyright (c) 2014-2020 Broadcom + */ + +#define pr_fmt(fmt) "bcmgenet: " fmt + +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "bcmgenet.h" + +/* Maximum number of hardware queues, downsized if needed */ +#define GENET_MAX_MQ_CNT 4 + +/* Default highest priority queue for multi queue support */ +#define GENET_Q0_PRIORITY 0 + +#define GENET_Q16_RX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q) +#define GENET_Q16_TX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q) + +#define RX_BUF_LENGTH 2048 +#define SKB_ALIGNMENT 32 + +/* Tx/Rx DMA register offset, skip 256 descriptors */ +#define WORDS_PER_BD(p) (p->hw_params->words_per_bd) +#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32)) + +#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + +#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + +/* Forward declarations */ +static void bcmgenet_set_rx_mode(struct net_device *dev); + +static inline void bcmgenet_writel(u32 value, void __iomem *offset) +{ + /* MIPS chips strapped for BE will automagically configure the + * peripheral registers for CPU-native byte order. + */ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + __raw_writel(value, offset); + else + writel_relaxed(value, offset); +} + +static inline u32 bcmgenet_readl(void __iomem *offset) +{ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + return __raw_readl(offset); + else + return readl_relaxed(offset); +} + +static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, + void __iomem *d, u32 value) +{ + bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS); +} + +static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, + void __iomem *d, + dma_addr_t addr) +{ + bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); + + /* Register writes to GISB bus can take couple hundred nanoseconds + * and are done for each packet, save these expensive writes unless + * the platform is explicitly configured for 64-bits/LPAE. + */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (priv->hw_params->flags & GENET_HAS_40BITS) + bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); +#endif +} + +/* Combined address + length/status setter */ +static inline void dmadesc_set(struct bcmgenet_priv *priv, + void __iomem *d, dma_addr_t addr, u32 val) +{ + dmadesc_set_addr(priv, d, addr); + dmadesc_set_length_status(priv, d, val); +} + +static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, + void __iomem *d) +{ + dma_addr_t addr; + + addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO); + + /* Register writes to GISB bus can take couple hundred nanoseconds + * and are done for each packet, save these expensive writes unless + * the platform is explicitly configured for 64-bits/LPAE. 
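+ * The HI word is likewise only read back when GENET_HAS_40BITS is set
+ * and CONFIG_PHYS_ADDR_T_64BIT is enabled, mirroring dmadesc_set_addr().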
+ */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (priv->hw_params->flags & GENET_HAS_40BITS) + addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32; +#endif + return addr; +} + +#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x" + +#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + +static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1); + else + return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL); +} + +static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1); + else + bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL); +} + +/* These macros are defined to deal with register map change + * between GENET1.1 and GENET2. Only those currently being used + * by driver are defined. + */ +static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); + else + return bcmgenet_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); + else + bcmgenet_writel(val, priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); + else + return bcmgenet_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); + else + bcmgenet_writel(val, priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +/* RX/TX DMA register accessors */ +enum dma_reg { + DMA_RING_CFG = 0, + DMA_CTRL, + DMA_STATUS, + DMA_SCB_BURST_SIZE, + DMA_ARB_CTRL, + DMA_PRIORITY_0, + DMA_PRIORITY_1, + DMA_PRIORITY_2, + DMA_INDEX2RING_0, + DMA_INDEX2RING_1, + DMA_INDEX2RING_2, + DMA_INDEX2RING_3, + DMA_INDEX2RING_4, + DMA_INDEX2RING_5, + DMA_INDEX2RING_6, + DMA_INDEX2RING_7, + DMA_RING0_TIMEOUT, + DMA_RING1_TIMEOUT, + DMA_RING2_TIMEOUT, + DMA_RING3_TIMEOUT, + DMA_RING4_TIMEOUT, + DMA_RING5_TIMEOUT, + DMA_RING6_TIMEOUT, + DMA_RING7_TIMEOUT, + DMA_RING8_TIMEOUT, + DMA_RING9_TIMEOUT, + DMA_RING10_TIMEOUT, + DMA_RING11_TIMEOUT, + DMA_RING12_TIMEOUT, + DMA_RING13_TIMEOUT, + DMA_RING14_TIMEOUT, + DMA_RING15_TIMEOUT, + DMA_RING16_TIMEOUT, +}; + +static const u8 bcmgenet_dma_regs_v3plus[] = { + [DMA_RING_CFG] = 0x00, + [DMA_CTRL] = 0x04, + [DMA_STATUS] = 0x08, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x2C, + [DMA_PRIORITY_0] = 0x30, + [DMA_PRIORITY_1] = 0x34, + [DMA_PRIORITY_2] = 0x38, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, + [DMA_INDEX2RING_0] = 0x70, + [DMA_INDEX2RING_1] = 0x74, + [DMA_INDEX2RING_2] = 0x78, + [DMA_INDEX2RING_3] = 0x7C, + [DMA_INDEX2RING_4] = 0x80, + 
[DMA_INDEX2RING_5] = 0x84, + [DMA_INDEX2RING_6] = 0x88, + [DMA_INDEX2RING_7] = 0x8C, +}; + +static const u8 bcmgenet_dma_regs_v2[] = { + [DMA_RING_CFG] = 0x00, + [DMA_CTRL] = 0x04, + [DMA_STATUS] = 0x08, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x30, + [DMA_PRIORITY_0] = 0x34, + [DMA_PRIORITY_1] = 0x38, + [DMA_PRIORITY_2] = 0x3C, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, +}; + +static const u8 bcmgenet_dma_regs_v1[] = { + [DMA_CTRL] = 0x00, + [DMA_STATUS] = 0x04, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x30, + [DMA_PRIORITY_0] = 0x34, + [DMA_PRIORITY_1] = 0x38, + [DMA_PRIORITY_2] = 0x3C, + [DMA_RING0_TIMEOUT] = 0x2C, + [DMA_RING1_TIMEOUT] = 0x30, + [DMA_RING2_TIMEOUT] = 0x34, + [DMA_RING3_TIMEOUT] = 0x38, + [DMA_RING4_TIMEOUT] = 0x3c, + [DMA_RING5_TIMEOUT] = 0x40, + [DMA_RING6_TIMEOUT] = 0x44, + [DMA_RING7_TIMEOUT] = 0x48, + [DMA_RING8_TIMEOUT] = 0x4c, + [DMA_RING9_TIMEOUT] = 0x50, + [DMA_RING10_TIMEOUT] = 0x54, + [DMA_RING11_TIMEOUT] = 0x58, + [DMA_RING12_TIMEOUT] = 0x5c, + [DMA_RING13_TIMEOUT] = 0x60, + [DMA_RING14_TIMEOUT] = 0x64, + [DMA_RING15_TIMEOUT] = 0x68, + [DMA_RING16_TIMEOUT] = 0x6C, +}; + +/* Set at runtime once bcmgenet version is known */ +static const u8 *bcmgenet_dma_regs; + +static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) +{ + return netdev_priv(dev_get_drvdata(dev)); +} + +static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, + enum dma_reg r) +{ + return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, + u32 val, enum dma_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, + enum dma_reg r) +{ + return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, + u32 val, enum dma_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +/* RDMA/TDMA ring registers and accessors + * we merge the common fields and just prefix with T/D the registers + * having different meaning depending on the direction + */ +enum dma_ring_reg { + TDMA_READ_PTR = 0, + RDMA_WRITE_PTR = TDMA_READ_PTR, + TDMA_READ_PTR_HI, + RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI, + TDMA_CONS_INDEX, + RDMA_PROD_INDEX = TDMA_CONS_INDEX, + TDMA_PROD_INDEX, + RDMA_CONS_INDEX = TDMA_PROD_INDEX, + DMA_RING_BUF_SIZE, + DMA_START_ADDR, + DMA_START_ADDR_HI, + DMA_END_ADDR, + DMA_END_ADDR_HI, + DMA_MBUF_DONE_THRESH, + TDMA_FLOW_PERIOD, + RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD, + TDMA_WRITE_PTR, + RDMA_READ_PTR = TDMA_WRITE_PTR, + TDMA_WRITE_PTR_HI, + RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI +}; + +/* GENET v4 supports 40-bits pointer addressing + * for obvious reasons the LO and HI word parts + * are contiguous, but this offsets the other + * registers. 
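+ * The v4 table below therefore carries the additional *_HI entries and
+ * shifted offsets compared with the v1/v2/v3 table that follows it.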
+ */ +static const u8 genet_dma_ring_regs_v4[] = { + [TDMA_READ_PTR] = 0x00, + [TDMA_READ_PTR_HI] = 0x04, + [TDMA_CONS_INDEX] = 0x08, + [TDMA_PROD_INDEX] = 0x0C, + [DMA_RING_BUF_SIZE] = 0x10, + [DMA_START_ADDR] = 0x14, + [DMA_START_ADDR_HI] = 0x18, + [DMA_END_ADDR] = 0x1C, + [DMA_END_ADDR_HI] = 0x20, + [DMA_MBUF_DONE_THRESH] = 0x24, + [TDMA_FLOW_PERIOD] = 0x28, + [TDMA_WRITE_PTR] = 0x2C, + [TDMA_WRITE_PTR_HI] = 0x30, +}; + +static const u8 genet_dma_ring_regs_v123[] = { + [TDMA_READ_PTR] = 0x00, + [TDMA_CONS_INDEX] = 0x04, + [TDMA_PROD_INDEX] = 0x08, + [DMA_RING_BUF_SIZE] = 0x0C, + [DMA_START_ADDR] = 0x10, + [DMA_END_ADDR] = 0x14, + [DMA_MBUF_DONE_THRESH] = 0x18, + [TDMA_FLOW_PERIOD] = 0x1C, + [TDMA_WRITE_PTR] = 0x20, +}; + +/* Set at runtime once GENET version is known */ +static const u8 *genet_dma_ring_regs; + +static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, + unsigned int ring, + enum dma_ring_reg r) +{ + return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, + unsigned int ring, u32 val, + enum dma_ring_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, + unsigned int ring, + enum dma_ring_reg r) +{ + return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, + unsigned int ring, u32 val, + enum dma_ring_reg r) +{ + bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg |= (1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + reg |= RBUF_HFB_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); +} + +static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 offset, reg, reg1; + + offset = HFB_FLT_ENABLE_V3PLUS; + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32)); + if (f_index < 32) { + reg1 &= ~(1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32)); + } else { + reg &= ~(1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); + } + if (!reg && !reg1) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + reg &= ~RBUF_HFB_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } +} + +static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv, + u32 f_index, u32 rx_queue) +{ + u32 offset; + u32 reg; + + offset = f_index / 8; + reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset); + reg &= ~(0xF << (4 * (f_index % 8))); + reg |= ((rx_queue & 0xF) << (4 * (f_index % 8))); + bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset); +} + +static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv, + u32 f_index, u32 f_length) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_LEN_V3PLUS + + ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) * + sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg &= ~(0xFF << (8 * (f_index % 4))); + reg |= 
((f_length & 0xFF) << (8 * (f_index % 4))); + bcmgenet_hfb_reg_writel(priv, reg, offset); +} + +static int bcmgenet_hfb_validate_mask(void *mask, size_t size) +{ + while (size) { + switch (*(unsigned char *)mask++) { + case 0x00: + case 0x0f: + case 0xf0: + case 0xff: + size--; + continue; + default: + return -EINVAL; + } + } + + return 0; +} + +#define VALIDATE_MASK(x) \ + bcmgenet_hfb_validate_mask(&(x), sizeof(x)) + +static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index, + u32 offset, void *val, void *mask, + size_t size) +{ + u32 index, tmp; + + index = f_index * priv->hw_params->hfb_filter_size + offset / 2; + tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32)); + + while (size--) { + if (offset++ & 1) { + tmp &= ~0x300FF; + tmp |= (*(unsigned char *)val++); + switch ((*(unsigned char *)mask++)) { + case 0xFF: + tmp |= 0x30000; + break; + case 0xF0: + tmp |= 0x20000; + break; + case 0x0F: + tmp |= 0x10000; + break; + } + bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32)); + if (size) + tmp = bcmgenet_hfb_readl(priv, + index * sizeof(u32)); + } else { + tmp &= ~0xCFF00; + tmp |= (*(unsigned char *)val++) << 8; + switch ((*(unsigned char *)mask++)) { + case 0xFF: + tmp |= 0xC0000; + break; + case 0xF0: + tmp |= 0x80000; + break; + case 0x0F: + tmp |= 0x40000; + break; + } + if (!size) + bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32)); + } + } + + return 0; +} + +static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, + struct bcmgenet_rxnfc_rule *rule) +{ + struct ethtool_rx_flow_spec *fs = &rule->fs; + u32 offset = 0, f_length = 0, f; + u8 val_8, mask_8; + __be16 val_16; + u16 mask_16; + size_t size; + + f = fs->location; + if (fs->flow_type & FLOW_MAC_EXT) { + bcmgenet_hfb_insert_data(priv, f, 0, + &fs->h_ext.h_dest, &fs->m_ext.h_dest, + sizeof(fs->h_ext.h_dest)); + } + + if (fs->flow_type & FLOW_EXT) { + if (fs->m_ext.vlan_etype || + fs->m_ext.vlan_tci) { + bcmgenet_hfb_insert_data(priv, f, 12, + &fs->h_ext.vlan_etype, + &fs->m_ext.vlan_etype, + sizeof(fs->h_ext.vlan_etype)); + bcmgenet_hfb_insert_data(priv, f, 14, + &fs->h_ext.vlan_tci, + &fs->m_ext.vlan_tci, + sizeof(fs->h_ext.vlan_tci)); + offset += VLAN_HLEN; + f_length += DIV_ROUND_UP(VLAN_HLEN, 2); + } + } + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case ETHER_FLOW: + f_length += DIV_ROUND_UP(ETH_HLEN, 2); + bcmgenet_hfb_insert_data(priv, f, 0, + &fs->h_u.ether_spec.h_dest, + &fs->m_u.ether_spec.h_dest, + sizeof(fs->h_u.ether_spec.h_dest)); + bcmgenet_hfb_insert_data(priv, f, ETH_ALEN, + &fs->h_u.ether_spec.h_source, + &fs->m_u.ether_spec.h_source, + sizeof(fs->h_u.ether_spec.h_source)); + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, + &fs->h_u.ether_spec.h_proto, + &fs->m_u.ether_spec.h_proto, + sizeof(fs->h_u.ether_spec.h_proto)); + break; + case IP_USER_FLOW: + f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2); + /* Specify IP Ether Type */ + val_16 = htons(ETH_P_IP); + mask_16 = 0xFFFF; + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, + &val_16, &mask_16, sizeof(val_16)); + bcmgenet_hfb_insert_data(priv, f, 15 + offset, + &fs->h_u.usr_ip4_spec.tos, + &fs->m_u.usr_ip4_spec.tos, + sizeof(fs->h_u.usr_ip4_spec.tos)); + bcmgenet_hfb_insert_data(priv, f, 23 + offset, + &fs->h_u.usr_ip4_spec.proto, + &fs->m_u.usr_ip4_spec.proto, + sizeof(fs->h_u.usr_ip4_spec.proto)); + bcmgenet_hfb_insert_data(priv, f, 26 + offset, + &fs->h_u.usr_ip4_spec.ip4src, + &fs->m_u.usr_ip4_spec.ip4src, + sizeof(fs->h_u.usr_ip4_spec.ip4src)); + bcmgenet_hfb_insert_data(priv, 
f, 30 + offset, + &fs->h_u.usr_ip4_spec.ip4dst, + &fs->m_u.usr_ip4_spec.ip4dst, + sizeof(fs->h_u.usr_ip4_spec.ip4dst)); + if (!fs->m_u.usr_ip4_spec.l4_4_bytes) + break; + + /* Only supports 20 byte IPv4 header */ + val_8 = 0x45; + mask_8 = 0xFF; + bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset, + &val_8, &mask_8, + sizeof(val_8)); + size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes); + bcmgenet_hfb_insert_data(priv, f, + ETH_HLEN + 20 + offset, + &fs->h_u.usr_ip4_spec.l4_4_bytes, + &fs->m_u.usr_ip4_spec.l4_4_bytes, + size); + f_length += DIV_ROUND_UP(size, 2); + break; + } + + bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length); + if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) { + /* Ring 0 flows can be handled by the default Descriptor Ring + * We'll map them to ring 0, but don't enable the filter + */ + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0); + rule->state = BCMGENET_RXNFC_STATE_DISABLED; + } else { + /* Other Rx rings are direct mapped here */ + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, + fs->ring_cookie); + bcmgenet_hfb_enable_filter(priv, f); + rule->state = BCMGENET_RXNFC_STATE_ENABLED; + } +} + +/* bcmgenet_hfb_clear + * + * Clear Hardware Filter Block and disable all filtering. + */ +static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 base, i; + + base = f_index * priv->hw_params->hfb_filter_size; + for (i = 0; i < priv->hw_params->hfb_filter_size; i++) + bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32)); +} + +static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) +{ + u32 i; + + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); + + for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++) + bcmgenet_rdma_writel(priv, 0x0, i); + + for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++) + bcmgenet_hfb_reg_writel(priv, 0x0, + HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); + + for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++) + bcmgenet_hfb_clear_filter(priv, i); +} + +static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) +{ + int i; + + INIT_LIST_HEAD(&priv->rxnfc_list); + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + + for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { + INIT_LIST_HEAD(&priv->rxnfc_rules[i].list); + priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED; + } + + bcmgenet_hfb_clear(priv); +} + +static int bcmgenet_begin(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Turn on the clock */ + return clk_prepare_enable(priv->clk); +} + +static void bcmgenet_complete(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Turn off the clock */ + clk_disable_unprepare(priv->clk); +} + +static int bcmgenet_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -ENODEV; + + phy_ethtool_ksettings_get(dev->phydev, cmd); + + return 0; +} + +static int bcmgenet_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -ENODEV; + + return phy_ethtool_ksettings_set(dev->phydev, cmd); +} + +static int bcmgenet_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg; + int 
ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + /* Make sure we reflect the value of CRC_CMD_FWD */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); + + clk_disable_unprepare(priv->clk); + + return ret; +} + +static u32 bcmgenet_get_msglevel(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +static void bcmgenet_set_msglevel(struct net_device *dev, u32 level) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + priv->msg_enable = level; +} + +static int bcmgenet_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rx_ring *ring; + unsigned int i; + + ec->tx_max_coalesced_frames = + bcmgenet_tdma_ring_readl(priv, DESC_INDEX, + DMA_MBUF_DONE_THRESH); + ec->rx_max_coalesced_frames = + bcmgenet_rdma_ring_readl(priv, DESC_INDEX, + DMA_MBUF_DONE_THRESH); + ec->rx_coalesce_usecs = + bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000; + + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ring = &priv->rx_rings[i]; + ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; + } + ring = &priv->rx_rings[DESC_INDEX]; + ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; + + return 0; +} + +static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring, + u32 usecs, u32 pkts) +{ + struct bcmgenet_priv *priv = ring->priv; + unsigned int i = ring->index; + u32 reg; + + bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH); + + reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i); + reg &= ~DMA_TIMEOUT_MASK; + reg |= DIV_ROUND_UP(usecs * 1000, 8192); + bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i); +} + +static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring, + struct ethtool_coalesce *ec) +{ + struct dim_cq_moder moder; + u32 usecs, pkts; + + ring->rx_coalesce_usecs = ec->rx_coalesce_usecs; + ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames; + usecs = ring->rx_coalesce_usecs; + pkts = ring->rx_max_coalesced_frames; + + if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) { + moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + ring->dim.use_dim = ec->use_adaptive_rx_coalesce; + bcmgenet_set_rx_coalesce(ring, usecs, pkts); +} + +static int bcmgenet_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned int i; + + /* Base system clock is 125Mhz, DMA timeout is this reference clock + * divided by 1024, which yields roughly 8.192us, our maximum value + * has to fit in the DMA_TIMEOUT_MASK (16 bits) + */ + if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK || + ec->tx_max_coalesced_frames == 0 || + ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK || + ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) + return -EINVAL; + + /* GENET TDMA hardware does not support a configurable timeout, but will + * always generate an interrupt either after MBDONE packets have been + * transmitted, or when the ring is empty. 
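+ *
+ * Clarifying note (added here, not part of the original bcmgenet source): as a
+ * consequence, only ec->tx_max_coalesced_frames is programmed for TX below,
+ * while ec->rx_coalesce_usecs is converted by bcmgenet_set_rx_coalesce() into
+ * 8.192us timeout units, e.g. 50us -> DIV_ROUND_UP(50 * 1000, 8192) = 7
+ * units (~57us effective).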
+ */ + + /* Program all TX queues with the same values, as there is no + * ethtool knob to do coalescing on a per-queue basis + */ + for (i = 0; i < priv->hw_params->tx_queues; i++) + bcmgenet_tdma_ring_writel(priv, i, + ec->tx_max_coalesced_frames, + DMA_MBUF_DONE_THRESH); + bcmgenet_tdma_ring_writel(priv, DESC_INDEX, + ec->tx_max_coalesced_frames, + DMA_MBUF_DONE_THRESH); + + for (i = 0; i < priv->hw_params->rx_queues; i++) + bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec); + bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec); + + return 0; +} + +static void bcmgenet_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bcmgenet_priv *priv; + u32 umac_cmd; + + priv = netdev_priv(dev); + + epause->autoneg = priv->autoneg_pause; + + if (netif_carrier_ok(dev)) { + /* report active state when link is up */ + umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD); + epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE); + epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE); + } else { + /* otherwise report stored settings */ + epause->tx_pause = priv->tx_pause; + epause->rx_pause = priv->rx_pause; + } +} + +static int bcmgenet_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!dev->phydev) + return -ENODEV; + + if (!phy_validate_pause(dev->phydev, epause)) + return -EINVAL; + + priv->autoneg_pause = !!epause->autoneg; + priv->tx_pause = !!epause->tx_pause; + priv->rx_pause = !!epause->rx_pause; + + bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause); + + return 0; +} + +/* standard ethtool support functions. */ +enum bcmgenet_stat_type { + BCMGENET_STAT_NETDEV = -1, + BCMGENET_STAT_MIB_RX, + BCMGENET_STAT_MIB_TX, + BCMGENET_STAT_RUNT, + BCMGENET_STAT_MISC, + BCMGENET_STAT_SOFT, +}; + +struct bcmgenet_stats { + char stat_string[ETH_GSTRING_LEN]; + int stat_sizeof; + int stat_offset; + enum bcmgenet_stat_type type; + /* reg offset from UMAC base for misc counters */ + u16 reg_offset; +}; + +#define STAT_NETDEV(m) { \ + .stat_string = __stringify(m), \ + .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ + .stat_offset = offsetof(struct net_device_stats, m), \ + .type = BCMGENET_STAT_NETDEV, \ +} + +#define STAT_GENET_MIB(str, m, _type) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ + .stat_offset = offsetof(struct bcmgenet_priv, m), \ + .type = _type, \ +} + +#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) +#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) +#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) +#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT) + +#define STAT_GENET_MISC(str, m, offset) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ + .stat_offset = offsetof(struct bcmgenet_priv, m), \ + .type = BCMGENET_STAT_MISC, \ + .reg_offset = offset, \ +} + +#define STAT_GENET_Q(num) \ + STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \ + tx_rings[num].packets), \ + STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \ + tx_rings[num].bytes), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \ + rx_rings[num].bytes), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \ + rx_rings[num].packets), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \ + rx_rings[num].errors), \ + STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", 
\ + rx_rings[num].dropped) + +/* There is a 0xC gap between the end of RX and beginning of TX stats and then + * between the end of TX stats and the beginning of the RX RUNT + */ +#define BCMGENET_STAT_OFFSET 0xc + +/* Hardware counters must be kept in sync because the order/offset + * is important here (order in structure declaration = order in hardware) + */ +static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { + /* general stats */ + STAT_NETDEV(rx_packets), + STAT_NETDEV(tx_packets), + STAT_NETDEV(rx_bytes), + STAT_NETDEV(tx_bytes), + STAT_NETDEV(rx_errors), + STAT_NETDEV(tx_errors), + STAT_NETDEV(rx_dropped), + STAT_NETDEV(tx_dropped), + STAT_NETDEV(multicast), + /* UniMAC RSV counters */ + STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), + STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), + STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), + STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), + STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), + STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), + STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), + STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), + STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), + STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), + STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt), + STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes), + STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca), + STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca), + STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs), + STAT_GENET_MIB_RX("rx_control", mib.rx.cf), + STAT_GENET_MIB_RX("rx_pause", mib.rx.pf), + STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo), + STAT_GENET_MIB_RX("rx_align", mib.rx.aln), + STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr), + STAT_GENET_MIB_RX("rx_code", mib.rx.cde), + STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr), + STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr), + STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr), + STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue), + STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok), + STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc), + STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp), + STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc), + /* UniMAC TSV counters */ + STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), + STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), + STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), + STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), + STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), + STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), + STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), + STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), + STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), + STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), + STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts), + STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca), + STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca), + STAT_GENET_MIB_TX("tx_pause", mib.tx.pf), + STAT_GENET_MIB_TX("tx_control", mib.tx.cf), + STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs), + STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr), + STAT_GENET_MIB_TX("tx_defer", mib.tx.drf), + STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf), + STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl), + STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl), + STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl), + STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl), + 
STAT_GENET_MIB_TX("tx_frags", mib.tx.frg), + STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl), + STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr), + STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes), + STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok), + STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc), + /* UniMAC RUNT counters */ + STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt), + STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), + STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), + STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), + /* Misc UniMAC counters */ + STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, + UMAC_RBUF_OVFL_CNT_V1), + STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, + UMAC_RBUF_ERR_CNT_V1), + STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), + STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), + STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed), + STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb), + STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed", + mib.tx_realloc_tsb_failed), + /* Per TX queues */ + STAT_GENET_Q(0), + STAT_GENET_Q(1), + STAT_GENET_Q(2), + STAT_GENET_Q(3), + STAT_GENET_Q(16), +}; + +#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) + +static void bcmgenet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, "bcmgenet", sizeof(info->driver)); +} + +static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCMGENET_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void bcmgenet_get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcmgenet_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset) +{ + u16 new_offset; + u32 val; + + switch (offset) { + case UMAC_RBUF_OVFL_CNT_V1: + if (GENET_IS_V2(priv)) + new_offset = RBUF_OVFL_CNT_V2; + else + new_offset = RBUF_OVFL_CNT_V3PLUS; + + val = bcmgenet_rbuf_readl(priv, new_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_rbuf_writel(priv, 0, new_offset); + break; + case UMAC_RBUF_ERR_CNT_V1: + if (GENET_IS_V2(priv)) + new_offset = RBUF_ERR_CNT_V2; + else + new_offset = RBUF_ERR_CNT_V3PLUS; + + val = bcmgenet_rbuf_readl(priv, new_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_rbuf_writel(priv, 0, new_offset); + break; + default: + val = bcmgenet_umac_readl(priv, offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_umac_writel(priv, 0, offset); + break; + } + + return val; +} + +static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) +{ + int i, j = 0; + + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + const struct bcmgenet_stats *s; + u8 offset = 0; + u32 val = 0; + char *p; + + s = &bcmgenet_gstrings_stats[i]; + switch (s->type) { + case BCMGENET_STAT_NETDEV: + case BCMGENET_STAT_SOFT: + continue; + case BCMGENET_STAT_RUNT: + offset += BCMGENET_STAT_OFFSET; + fallthrough; + case BCMGENET_STAT_MIB_TX: + offset += BCMGENET_STAT_OFFSET; + fallthrough; + case BCMGENET_STAT_MIB_RX: + val = bcmgenet_umac_readl(priv, + UMAC_MIB_START + j + offset); + offset = 0; /* Reset Offset */ + break; + case BCMGENET_STAT_MISC: + if (GENET_IS_V1(priv)) { + val = 
bcmgenet_umac_readl(priv, s->reg_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_umac_writel(priv, 0, + s->reg_offset); + } else { + val = bcmgenet_update_stat_misc(priv, + s->reg_offset); + } + break; + } + + j += s->stat_sizeof; + p = (char *)priv + s->stat_offset; + *(u32 *)p = val; + } +} + +static void bcmgenet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int i; + + if (netif_running(dev)) + bcmgenet_update_mib_counters(priv); + + dev->netdev_ops->ndo_get_stats(dev); + + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + const struct bcmgenet_stats *s; + char *p; + + s = &bcmgenet_gstrings_stats[i]; + if (s->type == BCMGENET_STAT_NETDEV) + p = (char *)&dev->stats; + else + p = (char *)priv; + p += s->stat_offset; + if (sizeof(unsigned long) != sizeof(u32) && + s->stat_sizeof == sizeof(unsigned long)) + data[i] = *(unsigned long *)p; + else + data[i] = *(u32 *)p; + } +} + +void bcmgenet_eee_enable_set(struct net_device *dev, bool enable, + bool tx_lpi_enabled) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; + u32 reg; + + if (enable && !priv->clk_eee_enabled) { + clk_prepare_enable(priv->clk_eee); + priv->clk_eee_enabled = true; + } + + reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL); + if (enable) + reg |= EEE_EN; + else + reg &= ~EEE_EN; + bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL); + + /* Enable EEE and switch to a 27Mhz clock automatically */ + reg = bcmgenet_readl(priv->base + off); + if (tx_lpi_enabled) + reg |= TBUF_EEE_EN | TBUF_PM_EN; + else + reg &= ~(TBUF_EEE_EN | TBUF_PM_EN); + bcmgenet_writel(reg, priv->base + off); + + /* Do the same for thing for RBUF */ + reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL); + if (enable) + reg |= RBUF_EEE_EN | RBUF_PM_EN; + else + reg &= ~(RBUF_EEE_EN | RBUF_PM_EN); + bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL); + + if (!enable && priv->clk_eee_enabled) { + clk_disable_unprepare(priv->clk_eee); + priv->clk_eee_enabled = false; + } + + priv->eee.eee_enabled = enable; + priv->eee.eee_active = enable; + priv->eee.tx_lpi_enabled = tx_lpi_enabled; +} + +static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; + + if (!dev->phydev) + return -ENODEV; + + e->eee_enabled = p->eee_enabled; + e->eee_active = p->eee_active; + e->tx_lpi_enabled = p->tx_lpi_enabled; + e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); + + return phy_ethtool_get_eee(dev->phydev, e); +} + +static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; + + if (!dev->phydev) + return -ENODEV; + + p->eee_enabled = e->eee_enabled; + + if (!p->eee_enabled) { + bcmgenet_eee_enable_set(dev, false, false); + } else { + p->eee_active = phy_init_eee(dev->phydev, false) >= 0; + bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); + bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled); + } + + return phy_ethtool_set_eee(dev->phydev, e); +} + +static int bcmgenet_validate_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_usrip4_spec *l4_mask; + struct ethhdr *eth_mask; + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) { + netdev_err(dev, 
"rxnfc: Invalid location (%d)\n", + cmd->fs.location); + return -EINVAL; + } + + switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case IP_USER_FLOW: + l4_mask = &cmd->fs.m_u.usr_ip4_spec; + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(l4_mask->ip4src) || + VALIDATE_MASK(l4_mask->ip4dst) || + VALIDATE_MASK(l4_mask->l4_4_bytes) || + VALIDATE_MASK(l4_mask->proto) || + VALIDATE_MASK(l4_mask->ip_ver) || + VALIDATE_MASK(l4_mask->tos)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + break; + case ETHER_FLOW: + eth_mask = &cmd->fs.m_u.ether_spec; + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(eth_mask->h_dest) || + VALIDATE_MASK(eth_mask->h_source) || + VALIDATE_MASK(eth_mask->h_proto)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + break; + default: + netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n", + cmd->fs.flow_type); + return -EINVAL; + } + + if ((cmd->fs.flow_type & FLOW_EXT)) { + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) || + VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) { + netdev_err(dev, "rxnfc: user-def not supported\n"); + return -EINVAL; + } + } + + if ((cmd->fs.flow_type & FLOW_MAC_EXT)) { + /* don't allow mask which isn't valid */ + if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) { + netdev_err(dev, "rxnfc: Unsupported mask\n"); + return -EINVAL; + } + } + + return 0; +} + +static int bcmgenet_insert_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *loc_rule; + int err; + + if (priv->hw_params->hfb_filter_size < 128) { + netdev_err(dev, "rxnfc: Not supported by this device\n"); + return -EINVAL; + } + + if (cmd->fs.ring_cookie > priv->hw_params->rx_queues && + cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) { + netdev_err(dev, "rxnfc: Unsupported action (%llu)\n", + cmd->fs.ring_cookie); + return -EINVAL; + } + + err = bcmgenet_validate_flow(dev, cmd); + if (err) + return err; + + loc_rule = &priv->rxnfc_rules[cmd->fs.location]; + if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED) + bcmgenet_hfb_disable_filter(priv, cmd->fs.location); + if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) { + list_del(&loc_rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } + loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED; + memcpy(&loc_rule->fs, &cmd->fs, + sizeof(struct ethtool_rx_flow_spec)); + + bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule); + + list_add_tail(&loc_rule->list, &priv->rxnfc_list); + + return 0; +} + +static int bcmgenet_delete_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->rxnfc_rules[cmd->fs.location]; + if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) { + err = -ENOENT; + goto out; + } + + if (rule->state == BCMGENET_RXNFC_STATE_ENABLED) + bcmgenet_hfb_disable_filter(priv, cmd->fs.location); + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) { + list_del(&rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } + rule->state = BCMGENET_RXNFC_STATE_UNUSED; + memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec)); + +out: + return err; +} + +static int bcmgenet_set_rxnfc(struct net_device *dev, struct 
ethtool_rxnfc *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = bcmgenet_insert_flow(dev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + err = bcmgenet_delete_flow(dev, cmd); + break; + default: + netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n", + cmd->cmd); + return -EINVAL; + } + + return err; +} + +static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd, + int loc) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + + if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->rxnfc_rules[loc]; + if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) + err = -ENOENT; + else + memcpy(&cmd->fs, &rule->fs, + sizeof(struct ethtool_rx_flow_spec)); + + return err; +} + +static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv) +{ + struct list_head *pos; + int res = 0; + + list_for_each(pos, &priv->rxnfc_list) + res++; + + return res; +} + +static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + int err = 0; + int i = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = priv->hw_params->rx_queues ?: 1; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = bcmgenet_get_num_flows(priv); + cmd->data = MAX_NUM_OF_FS_RULES; + break; + case ETHTOOL_GRXCLSRULE: + err = bcmgenet_get_flow(dev, cmd, cmd->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (i < cmd->rule_cnt) + rule_locs[i++] = rule->fs.location; + cmd->rule_cnt = i; + cmd->data = MAX_NUM_OF_FS_RULES; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +/* standard ethtool support functions. */ +static const struct ethtool_ops bcmgenet_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + .begin = bcmgenet_begin, + .complete = bcmgenet_complete, + .get_strings = bcmgenet_get_strings, + .get_sset_count = bcmgenet_get_sset_count, + .get_ethtool_stats = bcmgenet_get_ethtool_stats, + .get_drvinfo = bcmgenet_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = bcmgenet_get_msglevel, + .set_msglevel = bcmgenet_set_msglevel, + .get_wol = bcmgenet_get_wol, + .set_wol = bcmgenet_set_wol, + .get_eee = bcmgenet_get_eee, + .set_eee = bcmgenet_set_eee, + .nway_reset = phy_ethtool_nway_reset, + .get_coalesce = bcmgenet_get_coalesce, + .set_coalesce = bcmgenet_set_coalesce, + .get_link_ksettings = bcmgenet_get_link_ksettings, + .set_link_ksettings = bcmgenet_set_link_ksettings, + .get_ts_info = ethtool_op_get_ts_info, + .get_rxnfc = bcmgenet_get_rxnfc, + .set_rxnfc = bcmgenet_set_rxnfc, + .get_pauseparam = bcmgenet_get_pauseparam, + .set_pauseparam = bcmgenet_set_pauseparam, +}; + +/* Power down the unimac, based on mode. 
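+ *
+ * Note (summary added for clarity, not in the upstream driver): in the switch
+ * below, GENET_POWER_CABLE_SENSE detaches the PHY, GENET_POWER_WOL_MAGIC
+ * defers to bcmgenet_wol_power_down_cfg(), and GENET_POWER_PASSIVE powers
+ * down the PHY and LED via EXT_EXT_PWR_MGMT when the EXT block is present.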
*/ +static int bcmgenet_power_down(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + int ret = 0; + u32 reg; + + switch (mode) { + case GENET_POWER_CABLE_SENSE: + phy_detach(priv->dev->phydev); + break; + + case GENET_POWER_WOL_MAGIC: + ret = bcmgenet_wol_power_down_cfg(priv, mode); + break; + + case GENET_POWER_PASSIVE: + /* Power down LED */ + if (priv->hw_params->flags & GENET_HAS_EXT) { + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + if (GENET_IS_V5(priv) && !priv->ephy_16nm) + reg |= EXT_PWR_DOWN_PHY_EN | + EXT_PWR_DOWN_PHY_RD | + EXT_PWR_DOWN_PHY_SD | + EXT_PWR_DOWN_PHY_RX | + EXT_PWR_DOWN_PHY_TX | + EXT_IDDQ_GLBL_PWR; + else + reg |= EXT_PWR_DOWN_PHY; + + reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + + bcmgenet_phy_power_set(priv->dev, false); + } + break; + default: + break; + } + + return ret; +} + +static void bcmgenet_power_up(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + u32 reg; + + if (!(priv->hw_params->flags & GENET_HAS_EXT)) + return; + + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + + switch (mode) { + case GENET_POWER_PASSIVE: + reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS | + EXT_ENERGY_DET_MASK); + if (GENET_IS_V5(priv) && !priv->ephy_16nm) { + reg &= ~(EXT_PWR_DOWN_PHY_EN | + EXT_PWR_DOWN_PHY_RD | + EXT_PWR_DOWN_PHY_SD | + EXT_PWR_DOWN_PHY_RX | + EXT_PWR_DOWN_PHY_TX | + EXT_IDDQ_GLBL_PWR); + reg |= EXT_PHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + mdelay(1); + + reg &= ~EXT_PHY_RESET; + } else { + reg &= ~EXT_PWR_DOWN_PHY; + reg |= EXT_PWR_DN_EN_LD; + } + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + bcmgenet_phy_power_set(priv->dev, true); + break; + + case GENET_POWER_CABLE_SENSE: + /* enable APD */ + if (!GENET_IS_V5(priv)) { + reg |= EXT_PWR_DN_EN_LD; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + } + break; + case GENET_POWER_WOL_MAGIC: + bcmgenet_wol_power_up_cfg(priv, mode); + return; + default: + break; + } +} + +static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + struct enet_cb *tx_cb_ptr; + + tx_cb_ptr = ring->cbs; + tx_cb_ptr += ring->write_ptr - ring->cb_ptr; + + /* Advancing local write pointer */ + if (ring->write_ptr == ring->end_ptr) + ring->write_ptr = ring->cb_ptr; + else + ring->write_ptr++; + + return tx_cb_ptr; +} + +static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + struct enet_cb *tx_cb_ptr; + + tx_cb_ptr = ring->cbs; + tx_cb_ptr += ring->write_ptr - ring->cb_ptr; + + /* Rewinding local write pointer */ + if (ring->write_ptr == ring->cb_ptr) + ring->write_ptr = ring->end_ptr; + else + ring->write_ptr--; + + return tx_cb_ptr; +} + +static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, + 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, + 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), + INTRL2_CPU_MASK_CLEAR); +} + +static inline void 
bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, + INTRL2_CPU_MASK_SET); +} + +/* Simple helper to free a transmit control block's resources + * Returns an skb when the last transmit control block associated with the + * skb is freed. The skb should be freed by the caller if necessary. + */ +static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev, + struct enet_cb *cb) +{ + struct sk_buff *skb; + + skb = cb->skb; + + if (skb) { + cb->skb = NULL; + if (cb == GENET_CB(skb)->first_cb) + dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + else + dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + + if (cb == GENET_CB(skb)->last_cb) + return skb; + + } else if (dma_unmap_addr(cb, dma_addr)) { + dma_unmap_page(dev, + dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } + + return NULL; +} + +/* Simple helper to free a receive control block's resources */ +static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev, + struct enet_cb *cb) +{ + struct sk_buff *skb; + + skb = cb->skb; + cb->skb = NULL; + + if (dma_unmap_addr(cb, dma_addr)) { + dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } + + return skb; +} + +/* Unlocked version of the reclaim routine */ +static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned int txbds_processed = 0; + unsigned int bytes_compl = 0; + unsigned int pkts_compl = 0; + unsigned int txbds_ready; + unsigned int c_index; + struct sk_buff *skb; + + /* Clear status before servicing to reduce spurious interrupts */ + if (ring->index == DESC_INDEX) + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_CLEAR); + else + bcmgenet_intrl2_1_writel(priv, (1 << ring->index), + INTRL2_CPU_CLEAR); + + /* Compute how many buffers are transmitted since last xmit call */ + c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX) + & DMA_C_INDEX_MASK; + txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK; + + netif_dbg(priv, tx_done, dev, + "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", + __func__, ring->index, ring->c_index, c_index, txbds_ready); + + /* Reclaim transmitted buffers */ + while (txbds_processed < txbds_ready) { + skb = bcmgenet_free_tx_cb(&priv->pdev->dev, + &priv->tx_cbs[ring->clean_ptr]); + if (skb) { + pkts_compl++; + bytes_compl += GENET_CB(skb)->bytes_sent; + dev_consume_skb_any(skb); + } + + txbds_processed++; + if (likely(ring->clean_ptr < ring->end_ptr)) + ring->clean_ptr++; + else + ring->clean_ptr = ring->cb_ptr; + } + + ring->free_bds += txbds_processed; + ring->c_index = c_index; + + ring->packets += pkts_compl; + ring->bytes += 
bytes_compl; + + netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue), + pkts_compl, bytes_compl); + + return txbds_processed; +} + +static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) +{ + unsigned int released; + + spin_lock_bh(&ring->lock); + released = __bcmgenet_tx_reclaim(dev, ring); + spin_unlock_bh(&ring->lock); + + return released; +} + +static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_tx_ring *ring = + container_of(napi, struct bcmgenet_tx_ring, napi); + unsigned int work_done = 0; + struct netdev_queue *txq; + + spin_lock(&ring->lock); + work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring); + if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { + txq = netdev_get_tx_queue(ring->priv->dev, ring->queue); + netif_tx_wake_queue(txq); + } + spin_unlock(&ring->lock); + + if (work_done == 0) { + napi_complete(napi); + ring->int_enable(ring); + + return 0; + } + + return budget; +} + +static void bcmgenet_tx_reclaim_all(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int i; + + if (netif_is_multiqueue(dev)) { + for (i = 0; i < priv->hw_params->tx_queues; i++) + bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); + } + + bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); +} + +/* Reallocate the SKB to put enough headroom in front of it and insert + * the transmit checksum offsets in the descriptors + */ +static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, + struct sk_buff *skb) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct status_64 *status = NULL; + struct sk_buff *new_skb; + u16 offset; + u8 ip_proto; + __be16 ip_ver; + u32 tx_csum_info; + + if (unlikely(skb_headroom(skb) < sizeof(*status))) { + /* If 64 byte status block enabled, must make sure skb has + * enough headroom for us to insert 64B status block. + */ + new_skb = skb_realloc_headroom(skb, sizeof(*status)); + if (!new_skb) { + dev_kfree_skb_any(skb); + priv->mib.tx_realloc_tsb_failed++; + dev->stats.tx_dropped++; + return NULL; + } + dev_consume_skb_any(skb); + skb = new_skb; + priv->mib.tx_realloc_tsb++; + } + + skb_push(skb, sizeof(*status)); + status = (struct status_64 *)skb->data; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ip_ver = skb->protocol; + switch (ip_ver) { + case htons(ETH_P_IP): + ip_proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + ip_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + /* don't use UDP flag */ + ip_proto = 0; + break; + } + + offset = skb_checksum_start_offset(skb) - sizeof(*status); + tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | + (offset + skb->csum_offset) | + STATUS_TX_CSUM_LV; + + /* Set the special UDP flag for UDP */ + if (ip_proto == IPPROTO_UDP) + tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; + + status->tx_csum_info = tx_csum_info; + } + + return skb; +} + +static void bcmgenet_hide_tsb(struct sk_buff *skb) +{ + __skb_pull(skb, sizeof(struct status_64)); +} + +static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_tx_ring *ring = NULL; + struct enet_cb *tx_cb_ptr; + struct netdev_queue *txq; + int nr_frags, index; + dma_addr_t mapping; + unsigned int size; + skb_frag_t *frag; + u32 len_stat; + int ret; + int i; + + index = skb_get_queue_mapping(skb); + /* Mapping strategy: + * queue_mapping = 0, unclassified, packet xmited through ring16 + * queue_mapping = 1, goes to ring 0. 
(highest priority queue + * queue_mapping = 2, goes to ring 1. + * queue_mapping = 3, goes to ring 2. + * queue_mapping = 4, goes to ring 3. + */ + if (index == 0) + index = DESC_INDEX; + else + index -= 1; + + ring = &priv->tx_rings[index]; + txq = netdev_get_tx_queue(dev, ring->queue); + + nr_frags = skb_shinfo(skb)->nr_frags; + + spin_lock(&ring->lock); + if (ring->free_bds <= (nr_frags + 1)) { + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + netdev_err(dev, + "%s: tx ring %d full when queue %d awake\n", + __func__, index, ring->queue); + } + ret = NETDEV_TX_BUSY; + goto out; + } + + /* Retain how many bytes will be sent on the wire, without TSB inserted + * by transmit checksum offload + */ + GENET_CB(skb)->bytes_sent = skb->len; + + /* add the Transmit Status Block */ + skb = bcmgenet_add_tsb(dev, skb); + if (!skb) { + ret = NETDEV_TX_OK; + goto out; + } + + for (i = 0; i <= nr_frags; i++) { + tx_cb_ptr = bcmgenet_get_txcb(priv, ring); + + BUG_ON(!tx_cb_ptr); + + if (!i) { + /* Transmit single SKB or head of fragment list */ + GENET_CB(skb)->first_cb = tx_cb_ptr; + size = skb_headlen(skb); + mapping = dma_map_single(kdev, skb->data, size, + DMA_TO_DEVICE); + } else { + /* xmit fragment */ + frag = &skb_shinfo(skb)->frags[i - 1]; + size = skb_frag_size(frag); + mapping = skb_frag_dma_map(kdev, frag, 0, size, + DMA_TO_DEVICE); + } + + ret = dma_mapping_error(kdev, mapping); + if (ret) { + priv->mib.tx_dma_failed++; + netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); + ret = NETDEV_TX_OK; + goto out_unmap_frags; + } + dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); + dma_unmap_len_set(tx_cb_ptr, dma_len, size); + + tx_cb_ptr->skb = skb; + + len_stat = (size << DMA_BUFLENGTH_SHIFT) | + (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); + + /* Note: if we ever change from DMA_TX_APPEND_CRC below we + * will need to restore software padding of "runt" packets + */ + if (!i) { + len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; + if (skb->ip_summed == CHECKSUM_PARTIAL) + len_stat |= DMA_TX_DO_CSUM; + } + if (i == nr_frags) + len_stat |= DMA_EOP; + + dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat); + } + + GENET_CB(skb)->last_cb = tx_cb_ptr; + + bcmgenet_hide_tsb(skb); + skb_tx_timestamp(skb); + + /* Decrement total BD count and advance our write pointer */ + ring->free_bds -= nr_frags + 1; + ring->prod_index += nr_frags + 1; + ring->prod_index &= DMA_P_INDEX_MASK; + + netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent); + + if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) + netif_tx_stop_queue(txq); + + if (!netdev_xmit_more() || netif_xmit_stopped(txq)) + /* Packets are ready, update producer index */ + bcmgenet_tdma_ring_writel(priv, ring->index, + ring->prod_index, TDMA_PROD_INDEX); +out: + spin_unlock(&ring->lock); + + return ret; + +out_unmap_frags: + /* Back up for failed control block mapping */ + bcmgenet_put_txcb(priv, ring); + + /* Unmap successfully mapped control blocks */ + while (i-- > 0) { + tx_cb_ptr = bcmgenet_put_txcb(priv, ring); + bcmgenet_free_tx_cb(kdev, tx_cb_ptr); + } + + dev_kfree_skb(skb); + goto out; +} + +static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, + struct enet_cb *cb) +{ + struct device *kdev = &priv->pdev->dev; + struct sk_buff *skb; + struct sk_buff *rx_skb; + dma_addr_t mapping; + + /* Allocate a new Rx skb */ + skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT, + GFP_ATOMIC | __GFP_NOWARN); + if (!skb) { + priv->mib.alloc_rx_buff_failed++; + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb 
allocation failed\n", __func__); + return NULL; + } + + /* DMA-map the new Rx skb */ + mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(kdev, mapping)) { + priv->mib.rx_dma_failed++; + dev_kfree_skb_any(skb); + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb DMA mapping failed\n", __func__); + return NULL; + } + + /* Grab the current Rx skb from the ring and DMA-unmap it */ + rx_skb = bcmgenet_free_rx_cb(kdev, cb); + + /* Put the new Rx skb on the ring */ + cb->skb = skb; + dma_unmap_addr_set(cb, dma_addr, mapping); + dma_unmap_len_set(cb, dma_len, priv->rx_buf_len); + dmadesc_set_addr(priv, cb->bd_addr, mapping); + + /* Return the current Rx skb to caller */ + return rx_skb; +} + +/* bcmgenet_desc_rx - descriptor based rx process. + * this could be called from bottom half, or from NAPI polling method. + */ +static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, + unsigned int budget) +{ + struct bcmgenet_priv *priv = ring->priv; + struct net_device *dev = priv->dev; + struct enet_cb *cb; + struct sk_buff *skb; + u32 dma_length_status; + unsigned long dma_flag; + int len; + unsigned int rxpktprocessed = 0, rxpkttoprocess; + unsigned int bytes_processed = 0; + unsigned int p_index, mask; + unsigned int discards; + + /* Clear status before servicing to reduce spurious interrupts */ + if (ring->index == DESC_INDEX) { + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_CLEAR); + } else { + mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index); + bcmgenet_intrl2_1_writel(priv, + mask, + INTRL2_CPU_CLEAR); + } + + p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); + + discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) & + DMA_P_INDEX_DISCARD_CNT_MASK; + if (discards > ring->old_discards) { + discards = discards - ring->old_discards; + ring->errors += discards; + ring->old_discards += discards; + + /* Clear HW register when we reach 75% of maximum 0xFFFF */ + if (ring->old_discards >= 0xC000) { + ring->old_discards = 0; + bcmgenet_rdma_ring_writel(priv, ring->index, 0, + RDMA_PROD_INDEX); + } + } + + p_index &= DMA_P_INDEX_MASK; + rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK; + + netif_dbg(priv, rx_status, dev, + "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); + + while ((rxpktprocessed < rxpkttoprocess) && + (rxpktprocessed < budget)) { + struct status_64 *status; + __be16 rx_csum; + + cb = &priv->rx_cbs[ring->read_ptr]; + skb = bcmgenet_rx_refill(priv, cb); + + if (unlikely(!skb)) { + ring->dropped++; + goto next; + } + + status = (struct status_64 *)skb->data; + dma_length_status = status->length_status; + if (dev->features & NETIF_F_RXCSUM) { + rx_csum = (__force __be16)(status->rx_csum & 0xffff); + if (rx_csum) { + skb->csum = (__force __wsum)ntohs(rx_csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } + } + + /* DMA flags and length are still valid no matter how + * we got the Receive Status Vector (64B RSB or register) + */ + dma_flag = dma_length_status & 0xffff; + len = dma_length_status >> DMA_BUFLENGTH_SHIFT; + + netif_dbg(priv, rx_status, dev, + "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", + __func__, p_index, ring->c_index, + ring->read_ptr, dma_length_status); + + if (unlikely(len > RX_BUF_LENGTH)) { + netif_err(priv, rx_status, dev, "oversized packet\n"); + dev->stats.rx_length_errors++; + dev->stats.rx_errors++; + dev_kfree_skb_any(skb); + goto next; + } + + if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { + netif_err(priv, rx_status, dev, + 
"dropping fragmented packet!\n"); + ring->errors++; + dev_kfree_skb_any(skb); + goto next; + } + + /* report errors */ + if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | + DMA_RX_OV | + DMA_RX_NO | + DMA_RX_LG | + DMA_RX_RXER))) { + netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", + (unsigned int)dma_flag); + if (dma_flag & DMA_RX_CRC_ERROR) + dev->stats.rx_crc_errors++; + if (dma_flag & DMA_RX_OV) + dev->stats.rx_over_errors++; + if (dma_flag & DMA_RX_NO) + dev->stats.rx_frame_errors++; + if (dma_flag & DMA_RX_LG) + dev->stats.rx_length_errors++; + dev->stats.rx_errors++; + dev_kfree_skb_any(skb); + goto next; + } /* error packet */ + + skb_put(skb, len); + + /* remove RSB and hardware 2bytes added for IP alignment */ + skb_pull(skb, 66); + len -= 66; + + if (priv->crc_fwd_en) { + skb_trim(skb, len - ETH_FCS_LEN); + len -= ETH_FCS_LEN; + } + + bytes_processed += len; + + /*Finish setting up the received SKB and send it to the kernel*/ + skb->protocol = eth_type_trans(skb, priv->dev); + ring->packets++; + ring->bytes += len; + if (dma_flag & DMA_RX_MULT) + dev->stats.multicast++; + + /* Notify kernel */ + napi_gro_receive(&ring->napi, skb); + netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); + +next: + rxpktprocessed++; + if (likely(ring->read_ptr < ring->end_ptr)) + ring->read_ptr++; + else + ring->read_ptr = ring->cb_ptr; + + ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; + bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); + } + + ring->dim.bytes = bytes_processed; + ring->dim.packets = rxpktprocessed; + + return rxpktprocessed; +} + +/* Rx NAPI polling method */ +static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_rx_ring *ring = container_of(napi, + struct bcmgenet_rx_ring, napi); + struct dim_sample dim_sample = {}; + unsigned int work_done; + + work_done = bcmgenet_desc_rx(ring, budget); + + if (work_done < budget) { + napi_complete_done(napi, work_done); + ring->int_enable(ring); + } + + if (ring->dim.use_dim) { + dim_update_sample(ring->dim.event_ctr, ring->dim.packets, + ring->dim.bytes, &dim_sample); + net_dim(&ring->dim.dim, dim_sample); + } + + return work_done; +} + +static void bcmgenet_dim_work(struct work_struct *work) +{ + struct dim *dim = container_of(work, struct dim, work); + struct bcmgenet_net_dim *ndim = + container_of(dim, struct bcmgenet_net_dim, dim); + struct bcmgenet_rx_ring *ring = + container_of(ndim, struct bcmgenet_rx_ring, dim); + struct dim_cq_moder cur_profile = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + + bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts); + dim->state = DIM_START_MEASURE; +} + +/* Assign skb to RX DMA descriptor. 
*/ +static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, + struct bcmgenet_rx_ring *ring) +{ + struct enet_cb *cb; + struct sk_buff *skb; + int i; + + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); + + /* loop here for each buffer needing assign */ + for (i = 0; i < ring->size; i++) { + cb = ring->cbs + i; + skb = bcmgenet_rx_refill(priv, cb); + if (skb) + dev_consume_skb_any(skb); + if (!cb->skb) + return -ENOMEM; + } + + return 0; +} + +static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) +{ + struct sk_buff *skb; + struct enet_cb *cb; + int i; + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = &priv->rx_cbs[i]; + + skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb); + if (skb) + dev_consume_skb_any(skb); + } +} + +static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) +{ + u32 reg; + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) + return; + if (enable) + reg |= mask; + else + reg &= ~mask; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + /* UniMAC stops on a packet boundary, wait for a full-size packet + * to be processed + */ + if (enable == 0) + usleep_range(1000, 2000); +} + +static void reset_umac(struct bcmgenet_priv *priv) +{ + /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ + bcmgenet_rbuf_ctrl_set(priv, 0); + udelay(10); + + /* issue soft reset and disable MAC while updating its registers */ + bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); + udelay(2); +} + +static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) +{ + /* Mask all interrupts.*/ + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); + bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); + bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); +} + +static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) +{ + u32 int0_enable = 0; + + /* Monitor cable plug/unplugged event for internal PHY, external PHY + * and MoCA PHY + */ + if (priv->internal_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + int0_enable |= UMAC_IRQ_PHY_DET_R; + } else if (priv->ext_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + int0_enable |= UMAC_IRQ_LINK_EVENT; + } + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); +} + +static void init_umac(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + u32 reg; + u32 int0_enable = 0; + + dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); + + reset_umac(priv); + + /* clear tx/rx counter */ + bcmgenet_umac_writel(priv, + MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, + UMAC_MIB_CTRL); + bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); + + bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + + /* init tx registers, enable TSB */ + reg = bcmgenet_tbuf_ctrl_get(priv); + reg |= TBUF_64B_EN; + bcmgenet_tbuf_ctrl_set(priv, reg); + + /* init rx registers, enable ip header optimization and RSB */ + reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); + reg |= RBUF_ALIGN_2B | RBUF_64B_EN; + bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); + + /* enable rx checksumming */ + reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); + reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS; + /* If UniMAC forwards CRC, we need to skip over it to get + * a valid CHK bit to be set in the per-packet status word + */ + if 
(priv->crc_fwd_en) + reg |= RBUF_SKIP_FCS; + else + reg &= ~RBUF_SKIP_FCS; + bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL); + + if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) + bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); + + bcmgenet_intr_disable(priv); + + /* Configure backpressure vectors for MoCA */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + reg = bcmgenet_bp_mc_get(priv); + reg |= BIT(priv->hw_params->bp_in_en_shift); + + /* bp_mask: back pressure mask */ + if (netif_is_multiqueue(priv->dev)) + reg |= priv->hw_params->bp_in_mask; + else + reg &= ~priv->hw_params->bp_in_mask; + bcmgenet_bp_mc_set(priv, reg); + } + + /* Enable MDIO interrupts on GENET v3+ */ + if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) + int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); + + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); + + dev_dbg(kdev, "done init umac\n"); +} + +static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring, + void (*cb)(struct work_struct *work)) +{ + struct bcmgenet_net_dim *dim = &ring->dim; + + INIT_WORK(&dim->dim.work, cb); + dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + dim->event_ctr = 0; + dim->packets = 0; + dim->bytes = 0; +} + +static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring) +{ + struct bcmgenet_net_dim *dim = &ring->dim; + struct dim_cq_moder moder; + u32 usecs, pkts; + + usecs = ring->rx_coalesce_usecs; + pkts = ring->rx_max_coalesced_frames; + + /* If DIM was enabled, re-apply default parameters */ + if (dim->use_dim) { + moder = net_dim_get_def_rx_moderation(dim->dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + bcmgenet_set_rx_coalesce(ring, usecs, pkts); +} + +/* Initialize a Tx ring along with corresponding hardware registers */ +static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, + unsigned int index, unsigned int size, + unsigned int start_ptr, unsigned int end_ptr) +{ + struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; + u32 words_per_bd = WORDS_PER_BD(priv); + u32 flow_period_val = 0; + + spin_lock_init(&ring->lock); + ring->priv = priv; + ring->index = index; + if (index == DESC_INDEX) { + ring->queue = 0; + ring->int_enable = bcmgenet_tx_ring16_int_enable; + ring->int_disable = bcmgenet_tx_ring16_int_disable; + } else { + ring->queue = index + 1; + ring->int_enable = bcmgenet_tx_ring_int_enable; + ring->int_disable = bcmgenet_tx_ring_int_disable; + } + ring->cbs = priv->tx_cbs + start_ptr; + ring->size = size; + ring->clean_ptr = start_ptr; + ring->c_index = 0; + ring->free_bds = size; + ring->write_ptr = start_ptr; + ring->cb_ptr = start_ptr; + ring->end_ptr = end_ptr - 1; + ring->prod_index = 0; + + /* Set flow period for ring != 16 */ + if (index != DESC_INDEX) + flow_period_val = ENET_MAX_MTU_SIZE << 16; + + bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); + bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); + bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); + /* Disable rate control for now */ + bcmgenet_tdma_ring_writel(priv, index, flow_period_val, + TDMA_FLOW_PERIOD); + bcmgenet_tdma_ring_writel(priv, index, + ((size << DMA_RING_SIZE_SHIFT) | + RX_BUF_LENGTH), DMA_RING_BUF_SIZE); + + /* Set start and end address, read and write pointers */ + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + DMA_START_ADDR); + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + TDMA_READ_PTR); + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + TDMA_WRITE_PTR); + 
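+ /* Note (added for clarity, not in the upstream driver): end_ptr is
+  * exclusive, so with words_per_bd 32-bit words per descriptor the ring
+  * spans [start_ptr * words_per_bd, end_ptr * words_per_bd - 1].
+  */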
bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); + + /* Initialize Tx NAPI */ + netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll); +} + +/* Initialize a RDMA ring */ +static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, + unsigned int index, unsigned int size, + unsigned int start_ptr, unsigned int end_ptr) +{ + struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; + u32 words_per_bd = WORDS_PER_BD(priv); + int ret; + + ring->priv = priv; + ring->index = index; + if (index == DESC_INDEX) { + ring->int_enable = bcmgenet_rx_ring16_int_enable; + ring->int_disable = bcmgenet_rx_ring16_int_disable; + } else { + ring->int_enable = bcmgenet_rx_ring_int_enable; + ring->int_disable = bcmgenet_rx_ring_int_disable; + } + ring->cbs = priv->rx_cbs + start_ptr; + ring->size = size; + ring->c_index = 0; + ring->read_ptr = start_ptr; + ring->cb_ptr = start_ptr; + ring->end_ptr = end_ptr - 1; + + ret = bcmgenet_alloc_rx_buffers(priv, ring); + if (ret) + return ret; + + bcmgenet_init_dim(ring, bcmgenet_dim_work); + bcmgenet_init_rx_coalesce(ring); + + /* Initialize Rx NAPI */ + netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll); + + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); + bcmgenet_rdma_ring_writel(priv, index, + ((size << DMA_RING_SIZE_SHIFT) | + RX_BUF_LENGTH), DMA_RING_BUF_SIZE); + bcmgenet_rdma_ring_writel(priv, index, + (DMA_FC_THRESH_LO << + DMA_XOFF_THRESHOLD_SHIFT) | + DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); + + /* Set start and end address, read and write pointers */ + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + DMA_START_ADDR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_READ_PTR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_WRITE_PTR); + bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); + + return ret; +} + +static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + napi_enable(&ring->napi); + ring->int_enable(ring); + } + + ring = &priv->tx_rings[DESC_INDEX]; + napi_enable(&ring->napi); + ring->int_enable(ring); +} + +static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + napi_disable(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + napi_disable(&ring->napi); +} + +static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + netif_napi_del(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + netif_napi_del(&ring->napi); +} + +/* Initialize Tx queues + * + * Queues 0-3 are priority-based, each one has 32 descriptors, + * with queue 0 being the highest priority queue. + * + * Queue 16 is the default Tx queue with + * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. 
+ * + * The transmit control block pool is then partitioned as follows: + * - Tx queue 0 uses tx_cbs[0..31] + * - Tx queue 1 uses tx_cbs[32..63] + * - Tx queue 2 uses tx_cbs[64..95] + * - Tx queue 3 uses tx_cbs[96..127] + * - Tx queue 16 uses tx_cbs[128..255] + */ +static void bcmgenet_init_tx_queues(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 i, dma_enable; + u32 dma_ctrl, ring_cfg; + u32 dma_priority[3] = {0, 0, 0}; + + dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); + + dma_ctrl = 0; + ring_cfg = 0; + + /* Enable strict priority arbiter mode */ + bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); + + /* Initialize Tx priority queues */ + for (i = 0; i < priv->hw_params->tx_queues; i++) { + bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, + i * priv->hw_params->tx_bds_per_q, + (i + 1) * priv->hw_params->tx_bds_per_q); + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + dma_priority[DMA_PRIO_REG_INDEX(i)] |= + ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); + } + + /* Initialize Tx default queue 16 */ + bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, + priv->hw_params->tx_queues * + priv->hw_params->tx_bds_per_q, + TOTAL_DESC); + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); + dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= + ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << + DMA_PRIO_REG_SHIFT(DESC_INDEX)); + + /* Set Tx queue priorities */ + bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); + bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); + bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); + + /* Enable Tx queues */ + bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); + + /* Enable Tx DMA */ + if (dma_enable) + dma_ctrl |= DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); +} + +static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + napi_enable(&ring->napi); + ring->int_enable(ring); + } + + ring = &priv->rx_rings[DESC_INDEX]; + napi_enable(&ring->napi); + ring->int_enable(ring); +} + +static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + napi_disable(&ring->napi); + cancel_work_sync(&ring->dim.dim.work); + } + + ring = &priv->rx_rings[DESC_INDEX]; + napi_disable(&ring->napi); + cancel_work_sync(&ring->dim.dim.work); +} + +static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + netif_napi_del(&ring->napi); + } + + ring = &priv->rx_rings[DESC_INDEX]; + netif_napi_del(&ring->napi); +} + +/* Initialize Rx queues + * + * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be + * used to direct traffic to these queues. + * + * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors. 
+ */ +static int bcmgenet_init_rx_queues(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 i; + u32 dma_enable; + u32 dma_ctrl; + u32 ring_cfg; + int ret; + + dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + dma_ctrl = 0; + ring_cfg = 0; + + /* Initialize Rx priority queues */ + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ret = bcmgenet_init_rx_ring(priv, i, + priv->hw_params->rx_bds_per_q, + i * priv->hw_params->rx_bds_per_q, + (i + 1) * + priv->hw_params->rx_bds_per_q); + if (ret) + return ret; + + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + } + + /* Initialize Rx default queue 16 */ + ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT, + priv->hw_params->rx_queues * + priv->hw_params->rx_bds_per_q, + TOTAL_DESC); + if (ret) + return ret; + + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); + + /* Enable rings */ + bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); + + /* Configure ring as descriptor ring and re-enable DMA if enabled */ + if (dma_enable) + dma_ctrl |= DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + return 0; +} + +static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) +{ + int ret = 0; + int timeout = 0; + u32 reg; + u32 dma_ctrl; + int i; + + /* Disable TDMA to stop add more frames in TX DMA */ + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + /* Check TDMA status register to confirm TDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_tdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); + ret = -ETIMEDOUT; + } + + /* Wait 10ms for packet drain in both tx and rx dma */ + usleep_range(10000, 20000); + + /* Disable RDMA */ + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + timeout = 0; + /* Check RDMA status register to confirm RDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_rdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); + ret = -ETIMEDOUT; + } + + dma_ctrl = 0; + for (i = 0; i < priv->hw_params->rx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + dma_ctrl = 0; + for (i = 0; i < priv->hw_params->tx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + return ret; +} + +static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) +{ + struct netdev_queue *txq; + int i; + + bcmgenet_fini_rx_napi(priv); + bcmgenet_fini_tx_napi(priv); + + for (i = 0; i < priv->num_tx_bds; i++) + dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev, + priv->tx_cbs + i)); + + for (i = 0; i < priv->hw_params->tx_queues; i++) { + txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue); + netdev_tx_reset_queue(txq); + } + + txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue); + netdev_tx_reset_queue(txq); + + 
bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); + kfree(priv->tx_cbs); +} + +/* init_edma: Initialize DMA control register */ +static int bcmgenet_init_dma(struct bcmgenet_priv *priv) +{ + int ret; + unsigned int i; + struct enet_cb *cb; + + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); + + /* Initialize common Rx ring structures */ + priv->rx_bds = priv->base + priv->hw_params->rdma_offset; + priv->num_rx_bds = TOTAL_DESC; + priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->rx_cbs) + return -ENOMEM; + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = priv->rx_cbs + i; + cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; + } + + /* Initialize common TX ring structures */ + priv->tx_bds = priv->base + priv->hw_params->tdma_offset; + priv->num_tx_bds = TOTAL_DESC; + priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->tx_cbs) { + kfree(priv->rx_cbs); + return -ENOMEM; + } + + for (i = 0; i < priv->num_tx_bds; i++) { + cb = priv->tx_cbs + i; + cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; + } + + /* Init rDma */ + bcmgenet_rdma_writel(priv, priv->dma_max_burst_length, + DMA_SCB_BURST_SIZE); + + /* Initialize Rx queues */ + ret = bcmgenet_init_rx_queues(priv->dev); + if (ret) { + netdev_err(priv->dev, "failed to initialize Rx queues\n"); + bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); + kfree(priv->tx_cbs); + return ret; + } + + /* Init tDma */ + bcmgenet_tdma_writel(priv, priv->dma_max_burst_length, + DMA_SCB_BURST_SIZE); + + /* Initialize Tx queues */ + bcmgenet_init_tx_queues(priv->dev); + + return 0; +} + +/* Interrupt bottom half */ +static void bcmgenet_irq_task(struct work_struct *work) +{ + unsigned int status; + struct bcmgenet_priv *priv = container_of( + work, struct bcmgenet_priv, bcmgenet_irq_work); + + netif_dbg(priv, intr, priv->dev, "%s\n", __func__); + + spin_lock_irq(&priv->lock); + status = priv->irq0_stat; + priv->irq0_stat = 0; + spin_unlock_irq(&priv->lock); + + if (status & UMAC_IRQ_PHY_DET_R && + priv->dev->phydev->autoneg != AUTONEG_ENABLE) { + phy_init_hw(priv->dev->phydev); + genphy_config_aneg(priv->dev->phydev); + } + + /* Link UP/DOWN event */ + if (status & UMAC_IRQ_LINK_EVENT) + phy_mac_interrupt(priv->dev->phydev); + +} + +/* bcmgenet_isr1: handle Rx and Tx priority queues */ +static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) +{ + struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_ring *tx_ring; + unsigned int index, status; + + /* Read irq status */ + status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & + ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + + /* clear interrupts */ + bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR); + + netif_dbg(priv, intr, priv->dev, + "%s: IRQ=0x%x\n", __func__, status); + + /* Check Rx priority queue interrupts */ + for (index = 0; index < priv->hw_params->rx_queues; index++) { + if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) + continue; + + rx_ring = &priv->rx_rings[index]; + rx_ring->dim.event_ctr++; + + if (likely(napi_schedule_prep(&rx_ring->napi))) { + rx_ring->int_disable(rx_ring); + __napi_schedule_irqoff(&rx_ring->napi); + } + } + + /* Check Tx priority queue interrupts */ + for (index = 0; index < priv->hw_params->tx_queues; index++) { + if (!(status & BIT(index))) + continue; + + tx_ring = &priv->tx_rings[index]; + + if (likely(napi_schedule_prep(&tx_ring->napi))) { + tx_ring->int_disable(tx_ring); + __napi_schedule_irqoff(&tx_ring->napi); 
+ } + } + + return IRQ_HANDLED; +} + +/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */ +static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) +{ + struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_ring *tx_ring; + unsigned int status; + unsigned long flags; + + /* Read irq status */ + status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & + ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + + /* clear interrupts */ + bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR); + + netif_dbg(priv, intr, priv->dev, + "IRQ=0x%x\n", status); + + if (status & UMAC_IRQ_RXDMA_DONE) { + rx_ring = &priv->rx_rings[DESC_INDEX]; + rx_ring->dim.event_ctr++; + + if (likely(napi_schedule_prep(&rx_ring->napi))) { + rx_ring->int_disable(rx_ring); + __napi_schedule_irqoff(&rx_ring->napi); + } + } + + if (status & UMAC_IRQ_TXDMA_DONE) { + tx_ring = &priv->tx_rings[DESC_INDEX]; + + if (likely(napi_schedule_prep(&tx_ring->napi))) { + tx_ring->int_disable(tx_ring); + __napi_schedule_irqoff(&tx_ring->napi); + } + } + + if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && + status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { + wake_up(&priv->wq); + } + + /* all other interested interrupts handled in bottom half */ + status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R); + if (status) { + /* Save irq status for bottom-half processing. */ + spin_lock_irqsave(&priv->lock, flags); + priv->irq0_stat |= status; + spin_unlock_irqrestore(&priv->lock, flags); + + schedule_work(&priv->bcmgenet_irq_work); + } + + return IRQ_HANDLED; +} + +static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) +{ + /* Acknowledge the interrupt */ + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void bcmgenet_poll_controller(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Invoke the main RX/TX interrupt handler */ + disable_irq(priv->irq0); + bcmgenet_isr0(priv->irq0, priv); + enable_irq(priv->irq0); + + /* And the interrupt handler for RX/TX priority queues */ + disable_irq(priv->irq1); + bcmgenet_isr1(priv->irq1, priv); + enable_irq(priv->irq1); +} +#endif + +static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) +{ + u32 reg; + + reg = bcmgenet_rbuf_ctrl_get(priv); + reg |= BIT(1); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); + + reg &= ~BIT(1); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); +} + +static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, + const unsigned char *addr) +{ + bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0); + bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1); +} + +static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv, + unsigned char *addr) +{ + u32 addr_tmp; + + addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0); + put_unaligned_be32(addr_tmp, &addr[0]); + addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1); + put_unaligned_be16(addr_tmp, &addr[4]); +} + +/* Returns a reusable dma control register value */ +static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) +{ + unsigned int i; + u32 reg; + u32 dma_ctrl; + + /* disable DMA */ + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->tx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->rx_queues; i++) + 
dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); + udelay(10); + bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); + + return dma_ctrl; +} + +static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) +{ + u32 reg; + + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); +} + +static void bcmgenet_netif_start(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Start the network engine */ + bcmgenet_set_rx_mode(dev); + bcmgenet_enable_rx_napi(priv); + + umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); + + bcmgenet_enable_tx_napi(priv); + + /* Monitor link interrupts now */ + bcmgenet_link_intr_enable(priv); + + phy_start(dev->phydev); +} + +static int bcmgenet_open(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned long dma_ctrl; + int ret; + + netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); + + /* Turn on the clock */ + clk_prepare_enable(priv->clk); + + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (priv->internal_phy) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + /* take MAC out of reset */ + bcmgenet_umac_reset(priv); + + init_umac(priv); + + /* Apply features again in case we changed them while interface was + * down + */ + bcmgenet_set_features(dev, dev->features); + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + + /* Disable RX/TX DMA and flush TX queues */ + dma_ctrl = bcmgenet_dma_disable(priv); + + /* Reinitialize TDMA and RDMA and SW housekeeping */ + ret = bcmgenet_init_dma(priv); + if (ret) { + netdev_err(dev, "failed to initialize DMA\n"); + goto err_clk_disable; + } + + /* Always enable ring 16 - descriptor ring */ + bcmgenet_enable_dma(priv, dma_ctrl); + + /* HFB init */ + bcmgenet_hfb_init(priv); + + ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, + dev->name, priv); + if (ret < 0) { + netdev_err(dev, "can't request IRQ %d\n", priv->irq0); + goto err_fini_dma; + } + + ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, + dev->name, priv); + if (ret < 0) { + netdev_err(dev, "can't request IRQ %d\n", priv->irq1); + goto err_irq0; + } + + ret = bcmgenet_mii_probe(dev); + if (ret) { + netdev_err(dev, "failed to connect to PHY\n"); + goto err_irq1; + } + + bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause); + + bcmgenet_netif_start(dev); + + netif_tx_start_all_queues(dev); + + return 0; + +err_irq1: + free_irq(priv->irq1, priv); +err_irq0: + free_irq(priv->irq0, priv); +err_fini_dma: + bcmgenet_dma_teardown(priv); + bcmgenet_fini_dma(priv); +err_clk_disable: + if (priv->internal_phy) + bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + clk_disable_unprepare(priv->clk); + return ret; +} + +static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + bcmgenet_disable_tx_napi(priv); + netif_tx_disable(dev); + + /* Disable MAC receive */ + umac_enable_set(priv, CMD_RX_EN, false); + + bcmgenet_dma_teardown(priv); + + /* Disable MAC transmit. 
TX DMA disabled must be done before this */ + umac_enable_set(priv, CMD_TX_EN, false); + + if (stop_phy) + phy_stop(dev->phydev); + bcmgenet_disable_rx_napi(priv); + bcmgenet_intr_disable(priv); + + /* Wait for pending work items to complete. Since interrupts are + * disabled no new work will be scheduled. + */ + cancel_work_sync(&priv->bcmgenet_irq_work); + + /* tx reclaim */ + bcmgenet_tx_reclaim_all(dev); + bcmgenet_fini_dma(priv); +} + +static int bcmgenet_close(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret = 0; + + netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); + + bcmgenet_netif_stop(dev, false); + + /* Really kill the PHY state machine and disconnect from it */ + phy_disconnect(dev->phydev); + + free_irq(priv->irq0, priv); + free_irq(priv->irq1, priv); + + if (priv->internal_phy) + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + + clk_disable_unprepare(priv->clk); + + return ret; +} + +static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = ring->priv; + u32 p_index, c_index, intsts, intmsk; + struct netdev_queue *txq; + unsigned int free_bds; + bool txq_stopped; + + if (!netif_msg_tx_err(priv)) + return; + + txq = netdev_get_tx_queue(priv->dev, ring->queue); + + spin_lock(&ring->lock); + if (ring->index == DESC_INDEX) { + intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE; + } else { + intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = 1 << ring->index; + } + c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); + p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); + txq_stopped = netif_tx_queue_stopped(txq); + free_bds = ring->free_bds; + spin_unlock(&ring->lock); + + netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" + "TX queue status: %s, interrupts: %s\n" + "(sw)free_bds: %d (sw)size: %d\n" + "(sw)p_index: %d (hw)p_index: %d\n" + "(sw)c_index: %d (hw)c_index: %d\n" + "(sw)clean_p: %d (sw)write_p: %d\n" + "(sw)cb_ptr: %d (sw)end_ptr: %d\n", + ring->index, ring->queue, + txq_stopped ? "stopped" : "active", + intsts & intmsk ? 
"enabled" : "disabled", + free_bds, ring->size, + ring->prod_index, p_index & DMA_P_INDEX_MASK, + ring->c_index, c_index & DMA_C_INDEX_MASK, + ring->clean_ptr, ring->write_ptr, + ring->cb_ptr, ring->end_ptr); +} + +static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 int0_enable = 0; + u32 int1_enable = 0; + unsigned int q; + + netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); + + for (q = 0; q < priv->hw_params->tx_queues; q++) + bcmgenet_dump_tx_queue(&priv->tx_rings[q]); + bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]); + + bcmgenet_tx_reclaim_all(dev); + + for (q = 0; q < priv->hw_params->tx_queues; q++) + int1_enable |= (1 << q); + + int0_enable = UMAC_IRQ_TXDMA_DONE; + + /* Re-enable TX interrupts if disabled */ + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); + bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); + + netif_trans_update(dev); + + dev->stats.tx_errors++; + + netif_tx_wake_all_queues(dev); +} + +#define MAX_MDF_FILTER 17 + +static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, + const unsigned char *addr, + int *i) +{ + bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], + UMAC_MDF_ADDR + (*i * 4)); + bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 | + addr[4] << 8 | addr[5], + UMAC_MDF_ADDR + ((*i + 1) * 4)); + *i += 2; +} + +static void bcmgenet_set_rx_mode(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct netdev_hw_addr *ha; + int i, nfilter; + u32 reg; + + netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); + + /* Number of filters needed */ + nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2; + + /* + * Turn on promicuous mode for three scenarios + * 1. IFF_PROMISC flag is set + * 2. IFF_ALLMULTI flag is set + * 3. The number of filters needed exceeds the number filters + * supported by the hardware. + */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || + (nfilter > MAX_MDF_FILTER)) { + reg |= CMD_PROMISC; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); + return; + } else { + reg &= ~CMD_PROMISC; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + } + + /* update MDF filter */ + i = 0; + /* Broadcast */ + bcmgenet_set_mdf_addr(priv, dev->broadcast, &i); + /* my own address.*/ + bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i); + + /* Unicast */ + netdev_for_each_uc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i); + + /* Multicast */ + netdev_for_each_mc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i); + + /* Enable filters */ + reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter); + bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); +} + +/* Set the hardware MAC address. */ +static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + /* Setting the MAC address at the hardware level is not possible + * without disabling the UniMAC RX/TX enable bits. 
+ */ + if (netif_running(dev)) + return -EBUSY; + + eth_hw_addr_set(dev, addr->sa_data); + + return 0; +} + +static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned long tx_bytes = 0, tx_packets = 0; + unsigned long rx_bytes = 0, rx_packets = 0; + unsigned long rx_errors = 0, rx_dropped = 0; + struct bcmgenet_tx_ring *tx_ring; + struct bcmgenet_rx_ring *rx_ring; + unsigned int q; + + for (q = 0; q < priv->hw_params->tx_queues; q++) { + tx_ring = &priv->tx_rings[q]; + tx_bytes += tx_ring->bytes; + tx_packets += tx_ring->packets; + } + tx_ring = &priv->tx_rings[DESC_INDEX]; + tx_bytes += tx_ring->bytes; + tx_packets += tx_ring->packets; + + for (q = 0; q < priv->hw_params->rx_queues; q++) { + rx_ring = &priv->rx_rings[q]; + + rx_bytes += rx_ring->bytes; + rx_packets += rx_ring->packets; + rx_errors += rx_ring->errors; + rx_dropped += rx_ring->dropped; + } + rx_ring = &priv->rx_rings[DESC_INDEX]; + rx_bytes += rx_ring->bytes; + rx_packets += rx_ring->packets; + rx_errors += rx_ring->errors; + rx_dropped += rx_ring->dropped; + + dev->stats.tx_bytes = tx_bytes; + dev->stats.tx_packets = tx_packets; + dev->stats.rx_bytes = rx_bytes; + dev->stats.rx_packets = rx_packets; + dev->stats.rx_errors = rx_errors; + dev->stats.rx_missed_errors = rx_errors; + dev->stats.rx_dropped = rx_dropped; + return &dev->stats; +} + +static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) || + priv->phy_interface != PHY_INTERFACE_MODE_MOCA) + return -EOPNOTSUPP; + + if (new_carrier) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + return 0; +} + +static const struct net_device_ops bcmgenet_netdev_ops = { + .ndo_open = bcmgenet_open, + .ndo_stop = bcmgenet_close, + .ndo_start_xmit = bcmgenet_xmit, + .ndo_tx_timeout = bcmgenet_timeout, + .ndo_set_rx_mode = bcmgenet_set_rx_mode, + .ndo_set_mac_address = bcmgenet_set_mac_addr, + .ndo_eth_ioctl = phy_do_ioctl_running, + .ndo_set_features = bcmgenet_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bcmgenet_poll_controller, +#endif + .ndo_get_stats = bcmgenet_get_stats, + .ndo_change_carrier = bcmgenet_change_carrier, +}; + +/* Array of GENET hardware parameters/characteristics */ +static struct bcmgenet_hw_params bcmgenet_hw_params[] = { + [GENET_V1] = { + .tx_queues = 0, + .tx_bds_per_q = 0, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .qtag_mask = 0x1F, + .hfb_offset = 0x1000, + .rdma_offset = 0x2000, + .tdma_offset = 0x3000, + .words_per_bd = 2, + }, + [GENET_V2] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .qtag_mask = 0x1F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x1000, + .hfb_reg_offset = 0x2000, + .rdma_offset = 0x3000, + .tdma_offset = 0x4000, + .words_per_bd = 2, + .flags = GENET_HAS_EXT, + }, + [GENET_V3] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x10000, + .tdma_offset = 0x11000, + .words_per_bd = 2, + .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR | + GENET_HAS_MOCA_LINK_DET, + }, + 
[GENET_V4] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x2000, + .tdma_offset = 0x4000, + .words_per_bd = 3, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, + }, + [GENET_V5] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x2000, + .tdma_offset = 0x4000, + .words_per_bd = 3, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, + }, +}; + +/* Infer hardware parameters from the detected GENET version */ +static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) +{ + struct bcmgenet_hw_params *params; + u32 reg; + u8 major; + u16 gphy_rev; + + if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v4; + } else if (GENET_IS_V3(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } else if (GENET_IS_V2(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v2; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } else if (GENET_IS_V1(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v1; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + } + + /* enum genet_version starts at 1 */ + priv->hw_params = &bcmgenet_hw_params[priv->version]; + params = priv->hw_params; + + /* Read GENET HW version */ + reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); + major = (reg >> 24 & 0x0f); + if (major == 6) + major = 5; + else if (major == 5) + major = 4; + else if (major == 0) + major = 1; + if (major != priv->version) { + dev_err(&priv->pdev->dev, + "GENET version mismatch, got: %d, configured for: %d\n", + major, priv->version); + } + + /* Print the GENET core version */ + dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, + major, (reg >> 16) & 0x0f, reg & 0xffff); + + /* Store the integrated PHY revision for the MDIO probing function + * to pass this information to the PHY driver. The PHY driver expects + * to find the PHY major revision in bits 15:8 while the GENET register + * stores that information in bits 7:0, account for that. + * + * On newer chips, starting with PHY revision G0, a new scheme is + * deployed similar to the Starfighter 2 switch with GPHY major + * revision in bits 15:8 and patch level in bits 7:0. Major revision 0 + * is reserved as well as special value 0x01ff, we have a small + * heuristic to check for the new GPHY revision and re-arrange things + * so the GPHY driver is happy. + */ + gphy_rev = reg & 0xffff; + + if (GENET_IS_V5(priv)) { + /* The EPHY revision should come from the MDIO registers of + * the PHY not from GENET. 
+ */ + if (gphy_rev != 0) { + pr_warn("GENET is reporting EPHY revision: 0x%04x\n", + gphy_rev); + } + /* This is reserved so should require special treatment */ + } else if (gphy_rev == 0 || gphy_rev == 0x01ff) { + pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev); + return; + /* This is the good old scheme, just GPHY major, no minor nor patch */ + } else if ((gphy_rev & 0xf0) != 0) { + priv->gphy_rev = gphy_rev << 8; + /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */ + } else if ((gphy_rev & 0xff00) != 0) { + priv->gphy_rev = gphy_rev; + } + +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (!(params->flags & GENET_HAS_40BITS)) + pr_warn("GENET does not support 40-bits PA\n"); +#endif + + pr_debug("Configuration for version: %d\n" + "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n" + "BP << en: %2d, BP msk: 0x%05x\n" + "HFB count: %2d, QTAQ msk: 0x%05x\n" + "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" + "RDMA: 0x%05x, TDMA: 0x%05x\n" + "Words/BD: %d\n", + priv->version, + params->tx_queues, params->tx_bds_per_q, + params->rx_queues, params->rx_bds_per_q, + params->bp_in_en_shift, params->bp_in_mask, + params->hfb_filter_cnt, params->qtag_mask, + params->tbuf_offset, params->hfb_offset, + params->hfb_reg_offset, + params->rdma_offset, params->tdma_offset, + params->words_per_bd); +} + +struct bcmgenet_plat_data { + enum bcmgenet_version version; + u32 dma_max_burst_length; + bool ephy_16nm; +}; + +static const struct bcmgenet_plat_data v1_plat_data = { + .version = GENET_V1, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v2_plat_data = { + .version = GENET_V2, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v3_plat_data = { + .version = GENET_V3, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v4_plat_data = { + .version = GENET_V4, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v5_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data bcm2711_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = 0x08, +}; + +static const struct bcmgenet_plat_data bcm7712_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, + .ephy_16nm = true, +}; + +static const struct of_device_id bcmgenet_match[] = { + { .compatible = "brcm,genet-v1", .data = &v1_plat_data }, + { .compatible = "brcm,genet-v2", .data = &v2_plat_data }, + { .compatible = "brcm,genet-v3", .data = &v3_plat_data }, + { .compatible = "brcm,genet-v4", .data = &v4_plat_data }, + { .compatible = "brcm,genet-v5", .data = &v5_plat_data }, + { .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data }, + { .compatible = "brcm,bcm7712-genet-v5", .data = &bcm7712_plat_data }, + { }, +}; +MODULE_DEVICE_TABLE(of, bcmgenet_match); + +static int bcmgenet_probe(struct platform_device *pdev) +{ + struct bcmgenet_platform_data *pd = pdev->dev.platform_data; + const struct bcmgenet_plat_data *pdata; + struct bcmgenet_priv *priv; + struct net_device *dev; + unsigned int i; + int err = -EIO; + + /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ + dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, + GENET_MAX_MQ_CNT + 1); + if (!dev) { + dev_err(&pdev->dev, "can't allocate net device\n"); + return -ENOMEM; + } + + priv = netdev_priv(dev); + priv->irq0 = platform_get_irq(pdev, 0); + if (priv->irq0 < 0) { + err = 
priv->irq0; + goto err; + } + priv->irq1 = platform_get_irq(pdev, 1); + if (priv->irq1 < 0) { + err = priv->irq1; + goto err; + } + priv->wol_irq = platform_get_irq_optional(pdev, 2); + if (priv->wol_irq == -EPROBE_DEFER) { + err = priv->wol_irq; + goto err; + } + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) { + err = PTR_ERR(priv->base); + goto err; + } + + spin_lock_init(&priv->lock); + + /* Set default pause parameters */ + priv->autoneg_pause = 1; + priv->tx_pause = 1; + priv->rx_pause = 1; + + SET_NETDEV_DEV(dev, &pdev->dev); + dev_set_drvdata(&pdev->dev, dev); + dev->watchdog_timeo = 2 * HZ; + dev->ethtool_ops = &bcmgenet_ethtool_ops; + dev->netdev_ops = &bcmgenet_netdev_ops; + + priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); + + /* Set default features */ + dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | + NETIF_F_RXCSUM; + dev->hw_features |= dev->features; + dev->vlan_features |= dev->features; + + /* Request the WOL interrupt and advertise suspend if available */ + priv->wol_irq_disabled = true; + if (priv->wol_irq > 0) { + err = devm_request_irq(&pdev->dev, priv->wol_irq, + bcmgenet_wol_isr, 0, dev->name, priv); + if (!err) + device_set_wakeup_capable(&pdev->dev, 1); + } + + /* Set the needed headroom to account for any possible + * features enabling/disabling at runtime + */ + dev->needed_headroom += 64; + + priv->dev = dev; + priv->pdev = pdev; + + pdata = device_get_match_data(&pdev->dev); + if (pdata) { + priv->version = pdata->version; + priv->dma_max_burst_length = pdata->dma_max_burst_length; + priv->ephy_16nm = pdata->ephy_16nm; + } else { + priv->version = pd->genet_version; + priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH; + } + + priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet"); + if (IS_ERR(priv->clk)) { + dev_dbg(&priv->pdev->dev, "failed to get enet clock\n"); + err = PTR_ERR(priv->clk); + goto err; + } + + err = clk_prepare_enable(priv->clk); + if (err) + goto err; + + bcmgenet_set_hw_params(priv); + + err = -EIO; + if (priv->hw_params->flags & GENET_HAS_40BITS) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (err) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + goto err_clk_disable; + + /* Mii wait queue */ + init_waitqueue_head(&priv->wq); + /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ + priv->rx_buf_len = RX_BUF_LENGTH; + INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); + + priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol"); + if (IS_ERR(priv->clk_wol)) { + dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n"); + err = PTR_ERR(priv->clk_wol); + goto err_clk_disable; + } + + priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee"); + if (IS_ERR(priv->clk_eee)) { + dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n"); + err = PTR_ERR(priv->clk_eee); + goto err_clk_disable; + } + + /* If this is an internal GPHY, power it on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + if (pd && !IS_ERR_OR_NULL(pd->mac_address)) + eth_hw_addr_set(dev, pd->mac_address); + else + if (device_get_ethdev_address(&pdev->dev, dev)) + if (has_acpi_companion(&pdev->dev)) { + u8 addr[ETH_ALEN]; + + bcmgenet_get_hw_addr(priv, addr); + eth_hw_addr_set(dev, addr); + } + + if (!is_valid_ether_addr(dev->dev_addr)) { + 
dev_warn(&pdev->dev, "using random Ethernet MAC\n"); + eth_hw_addr_random(dev); + } + + reset_umac(priv); + + err = bcmgenet_mii_init(dev); + if (err) + goto err_clk_disable; + + /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues + * just the ring 16 descriptor based TX + */ + netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); + netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); + + /* Set default coalescing parameters */ + for (i = 0; i < priv->hw_params->rx_queues; i++) + priv->rx_rings[i].rx_max_coalesced_frames = 1; + priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1; + + /* libphy will determine the link state */ + netif_carrier_off(dev); + + /* Turn off the main clock, WOL clock is handled separately */ + clk_disable_unprepare(priv->clk); + + err = register_netdev(dev); + if (err) { + bcmgenet_mii_exit(dev); + goto err; + } + + return err; + +err_clk_disable: + clk_disable_unprepare(priv->clk); +err: + free_netdev(dev); + return err; +} + +static int bcmgenet_remove(struct platform_device *pdev) +{ + struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); + + dev_set_drvdata(&pdev->dev, NULL); + unregister_netdev(priv->dev); + bcmgenet_mii_exit(priv->dev); + free_netdev(priv->dev); + + return 0; +} + +static void bcmgenet_shutdown(struct platform_device *pdev) +{ + bcmgenet_remove(pdev); +} + +#ifdef CONFIG_PM_SLEEP +static int bcmgenet_resume_noirq(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + u32 reg; + + if (!netif_running(dev)) + return 0; + + /* Turn on the clock */ + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + if (device_may_wakeup(d) && priv->wolopts) { + /* Account for Wake-on-LAN events and clear those events + * (Some devices need more time between enabling the clocks + * and the interrupt register reflecting the wake event so + * read the register twice) + */ + reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT); + reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT); + if (reg & UMAC_IRQ_WAKE_EVENT) + pm_wakeup_event(&priv->pdev->dev, 0); + } + + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR); + + return 0; +} + +static int bcmgenet_resume(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + unsigned long dma_ctrl; + int ret; + + if (!netif_running(dev)) + return 0; + + /* From WOL-enabled suspend, switch to regular clock */ + if (device_may_wakeup(d) && priv->wolopts) + bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); + + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (priv->internal_phy) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + bcmgenet_umac_reset(priv); + + init_umac(priv); + + phy_init_hw(dev->phydev); + + /* Speed settings must be restored */ + genphy_config_aneg(dev->phydev); + bcmgenet_mii_config(priv->dev, false); + + /* Restore enabled features */ + bcmgenet_set_features(dev, dev->features); + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + + /* Restore hardware filters */ + bcmgenet_hfb_clear(priv); + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) + bcmgenet_hfb_create_rxnfc_filter(priv, rule); + + /* Disable RX/TX DMA and flush TX queues */ + dma_ctrl = bcmgenet_dma_disable(priv); + + /* Reinitialize TDMA and 
RDMA and SW housekeeping */ + ret = bcmgenet_init_dma(priv); + if (ret) { + netdev_err(dev, "failed to initialize DMA\n"); + goto out_clk_disable; + } + + /* Always enable ring 16 - descriptor ring */ + bcmgenet_enable_dma(priv, dma_ctrl); + + if (!device_may_wakeup(d)) + phy_resume(dev->phydev); + + bcmgenet_netif_start(dev); + + netif_device_attach(dev); + + return 0; + +out_clk_disable: + if (priv->internal_phy) + bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + clk_disable_unprepare(priv->clk); + return ret; +} + +static int bcmgenet_suspend(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + netif_device_detach(dev); + + bcmgenet_netif_stop(dev, true); + + if (!device_may_wakeup(d)) + phy_suspend(dev->phydev); + + /* Disable filtering */ + bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL); + + return 0; +} + +static int bcmgenet_suspend_noirq(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret = 0; + + if (!netif_running(dev)) + return 0; + + /* Prepare the device for Wake-on-LAN and switch to the slow clock */ + if (device_may_wakeup(d) && priv->wolopts) + ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); + else if (priv->internal_phy) + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + + /* Let the framework handle resumption and leave the clocks on */ + if (ret) + return ret; + + /* Turn off the clocks */ + clk_disable_unprepare(priv->clk); + + return 0; +} +#else +#define bcmgenet_suspend NULL +#define bcmgenet_suspend_noirq NULL +#define bcmgenet_resume NULL +#define bcmgenet_resume_noirq NULL +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops bcmgenet_pm_ops = { + .suspend = bcmgenet_suspend, + .suspend_noirq = bcmgenet_suspend_noirq, + .resume = bcmgenet_resume, + .resume_noirq = bcmgenet_resume_noirq, +}; + +static const struct acpi_device_id genet_acpi_match[] = { + { "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, genet_acpi_match); + +static struct platform_driver bcmgenet_driver = { + .probe = bcmgenet_probe, + .remove = bcmgenet_remove, + .shutdown = bcmgenet_shutdown, + .driver = { + .name = "bcmgenet", + .of_match_table = bcmgenet_match, + .pm = &bcmgenet_pm_ops, + .acpi_match_table = genet_acpi_match, + }, +}; +module_platform_driver(bcmgenet_driver); + +MODULE_AUTHOR("Broadcom Corporation"); +MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver"); +MODULE_ALIAS("platform:bcmgenet"); +MODULE_LICENSE("GPL"); +MODULE_SOFTDEP("pre: mdio-bcm-unimac"); diff --git a/devices/genet/bcmgenet-6.1-orig.h b/devices/genet/bcmgenet-6.1-orig.h new file mode 100644 index 00000000..1985c0ec --- /dev/null +++ b/devices/genet/bcmgenet-6.1-orig.h @@ -0,0 +1,709 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014-2020 Broadcom + */ + +#ifndef __BCMGENET_H__ +#define __BCMGENET_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../unimac.h" + +/* total number of Buffer Descriptors, same for Rx/Tx */ +#define TOTAL_DESC 256 + +/* which ring is descriptor based */ +#define DESC_INDEX 16 + +/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528. 
+ * 1536 is multiple of 256 bytes + */ +#define ENET_BRCM_TAG_LEN 6 +#define ENET_PAD 8 +#define ENET_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \ + ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD) +#define DMA_MAX_BURST_LENGTH 0x10 + +/* misc. configuration */ +#define MAX_NUM_OF_FS_RULES 16 +#define CLEAR_ALL_HFB 0xFF +#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4) +#define DMA_FC_THRESH_LO 5 + +/* 64B receive/transmit status block */ +struct status_64 { + u32 length_status; /* length and peripheral status */ + u32 ext_status; /* Extended status*/ + u32 rx_csum; /* partial rx checksum */ + u32 unused1[9]; /* unused */ + u32 tx_csum_info; /* Tx checksum info. */ + u32 unused2[3]; /* unused */ +}; + +/* Rx status bits */ +#define STATUS_RX_EXT_MASK 0x1FFFFF +#define STATUS_RX_CSUM_MASK 0xFFFF +#define STATUS_RX_CSUM_OK 0x10000 +#define STATUS_RX_CSUM_FR 0x20000 +#define STATUS_RX_PROTO_TCP 0 +#define STATUS_RX_PROTO_UDP 1 +#define STATUS_RX_PROTO_ICMP 2 +#define STATUS_RX_PROTO_OTHER 3 +#define STATUS_RX_PROTO_MASK 3 +#define STATUS_RX_PROTO_SHIFT 18 +#define STATUS_FILTER_INDEX_MASK 0xFFFF +/* Tx status bits */ +#define STATUS_TX_CSUM_START_MASK 0X7FFF +#define STATUS_TX_CSUM_START_SHIFT 16 +#define STATUS_TX_CSUM_PROTO_UDP 0x8000 +#define STATUS_TX_CSUM_OFFSET_MASK 0x7FFF +#define STATUS_TX_CSUM_LV 0x80000000 + +/* DMA Descriptor */ +#define DMA_DESC_LENGTH_STATUS 0x00 /* in bytes of data in buffer */ +#define DMA_DESC_ADDRESS_LO 0x04 /* lower bits of PA */ +#define DMA_DESC_ADDRESS_HI 0x08 /* upper 32 bits of PA, GENETv4+ */ + +/* Rx/Tx common counter group */ +struct bcmgenet_pkt_counters { + u32 cnt_64; /* RO Received/Transmited 64 bytes packet */ + u32 cnt_127; /* RO Rx/Tx 127 bytes packet */ + u32 cnt_255; /* RO Rx/Tx 65-255 bytes packet */ + u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */ + u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */ + u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */ + u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */ + u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet*/ + u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet*/ + u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet*/ +}; + +/* RSV, Receive Status Vector */ +struct bcmgenet_rx_counters { + struct bcmgenet_pkt_counters pkt_cnt; + u32 pkt; /* RO (0x428) Received pkt count*/ + u32 bytes; /* RO Received byte count */ + u32 mca; /* RO # of Received multicast pkt */ + u32 bca; /* RO # of Receive broadcast pkt */ + u32 fcs; /* RO # of Received FCS error */ + u32 cf; /* RO # of Received control frame pkt*/ + u32 pf; /* RO # of Received pause frame pkt */ + u32 uo; /* RO # of unknown op code pkt */ + u32 aln; /* RO # of alignment error count */ + u32 flr; /* RO # of frame length out of range count */ + u32 cde; /* RO # of code error pkt */ + u32 fcr; /* RO # of carrier sense error pkt */ + u32 ovr; /* RO # of oversize pkt*/ + u32 jbr; /* RO # of jabber count */ + u32 mtue; /* RO # of MTU error pkt*/ + u32 pok; /* RO # of Received good pkt */ + u32 uc; /* RO # of unicast pkt */ + u32 ppp; /* RO # of PPP pkt */ + u32 rcrc; /* RO (0x470),# of CRC match pkt */ +}; + +/* TSV, Transmit Status Vector */ +struct bcmgenet_tx_counters { + struct bcmgenet_pkt_counters pkt_cnt; + u32 pkts; /* RO (0x4a8) Transmited pkt */ + u32 mca; /* RO # of xmited multicast pkt */ + u32 bca; /* RO # of xmited broadcast pkt */ + u32 pf; /* RO # of xmited pause frame count */ + u32 cf; /* RO # of xmited control frame count */ + u32 fcs; /* RO # of xmited FCS error count */ + u32 ovr; /* RO # of xmited oversize pkt */ + u32 drf; /* 
RO # of xmited deferral pkt */ + u32 edf; /* RO # of xmited Excessive deferral pkt*/ + u32 scl; /* RO # of xmited single collision pkt */ + u32 mcl; /* RO # of xmited multiple collision pkt*/ + u32 lcl; /* RO # of xmited late collision pkt */ + u32 ecl; /* RO # of xmited excessive collision pkt*/ + u32 frg; /* RO # of xmited fragments pkt*/ + u32 ncl; /* RO # of xmited total collision count */ + u32 jbr; /* RO # of xmited jabber count*/ + u32 bytes; /* RO # of xmited byte count */ + u32 pok; /* RO # of xmited good pkt */ + u32 uc; /* RO (0x0x4f0)# of xmited unitcast pkt */ +}; + +struct bcmgenet_mib_counters { + struct bcmgenet_rx_counters rx; + struct bcmgenet_tx_counters tx; + u32 rx_runt_cnt; + u32 rx_runt_fcs; + u32 rx_runt_fcs_align; + u32 rx_runt_bytes; + u32 rbuf_ovflow_cnt; + u32 rbuf_err_cnt; + u32 mdf_err_cnt; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 tx_dma_failed; + u32 tx_realloc_tsb; + u32 tx_realloc_tsb_failed; +}; + +#define UMAC_MIB_START 0x400 + +#define UMAC_MDIO_CMD 0x614 +#define MDIO_START_BUSY (1 << 29) +#define MDIO_READ_FAIL (1 << 28) +#define MDIO_RD (2 << 26) +#define MDIO_WR (1 << 26) +#define MDIO_PMD_SHIFT 21 +#define MDIO_PMD_MASK 0x1F +#define MDIO_REG_SHIFT 16 +#define MDIO_REG_MASK 0x1F + +#define UMAC_RBUF_OVFL_CNT_V1 0x61C +#define RBUF_OVFL_CNT_V2 0x80 +#define RBUF_OVFL_CNT_V3PLUS 0x94 + +#define UMAC_MPD_CTRL 0x620 +#define MPD_EN (1 << 0) +#define MPD_PW_EN (1 << 27) +#define MPD_MSEQ_LEN_SHIFT 16 +#define MPD_MSEQ_LEN_MASK 0xFF + +#define UMAC_MPD_PW_MS 0x624 +#define UMAC_MPD_PW_LS 0x628 +#define UMAC_RBUF_ERR_CNT_V1 0x634 +#define RBUF_ERR_CNT_V2 0x84 +#define RBUF_ERR_CNT_V3PLUS 0x98 +#define UMAC_MDF_ERR_CNT 0x638 +#define UMAC_MDF_CTRL 0x650 +#define UMAC_MDF_ADDR 0x654 +#define UMAC_MIB_CTRL 0x580 +#define MIB_RESET_RX (1 << 0) +#define MIB_RESET_RUNT (1 << 1) +#define MIB_RESET_TX (1 << 2) + +#define RBUF_CTRL 0x00 +#define RBUF_64B_EN (1 << 0) +#define RBUF_ALIGN_2B (1 << 1) +#define RBUF_BAD_DIS (1 << 2) + +#define RBUF_STATUS 0x0C +#define RBUF_STATUS_WOL (1 << 0) +#define RBUF_STATUS_MPD_INTR_ACTIVE (1 << 1) +#define RBUF_STATUS_ACPI_INTR_ACTIVE (1 << 2) + +#define RBUF_CHK_CTRL 0x14 +#define RBUF_RXCHK_EN (1 << 0) +#define RBUF_SKIP_FCS (1 << 4) +#define RBUF_L3_PARSE_DIS (1 << 5) + +#define RBUF_ENERGY_CTRL 0x9c +#define RBUF_EEE_EN (1 << 0) +#define RBUF_PM_EN (1 << 1) + +#define RBUF_TBUF_SIZE_CTRL 0xb4 + +#define RBUF_HFB_CTRL_V1 0x38 +#define RBUF_HFB_FILTER_EN_SHIFT 16 +#define RBUF_HFB_FILTER_EN_MASK 0xffff0000 +#define RBUF_HFB_EN (1 << 0) +#define RBUF_HFB_256B (1 << 1) +#define RBUF_ACPI_EN (1 << 2) + +#define RBUF_HFB_LEN_V1 0x3C +#define RBUF_FLTR_LEN_MASK 0xFF +#define RBUF_FLTR_LEN_SHIFT 8 + +#define TBUF_CTRL 0x00 +#define TBUF_64B_EN (1 << 0) +#define TBUF_BP_MC 0x0C +#define TBUF_ENERGY_CTRL 0x14 +#define TBUF_EEE_EN (1 << 0) +#define TBUF_PM_EN (1 << 1) + +#define TBUF_CTRL_V1 0x80 +#define TBUF_BP_MC_V1 0xA0 + +#define HFB_CTRL 0x00 +#define HFB_FLT_ENABLE_V3PLUS 0x04 +#define HFB_FLT_LEN_V2 0x04 +#define HFB_FLT_LEN_V3PLUS 0x1C + +/* uniMac intrl2 registers */ +#define INTRL2_CPU_STAT 0x00 +#define INTRL2_CPU_SET 0x04 +#define INTRL2_CPU_CLEAR 0x08 +#define INTRL2_CPU_MASK_STATUS 0x0C +#define INTRL2_CPU_MASK_SET 0x10 +#define INTRL2_CPU_MASK_CLEAR 0x14 + +/* INTRL2 instance 0 definitions */ +#define UMAC_IRQ_SCB (1 << 0) +#define UMAC_IRQ_EPHY (1 << 1) +#define UMAC_IRQ_PHY_DET_R (1 << 2) +#define UMAC_IRQ_PHY_DET_F (1 << 3) +#define UMAC_IRQ_LINK_UP (1 << 4) +#define UMAC_IRQ_LINK_DOWN (1 << 5) +#define 
UMAC_IRQ_LINK_EVENT (UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN) +#define UMAC_IRQ_UMAC (1 << 6) +#define UMAC_IRQ_UMAC_TSV (1 << 7) +#define UMAC_IRQ_TBUF_UNDERRUN (1 << 8) +#define UMAC_IRQ_RBUF_OVERFLOW (1 << 9) +#define UMAC_IRQ_HFB_SM (1 << 10) +#define UMAC_IRQ_HFB_MM (1 << 11) +#define UMAC_IRQ_MPD_R (1 << 12) +#define UMAC_IRQ_WAKE_EVENT (UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM | \ + UMAC_IRQ_MPD_R) +#define UMAC_IRQ_RXDMA_MBDONE (1 << 13) +#define UMAC_IRQ_RXDMA_PDONE (1 << 14) +#define UMAC_IRQ_RXDMA_BDONE (1 << 15) +#define UMAC_IRQ_RXDMA_DONE UMAC_IRQ_RXDMA_MBDONE +#define UMAC_IRQ_TXDMA_MBDONE (1 << 16) +#define UMAC_IRQ_TXDMA_PDONE (1 << 17) +#define UMAC_IRQ_TXDMA_BDONE (1 << 18) +#define UMAC_IRQ_TXDMA_DONE UMAC_IRQ_TXDMA_MBDONE + +/* Only valid for GENETv3+ */ +#define UMAC_IRQ_MDIO_DONE (1 << 23) +#define UMAC_IRQ_MDIO_ERROR (1 << 24) + +/* INTRL2 instance 1 definitions */ +#define UMAC_IRQ1_TX_INTR_MASK 0xFFFF +#define UMAC_IRQ1_RX_INTR_MASK 0xFFFF +#define UMAC_IRQ1_RX_INTR_SHIFT 16 + +/* Register block offsets */ +#define GENET_SYS_OFF 0x0000 +#define GENET_GR_BRIDGE_OFF 0x0040 +#define GENET_EXT_OFF 0x0080 +#define GENET_INTRL2_0_OFF 0x0200 +#define GENET_INTRL2_1_OFF 0x0240 +#define GENET_RBUF_OFF 0x0300 +#define GENET_UMAC_OFF 0x0800 + +/* SYS block offsets and register definitions */ +#define SYS_REV_CTRL 0x00 +#define SYS_PORT_CTRL 0x04 +#define PORT_MODE_INT_EPHY 0 +#define PORT_MODE_INT_GPHY 1 +#define PORT_MODE_EXT_EPHY 2 +#define PORT_MODE_EXT_GPHY 3 +#define PORT_MODE_EXT_RVMII_25 (4 | BIT(4)) +#define PORT_MODE_EXT_RVMII_50 4 +#define LED_ACT_SOURCE_MAC (1 << 9) + +#define SYS_RBUF_FLUSH_CTRL 0x08 +#define SYS_TBUF_FLUSH_CTRL 0x0C +#define RBUF_FLUSH_CTRL_V1 0x04 + +/* Ext block register offsets and definitions */ +#define EXT_EXT_PWR_MGMT 0x00 +#define EXT_PWR_DOWN_BIAS (1 << 0) +#define EXT_PWR_DOWN_DLL (1 << 1) +#define EXT_PWR_DOWN_PHY (1 << 2) +#define EXT_PWR_DN_EN_LD (1 << 3) +#define EXT_ENERGY_DET (1 << 4) +#define EXT_IDDQ_FROM_PHY (1 << 5) +#define EXT_IDDQ_GLBL_PWR (1 << 7) +#define EXT_PHY_RESET (1 << 8) +#define EXT_ENERGY_DET_MASK (1 << 12) +#define EXT_PWR_DOWN_PHY_TX (1 << 16) +#define EXT_PWR_DOWN_PHY_RX (1 << 17) +#define EXT_PWR_DOWN_PHY_SD (1 << 18) +#define EXT_PWR_DOWN_PHY_RD (1 << 19) +#define EXT_PWR_DOWN_PHY_EN (1 << 20) + +#define EXT_RGMII_OOB_CTRL 0x0C +#define RGMII_MODE_EN_V123 (1 << 0) +#define RGMII_LINK (1 << 4) +#define OOB_DISABLE (1 << 5) +#define RGMII_MODE_EN (1 << 6) +#define ID_MODE_DIS (1 << 16) + +#define EXT_GPHY_CTRL 0x1C +#define EXT_CFG_IDDQ_BIAS (1 << 0) +#define EXT_CFG_PWR_DOWN (1 << 1) +#define EXT_CK25_DIS (1 << 4) +#define EXT_CFG_IDDQ_GLOBAL_PWR (1 << 3) +#define EXT_GPHY_RESET (1 << 5) + +/* DMA rings size */ +#define DMA_RING_SIZE (0x40) +#define DMA_RINGS_SIZE (DMA_RING_SIZE * (DESC_INDEX + 1)) + +/* DMA registers common definitions */ +#define DMA_RW_POINTER_MASK 0x1FF +#define DMA_P_INDEX_DISCARD_CNT_MASK 0xFFFF +#define DMA_P_INDEX_DISCARD_CNT_SHIFT 16 +#define DMA_BUFFER_DONE_CNT_MASK 0xFFFF +#define DMA_BUFFER_DONE_CNT_SHIFT 16 +#define DMA_P_INDEX_MASK 0xFFFF +#define DMA_C_INDEX_MASK 0xFFFF + +/* DMA ring size register */ +#define DMA_RING_SIZE_MASK 0xFFFF +#define DMA_RING_SIZE_SHIFT 16 +#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF + +/* DMA interrupt threshold register */ +#define DMA_INTR_THRESHOLD_MASK 0x01FF + +/* DMA XON/XOFF register */ +#define DMA_XON_THREHOLD_MASK 0xFFFF +#define DMA_XOFF_THRESHOLD_MASK 0xFFFF +#define DMA_XOFF_THRESHOLD_SHIFT 16 + +/* DMA flow period register */ +#define 
DMA_FLOW_PERIOD_MASK 0xFFFF +#define DMA_MAX_PKT_SIZE_MASK 0xFFFF +#define DMA_MAX_PKT_SIZE_SHIFT 16 + + +/* DMA control register */ +#define DMA_EN (1 << 0) +#define DMA_RING_BUF_EN_SHIFT 0x01 +#define DMA_RING_BUF_EN_MASK 0xFFFF +#define DMA_TSB_SWAP_EN (1 << 20) + +/* DMA status register */ +#define DMA_DISABLED (1 << 0) +#define DMA_DESC_RAM_INIT_BUSY (1 << 1) + +/* DMA SCB burst size register */ +#define DMA_SCB_BURST_SIZE_MASK 0x1F + +/* DMA activity vector register */ +#define DMA_ACTIVITY_VECTOR_MASK 0x1FFFF + +/* DMA backpressure mask register */ +#define DMA_BACKPRESSURE_MASK 0x1FFFF +#define DMA_PFC_ENABLE (1 << 31) + +/* DMA backpressure status register */ +#define DMA_BACKPRESSURE_STATUS_MASK 0x1FFFF + +/* DMA override register */ +#define DMA_LITTLE_ENDIAN_MODE (1 << 0) +#define DMA_REGISTER_MODE (1 << 1) + +/* DMA timeout register */ +#define DMA_TIMEOUT_MASK 0xFFFF +#define DMA_TIMEOUT_VAL 5000 /* micro seconds */ + +/* TDMA rate limiting control register */ +#define DMA_RATE_LIMIT_EN_MASK 0xFFFF + +/* TDMA arbitration control register */ +#define DMA_ARBITER_MODE_MASK 0x03 +#define DMA_RING_BUF_PRIORITY_MASK 0x1F +#define DMA_RING_BUF_PRIORITY_SHIFT 5 +#define DMA_PRIO_REG_INDEX(q) ((q) / 6) +#define DMA_PRIO_REG_SHIFT(q) (((q) % 6) * DMA_RING_BUF_PRIORITY_SHIFT) +#define DMA_RATE_ADJ_MASK 0xFF + +/* Tx/Rx Dma Descriptor common bits*/ +#define DMA_BUFLENGTH_MASK 0x0fff +#define DMA_BUFLENGTH_SHIFT 16 +#define DMA_OWN 0x8000 +#define DMA_EOP 0x4000 +#define DMA_SOP 0x2000 +#define DMA_WRAP 0x1000 +/* Tx specific Dma descriptor bits */ +#define DMA_TX_UNDERRUN 0x0200 +#define DMA_TX_APPEND_CRC 0x0040 +#define DMA_TX_OW_CRC 0x0020 +#define DMA_TX_DO_CSUM 0x0010 +#define DMA_TX_QTAG_SHIFT 7 + +/* Rx Specific Dma descriptor bits */ +#define DMA_RX_CHK_V3PLUS 0x8000 +#define DMA_RX_CHK_V12 0x1000 +#define DMA_RX_BRDCAST 0x0040 +#define DMA_RX_MULT 0x0020 +#define DMA_RX_LG 0x0010 +#define DMA_RX_NO 0x0008 +#define DMA_RX_RXER 0x0004 +#define DMA_RX_CRC_ERROR 0x0002 +#define DMA_RX_OV 0x0001 +#define DMA_RX_FI_MASK 0x001F +#define DMA_RX_FI_SHIFT 0x0007 +#define DMA_DESC_ALLOC_MASK 0x00FF + +#define DMA_ARBITER_RR 0x00 +#define DMA_ARBITER_WRR 0x01 +#define DMA_ARBITER_SP 0x02 + +struct enet_cb { + struct sk_buff *skb; + void __iomem *bd_addr; + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +/* power management mode */ +enum bcmgenet_power_mode { + GENET_POWER_CABLE_SENSE = 0, + GENET_POWER_PASSIVE, + GENET_POWER_WOL_MAGIC, +}; + +struct bcmgenet_priv; + +/* We support both runtime GENET detection and compile-time + * to optimize code-paths for a given hardware + */ +enum bcmgenet_version { + GENET_V1 = 1, + GENET_V2, + GENET_V3, + GENET_V4, + GENET_V5 +}; + +#define GENET_IS_V1(p) ((p)->version == GENET_V1) +#define GENET_IS_V2(p) ((p)->version == GENET_V2) +#define GENET_IS_V3(p) ((p)->version == GENET_V3) +#define GENET_IS_V4(p) ((p)->version == GENET_V4) +#define GENET_IS_V5(p) ((p)->version == GENET_V5) + +/* Hardware flags */ +#define GENET_HAS_40BITS (1 << 0) +#define GENET_HAS_EXT (1 << 1) +#define GENET_HAS_MDIO_INTR (1 << 2) +#define GENET_HAS_MOCA_LINK_DET (1 << 3) + +/* BCMGENET hardware parameters, keep this structure nicely aligned + * since it is going to be used in hot paths + */ +struct bcmgenet_hw_params { + u8 tx_queues; + u8 tx_bds_per_q; + u8 rx_queues; + u8 rx_bds_per_q; + u8 bp_in_en_shift; + u32 bp_in_mask; + u8 hfb_filter_cnt; + u8 hfb_filter_size; + u8 qtag_mask; + u16 tbuf_offset; + u32 hfb_offset; + u32 hfb_reg_offset; + u32 
rdma_offset; + u32 tdma_offset; + u32 words_per_bd; + u32 flags; +}; + +struct bcmgenet_skb_cb { + struct enet_cb *first_cb; /* First control block of SKB */ + struct enet_cb *last_cb; /* Last control block of SKB */ + unsigned int bytes_sent; /* bytes on the wire (no TSB) */ +}; + +#define GENET_CB(skb) ((struct bcmgenet_skb_cb *)((skb)->cb)) + +struct bcmgenet_tx_ring { + spinlock_t lock; /* ring lock */ + struct napi_struct napi; /* NAPI per tx queue */ + unsigned long packets; + unsigned long bytes; + unsigned int index; /* ring index */ + unsigned int queue; /* queue index */ + struct enet_cb *cbs; /* tx ring buffer control block*/ + unsigned int size; /* size of each tx ring */ + unsigned int clean_ptr; /* Tx ring clean pointer */ + unsigned int c_index; /* last consumer index of each ring*/ + unsigned int free_bds; /* # of free bds for each ring */ + unsigned int write_ptr; /* Tx ring write pointer SW copy */ + unsigned int prod_index; /* Tx ring producer index SW copy */ + unsigned int cb_ptr; /* Tx ring initial CB ptr */ + unsigned int end_ptr; /* Tx ring end CB ptr */ + void (*int_enable)(struct bcmgenet_tx_ring *); + void (*int_disable)(struct bcmgenet_tx_ring *); + struct bcmgenet_priv *priv; +}; + +struct bcmgenet_net_dim { + u16 use_dim; + u16 event_ctr; + unsigned long packets; + unsigned long bytes; + struct dim dim; +}; + +struct bcmgenet_rx_ring { + struct napi_struct napi; /* Rx NAPI struct */ + unsigned long bytes; + unsigned long packets; + unsigned long errors; + unsigned long dropped; + unsigned int index; /* Rx ring index */ + struct enet_cb *cbs; /* Rx ring buffer control block */ + unsigned int size; /* Rx ring size */ + unsigned int c_index; /* Rx last consumer index */ + unsigned int read_ptr; /* Rx ring read pointer */ + unsigned int cb_ptr; /* Rx ring initial CB ptr */ + unsigned int end_ptr; /* Rx ring end CB ptr */ + unsigned int old_discards; + struct bcmgenet_net_dim dim; + u32 rx_max_coalesced_frames; + u32 rx_coalesce_usecs; + void (*int_enable)(struct bcmgenet_rx_ring *); + void (*int_disable)(struct bcmgenet_rx_ring *); + struct bcmgenet_priv *priv; +}; + +enum bcmgenet_rxnfc_state { + BCMGENET_RXNFC_STATE_UNUSED = 0, + BCMGENET_RXNFC_STATE_DISABLED, + BCMGENET_RXNFC_STATE_ENABLED +}; + +struct bcmgenet_rxnfc_rule { + struct list_head list; + struct ethtool_rx_flow_spec fs; + enum bcmgenet_rxnfc_state state; +}; + +/* device context */ +struct bcmgenet_priv { + void __iomem *base; + enum bcmgenet_version version; + struct net_device *dev; + + /* transmit variables */ + void __iomem *tx_bds; + struct enet_cb *tx_cbs; + unsigned int num_tx_bds; + + struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1]; + + /* receive variables */ + void __iomem *rx_bds; + struct enet_cb *rx_cbs; + unsigned int num_rx_bds; + unsigned int rx_buf_len; + struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES]; + struct list_head rxnfc_list; + + struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1]; + + /* other misc variables */ + struct bcmgenet_hw_params *hw_params; + unsigned autoneg_pause:1; + unsigned tx_pause:1; + unsigned rx_pause:1; + + /* MDIO bus variables */ + wait_queue_head_t wq; + bool internal_phy; + struct device_node *phy_dn; + struct device_node *mdio_dn; + struct mii_bus *mii_bus; + u16 gphy_rev; + struct clk *clk_eee; + bool clk_eee_enabled; + + /* PHY device variables */ + phy_interface_t phy_interface; + int phy_addr; + int ext_phy; + bool ephy_16nm; + + /* Interrupt variables */ + struct work_struct bcmgenet_irq_work; + int irq0; + int irq1; + int wol_irq; + 
bool wol_irq_disabled; + + /* shared status */ + spinlock_t lock; + unsigned int irq0_stat; + + /* HW descriptors/checksum variables */ + bool crc_fwd_en; + + u32 dma_max_burst_length; + + u32 msg_enable; + + struct clk *clk; + struct platform_device *pdev; + struct platform_device *mii_pdev; + + /* WOL */ + struct clk *clk_wol; + u32 wolopts; + u8 sopass[SOPASS_MAX]; + bool wol_active; + + struct bcmgenet_mib_counters mib; + + struct ethtool_eee eee; +}; + +#define GENET_IO_MACRO(name, offset) \ +static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \ + u32 off) \ +{ \ + /* MIPS chips strapped for BE will automagically configure the \ + * peripheral registers for CPU-native byte order. \ + */ \ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \ + return __raw_readl(priv->base + offset + off); \ + else \ + return readl_relaxed(priv->base + offset + off); \ +} \ +static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \ + u32 val, u32 off) \ +{ \ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \ + __raw_writel(val, priv->base + offset + off); \ + else \ + writel_relaxed(val, priv->base + offset + off); \ +} + +GENET_IO_MACRO(ext, GENET_EXT_OFF); +GENET_IO_MACRO(umac, GENET_UMAC_OFF); +GENET_IO_MACRO(sys, GENET_SYS_OFF); + +/* interrupt l2 registers accessors */ +GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF); +GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF); + +/* HFB register accessors */ +GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset); + +/* GENET v2+ HFB control and filter len helpers */ +GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset); + +/* RBUF register accessors */ +GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); + +/* MDIO routines */ +int bcmgenet_mii_init(struct net_device *dev); +int bcmgenet_mii_config(struct net_device *dev, bool init); +int bcmgenet_mii_probe(struct net_device *dev); +void bcmgenet_mii_exit(struct net_device *dev); +void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx); +void bcmgenet_phy_power_set(struct net_device *dev, bool enable); +void bcmgenet_mii_setup(struct net_device *dev); + +/* Wake-on-LAN routines */ +void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol); +int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol); +int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode); +void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode); + +void bcmgenet_eee_enable_set(struct net_device *dev, bool enable, + bool tx_lpi_enabled); + +#endif /* __BCMGENET_H__ */ diff --git a/devices/genet/bcmgenet_wol-6.1-ethercat.c b/devices/genet/bcmgenet_wol-6.1-ethercat.c new file mode 100644 index 00000000..7a498121 --- /dev/null +++ b/devices/genet/bcmgenet_wol-6.1-ethercat.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support + * + * Copyright (c) 2014-2020 Broadcom + */ + +#define pr_fmt(fmt) "bcmgenet_wol: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcmgenet-6.1-ethercat.h" + +/* ethtool function - get WOL (Wake on LAN) settings, Only Magic Packet + * Detection is supported through ethtool + */ +void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmgenet_priv *priv = 
netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) { + wol->supported = 0; + wol->wolopts = 0; + return; + } + + wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; + wol->wolopts = priv->wolopts; + memset(wol->sopass, 0, sizeof(wol->sopass)); + + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); +} + +/* ethtool function - set WOL (Wake on LAN) settings. + * Only for magic packet detection mode. + */ +int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) + return -ENOTSUPP; + + if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EINVAL; + + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); + + /* Flag the device and relevant IRQ as wakeup capable */ + if (wol->wolopts) { + device_set_wakeup_enable(kdev, 1); + /* Avoid unbalanced enable_irq_wake calls */ + if (priv->wol_irq_disabled) + enable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = false; + } else { + device_set_wakeup_enable(kdev, 0); + /* Avoid unbalanced disable_irq_wake calls */ + if (!priv->wol_irq_disabled) + disable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = true; + } + + priv->wolopts = wol->wolopts; + + return 0; +} + +static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv) +{ + struct net_device *dev = priv->dev; + int retries = 0; + + while (!(bcmgenet_rbuf_readl(priv, RBUF_STATUS) + & RBUF_STATUS_WOL)) { + retries++; + if (retries > 5) { + netdev_crit(dev, "polling wol mode timeout\n"); + return -ETIMEDOUT; + } + mdelay(1); + } + + return retries; +} + +static void bcmgenet_set_mpd_password(struct bcmgenet_priv *priv) +{ + bcmgenet_umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), + UMAC_MPD_PW_MS); + bcmgenet_umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), + UMAC_MPD_PW_LS); +} + +int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + struct net_device *dev = priv->dev; + struct bcmgenet_rxnfc_rule *rule; + u32 reg, hfb_ctrl_reg, hfb_enable = 0; + int retries = 0; + + if (mode != GENET_POWER_WOL_MAGIC) { + netif_err(priv, wol, dev, "unsupported mode: %d\n", mode); + return -EINVAL; + } + + /* Can't suspend with WoL if MAC is still in reset */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) + reg &= ~CMD_SW_RESET; + + /* disable RX */ + reg &= ~CMD_RX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + mdelay(10); + + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + reg |= MPD_EN; + if (priv->wolopts & WAKE_MAGICSECURE) { + bcmgenet_set_mpd_password(priv); + reg |= MPD_PW_EN; + } + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } + + hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (priv->wolopts & WAKE_FILTER) { + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE) + hfb_enable |= (1 << rule->fs.location); + reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } + + /* Do not leave UniMAC in MPD mode only */ + retries = bcmgenet_poll_wol_status(priv); + if (retries < 0) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + bcmgenet_hfb_reg_writel(priv, 
hfb_ctrl_reg, HFB_CTRL); + return retries; + } + + netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n", + retries); + + clk_prepare_enable(priv->clk_wol); + priv->wol_active = 1; + + if (hfb_enable) { + bcmgenet_hfb_reg_writel(priv, hfb_enable, + HFB_FLT_ENABLE_V3PLUS + 4); + hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN; + bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL); + } + + /* Enable CRC forward */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + priv->crc_fwd_en = 1; + reg |= CMD_CRC_FWD; + + /* Receiver must be enabled for WOL MP detection */ + reg |= CMD_RX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + reg = UMAC_IRQ_MPD_R; + if (hfb_enable) + reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM; + + bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR); + + return 0; +} + +void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + u32 reg; + + if (mode != GENET_POWER_WOL_MAGIC) { + netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode); + return; + } + + if (!priv->wol_active) + return; /* failed to suspend so skip the rest */ + + priv->wol_active = 0; + clk_disable_unprepare(priv->clk_wol); + priv->crc_fwd_en = 0; + + /* Disable Magic Packet Detection */ + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + if (!(reg & MPD_EN)) + return; /* already reset so skip the rest */ + reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } + + /* Disable WAKE_FILTER Detection */ + if (priv->wolopts & WAKE_FILTER) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (!(reg & RBUF_ACPI_EN)) + return; /* already reset so skip the rest */ + reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } + + /* Disable CRC Forward */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~CMD_CRC_FWD; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); +} diff --git a/devices/genet/bcmgenet_wol-6.1-orig.c b/devices/genet/bcmgenet_wol-6.1-orig.c new file mode 100644 index 00000000..f55d9d9c --- /dev/null +++ b/devices/genet/bcmgenet_wol-6.1-orig.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support + * + * Copyright (c) 2014-2020 Broadcom + */ + +#define pr_fmt(fmt) "bcmgenet_wol: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcmgenet.h" + +/* ethtool function - get WOL (Wake on LAN) settings, Only Magic Packet + * Detection is supported through ethtool + */ +void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) { + wol->supported = 0; + wol->wolopts = 0; + return; + } + + wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; + wol->wolopts = priv->wolopts; + memset(wol->sopass, 0, sizeof(wol->sopass)); + + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); +} + +/* ethtool function - set WOL (Wake on LAN) settings. + * Only for magic packet detection mode. 
+ */ +int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) + return -ENOTSUPP; + + if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EINVAL; + + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); + + /* Flag the device and relevant IRQ as wakeup capable */ + if (wol->wolopts) { + device_set_wakeup_enable(kdev, 1); + /* Avoid unbalanced enable_irq_wake calls */ + if (priv->wol_irq_disabled) + enable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = false; + } else { + device_set_wakeup_enable(kdev, 0); + /* Avoid unbalanced disable_irq_wake calls */ + if (!priv->wol_irq_disabled) + disable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = true; + } + + priv->wolopts = wol->wolopts; + + return 0; +} + +static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv) +{ + struct net_device *dev = priv->dev; + int retries = 0; + + while (!(bcmgenet_rbuf_readl(priv, RBUF_STATUS) + & RBUF_STATUS_WOL)) { + retries++; + if (retries > 5) { + netdev_crit(dev, "polling wol mode timeout\n"); + return -ETIMEDOUT; + } + mdelay(1); + } + + return retries; +} + +static void bcmgenet_set_mpd_password(struct bcmgenet_priv *priv) +{ + bcmgenet_umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), + UMAC_MPD_PW_MS); + bcmgenet_umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), + UMAC_MPD_PW_LS); +} + +int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + struct net_device *dev = priv->dev; + struct bcmgenet_rxnfc_rule *rule; + u32 reg, hfb_ctrl_reg, hfb_enable = 0; + int retries = 0; + + if (mode != GENET_POWER_WOL_MAGIC) { + netif_err(priv, wol, dev, "unsupported mode: %d\n", mode); + return -EINVAL; + } + + /* Can't suspend with WoL if MAC is still in reset */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) + reg &= ~CMD_SW_RESET; + + /* disable RX */ + reg &= ~CMD_RX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + mdelay(10); + + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + reg |= MPD_EN; + if (priv->wolopts & WAKE_MAGICSECURE) { + bcmgenet_set_mpd_password(priv); + reg |= MPD_PW_EN; + } + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } + + hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (priv->wolopts & WAKE_FILTER) { + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE) + hfb_enable |= (1 << rule->fs.location); + reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } + + /* Do not leave UniMAC in MPD mode only */ + retries = bcmgenet_poll_wol_status(priv); + if (retries < 0) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL); + return retries; + } + + netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n", + retries); + + clk_prepare_enable(priv->clk_wol); + priv->wol_active = 1; + + if (hfb_enable) { + bcmgenet_hfb_reg_writel(priv, hfb_enable, + HFB_FLT_ENABLE_V3PLUS + 4); + hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN; + bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL); + } + + /* Enable CRC forward */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + priv->crc_fwd_en = 1; + reg |= 
CMD_CRC_FWD; + + /* Receiver must be enabled for WOL MP detection */ + reg |= CMD_RX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + reg = UMAC_IRQ_MPD_R; + if (hfb_enable) + reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM; + + bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR); + + return 0; +} + +void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + u32 reg; + + if (mode != GENET_POWER_WOL_MAGIC) { + netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode); + return; + } + + if (!priv->wol_active) + return; /* failed to suspend so skip the rest */ + + priv->wol_active = 0; + clk_disable_unprepare(priv->clk_wol); + priv->crc_fwd_en = 0; + + /* Disable Magic Packet Detection */ + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + if (!(reg & MPD_EN)) + return; /* already reset so skip the rest */ + reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } + + /* Disable WAKE_FILTER Detection */ + if (priv->wolopts & WAKE_FILTER) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (!(reg & RBUF_ACPI_EN)) + return; /* already reset so skip the rest */ + reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } + + /* Disable CRC Forward */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~CMD_CRC_FWD; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); +} diff --git a/devices/genet/bcmmii-6.1-ethercat.c b/devices/genet/bcmmii-6.1-ethercat.c new file mode 100644 index 00000000..36bc8a1e --- /dev/null +++ b/devices/genet/bcmmii-6.1-ethercat.c @@ -0,0 +1,671 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET MDIO routines + * + * Copyright (c) 2014-2017 Broadcom + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcmgenet-6.1-ethercat.h" + +static void bcmgenet_mac_config(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; + u32 reg, cmd_bits = 0; + + /* speed */ + if (phydev->speed == SPEED_1000) + cmd_bits = CMD_SPEED_1000; + else if (phydev->speed == SPEED_100) + cmd_bits = CMD_SPEED_100; + else + cmd_bits = CMD_SPEED_10; + cmd_bits <<= CMD_SPEED_SHIFT; + + /* duplex */ + if (phydev->duplex != DUPLEX_FULL) { + cmd_bits |= CMD_HD_EN | + CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; + } else { + /* pause capability defaults to Symmetric */ + if (priv->autoneg_pause) { + bool tx_pause = 0, rx_pause = 0; + + if (phydev->autoneg) + phy_get_pause(phydev, &tx_pause, &rx_pause); + + if (!tx_pause) + cmd_bits |= CMD_TX_PAUSE_IGNORE; + if (!rx_pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE; + } + + /* Manual override */ + if (!priv->rx_pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE; + if (!priv->tx_pause) + cmd_bits |= CMD_TX_PAUSE_IGNORE; + } + + /* Program UMAC and RGMII block based on established + * link speed, duplex, and pause. The speed set in + * umac->cmd tell RGMII block which clock to use for + * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps). + * Receive clock is provided by the PHY. 
+ */ + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~OOB_DISABLE; + reg |= RGMII_LINK; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | + CMD_HD_EN | + CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); + reg |= cmd_bits; + if (reg & CMD_SW_RESET) { + reg &= ~CMD_SW_RESET; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + udelay(2); + reg |= CMD_TX_EN | CMD_RX_EN; + } + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0; + bcmgenet_eee_enable_set(dev, + priv->eee.eee_enabled && priv->eee.eee_active, + priv->eee.tx_lpi_enabled); +} + +/* setup netdev link state when PHY link status change and + * update UMAC and RGMII block when link up + */ +void bcmgenet_mii_setup(struct net_device *dev) +{ + struct phy_device *phydev = dev->phydev; + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (phydev->link) + bcmgenet_mac_config(dev); + if (priv->ecdev) + ecdev_set_link(priv->ecdev, phydev->link); + phy_print_status(phydev); +} + + +static int bcmgenet_fixed_phy_link_update(struct net_device *dev, + struct fixed_phy_status *status) +{ + struct bcmgenet_priv *priv; + u32 reg; + + if (dev && dev->phydev && status) { + priv = netdev_priv(dev); + reg = bcmgenet_umac_readl(priv, UMAC_MODE); + status->link = !!(reg & MODE_LINK_STATUS); + } + + return 0; +} + +void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx) +{ + struct phy_device *phydev = dev->phydev; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising, rx); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising, + rx | tx); + phy_start_aneg(phydev); + + mutex_lock(&phydev->lock); + if (phydev->link) + bcmgenet_mac_config(dev); + mutex_unlock(&phydev->lock); +} + +void bcmgenet_phy_power_set(struct net_device *dev, bool enable) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg = 0; + + /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */ + if (GENET_IS_V4(priv) || priv->ephy_16nm) { + reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL); + if (enable) { + reg &= ~EXT_CK25_DIS; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | + EXT_CFG_IDDQ_GLOBAL_PWR); + reg |= EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~EXT_GPHY_RESET; + } else { + reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | + EXT_GPHY_RESET | EXT_CFG_IDDQ_GLOBAL_PWR; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + reg |= EXT_CK25_DIS; + } + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + udelay(60); + } else { + mdelay(1); + } +} + +static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) +{ + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + fixed_phy_set_link_update(priv->dev->phydev, + bcmgenet_fixed_phy_link_update); +} + +int bcmgenet_mii_config(struct net_device *dev, bool init) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; + struct device *kdev = &priv->pdev->dev; + const char *phy_name = NULL; + u32 id_mode_dis = 0; + u32 port_ctrl; + u32 reg; + + switch (priv->phy_interface) { + case PHY_INTERFACE_MODE_INTERNAL: + phy_name = "internal PHY"; + fallthrough; + case PHY_INTERFACE_MODE_MOCA: + /* Irrespective of the actually configured PHY speed (100 or + * 1000) GENETv4 only has an internal GPHY so we will just end + * up masking the Gigabit features from what we support, not + * 
switching to the EPHY + */ + if (GENET_IS_V4(priv)) + port_ctrl = PORT_MODE_INT_GPHY; + else + port_ctrl = PORT_MODE_INT_EPHY; + + if (!phy_name) { + phy_name = "MoCA"; + if (!GENET_IS_V5(priv)) + port_ctrl |= LED_ACT_SOURCE_MAC; + bcmgenet_moca_phy_setup(priv); + } + break; + + case PHY_INTERFACE_MODE_MII: + phy_name = "external MII"; + phy_set_max_speed(phydev, SPEED_100); + port_ctrl = PORT_MODE_EXT_EPHY; + break; + + case PHY_INTERFACE_MODE_REVMII: + phy_name = "external RvMII"; + /* of_mdiobus_register took care of reading the 'max-speed' + * PHY property for us, effectively limiting the PHY supported + * capabilities, use that knowledge to also configure the + * Reverse MII interface correctly. + */ + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + dev->phydev->supported)) + port_ctrl = PORT_MODE_EXT_RVMII_50; + else + port_ctrl = PORT_MODE_EXT_RVMII_25; + break; + + case PHY_INTERFACE_MODE_RGMII: + /* RGMII_NO_ID: TXC transitions at the same time as TXD + * (requires PCB or receiver-side delay) + * + * ID is implicitly disabled for 100Mbps (RG)MII operation. + */ + phy_name = "external RGMII (no delay)"; + id_mode_dis = BIT(16); + port_ctrl = PORT_MODE_EXT_GPHY; + break; + + case PHY_INTERFACE_MODE_RGMII_TXID: + /* RGMII_TXID: Add 2ns delay on TXC (90 degree shift) */ + phy_name = "external RGMII (TX delay)"; + port_ctrl = PORT_MODE_EXT_GPHY; + break; + + case PHY_INTERFACE_MODE_RGMII_RXID: + phy_name = "external RGMII (RX delay)"; + port_ctrl = PORT_MODE_EXT_GPHY; + break; + default: + dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface); + return -EINVAL; + } + + bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); + + priv->ext_phy = !priv->internal_phy && + (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); + + /* This is an external PHY (xMII), so we need to enable the RGMII + * block for the interface to work + */ + if (priv->ext_phy) { + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~ID_MODE_DIS; + reg |= id_mode_dis; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + reg |= RGMII_MODE_EN_V123; + else + reg |= RGMII_MODE_EN; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + } + + if (init) + dev_info(kdev, "configuring instance for %s\n", phy_name); + + return 0; +} + +int bcmgenet_mii_probe(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct device_node *dn = kdev->of_node; + phy_interface_t phy_iface = priv->phy_interface; + struct phy_device *phydev; + u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE | + PHY_BRCM_DIS_TXCRXC_NOENRGY | + PHY_BRCM_IDDQ_SUSPEND; + int ret; + + /* Communicate the integrated PHY revision */ + if (priv->internal_phy) + phy_flags = priv->gphy_rev; + + /* This is an ugly quirk but we have not been correctly interpreting + * the phy_interface values and we have done that across different + * drivers, so at least we are consistent in our mistakes. + * + * When the Generic PHY driver is in use either the PHY has been + * strapped or programmed correctly by the boot loader so we should + * stick to our incorrect interpretation since we have validated it. + * + * Now when a dedicated PHY driver is in use, we need to reverse the + * meaning of the phy_interface_mode values to something that the PHY + * driver will interpret and act on such that we have two mistakes + * canceling themselves so to speak. 
We only do this for the two + * modes that GENET driver officially supports on Broadcom STB chips: + * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID. Other + * modes are not *officially* supported with the boot loader and the + * scripted environment generating Device Tree blobs for those + * platforms. + * + * Note that internal PHY, MoCA and fixed-link configurations are not + * affected because they use different phy_interface_t values or the + * Generic PHY driver. + */ + switch (priv->phy_interface) { + case PHY_INTERFACE_MODE_RGMII: + phy_iface = PHY_INTERFACE_MODE_RGMII_ID; + break; + case PHY_INTERFACE_MODE_RGMII_TXID: + phy_iface = PHY_INTERFACE_MODE_RGMII_RXID; + break; + default: + break; + } + + if (dn) { + phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, + phy_flags, phy_iface); + if (!phydev) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } else { + if (has_acpi_companion(kdev)) { + char mdio_bus_id[MII_BUS_ID_SIZE]; + struct mii_bus *unimacbus; + + snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", + UNIMAC_MDIO_DRV_NAME, priv->pdev->id); + + unimacbus = mdio_find_bus(mdio_bus_id); + if (!unimacbus) { + pr_err("Unable to find mii\n"); + return -ENODEV; + } + phydev = phy_find_first(unimacbus); + put_device(&unimacbus->dev); + if (!phydev) { + pr_err("Unable to find PHY\n"); + return -ENODEV; + } + } else { + phydev = dev->phydev; + } + phydev->dev_flags = phy_flags; + + ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, + phy_iface); + if (ret) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } + + /* Configure port multiplexer based on what the probed PHY device since + * reading the 'max-speed' property determines the maximum supported + * PHY speed which is needed for bcmgenet_mii_config() to configure + * things appropriately. + */ + ret = bcmgenet_mii_config(dev, true); + if (ret) { + phy_disconnect(dev->phydev); + return ret; + } + + /* The internal PHY has its link interrupts routed to the + * Ethernet MAC ISRs. On GENETv5 there is a hardware issue + * that prevents the signaling of link UP interrupts when + * the link operates at 10Mbps, so fallback to polling for + * those versions of GENET. 
+ */ + if (priv->internal_phy && !GENET_IS_V5(priv)) + dev->phydev->irq = PHY_MAC_INTERRUPT; + + /* Indicate that the MAC is responsible for PHY PM */ + dev->phydev->mac_managed_pm = true; + + return 0; +} + +static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct device *kdev = &priv->pdev->dev; + char *compat; + + compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); + if (!compat) + return NULL; + + priv->mdio_dn = of_get_compatible_child(dn, compat); + kfree(compat); + if (!priv->mdio_dn) { + dev_err(kdev, "unable to find MDIO bus node\n"); + return NULL; + } + + return priv->mdio_dn; +} + +static void bcmgenet_mii_pdata_init(struct bcmgenet_priv *priv, + struct unimac_mdio_pdata *ppd) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + /* + * Internal or external PHY with MDIO access + */ + if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) + ppd->phy_mask = 1 << pd->phy_address; + else + ppd->phy_mask = 0; + } +} + +static int bcmgenet_mii_wait(void *wait_func_data) +{ + struct bcmgenet_priv *priv = wait_func_data; + + wait_event_timeout(priv->wq, + !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) + & MDIO_START_BUSY), + HZ / 100); + return 0; +} + +static int bcmgenet_mii_register(struct bcmgenet_priv *priv) +{ + struct platform_device *pdev = priv->pdev; + struct bcmgenet_platform_data *pdata = pdev->dev.platform_data; + struct device_node *dn = pdev->dev.of_node; + struct unimac_mdio_pdata ppd; + struct platform_device *ppdev; + struct resource *pres, res; + int id, ret; + + pres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!pres) { + dev_err(&pdev->dev, "Invalid resource\n"); + return -EINVAL; + } + memset(&res, 0, sizeof(res)); + memset(&ppd, 0, sizeof(ppd)); + + ppd.wait_func = bcmgenet_mii_wait; + ppd.wait_func_data = priv; + ppd.bus_name = "bcmgenet MII bus"; + + /* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD + * and is 2 * 32-bits word long, 8 bytes total. + */ + res.start = pres->start + GENET_UMAC_OFF + UMAC_MDIO_CMD; + res.end = res.start + 8; + res.flags = IORESOURCE_MEM; + + if (dn) + id = of_alias_get_id(dn, "eth"); + else + id = pdev->id; + + ppdev = platform_device_alloc(UNIMAC_MDIO_DRV_NAME, id); + if (!ppdev) + return -ENOMEM; + + /* Retain this platform_device pointer for later cleanup */ + priv->mii_pdev = ppdev; + ppdev->dev.parent = &pdev->dev; + if (dn) + ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv); + else if (pdata) + bcmgenet_mii_pdata_init(priv, &ppd); + else + ppd.phy_mask = ~0; + + ret = platform_device_add_resources(ppdev, &res, 1); + if (ret) + goto out; + + ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd)); + if (ret) + goto out; + + ret = platform_device_add(ppdev); + if (ret) + goto out; + + return 0; +out: + platform_device_put(ppdev); + return ret; +} + +static int bcmgenet_phy_interface_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + int phy_mode = device_get_phy_mode(kdev); + + if (phy_mode < 0) { + dev_err(kdev, "invalid PHY mode property\n"); + return phy_mode; + } + + priv->phy_interface = phy_mode; + + /* We need to specifically look up whether this PHY interface is + * internal or not *before* we even try to probe the PHY driver + * over MDIO as we may have shut down the internal PHY for power + * saving purposes. 
+ */ + if (priv->phy_interface == PHY_INTERFACE_MODE_INTERNAL) + priv->internal_phy = true; + + return 0; +} + +static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct phy_device *phydev; + int ret; + + /* Fetch the PHY phandle */ + priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0); + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { + ret = of_phy_register_fixed_link(dn); + if (ret) + return ret; + + priv->phy_dn = of_node_get(dn); + } + + /* Get the link mode */ + ret = bcmgenet_phy_interface_init(priv); + if (ret) + return ret; + + /* Make sure we initialize MoCA PHYs with a link down */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + phydev = of_phy_find_device(dn); + if (phydev) { + phydev->link = 0; + put_device(&phydev->mdio.dev); + } + } + + return 0; +} + +static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + char phy_name[MII_BUS_ID_SIZE + 3]; + char mdio_bus_id[MII_BUS_ID_SIZE]; + struct phy_device *phydev; + + snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", + UNIMAC_MDIO_DRV_NAME, priv->pdev->id); + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, + mdio_bus_id, pd->phy_address); + + /* + * Internal or external PHY with MDIO access + */ + phydev = phy_attach(priv->dev, phy_name, pd->phy_interface); + if (IS_ERR(phydev)) { + dev_err(kdev, "failed to register PHY device\n"); + return PTR_ERR(phydev); + } + } else { + /* + * MoCA port or no MDIO access. + * Use fixed PHY to represent the link layer. 
+ */ + struct fixed_phy_status fphy_status = { + .link = 1, + .speed = pd->phy_speed, + .duplex = pd->phy_duplex, + .pause = 0, + .asym_pause = 0, + }; + + phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + if (!phydev || IS_ERR(phydev)) { + dev_err(kdev, "failed to register fixed PHY device\n"); + return -ENODEV; + } + + /* Make sure we initialize MoCA PHYs with a link down */ + phydev->link = 0; + + } + + priv->phy_interface = pd->phy_interface; + + return 0; +} + +static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + struct device_node *dn = kdev->of_node; + + if (dn) + return bcmgenet_mii_of_init(priv); + else if (has_acpi_companion(kdev)) + return bcmgenet_phy_interface_init(priv); + else + return bcmgenet_mii_pd_init(priv); +} + +int bcmgenet_mii_init(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + + ret = bcmgenet_mii_register(priv); + if (ret) + return ret; + + ret = bcmgenet_mii_bus_init(priv); + if (ret) + goto out; + + return 0; + +out: + bcmgenet_mii_exit(dev); + return ret; +} + +void bcmgenet_mii_exit(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device_node *dn = priv->pdev->dev.of_node; + + if (of_phy_is_fixed_link(dn)) + of_phy_deregister_fixed_link(dn); + of_node_put(priv->phy_dn); + platform_device_unregister(priv->mii_pdev); +} diff --git a/devices/genet/bcmmii-6.1-orig.c b/devices/genet/bcmmii-6.1-orig.c new file mode 100644 index 00000000..bf9e2467 --- /dev/null +++ b/devices/genet/bcmmii-6.1-orig.c @@ -0,0 +1,668 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Broadcom GENET MDIO routines + * + * Copyright (c) 2014-2017 Broadcom + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcmgenet.h" + +static void bcmgenet_mac_config(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; + u32 reg, cmd_bits = 0; + + /* speed */ + if (phydev->speed == SPEED_1000) + cmd_bits = CMD_SPEED_1000; + else if (phydev->speed == SPEED_100) + cmd_bits = CMD_SPEED_100; + else + cmd_bits = CMD_SPEED_10; + cmd_bits <<= CMD_SPEED_SHIFT; + + /* duplex */ + if (phydev->duplex != DUPLEX_FULL) { + cmd_bits |= CMD_HD_EN | + CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; + } else { + /* pause capability defaults to Symmetric */ + if (priv->autoneg_pause) { + bool tx_pause = 0, rx_pause = 0; + + if (phydev->autoneg) + phy_get_pause(phydev, &tx_pause, &rx_pause); + + if (!tx_pause) + cmd_bits |= CMD_TX_PAUSE_IGNORE; + if (!rx_pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE; + } + + /* Manual override */ + if (!priv->rx_pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE; + if (!priv->tx_pause) + cmd_bits |= CMD_TX_PAUSE_IGNORE; + } + + /* Program UMAC and RGMII block based on established + * link speed, duplex, and pause. The speed set in + * umac->cmd tell RGMII block which clock to use for + * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps). + * Receive clock is provided by the PHY. 
+ */ + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~OOB_DISABLE; + reg |= RGMII_LINK; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | + CMD_HD_EN | + CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); + reg |= cmd_bits; + if (reg & CMD_SW_RESET) { + reg &= ~CMD_SW_RESET; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + udelay(2); + reg |= CMD_TX_EN | CMD_RX_EN; + } + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0; + bcmgenet_eee_enable_set(dev, + priv->eee.eee_enabled && priv->eee.eee_active, + priv->eee.tx_lpi_enabled); +} + +/* setup netdev link state when PHY link status change and + * update UMAC and RGMII block when link up + */ +void bcmgenet_mii_setup(struct net_device *dev) +{ + struct phy_device *phydev = dev->phydev; + + if (phydev->link) + bcmgenet_mac_config(dev); + phy_print_status(phydev); +} + + +static int bcmgenet_fixed_phy_link_update(struct net_device *dev, + struct fixed_phy_status *status) +{ + struct bcmgenet_priv *priv; + u32 reg; + + if (dev && dev->phydev && status) { + priv = netdev_priv(dev); + reg = bcmgenet_umac_readl(priv, UMAC_MODE); + status->link = !!(reg & MODE_LINK_STATUS); + } + + return 0; +} + +void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx) +{ + struct phy_device *phydev = dev->phydev; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising, rx); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising, + rx | tx); + phy_start_aneg(phydev); + + mutex_lock(&phydev->lock); + if (phydev->link) + bcmgenet_mac_config(dev); + mutex_unlock(&phydev->lock); +} + +void bcmgenet_phy_power_set(struct net_device *dev, bool enable) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg = 0; + + /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */ + if (GENET_IS_V4(priv) || priv->ephy_16nm) { + reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL); + if (enable) { + reg &= ~EXT_CK25_DIS; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | + EXT_CFG_IDDQ_GLOBAL_PWR); + reg |= EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~EXT_GPHY_RESET; + } else { + reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | + EXT_GPHY_RESET | EXT_CFG_IDDQ_GLOBAL_PWR; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + reg |= EXT_CK25_DIS; + } + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + udelay(60); + } else { + mdelay(1); + } +} + +static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) +{ + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + fixed_phy_set_link_update(priv->dev->phydev, + bcmgenet_fixed_phy_link_update); +} + +int bcmgenet_mii_config(struct net_device *dev, bool init) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; + struct device *kdev = &priv->pdev->dev; + const char *phy_name = NULL; + u32 id_mode_dis = 0; + u32 port_ctrl; + u32 reg; + + switch (priv->phy_interface) { + case PHY_INTERFACE_MODE_INTERNAL: + phy_name = "internal PHY"; + fallthrough; + case PHY_INTERFACE_MODE_MOCA: + /* Irrespective of the actually configured PHY speed (100 or + * 1000) GENETv4 only has an internal GPHY so we will just end + * up masking the Gigabit features from what we support, not + * switching to the EPHY + */ + if (GENET_IS_V4(priv)) + port_ctrl = PORT_MODE_INT_GPHY; + else + port_ctrl = 
PORT_MODE_INT_EPHY; + + if (!phy_name) { + phy_name = "MoCA"; + if (!GENET_IS_V5(priv)) + port_ctrl |= LED_ACT_SOURCE_MAC; + bcmgenet_moca_phy_setup(priv); + } + break; + + case PHY_INTERFACE_MODE_MII: + phy_name = "external MII"; + phy_set_max_speed(phydev, SPEED_100); + port_ctrl = PORT_MODE_EXT_EPHY; + break; + + case PHY_INTERFACE_MODE_REVMII: + phy_name = "external RvMII"; + /* of_mdiobus_register took care of reading the 'max-speed' + * PHY property for us, effectively limiting the PHY supported + * capabilities, use that knowledge to also configure the + * Reverse MII interface correctly. + */ + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + dev->phydev->supported)) + port_ctrl = PORT_MODE_EXT_RVMII_50; + else + port_ctrl = PORT_MODE_EXT_RVMII_25; + break; + + case PHY_INTERFACE_MODE_RGMII: + /* RGMII_NO_ID: TXC transitions at the same time as TXD + * (requires PCB or receiver-side delay) + * + * ID is implicitly disabled for 100Mbps (RG)MII operation. + */ + phy_name = "external RGMII (no delay)"; + id_mode_dis = BIT(16); + port_ctrl = PORT_MODE_EXT_GPHY; + break; + + case PHY_INTERFACE_MODE_RGMII_TXID: + /* RGMII_TXID: Add 2ns delay on TXC (90 degree shift) */ + phy_name = "external RGMII (TX delay)"; + port_ctrl = PORT_MODE_EXT_GPHY; + break; + + case PHY_INTERFACE_MODE_RGMII_RXID: + phy_name = "external RGMII (RX delay)"; + port_ctrl = PORT_MODE_EXT_GPHY; + break; + default: + dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface); + return -EINVAL; + } + + bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); + + priv->ext_phy = !priv->internal_phy && + (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); + + /* This is an external PHY (xMII), so we need to enable the RGMII + * block for the interface to work + */ + if (priv->ext_phy) { + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~ID_MODE_DIS; + reg |= id_mode_dis; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv)) + reg |= RGMII_MODE_EN_V123; + else + reg |= RGMII_MODE_EN; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + } + + if (init) + dev_info(kdev, "configuring instance for %s\n", phy_name); + + return 0; +} + +int bcmgenet_mii_probe(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct device_node *dn = kdev->of_node; + phy_interface_t phy_iface = priv->phy_interface; + struct phy_device *phydev; + u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE | + PHY_BRCM_DIS_TXCRXC_NOENRGY | + PHY_BRCM_IDDQ_SUSPEND; + int ret; + + /* Communicate the integrated PHY revision */ + if (priv->internal_phy) + phy_flags = priv->gphy_rev; + + /* This is an ugly quirk but we have not been correctly interpreting + * the phy_interface values and we have done that across different + * drivers, so at least we are consistent in our mistakes. + * + * When the Generic PHY driver is in use either the PHY has been + * strapped or programmed correctly by the boot loader so we should + * stick to our incorrect interpretation since we have validated it. + * + * Now when a dedicated PHY driver is in use, we need to reverse the + * meaning of the phy_interface_mode values to something that the PHY + * driver will interpret and act on such that we have two mistakes + * canceling themselves so to speak. We only do this for the two + * modes that GENET driver officially supports on Broadcom STB chips: + * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID. 
Other + * modes are not *officially* supported with the boot loader and the + * scripted environment generating Device Tree blobs for those + * platforms. + * + * Note that internal PHY, MoCA and fixed-link configurations are not + * affected because they use different phy_interface_t values or the + * Generic PHY driver. + */ + switch (priv->phy_interface) { + case PHY_INTERFACE_MODE_RGMII: + phy_iface = PHY_INTERFACE_MODE_RGMII_ID; + break; + case PHY_INTERFACE_MODE_RGMII_TXID: + phy_iface = PHY_INTERFACE_MODE_RGMII_RXID; + break; + default: + break; + } + + if (dn) { + phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, + phy_flags, phy_iface); + if (!phydev) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } else { + if (has_acpi_companion(kdev)) { + char mdio_bus_id[MII_BUS_ID_SIZE]; + struct mii_bus *unimacbus; + + snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", + UNIMAC_MDIO_DRV_NAME, priv->pdev->id); + + unimacbus = mdio_find_bus(mdio_bus_id); + if (!unimacbus) { + pr_err("Unable to find mii\n"); + return -ENODEV; + } + phydev = phy_find_first(unimacbus); + put_device(&unimacbus->dev); + if (!phydev) { + pr_err("Unable to find PHY\n"); + return -ENODEV; + } + } else { + phydev = dev->phydev; + } + phydev->dev_flags = phy_flags; + + ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, + phy_iface); + if (ret) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } + + /* Configure port multiplexer based on what the probed PHY device since + * reading the 'max-speed' property determines the maximum supported + * PHY speed which is needed for bcmgenet_mii_config() to configure + * things appropriately. + */ + ret = bcmgenet_mii_config(dev, true); + if (ret) { + phy_disconnect(dev->phydev); + return ret; + } + + /* The internal PHY has its link interrupts routed to the + * Ethernet MAC ISRs. On GENETv5 there is a hardware issue + * that prevents the signaling of link UP interrupts when + * the link operates at 10Mbps, so fallback to polling for + * those versions of GENET. 
+ */ + if (priv->internal_phy && !GENET_IS_V5(priv)) + dev->phydev->irq = PHY_MAC_INTERRUPT; + + /* Indicate that the MAC is responsible for PHY PM */ + dev->phydev->mac_managed_pm = true; + + return 0; +} + +static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct device *kdev = &priv->pdev->dev; + char *compat; + + compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); + if (!compat) + return NULL; + + priv->mdio_dn = of_get_compatible_child(dn, compat); + kfree(compat); + if (!priv->mdio_dn) { + dev_err(kdev, "unable to find MDIO bus node\n"); + return NULL; + } + + return priv->mdio_dn; +} + +static void bcmgenet_mii_pdata_init(struct bcmgenet_priv *priv, + struct unimac_mdio_pdata *ppd) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + /* + * Internal or external PHY with MDIO access + */ + if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) + ppd->phy_mask = 1 << pd->phy_address; + else + ppd->phy_mask = 0; + } +} + +static int bcmgenet_mii_wait(void *wait_func_data) +{ + struct bcmgenet_priv *priv = wait_func_data; + + wait_event_timeout(priv->wq, + !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) + & MDIO_START_BUSY), + HZ / 100); + return 0; +} + +static int bcmgenet_mii_register(struct bcmgenet_priv *priv) +{ + struct platform_device *pdev = priv->pdev; + struct bcmgenet_platform_data *pdata = pdev->dev.platform_data; + struct device_node *dn = pdev->dev.of_node; + struct unimac_mdio_pdata ppd; + struct platform_device *ppdev; + struct resource *pres, res; + int id, ret; + + pres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!pres) { + dev_err(&pdev->dev, "Invalid resource\n"); + return -EINVAL; + } + memset(&res, 0, sizeof(res)); + memset(&ppd, 0, sizeof(ppd)); + + ppd.wait_func = bcmgenet_mii_wait; + ppd.wait_func_data = priv; + ppd.bus_name = "bcmgenet MII bus"; + + /* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD + * and is 2 * 32-bits word long, 8 bytes total. + */ + res.start = pres->start + GENET_UMAC_OFF + UMAC_MDIO_CMD; + res.end = res.start + 8; + res.flags = IORESOURCE_MEM; + + if (dn) + id = of_alias_get_id(dn, "eth"); + else + id = pdev->id; + + ppdev = platform_device_alloc(UNIMAC_MDIO_DRV_NAME, id); + if (!ppdev) + return -ENOMEM; + + /* Retain this platform_device pointer for later cleanup */ + priv->mii_pdev = ppdev; + ppdev->dev.parent = &pdev->dev; + if (dn) + ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv); + else if (pdata) + bcmgenet_mii_pdata_init(priv, &ppd); + else + ppd.phy_mask = ~0; + + ret = platform_device_add_resources(ppdev, &res, 1); + if (ret) + goto out; + + ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd)); + if (ret) + goto out; + + ret = platform_device_add(ppdev); + if (ret) + goto out; + + return 0; +out: + platform_device_put(ppdev); + return ret; +} + +static int bcmgenet_phy_interface_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + int phy_mode = device_get_phy_mode(kdev); + + if (phy_mode < 0) { + dev_err(kdev, "invalid PHY mode property\n"); + return phy_mode; + } + + priv->phy_interface = phy_mode; + + /* We need to specifically look up whether this PHY interface is + * internal or not *before* we even try to probe the PHY driver + * over MDIO as we may have shut down the internal PHY for power + * saving purposes. 
+ */ + if (priv->phy_interface == PHY_INTERFACE_MODE_INTERNAL) + priv->internal_phy = true; + + return 0; +} + +static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct phy_device *phydev; + int ret; + + /* Fetch the PHY phandle */ + priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0); + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { + ret = of_phy_register_fixed_link(dn); + if (ret) + return ret; + + priv->phy_dn = of_node_get(dn); + } + + /* Get the link mode */ + ret = bcmgenet_phy_interface_init(priv); + if (ret) + return ret; + + /* Make sure we initialize MoCA PHYs with a link down */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + phydev = of_phy_find_device(dn); + if (phydev) { + phydev->link = 0; + put_device(&phydev->mdio.dev); + } + } + + return 0; +} + +static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + char phy_name[MII_BUS_ID_SIZE + 3]; + char mdio_bus_id[MII_BUS_ID_SIZE]; + struct phy_device *phydev; + + snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", + UNIMAC_MDIO_DRV_NAME, priv->pdev->id); + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, + mdio_bus_id, pd->phy_address); + + /* + * Internal or external PHY with MDIO access + */ + phydev = phy_attach(priv->dev, phy_name, pd->phy_interface); + if (IS_ERR(phydev)) { + dev_err(kdev, "failed to register PHY device\n"); + return PTR_ERR(phydev); + } + } else { + /* + * MoCA port or no MDIO access. + * Use fixed PHY to represent the link layer. 
+ */ + struct fixed_phy_status fphy_status = { + .link = 1, + .speed = pd->phy_speed, + .duplex = pd->phy_duplex, + .pause = 0, + .asym_pause = 0, + }; + + phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + if (!phydev || IS_ERR(phydev)) { + dev_err(kdev, "failed to register fixed PHY device\n"); + return -ENODEV; + } + + /* Make sure we initialize MoCA PHYs with a link down */ + phydev->link = 0; + + } + + priv->phy_interface = pd->phy_interface; + + return 0; +} + +static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + struct device_node *dn = kdev->of_node; + + if (dn) + return bcmgenet_mii_of_init(priv); + else if (has_acpi_companion(kdev)) + return bcmgenet_phy_interface_init(priv); + else + return bcmgenet_mii_pd_init(priv); +} + +int bcmgenet_mii_init(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + + ret = bcmgenet_mii_register(priv); + if (ret) + return ret; + + ret = bcmgenet_mii_bus_init(priv); + if (ret) + goto out; + + return 0; + +out: + bcmgenet_mii_exit(dev); + return ret; +} + +void bcmgenet_mii_exit(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device_node *dn = priv->pdev->dev.of_node; + + if (of_phy_is_fixed_link(dn)) + of_phy_deregister_fixed_link(dn); + of_node_put(priv->phy_dn); + platform_device_unregister(priv->mii_pdev); +} diff --git a/devices/genet/unimac-6.1-ethercat.h b/devices/genet/unimac-6.1-ethercat.h new file mode 100644 index 00000000..585a8528 --- /dev/null +++ b/devices/genet/unimac-6.1-ethercat.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __UNIMAC_H +#define __UNIMAC_H + +#define UMAC_HD_BKP_CTRL 0x004 +#define HD_FC_EN (1 << 0) +#define HD_FC_BKOFF_OK (1 << 1) +#define IPG_CONFIG_RX_SHIFT 2 +#define IPG_CONFIG_RX_MASK 0x1F +#define UMAC_CMD 0x008 +#define CMD_TX_EN (1 << 0) +#define CMD_RX_EN (1 << 1) +#define CMD_SPEED_10 0 +#define CMD_SPEED_100 1 +#define CMD_SPEED_1000 2 +#define CMD_SPEED_2500 3 +#define CMD_SPEED_SHIFT 2 +#define CMD_SPEED_MASK 3 +#define CMD_PROMISC (1 << 4) +#define CMD_PAD_EN (1 << 5) +#define CMD_CRC_FWD (1 << 6) +#define CMD_PAUSE_FWD (1 << 7) +#define CMD_RX_PAUSE_IGNORE (1 << 8) +#define CMD_TX_ADDR_INS (1 << 9) +#define CMD_HD_EN (1 << 10) +#define CMD_SW_RESET_OLD (1 << 11) +#define CMD_SW_RESET (1 << 13) +#define CMD_LCL_LOOP_EN (1 << 15) +#define CMD_AUTO_CONFIG (1 << 22) +#define CMD_CNTL_FRM_EN (1 << 23) +#define CMD_NO_LEN_CHK (1 << 24) +#define CMD_RMT_LOOP_EN (1 << 25) +#define CMD_RX_ERR_DISC (1 << 26) +#define CMD_PRBL_EN (1 << 27) +#define CMD_TX_PAUSE_IGNORE (1 << 28) +#define CMD_TX_RX_EN (1 << 29) +#define CMD_RUNT_FILTER_DIS (1 << 30) +#define UMAC_MAC0 0x00c +#define UMAC_MAC1 0x010 +#define UMAC_MAX_FRAME_LEN 0x014 +#define UMAC_PAUSE_QUANTA 0x018 +#define UMAC_MODE 0x044 +#define MODE_LINK_STATUS (1 << 5) +#define UMAC_FRM_TAG0 0x048 /* outer tag */ +#define UMAC_FRM_TAG1 0x04c /* inner tag */ +#define UMAC_TX_IPG_LEN 0x05c +#define UMAC_EEE_CTRL 0x064 +#define EN_LPI_RX_PAUSE (1 << 0) +#define EN_LPI_TX_PFC (1 << 1) +#define EN_LPI_TX_PAUSE (1 << 2) +#define EEE_EN (1 << 3) +#define RX_FIFO_CHECK (1 << 4) +#define EEE_TX_CLK_DIS (1 << 5) +#define DIS_EEE_10M (1 << 6) +#define LP_IDLE_PREDICTION_MODE (1 << 7) +#define UMAC_EEE_LPI_TIMER 0x068 +#define UMAC_EEE_WAKE_TIMER 0x06C +#define UMAC_EEE_REF_COUNT 0x070 +#define EEE_REFERENCE_COUNT_MASK 0xffff +#define UMAC_RX_IPG_INV 0x078 +#define UMAC_MACSEC_PROG_TX_CRC 0x310 +#define 
UMAC_MACSEC_CTRL 0x314 +#define UMAC_PAUSE_CTRL 0x330 +#define UMAC_TX_FLUSH 0x334 +#define UMAC_RX_FIFO_STATUS 0x338 +#define UMAC_TX_FIFO_STATUS 0x33c + +#endif diff --git a/devices/genet/unimac-6.1-orig.h b/devices/genet/unimac-6.1-orig.h new file mode 100644 index 00000000..585a8528 --- /dev/null +++ b/devices/genet/unimac-6.1-orig.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __UNIMAC_H +#define __UNIMAC_H + +#define UMAC_HD_BKP_CTRL 0x004 +#define HD_FC_EN (1 << 0) +#define HD_FC_BKOFF_OK (1 << 1) +#define IPG_CONFIG_RX_SHIFT 2 +#define IPG_CONFIG_RX_MASK 0x1F +#define UMAC_CMD 0x008 +#define CMD_TX_EN (1 << 0) +#define CMD_RX_EN (1 << 1) +#define CMD_SPEED_10 0 +#define CMD_SPEED_100 1 +#define CMD_SPEED_1000 2 +#define CMD_SPEED_2500 3 +#define CMD_SPEED_SHIFT 2 +#define CMD_SPEED_MASK 3 +#define CMD_PROMISC (1 << 4) +#define CMD_PAD_EN (1 << 5) +#define CMD_CRC_FWD (1 << 6) +#define CMD_PAUSE_FWD (1 << 7) +#define CMD_RX_PAUSE_IGNORE (1 << 8) +#define CMD_TX_ADDR_INS (1 << 9) +#define CMD_HD_EN (1 << 10) +#define CMD_SW_RESET_OLD (1 << 11) +#define CMD_SW_RESET (1 << 13) +#define CMD_LCL_LOOP_EN (1 << 15) +#define CMD_AUTO_CONFIG (1 << 22) +#define CMD_CNTL_FRM_EN (1 << 23) +#define CMD_NO_LEN_CHK (1 << 24) +#define CMD_RMT_LOOP_EN (1 << 25) +#define CMD_RX_ERR_DISC (1 << 26) +#define CMD_PRBL_EN (1 << 27) +#define CMD_TX_PAUSE_IGNORE (1 << 28) +#define CMD_TX_RX_EN (1 << 29) +#define CMD_RUNT_FILTER_DIS (1 << 30) +#define UMAC_MAC0 0x00c +#define UMAC_MAC1 0x010 +#define UMAC_MAX_FRAME_LEN 0x014 +#define UMAC_PAUSE_QUANTA 0x018 +#define UMAC_MODE 0x044 +#define MODE_LINK_STATUS (1 << 5) +#define UMAC_FRM_TAG0 0x048 /* outer tag */ +#define UMAC_FRM_TAG1 0x04c /* inner tag */ +#define UMAC_TX_IPG_LEN 0x05c +#define UMAC_EEE_CTRL 0x064 +#define EN_LPI_RX_PAUSE (1 << 0) +#define EN_LPI_TX_PFC (1 << 1) +#define EN_LPI_TX_PAUSE (1 << 2) +#define EEE_EN (1 << 3) +#define RX_FIFO_CHECK (1 << 4) +#define EEE_TX_CLK_DIS (1 << 5) +#define DIS_EEE_10M (1 << 6) +#define LP_IDLE_PREDICTION_MODE (1 << 7) +#define UMAC_EEE_LPI_TIMER 0x068 +#define UMAC_EEE_WAKE_TIMER 0x06C +#define UMAC_EEE_REF_COUNT 0x070 +#define EEE_REFERENCE_COUNT_MASK 0xffff +#define UMAC_RX_IPG_INV 0x078 +#define UMAC_MACSEC_PROG_TX_CRC 0x310 +#define UMAC_MACSEC_CTRL 0x314 +#define UMAC_PAUSE_CTRL 0x330 +#define UMAC_TX_FLUSH 0x334 +#define UMAC_RX_FIFO_STATUS 0x338 +#define UMAC_TX_FIFO_STATUS 0x33c + +#endif diff --git a/devices/igb/Makefile.am b/devices/igb/Makefile.am index cb9b7698..580558f5 100644 --- a/devices/igb/Makefile.am +++ b/devices/igb/Makefile.am @@ -44,10 +44,18 @@ EXTRA_DIST = \ e1000_82575-5.10-ethercat.h \ e1000_82575-5.10-orig.c \ e1000_82575-5.10-orig.h \ + e1000_82575-5.14-ethercat.c \ + e1000_82575-5.14-ethercat.h \ + e1000_82575-5.14-orig.c \ + e1000_82575-5.14-orig.h \ e1000_82575-5.15-ethercat.c \ e1000_82575-5.15-ethercat.h \ e1000_82575-5.15-orig.c \ e1000_82575-5.15-orig.h \ + e1000_82575-6.1-ethercat.c \ + e1000_82575-6.1-ethercat.h \ + e1000_82575-6.1-orig.c \ + e1000_82575-6.1-orig.h \ e1000_defines-3.18-ethercat.h \ e1000_defines-3.18-orig.h \ e1000_defines-4.19-ethercat.h \ @@ -56,8 +64,12 @@ EXTRA_DIST = \ e1000_defines-4.4-orig.h \ e1000_defines-5.10-ethercat.h \ e1000_defines-5.10-orig.h \ + e1000_defines-5.14-ethercat.h \ + e1000_defines-5.14-orig.h \ e1000_defines-5.15-ethercat.h \ e1000_defines-5.15-orig.h \ + e1000_defines-6.1-ethercat.h \ + e1000_defines-6.1-orig.h \ e1000_hw-3.18-ethercat.h \ e1000_hw-3.18-orig.h \ e1000_hw-4.19-ethercat.h \ @@ -66,8 
+78,12 @@ EXTRA_DIST = \ e1000_hw-4.4-orig.h \ e1000_hw-5.10-ethercat.h \ e1000_hw-5.10-orig.h \ + e1000_hw-5.14-ethercat.h \ + e1000_hw-5.14-orig.h \ e1000_hw-5.15-ethercat.h \ e1000_hw-5.15-orig.h \ + e1000_hw-6.1-ethercat.h \ + e1000_hw-6.1-orig.h \ e1000_i210-3.18-ethercat.c \ e1000_i210-3.18-ethercat.h \ e1000_i210-3.18-orig.c \ @@ -84,10 +100,18 @@ EXTRA_DIST = \ e1000_i210-5.10-ethercat.h \ e1000_i210-5.10-orig.c \ e1000_i210-5.10-orig.h \ + e1000_i210-5.14-ethercat.c \ + e1000_i210-5.14-ethercat.h \ + e1000_i210-5.14-orig.c \ + e1000_i210-5.14-orig.h \ e1000_i210-5.15-ethercat.c \ e1000_i210-5.15-ethercat.h \ e1000_i210-5.15-orig.c \ e1000_i210-5.15-orig.h \ + e1000_i210-6.1-ethercat.c \ + e1000_i210-6.1-ethercat.h \ + e1000_i210-6.1-orig.c \ + e1000_i210-6.1-orig.h \ e1000_mac-3.18-ethercat.c \ e1000_mac-3.18-ethercat.h \ e1000_mac-3.18-orig.c \ @@ -104,10 +128,18 @@ EXTRA_DIST = \ e1000_mac-5.10-ethercat.h \ e1000_mac-5.10-orig.c \ e1000_mac-5.10-orig.h \ + e1000_mac-5.14-ethercat.c \ + e1000_mac-5.14-ethercat.h \ + e1000_mac-5.14-orig.c \ + e1000_mac-5.14-orig.h \ e1000_mac-5.15-ethercat.c \ e1000_mac-5.15-ethercat.h \ e1000_mac-5.15-orig.c \ e1000_mac-5.15-orig.h \ + e1000_mac-6.1-ethercat.c \ + e1000_mac-6.1-ethercat.h \ + e1000_mac-6.1-orig.c \ + e1000_mac-6.1-orig.h \ e1000_mbx-3.18-ethercat.c \ e1000_mbx-3.18-ethercat.h \ e1000_mbx-3.18-orig.c \ @@ -124,10 +156,18 @@ EXTRA_DIST = \ e1000_mbx-5.10-ethercat.h \ e1000_mbx-5.10-orig.c \ e1000_mbx-5.10-orig.h \ + e1000_mbx-5.14-ethercat.c \ + e1000_mbx-5.14-ethercat.h \ + e1000_mbx-5.14-orig.c \ + e1000_mbx-5.14-orig.h \ e1000_mbx-5.15-ethercat.c \ e1000_mbx-5.15-ethercat.h \ e1000_mbx-5.15-orig.c \ e1000_mbx-5.15-orig.h \ + e1000_mbx-6.1-ethercat.c \ + e1000_mbx-6.1-ethercat.h \ + e1000_mbx-6.1-orig.c \ + e1000_mbx-6.1-orig.h \ e1000_nvm-3.18-ethercat.c \ e1000_nvm-3.18-ethercat.h \ e1000_nvm-3.18-orig.c \ @@ -144,10 +184,18 @@ EXTRA_DIST = \ e1000_nvm-5.10-ethercat.h \ e1000_nvm-5.10-orig.c \ e1000_nvm-5.10-orig.h \ + e1000_nvm-5.14-ethercat.c \ + e1000_nvm-5.14-ethercat.h \ + e1000_nvm-5.14-orig.c \ + e1000_nvm-5.14-orig.h \ e1000_nvm-5.15-ethercat.c \ e1000_nvm-5.15-ethercat.h \ e1000_nvm-5.15-orig.c \ e1000_nvm-5.15-orig.h \ + e1000_nvm-6.1-ethercat.c \ + e1000_nvm-6.1-ethercat.h \ + e1000_nvm-6.1-orig.c \ + e1000_nvm-6.1-orig.h \ e1000_phy-3.18-ethercat.c \ e1000_phy-3.18-ethercat.h \ e1000_phy-3.18-orig.c \ @@ -164,10 +212,18 @@ EXTRA_DIST = \ e1000_phy-5.10-ethercat.h \ e1000_phy-5.10-orig.c \ e1000_phy-5.10-orig.h \ + e1000_phy-5.14-ethercat.c \ + e1000_phy-5.14-ethercat.h \ + e1000_phy-5.14-orig.c \ + e1000_phy-5.14-orig.h \ e1000_phy-5.15-ethercat.c \ e1000_phy-5.15-ethercat.h \ e1000_phy-5.15-orig.c \ e1000_phy-5.15-orig.h \ + e1000_phy-6.1-ethercat.c \ + e1000_phy-6.1-ethercat.h \ + e1000_phy-6.1-orig.c \ + e1000_phy-6.1-orig.h \ e1000_regs-3.18-ethercat.h \ e1000_regs-3.18-orig.h \ e1000_regs-4.19-ethercat.h \ @@ -176,8 +232,12 @@ EXTRA_DIST = \ e1000_regs-4.4-orig.h \ e1000_regs-5.10-ethercat.h \ e1000_regs-5.10-orig.h \ + e1000_regs-5.14-ethercat.h \ + e1000_regs-5.14-orig.h \ e1000_regs-5.15-ethercat.h \ e1000_regs-5.15-orig.h \ + e1000_regs-6.1-ethercat.h \ + e1000_regs-6.1-orig.h \ igb-3.18-ethercat.h \ igb-3.18-orig.h \ igb-4.19-ethercat.h \ @@ -186,8 +246,12 @@ EXTRA_DIST = \ igb-4.4-orig.h \ igb-5.10-ethercat.h \ igb-5.10-orig.h \ + igb-5.14-ethercat.h \ + igb-5.14-orig.h \ igb-5.15-ethercat.h \ igb-5.15-orig.h \ + igb-6.1-ethercat.h \ + igb-6.1-orig.h \ igb_ethtool-3.18-ethercat.c \ igb_ethtool-3.18-orig.c 
\ igb_ethtool-4.19-ethercat.c \ @@ -196,8 +260,12 @@ EXTRA_DIST = \ igb_ethtool-4.4-orig.c \ igb_ethtool-5.10-ethercat.c \ igb_ethtool-5.10-orig.c \ + igb_ethtool-5.14-ethercat.c \ + igb_ethtool-5.14-orig.c \ igb_ethtool-5.15-ethercat.c \ igb_ethtool-5.15-orig.c \ + igb_ethtool-6.1-ethercat.c \ + igb_ethtool-6.1-orig.c \ igb_hwmon-3.18-ethercat.c \ igb_hwmon-3.18-orig.c \ igb_hwmon-4.19-ethercat.c \ @@ -206,8 +274,12 @@ EXTRA_DIST = \ igb_hwmon-4.4-orig.c \ igb_hwmon-5.10-ethercat.c \ igb_hwmon-5.10-orig.c \ + igb_hwmon-5.14-ethercat.c \ + igb_hwmon-5.14-orig.c \ igb_hwmon-5.15-ethercat.c \ igb_hwmon-5.15-orig.c \ + igb_hwmon-6.1-ethercat.c \ + igb_hwmon-6.1-orig.c \ igb_main-3.18-ethercat.c \ igb_main-3.18-orig.c \ igb_main-4.19-ethercat.c \ @@ -216,8 +288,12 @@ EXTRA_DIST = \ igb_main-4.4-orig.c \ igb_main-5.10-ethercat.c \ igb_main-5.10-orig.c \ + igb_main-5.14-ethercat.c \ + igb_main-5.14-orig.c \ igb_main-5.15-ethercat.c \ igb_main-5.15-orig.c \ + igb_main-6.1-ethercat.c \ + igb_main-6.1-orig.c \ igb_ptp-3.18-ethercat.c \ igb_ptp-3.18-orig.c \ igb_ptp-4.19-ethercat.c \ @@ -226,8 +302,12 @@ EXTRA_DIST = \ igb_ptp-4.4-orig.c \ igb_ptp-5.10-ethercat.c \ igb_ptp-5.10-orig.c \ + igb_ptp-5.14-ethercat.c \ + igb_ptp-5.14-orig.c \ igb_ptp-5.15-ethercat.c \ igb_ptp-5.15-orig.c \ + igb_ptp-6.1-ethercat.c \ + igb_ptp-6.1-orig.c \ update.sh #------------------------------------------------------------------------------ diff --git a/devices/igb/e1000-5.14-orig.hw.h b/devices/igb/e1000-5.14-orig.hw.h new file mode 100644 index 00000000..44111f65 --- /dev/null +++ b/devices/igb/e1000-5.14-orig.hw.h @@ -0,0 +1,554 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#ifndef _E1000_IGB_HW_H_ +#define _E1000_IGB_HW_H_ + +#include +#include +#include +#include + +#include "e1000_regs.h" +#include "e1000_defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define E1000_DEV_ID_I350_SGMII 0x1524 +#define E1000_DEV_ID_I210_COPPER 0x1533 +#define E1000_DEV_ID_I210_FIBER 0x1536 +#define E1000_DEV_ID_I210_SERDES 0x1537 +#define E1000_DEV_ID_I210_SGMII 0x1538 +#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B +#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C +#define E1000_DEV_ID_I211_COPPER 0x1539 +#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 +#define E1000_DEV_ID_I354_SGMII 0x1F41 +#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 + +#define E1000_REVISION_2 2 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 +#define 
E1000_FUNC_2 2 +#define E1000_FUNC_3 3 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_82575, + e1000_82576, + e1000_82580, + e1000_i350, + e1000_i354, + e1000_i210, + e1000_i211, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_invm, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large, +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_82580, + e1000_phy_i210, + e1000_phy_bcm54616, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_pci_express, + e1000_bus_type_reserved +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_2500, + e1000_bus_speed_5000, + e1000_bus_speed_reserved +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host 
Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "e1000_mac.h" +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_mbx.h" + +struct e1000_mac_operations { + s32 (*check_for_link)(struct e1000_hw *); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); +#ifdef CONFIG_IGB_HWMON + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); +#endif + void (*write_vfta)(struct e1000_hw *, u32, u32); +}; + +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_phy_info)(struct e1000_hw *); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); +}; + +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + s32 (*update)(struct e1000_hw *); + s32 (*validate)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); +}; + +#define E1000_MAX_SENSORS 3 + +struct e1000_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct e1000_thermal_sensor_data { + struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS]; +}; + +struct e1000_info { + s32 (*get_invariants)(struct e1000_hw *); + struct e1000_mac_operations *mac_ops; + const struct e1000_phy_operations *phy_ops; + struct e1000_nvm_operations *nvm_ops; +}; + +extern const struct e1000_info e1000_82575_info; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 txcw; + + u16 mta_reg_count; + u16 uta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ + #define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool arc_subsystem_valid; + bool 
asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool disable_hw_init_bits; + bool get_link_status; + bool ifs_params_forced; + bool in_ifs_mode; + bool report_tx_early; + bool serdes_has_link; + bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + u16 pair_length[4]; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + + u32 snoop; + + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* Type of flow control */ + enum e1000_fc_mode requested_mode; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock); + s32 (*write)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*read_posted)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*write_posted)(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id); + s32 (*check_for_msg)(struct e1000_hw *hw, u16 mbx_id); + s32 (*check_for_ack)(struct e1000_hw *hw, u16 mbx_id); + s32 (*check_for_rst)(struct e1000_hw *hw, u16 mbx_id); + s32 (*unlock)(struct e1000_hw *hw, u16 mbx_id); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; + bool clear_semaphore_once; + struct e1000_sfp_flags eth_flags; + bool module_plugged; + u8 media_port; + bool media_changed; + bool mas_capable; +}; + +struct e1000_hw { + void *back; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82575 _82575; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +struct net_device *igb_get_hw_dev(struct e1000_hw *hw); +#define hw_dbg(format, 
arg...) \ + netdev_dbg(igb_get_hw_dev(hw), format, ##arg) + +/* These functions must be implemented by drivers */ +s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); + +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +#endif /* _E1000_IGB_HW_H_ */ diff --git a/devices/igb/e1000_-5.14-orig.hy.h b/devices/igb/e1000_-5.14-orig.hy.h new file mode 100644 index 00000000..5894e4b1 --- /dev/null +++ b/devices/igb/e1000_-5.14-orig.hy.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#ifndef _E1000_PHY_H_ +#define _E1000_PHY_H_ + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +s32 igb_check_downshift(struct e1000_hw *hw); +s32 igb_check_reset_block(struct e1000_hw *hw); +s32 igb_copper_link_setup_igp(struct e1000_hw *hw); +s32 igb_copper_link_setup_m88(struct e1000_hw *hw); +s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 igb_get_cable_length_m88(struct e1000_hw *hw); +s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw); +s32 igb_get_cable_length_igp_2(struct e1000_hw *hw); +s32 igb_get_phy_id(struct e1000_hw *hw); +s32 igb_get_phy_info_igp(struct e1000_hw *hw); +s32 igb_get_phy_info_m88(struct e1000_hw *hw); +s32 igb_phy_sw_reset(struct e1000_hw *hw); +s32 igb_phy_hw_reset(struct e1000_hw *hw); +s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 igb_setup_copper_link(struct e1000_hw *hw); +s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +void igb_power_up_phy_copper(struct e1000_hw *hw); +void igb_power_down_phy_copper(struct e1000_hw *hw); +s32 igb_phy_init_script_igp3(struct e1000_hw *hw); +s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw); +s32 igb_initialize_M88E1543_phy(struct e1000_hw *hw); +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +s32 igb_copper_link_setup_82580(struct e1000_hw *hw); +s32 igb_get_phy_info_82580(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); +s32 igb_get_cable_length_82580(struct e1000_hw *hw); +s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_check_polarity_m88(struct e1000_hw *hw); + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define 
IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define I82580_ADDR_REG 16 +#define I82580_CFG_REG 22 +#define I82580_CFG_ASSERT_CRS_ON_TX BIT(15) +#define I82580_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift 100/10 */ +#define I82580_CTRL_REG 23 +#define I82580_CTRL_DOWNSHIFT_MASK (7u << 10) + +/* 82580 specific PHY registers */ +#define I82580_PHY_CTRL_2 18 +#define I82580_PHY_LBK_CTRL 19 +#define I82580_PHY_STATUS_2 26 +#define I82580_PHY_DIAG_STATUS 31 + +/* I82580 PHY Status 2 */ +#define I82580_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82580_PHY_STATUS2_MDIX 0x0800 +#define I82580_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200 +#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 + +/* I82580 PHY Control 2 */ +#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600 + +/* I82580 PHY Diagnostics Status */ +#define I82580_DSTATUS_CABLE_LENGTH 0x03FC +#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* 82580 PHY Power Management */ +#define E1000_82580_PHY_POWER_MGMT 0xE14 +#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ +#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ +#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ + +/* Enable flexible speed on link-up */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +/* SFP modules ID memory locations */ +#define E1000_SFF_IDENTIFIER_OFFSET 0x00 +#define E1000_SFF_IDENTIFIER_SFF 0x02 +#define E1000_SFF_IDENTIFIER_SFP 0x03 + +#define E1000_SFF_ETH_FLAGS_OFFSET 0x06 +/* Flags for SFP modules compatible with ETH up to 1Gb */ +struct e1000_sfp_flags { + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; + u8 e1000_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e10_base_bx10:1; + u8 e10_base_px:1; +}; + +#endif diff --git a/devices/igb/e1000_82575-5.14-ethercat.c b/devices/igb/e1000_82575-5.14-ethercat.c new file mode 100644 index 00000000..63d26f2c --- /dev/null +++ b/devices/igb/e1000_82575-5.14-ethercat.c @@ -0,0 +1,2926 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +/* e1000_82575 + * e1000_82576 + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include + +#include "e1000_mac-5.14-ethercat.h" +#include "e1000_82575-5.14-ethercat.h" +#include "e1000_i210-5.14-ethercat.h" +#include "igb-5.14-ethercat.h" + +static s32 igb_get_invariants_82575(struct e1000_hw *); +static s32 igb_acquire_phy_82575(struct e1000_hw *); +static void igb_release_phy_82575(struct e1000_hw *); +static s32 igb_acquire_nvm_82575(struct e1000_hw *); +static void igb_release_nvm_82575(struct e1000_hw *); +static s32 igb_check_for_link_82575(struct e1000_hw *); +static s32 igb_get_cfg_done_82575(struct e1000_hw *); +static s32 igb_init_hw_82575(struct e1000_hw *); +static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); +static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); +static s32 igb_reset_hw_82575(struct e1000_hw *); +static s32 igb_reset_hw_82580(struct e1000_hw *); +static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); +static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool); +static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool); +static s32 igb_setup_copper_link_82575(struct e1000_hw *); +static s32 igb_setup_serdes_link_82575(struct e1000_hw *); +static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); +static void igb_clear_hw_cntrs_82575(struct e1000_hw *); +static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16); +static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *, + u16 *); +static s32 igb_get_phy_id_82575(struct e1000_hw *); +static void igb_release_swfw_sync_82575(struct e1000_hw *, u16); +static bool igb_sgmii_active_82575(struct e1000_hw *); +static s32 igb_reset_init_script_82575(struct e1000_hw *); +static s32 igb_read_mac_addr_82575(struct e1000_hw *); +static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); +static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); +static const u16 e1000_82580_rxpbs_table[] = { + 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; + +/* Due to a hw errata, if the host tries to configure the VFTA register + * while performing queries from the BMC or DMA, then the VFTA in some + * cases won't be written. + */ + +/** + * igb_write_vfta_i350 - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +{ + struct igb_adapter *adapter = hw->back; + int i; + + for (i = 10; i--;) + array_wr32(E1000_VFTA, offset, value); + + wrfl(); + adapter->shadow_vfta[offset] = value; +} + +/** + * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO + * @hw: pointer to the HW structure + * + * Called to determine if the I2C pins are being used for I2C or as an + * external MDIO interface since the two options are mutually exclusive. 
+ **/ +static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) +{ + u32 reg = 0; + bool ext_mdio = false; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + reg = rd32(E1000_MDIC); + ext_mdio = !!(reg & E1000_MDIC_DEST); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + reg = rd32(E1000_MDICNFG); + ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); + break; + default: + break; + } + return ext_mdio; +} + +/** + * igb_check_for_link_media_swap - Check which M88E1112 interface linked + * @hw: pointer to the HW structure + * + * Poll the M88E1112 interfaces to see which interface achieved link. + */ +static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + u8 port = 0; + + /* Check the copper medium. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_COPPER; + + /* Check the other medium. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_OTHER; + + /* Determine if a swap needs to happen. */ + if (port && (hw->dev_spec._82575.media_port != port)) { + hw->dev_spec._82575.media_port = port; + hw->dev_spec._82575.media_changed = true; + } + + if (port == E1000_MEDIA_PORT_COPPER) { + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + igb_check_for_link_82575(hw); + } else { + igb_check_for_link_82575(hw); + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + } + + return 0; +} + +/** + * igb_init_phy_params_82575 - Init PHY func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igb_init_phy_params_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u32 ctrl_ext; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + ctrl_ext = rd32(E1000_CTRL_EXT); + + if (igb_sgmii_active_82575(hw)) { + phy->ops.reset = igb_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { + phy->ops.reset = igb_phy_hw_reset; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; + } + + wr32(E1000_CTRL_EXT, ctrl_ext); + igb_reset_mdicnfg_82580(hw); + + if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { + phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; + phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; + } else { + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + phy->ops.read_reg = igb_read_phy_reg_82580; + phy->ops.write_reg = igb_write_phy_reg_82580; + break; + default: + phy->ops.read_reg = igb_read_phy_reg_igp; + phy->ops.write_reg = igb_write_phy_reg_igp; + } + } + + /* set lan id */ + hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> + E1000_STATUS_FUNC_SHIFT; + + /* Set phy->phy_addr and phy->id. 
*/ + ret_val = igb_get_phy_id_82575(hw); + if (ret_val) + return ret_val; + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1111_I_PHY_ID: + phy->type = e1000_phy_m88; + phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_phy_info = igb_get_phy_info_m88; + if (phy->id != M88E1111_I_PHY_ID) + phy->ops.get_cable_length = + igb_get_cable_length_m88_gen2; + else + phy->ops.get_cable_length = igb_get_cable_length_m88; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; + /* Check if this PHY is configured for media swap. */ + if (phy->id == M88E1112_E_PHY_ID) { + u16 data; + + ret_val = phy->ops.write_reg(hw, + E1000_M88E1112_PAGE_ADDR, + 2); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, + E1000_M88E1112_MAC_CTRL_1, + &data); + if (ret_val) + goto out; + + data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >> + E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT; + if (data == E1000_M88E1112_AUTO_COPPER_SGMII || + data == E1000_M88E1112_AUTO_COPPER_BASEX) + hw->mac.ops.check_for_link = + igb_check_for_link_media_swap; + } + if (phy->id == M88E1512_E_PHY_ID) { + ret_val = igb_initialize_M88E1512_phy(hw); + if (ret_val) + goto out; + } + if (phy->id == M88E1543_E_PHY_ID) { + ret_val = igb_initialize_M88E1543_phy(hw); + if (ret_val) + goto out; + } + break; + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->ops.get_phy_info = igb_get_phy_info_igp; + phy->ops.get_cable_length = igb_get_cable_length_igp_2; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; + break; + case I82580_I_PHY_ID: + case I350_I_PHY_ID: + phy->type = e1000_phy_82580; + phy->ops.force_speed_duplex = + igb_phy_force_speed_duplex_82580; + phy->ops.get_cable_length = igb_get_cable_length_82580; + phy->ops.get_phy_info = igb_get_phy_info_82580; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; + break; + case I210_I_PHY_ID: + phy->type = e1000_phy_i210; + phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_cfg_done = igb_get_cfg_done_i210; + phy->ops.get_phy_info = igb_get_phy_info_m88; + phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; + break; + case BCM54616_E_PHY_ID: + phy->type = e1000_phy_bcm54616; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_init_nvm_params_82575 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + u16 size; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. 
+ */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->word_size = BIT(size); + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? + 16 : 8; + break; + } + if (nvm->word_size == BIT(15)) + nvm->page_size = 128; + + nvm->type = e1000_nvm_eeprom_spi; + + /* NVM Function Pointers */ + nvm->ops.acquire = igb_acquire_nvm_82575; + nvm->ops.release = igb_release_nvm_82575; + nvm->ops.write = igb_write_nvm_spi; + nvm->ops.validate = igb_validate_nvm_checksum; + nvm->ops.update = igb_update_nvm_checksum; + if (nvm->word_size < BIT(15)) + nvm->ops.read = igb_read_nvm_eerd; + else + nvm->ops.read = igb_read_nvm_spi; + + /* override generic family function pointers for specific descendants */ + switch (hw->mac.type) { + case e1000_82580: + nvm->ops.validate = igb_validate_nvm_checksum_82580; + nvm->ops.update = igb_update_nvm_checksum_82580; + break; + case e1000_i354: + case e1000_i350: + nvm->ops.validate = igb_validate_nvm_checksum_i350; + nvm->ops.update = igb_update_nvm_checksum_i350; + break; + default: + break; + } + + return 0; +} + +/** + * igb_init_mac_params_82575 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igb_init_mac_params_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set uta register count */ + mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128; + /* Set rar entry count */ + switch (mac->type) { + case e1000_82576: + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; + break; + case e1000_82580: + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + break; + case e1000_i350: + case e1000_i354: + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; + break; + default: + mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + break; + } + /* reset */ + if (mac->type >= e1000_82580) + mac->ops.reset_hw = igb_reset_hw_82580; + else + mac->ops.reset_hw = igb_reset_hw_82575; + + if (mac->type >= e1000_i210) { + mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210; + mac->ops.release_swfw_sync = igb_release_swfw_sync_i210; + + } else { + mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575; + mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; + } + + if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) + mac->ops.write_vfta = igb_write_vfta_i350; + else + mac->ops.write_vfta = igb_write_vfta; + + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = + (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) + ? true : false; + /* enable EEE on i350 parts and later parts */ + if (mac->type >= e1000_i350) + dev_spec->eee_disable = false; + else + dev_spec->eee_disable = true; + /* Allow a single clear of the SW semaphore on I210 and newer */ + if (mac->type >= e1000_i210) + dev_spec->clear_semaphore_once = true; + /* physical interface link setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? 
igb_setup_copper_link_82575 + : igb_setup_serdes_link_82575; + + if (mac->type == e1000_82580 || mac->type == e1000_i350) { + switch (hw->device_id) { + /* feature not supported on these id's */ + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + break; + default: + hw->dev_spec._82575.mas_capable = true; + break; + } + } + return 0; +} + +/** + * igb_set_sfp_media_type_82575 - derives SFP module media type. + * @hw: pointer to the HW structure + * + * The media type is chosen based on SFP module. + * compatibility flags retrieved from SFP ID EEPROM. + **/ +static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_ERR_CONFIG; + u32 ctrl_ext = 0; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; + u8 tranceiver_type = 0; + s32 timeout = 3; + + /* Turn I2C interface ON and power on sfp cage */ + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); + + wrfl(); + + /* Read SFP module data */ + while (timeout) { + ret_val = igb_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), + &tranceiver_type); + if (ret_val == 0) + break; + msleep(100); + timeout--; + } + if (ret_val != 0) + goto out; + + ret_val = igb_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), + (u8 *)eth_flags); + if (ret_val != 0) + goto out; + + /* Check if there is some SFP module plugged and powered */ + if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || + (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { + dev_spec->module_plugged = true; + if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e1000_base_t) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_copper; + } else { + hw->phy.media_type = e1000_media_type_unknown; + hw_dbg("PHY module has not been recognized\n"); + goto out; + } + } else { + hw->phy.media_type = e1000_media_type_unknown; + } + ret_val = 0; +out: + /* Restore I2C interface setting */ + wr32(E1000_CTRL_EXT, ctrl_ext); + return ret_val; +} + +static s32 igb_get_invariants_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + s32 ret_val; + u32 ctrl_ext = 0; + u32 link_mode = 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82575EB_COPPER: + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82575GB_QUAD_COPPER: + mac->type = e1000_82575; + break; + case E1000_DEV_ID_82576: + case E1000_DEV_ID_82576_NS: + case E1000_DEV_ID_82576_NS_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + case E1000_DEV_ID_82576_SERDES_QUAD: + mac->type = e1000_82576; + break; + case E1000_DEV_ID_82580_COPPER: + case E1000_DEV_ID_82580_FIBER: + case E1000_DEV_ID_82580_QUAD_FIBER: + case E1000_DEV_ID_82580_SERDES: + case E1000_DEV_ID_82580_SGMII: + case E1000_DEV_ID_82580_COPPER_DUAL: + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + 
mac->type = e1000_82580; + break; + case E1000_DEV_ID_I350_COPPER: + case E1000_DEV_ID_I350_FIBER: + case E1000_DEV_ID_I350_SERDES: + case E1000_DEV_ID_I350_SGMII: + mac->type = e1000_i350; + break; + case E1000_DEV_ID_I210_COPPER: + case E1000_DEV_ID_I210_FIBER: + case E1000_DEV_ID_I210_SERDES: + case E1000_DEV_ID_I210_SGMII: + case E1000_DEV_ID_I210_COPPER_FLASHLESS: + case E1000_DEV_ID_I210_SERDES_FLASHLESS: + mac->type = e1000_i210; + break; + case E1000_DEV_ID_I211_COPPER: + mac->type = e1000_i211; + break; + case E1000_DEV_ID_I354_BACKPLANE_1GBPS: + case E1000_DEV_ID_I354_SGMII: + case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: + mac->type = e1000_i354; + break; + default: + return -E1000_ERR_MAC_INIT; + } + + /* Set media type */ + /* The 82575 uses bits 22:23 for link mode. The mode can be changed + * based on the EEPROM. We cannot rely upon device ID. There + * is no distinguishable difference between fiber and internal + * SerDes mode on the 82575. There can be an external PHY attached + * on the SGMII interface. For this, we'll set sgmii_active to true. + */ + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = false; + dev_spec->module_plugged = false; + + ctrl_ext = rd32(E1000_CTRL_EXT); + + link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; + switch (link_mode) { + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* Get phy control interface type set (MDIO vs. I2C)*/ + if (igb_sgmii_uses_mdio_82575(hw)) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + break; + } + fallthrough; /* for I2C based SGMII */ + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + /* read media type from SFP EEPROM */ + ret_val = igb_set_sfp_media_type_82575(hw); + if ((ret_val != 0) || + (hw->phy.media_type == e1000_media_type_unknown)) { + /* If media type was not identified then return media + * type defined by the CTRL_EXT settings. + */ + hw->phy.media_type = e1000_media_type_internal_serdes; + + if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + } + + break; + } + + /* change current link mode setting */ + ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + + if (dev_spec->sgmii_active) + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; + else + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + + wr32(E1000_CTRL_EXT, ctrl_ext); + + break; + default: + break; + } + + /* mac initialization and operations */ + ret_val = igb_init_mac_params_82575(hw); + if (ret_val) + goto out; + + /* NVM initialization */ + ret_val = igb_init_nvm_params_82575(hw); + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + ret_val = igb_init_nvm_params_i210(hw); + break; + default: + break; + } + + if (ret_val) + goto out; + + /* if part supports SR-IOV then initialize mailbox parameters */ + switch (mac->type) { + case e1000_82576: + case e1000_i350: + igb_init_mbx_params_pf(hw); + break; + default: + break; + } + + /* setup PHY parameters */ + ret_val = igb_init_phy_params_82575(hw); + +out: + return ret_val; +} + +/** + * igb_acquire_phy_82575 - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. 
+ **/ +static s32 igb_acquire_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * igb_release_phy_82575 - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + **/ +static void igb_release_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the serial gigabit media independent + * interface and stores the retrieved information in data. + **/ +static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + hw_dbg("PHY Address %u is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_read_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the serial gigabit + * media independent interface. + **/ +static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + hw_dbg("PHY Address %d is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_write_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_get_phy_id_82575 - Retrieve PHY addr and id + * @hw: pointer to the HW structure + * + * Retrieves the PHY address and ID for both PHY's which do and do not use + * sgmi interface. + **/ +static s32 igb_get_phy_id_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + u32 ctrl_ext; + u32 mdic; + + /* Extra read required for some PHY's on i354 */ + if (hw->mac.type == e1000_i354) + igb_get_phy_id(hw); + + /* For SGMII PHYs, we try the list of possible addresses until + * we find one that works. For non-SGMII PHYs + * (e.g. integrated copper PHYs), an address of 1 should + * work. The result of this function should mean phy->phy_addr + * and phy->id are set correctly. 
+ */ + if (!(igb_sgmii_active_82575(hw))) { + phy->addr = 1; + ret_val = igb_get_phy_id(hw); + goto out; + } + + if (igb_sgmii_uses_mdio_82575(hw)) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + mdic = rd32(E1000_MDIC); + mdic &= E1000_MDIC_PHY_MASK; + phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + mdic = rd32(E1000_MDICNFG); + mdic &= E1000_MDICNFG_PHY_MASK; + phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + ret_val = igb_get_phy_id(hw); + goto out; + } + + /* Power on sgmii phy if it is disabled */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); + wrfl(); + msleep(300); + + /* The address field in the I2CCMD register is 3 bits and 0 is invalid. + * Therefore, we need to test 1-7 + */ + for (phy->addr = 1; phy->addr < 8; phy->addr++) { + ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); + if (ret_val == 0) { + hw_dbg("Vendor ID 0x%08X read at address %u\n", + phy_id, phy->addr); + /* At the time of this writing, The M88 part is + * the only supported SGMII PHY product. + */ + if (phy_id == M88_VENDOR) + break; + } else { + hw_dbg("PHY address %u was unreadable\n", phy->addr); + } + } + + /* A valid PHY type couldn't be found. */ + if (phy->addr == 8) { + phy->addr = 0; + ret_val = -E1000_ERR_PHY; + goto out; + } else { + ret_val = igb_get_phy_id(hw); + } + + /* restore previous sfp cage power state */ + wr32(E1000_CTRL_EXT, ctrl_ext); + +out: + return ret_val; +} + +/** + * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY using the serial gigabit media independent interface. + **/ +static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + /* This isn't a true "hard" reset, but is the only reset + * available to us at this time. + */ + + hw_dbg("Soft resetting SGMII attached PHY...\n"); + + /* SFP documentation requires the following to configure the SPF module + * to work on SGMII. No further documentation is given. + */ + ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) + goto out; + + if (phy->id == M88E1512_E_PHY_ID) + ret_val = igb_initialize_M88E1512_phy(hw); + if (phy->id == M88E1543_E_PHY_ID) + ret_val = igb_initialize_M88E1543_phy(hw); +out: + return ret_val; +} + +/** + * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. 
+ **/ +static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + goto out; + } + } + +out: + return ret_val; +} + +/** + * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u16 data; + + data = rd32(E1000_82580_PHY_POWER_MGMT); + + if (active) { + data |= E1000_82580_PM_D0_LPLU; + + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } else { + data &= ~E1000_82580_PM_D0_LPLU; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; } + + wr32(E1000_82580_PHY_POWER_MGMT, data); + return 0; +} + +/** + * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. 
+ * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u16 data; + + data = rd32(E1000_82580_PHY_POWER_MGMT); + + if (!active) { + data &= ~E1000_82580_PM_D3_LPLU; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_82580_PM_D3_LPLU; + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } + + wr32(E1000_82580_PHY_POWER_MGMT, data); + return 0; +} + +/** + * igb_acquire_nvm_82575 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 igb_acquire_nvm_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM); + if (ret_val) + goto out; + + ret_val = igb_acquire_nvm(hw); + + if (ret_val) + hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); + +out: + return ret_val; +} + +/** + * igb_release_nvm_82575 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void igb_release_nvm_82575(struct e1000_hw *hw) +{ + igb_release_nvm(hw); + hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = 0; + s32 i = 0, timeout = 200; + + while (i < timeout) { + if (igb_get_hw_semaphore(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + igb_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); + +out: + return ret_val; +} + +/** + * igb_release_swfw_sync_82575 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. 
+ **/ +static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igb_get_hw_semaphore(hw) != 0) + ; /* Empty */ + + swfw_sync = rd32(E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +} + +/** + * igb_get_cfg_done_82575 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * 0. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_NVM_CFG_DONE_PORT_2; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_NVM_CFG_DONE_PORT_3; + + while (timeout) { + if (rd32(E1000_EEMNGCTL) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) + hw_dbg("MNG configuration cycle has not completed.\n"); + + /* If EEPROM is not marked present, init the PHY manually */ + if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && + (hw->phy.type == e1000_phy_igp_3)) + igb_phy_init_script_igp3(hw); + + return 0; +} + +/** + * igb_get_link_up_info_82575 - Get link speed/duplex info + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * This is a wrapper function, if using the serial gigabit media independent + * interface, use PCS to retrieve the link speed and duplex information. + * Otherwise, use the generic function to get the link speed and duplex info. + **/ +static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) + ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed, + duplex); + else + ret_val = igb_get_speed_and_duplex_copper(hw, speed, + duplex); + + return ret_val; +} + +/** + * igb_check_for_link_82575 - Check for link + * @hw: pointer to the HW structure + * + * If sgmii is enabled, then use the pcs register to determine link, otherwise + * use the generic interface for determining link. + **/ +static s32 igb_check_for_link_82575(struct e1000_hw *hw) +{ + s32 ret_val; + u16 speed, duplex; + + if (hw->phy.media_type != e1000_media_type_copper) { + ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, + &duplex); + /* Use this flag to determine if link needs to be checked or + * not. If we have link clear the flag so that we do not + * continue to check for link. + */ + hw->mac.get_link_status = !hw->mac.serdes_has_link; + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = igb_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + } else { + ret_val = igb_check_for_copper_link(hw); + } + + return ret_val; +} + +/** + * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown + * @hw: pointer to the HW structure + **/ +void igb_power_up_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !igb_sgmii_active_82575(hw)) + return; + + /* Enable PCS to turn on link */ + reg = rd32(E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_PCS_EN; + wr32(E1000_PCS_CFG0, reg); + + /* Power up the laser */ + reg = rd32(E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + wrfl(); + usleep_range(1000, 2000); +} + +/** + * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Using the physical coding sub-layer (PCS), retrieve the current speed and + * duplex, then store the values in the pointers provided. + **/ +static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 pcs, status; + + /* Set up defaults for the return values of this function */ + mac->serdes_has_link = false; + *speed = 0; + *duplex = 0; + + /* Read the PCS Status register for link state. For non-copper mode, + * the status register is not accurate. The PCS status register is + * used instead. + */ + pcs = rd32(E1000_PCS_LSTAT); + + /* The link up bit determines when link is up on autoneg. The sync ok + * gets set once both sides sync up and agree upon link. Stable link + * can be determined by checking for both link up and link sync ok + */ + if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { + mac->serdes_has_link = true; + + /* Detect and store PCS speed */ + if (pcs & E1000_PCS_LSTS_SPEED_1000) + *speed = SPEED_1000; + else if (pcs & E1000_PCS_LSTS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + /* Detect and store PCS duplex */ + if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + /* Check if it is an I354 2.5Gb backplane connection. */ + if (mac->type == e1000_i354) { + status = rd32(E1000_STATUS); + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + *speed = SPEED_2500; + *duplex = FULL_DUPLEX; + hw_dbg("2500 Mbs, "); + hw_dbg("Full Duplex\n"); + } + } + + } + + return 0; +} + +/** + * igb_shutdown_serdes_link_82575 - Remove link during power down + * @hw: pointer to the HW structure + * + * In the case of fiber serdes, shut down optics and PCS on driver unload + * when management pass thru is not enabled. 
+ **/ +void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + if (hw->phy.media_type != e1000_media_type_internal_serdes && + igb_sgmii_active_82575(hw)) + return; + + if (!igb_enable_mng_pass_thru(hw)) { + /* Disable PCS to turn off link */ + reg = rd32(E1000_PCS_CFG0); + reg &= ~E1000_PCS_CFG_PCS_EN; + wr32(E1000_PCS_CFG0, reg); + + /* shutdown the laser */ + reg = rd32(E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + wrfl(); + usleep_range(1000, 2000); + } +} + +/** + * igb_reset_hw_82575 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + **/ +static s32 igb_reset_hw_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igb_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed.\n"); + + /* set the completion timeout for interface */ + ret_val = igb_set_pcie_completion_timeout(hw); + if (ret_val) + hw_dbg("PCI-E Set completion timeout has failed.\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(E1000_IMC, 0xffffffff); + + wr32(E1000_RCTL, 0); + wr32(E1000_TCTL, E1000_TCTL_PSP); + wrfl(); + + usleep_range(10000, 20000); + + ctrl = rd32(E1000_CTRL); + + hw_dbg("Issuing a global reset to MAC\n"); + wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); + + ret_val = igb_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* If EEPROM is not present, run manual init scripts */ + if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) + igb_reset_init_script_82575(hw); + + /* Clear any pending interrupt events. */ + wr32(E1000_IMC, 0xffffffff); + rd32(E1000_ICR); + + /* Install any alternate MAC address into RAR0 */ + ret_val = igb_check_alt_mac_addr(hw); + + return ret_val; +} + +/** + * igb_init_hw_82575 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +static s32 igb_init_hw_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + if ((hw->mac.type >= e1000_i210) && + !(igb_get_flash_presence_i210(hw))) { + ret_val = igb_pll_workaround_i210(hw); + if (ret_val) + return ret_val; + } + + /* Initialize identification LED */ + ret_val = igb_id_led_init(hw); + if (ret_val) { + hw_dbg("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + hw_dbg("Initializing the IEEE VLAN\n"); + igb_clear_vfta(hw); + + /* Setup the receive address */ + igb_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + array_wr32(E1000_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(E1000_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = igb_setup_link(hw); + + /* Clear all of the statistics registers (clear on read). 
It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + igb_clear_hw_cntrs_82575(hw); + return ret_val; +} + +/** + * igb_setup_copper_link_82575 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u32 phpm_reg; + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + wr32(E1000_CTRL, ctrl); + + /* Clear Go Link Disconnect bit on supported devices */ + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i210: + case e1000_i211: + phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT); + phpm_reg &= ~E1000_82580_PM_GO_LINKD; + wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg); + break; + default: + break; + } + + ret_val = igb_setup_serdes_link_82575(hw); + if (ret_val) + goto out; + + if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { + /* allow time for SFP cage time to power up phy */ + msleep(300); + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + } + switch (hw->phy.type) { + case e1000_phy_i210: + case e1000_phy_m88: + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + ret_val = igb_copper_link_setup_m88_gen2(hw); + break; + default: + ret_val = igb_copper_link_setup_m88(hw); + break; + } + break; + case e1000_phy_igp_3: + ret_val = igb_copper_link_setup_igp(hw); + break; + case e1000_phy_82580: + ret_val = igb_copper_link_setup_82580(hw); + break; + case e1000_phy_bcm54616: + ret_val = 0; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + goto out; + + ret_val = igb_setup_copper_link(hw); +out: + return ret_val; +} + +/** + * igb_setup_serdes_link_82575 - Setup link for serdes + * @hw: pointer to the HW structure + * + * Configure the physical coding sub-layer (PCS) link. The PCS link is + * used on copper connections where the serialized gigabit media independent + * interface (sgmii), or serdes fiber is being used. Configures the link + * for auto-negotiation or forces speed/duplex. + **/ +static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) +{ + u32 ctrl_ext, ctrl_reg, reg, anadv_reg; + bool pcs_autoneg; + s32 ret_val = 0; + u16 data; + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !igb_sgmii_active_82575(hw)) + return ret_val; + + + /* On the 82575, SerDes loopback mode persists until it is + * explicitly turned off or a power cycle is performed. A read to + * the register does not indicate its status. Therefore, we ensure + * loopback mode is disabled during initialization. 
+ */ + wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + + /* power on the sfp cage if present and turn on I2C */ + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + ctrl_ext |= E1000_CTRL_I2C_ENA; + wr32(E1000_CTRL_EXT, ctrl_ext); + + ctrl_reg = rd32(E1000_CTRL); + ctrl_reg |= E1000_CTRL_SLU; + + if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { + /* set both sw defined pins */ + ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; + + /* Set switch control to serdes energy detect */ + reg = rd32(E1000_CONNSW); + reg |= E1000_CONNSW_ENRGSRC; + wr32(E1000_CONNSW, reg); + } + + reg = rd32(E1000_PCS_LCTL); + + /* default pcs_autoneg to the same setting as mac autoneg */ + pcs_autoneg = hw->mac.autoneg; + + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* sgmii mode lets the phy handle forcing speed/duplex */ + pcs_autoneg = true; + /* autoneg time out should be disabled for SGMII mode */ + reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); + break; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + /* disable PCS autoneg and support parallel detect only */ + pcs_autoneg = false; + fallthrough; + default: + if (hw->mac.type == e1000_82575 || + hw->mac.type == e1000_82576) { + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); + if (ret_val) { + hw_dbg(KERN_DEBUG "NVM Read Error\n\n"); + return ret_val; + } + + if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) + pcs_autoneg = false; + } + + /* non-SGMII modes only supports a speed of 1000/Full for the + * link so it is best to just force the MAC and let the pcs + * link either autoneg or be forced to 1000/Full + */ + ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | + E1000_CTRL_FD | E1000_CTRL_FRCDPX; + + /* set speed of 1000/Full if speed/duplex is forced */ + reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; + break; + } + + wr32(E1000_CTRL, ctrl_reg); + + /* New SerDes mode allows for forcing speed or autonegotiating speed + * at 1gb. Autoneg should be default set by most drivers. This is the + * mode that will be compatible with older link partners and switches. + * However, both are supported by the hardware and some drivers/tools. 
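+ *
+ * (In the code below, pcs_autoneg selects between the two: when true,
+ * E1000_PCS_LCTL_AN_ENABLE is set and autonegotiation is restarted; when
+ * false, E1000_PCS_LCTL_FSD forces the speed and flow control is forced as
+ * well.)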
+ */ + reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | + E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); + + if (pcs_autoneg) { + /* Set PCS register for autoneg */ + reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ + E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ + + /* Disable force flow control for autoneg */ + reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; + + /* Configure flow control advertisement for autoneg */ + anadv_reg = rd32(E1000_PCS_ANADV); + anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); + switch (hw->fc.requested_mode) { + case e1000_fc_full: + case e1000_fc_rx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + anadv_reg |= E1000_TXCW_PAUSE; + break; + case e1000_fc_tx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + break; + default: + break; + } + wr32(E1000_PCS_ANADV, anadv_reg); + + hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); + } else { + /* Set PCS register for forced link */ + reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + + /* Force flow control for forced link */ + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + + hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); + } + + wr32(E1000_PCS_LCTL, reg); + + if (!pcs_autoneg && !igb_sgmii_active_82575(hw)) + igb_force_mac_fc(hw); + + return ret_val; +} + +/** + * igb_sgmii_active_82575 - Return sgmii state + * @hw: pointer to the HW structure + * + * 82575 silicon has a serialized gigabit media independent interface (sgmii) + * which can be enabled for use in the embedded applications. Simply + * return the current state of the sgmii interface. + **/ +static bool igb_sgmii_active_82575(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + return dev_spec->sgmii_active; +} + +/** + * igb_reset_init_script_82575 - Inits HW defaults after reset + * @hw: pointer to the HW structure + * + * Inits recommended HW defaults after a reset when there is no EEPROM + * detected. This is only for the 82575. + **/ +static s32 igb_reset_init_script_82575(struct e1000_hw *hw) +{ + if (hw->mac.type == e1000_82575) { + hw_dbg("Running reset init script for 82575\n"); + /* SerDes configuration via SERDESCTRL */ + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15); + + /* CCM configuration via CCMCTL register */ + igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00); + igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00); + + /* PCIe lanes configuration */ + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81); + + /* PCIe PLL Configuration */ + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47); + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00); + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00); + } + + return 0; +} + +/** + * igb_read_mac_addr_82575 - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. 
+ */ + ret_val = igb_check_alt_mac_addr(hw); + if (ret_val) + goto out; + + ret_val = igb_read_mac_addr(hw); + +out: + return ret_val; +} + +/** + * igb_power_down_phy_copper_82575 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +void igb_power_down_phy_copper_82575(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw))) + igb_power_down_phy_copper(hw); +} + +/** + * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) +{ + igb_clear_hw_cntrs_base(hw); + + rd32(E1000_PRC64); + rd32(E1000_PRC127); + rd32(E1000_PRC255); + rd32(E1000_PRC511); + rd32(E1000_PRC1023); + rd32(E1000_PRC1522); + rd32(E1000_PTC64); + rd32(E1000_PTC127); + rd32(E1000_PTC255); + rd32(E1000_PTC511); + rd32(E1000_PTC1023); + rd32(E1000_PTC1522); + + rd32(E1000_ALGNERRC); + rd32(E1000_RXERRC); + rd32(E1000_TNCRS); + rd32(E1000_CEXTERR); + rd32(E1000_TSCTC); + rd32(E1000_TSCTFC); + + rd32(E1000_MGTPRC); + rd32(E1000_MGTPDC); + rd32(E1000_MGTPTC); + + rd32(E1000_IAC); + rd32(E1000_ICRXOC); + + rd32(E1000_ICRXPTC); + rd32(E1000_ICRXATC); + rd32(E1000_ICTXPTC); + rd32(E1000_ICTXATC); + rd32(E1000_ICTXQEC); + rd32(E1000_ICTXQMTC); + rd32(E1000_ICRXDMTC); + + rd32(E1000_CBTMPC); + rd32(E1000_HTDPMC); + rd32(E1000_CBRMPC); + rd32(E1000_RPTHC); + rd32(E1000_HGPTC); + rd32(E1000_HTCBDPC); + rd32(E1000_HGORCL); + rd32(E1000_HGORCH); + rd32(E1000_HGOTCL); + rd32(E1000_HGOTCH); + rd32(E1000_LENERRS); + + /* This register should not be read in copper configurations */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + igb_sgmii_active_82575(hw)) + rd32(E1000_SCVPC); +} + +/** + * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable + * @hw: pointer to the HW structure + * + * After rx enable if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + **/ +void igb_rx_fifo_flush_82575(struct e1000_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + /* disable IPv6 options as per hardware errata */ + rfctl = rd32(E1000_RFCTL); + rfctl |= E1000_RFCTL_IPV6_EX_DIS; + wr32(E1000_RFCTL, rfctl); + + if (hw->mac.type != e1000_82575 || + !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) + return; + + /* Disable all RX queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = rd32(E1000_RXDCTL(i)); + wr32(E1000_RXDCTL(i), + rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + usleep_range(1000, 2000); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= rd32(E1000_RXDCTL(i)); + if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + hw_dbg("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); + + rlpml = rd32(E1000_RLPML); + wr32(E1000_RLPML, 0); + + rctl = rd32(E1000_RCTL); + temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); + temp_rctl |= E1000_RCTL_LPE; + + wr32(E1000_RCTL, temp_rctl); + wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); + wrfl(); + usleep_range(2000, 3000); + + /* Enable RX queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + wr32(E1000_RXDCTL(i), rxdctl[i]); + wr32(E1000_RCTL, rctl); + wrfl(); + + wr32(E1000_RLPML, rlpml); + wr32(E1000_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + rd32(E1000_ROC); + rd32(E1000_RNBC); + rd32(E1000_MPC); +} + +/** + * igb_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 200ms for capability version 1 config, + * or 16ms to 55ms for version 2. + **/ +static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) +{ + u32 gcr = rd32(E1000_GCR); + s32 ret_val = 0; + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & E1000_GCR_CMPL_TMOUT_MASK) + goto out; + + /* if capabilities version is type 1 we can write the + * timeout of 10ms to 200ms through the GCR register + */ + if (!(gcr & E1000_GCR_CAP_VER2)) { + gcr |= E1000_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); + if (ret_val) + goto out; + + pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; + + ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; + + wr32(E1000_GCR, gcr); + return ret_val; +} + +/** + * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * @pf: Physical Function pool - do not set anti-spoofing for the PF + * + * enables/disables L2 switch anti-spoofing functionality. + **/ +void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) +{ + u32 reg_val, reg_offset; + + switch (hw->mac.type) { + case e1000_82576: + reg_offset = E1000_DTXSWC; + break; + case e1000_i350: + case e1000_i354: + reg_offset = E1000_TXSWC; + break; + default: + return; + } + + reg_val = rd32(reg_offset); + if (enable) { + reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs + */ + reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS)); + } else { + reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } + wr32(reg_offset, reg_val); +} + +/** + * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables L2 switch loopback functionality. 
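+ *
+ * (On the 82576 the loopback-enable bit lives in E1000_DTXSWC; on i350/i354
+ * the same E1000_DTXSWC_VMDQ_LOOPBACK_EN bit is set in E1000_TXSWC instead,
+ * as the switch statement below shows.)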
+ **/ +void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) +{ + u32 dtxswc; + + switch (hw->mac.type) { + case e1000_82576: + dtxswc = rd32(E1000_DTXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + wr32(E1000_DTXSWC, dtxswc); + break; + case e1000_i354: + case e1000_i350: + dtxswc = rd32(E1000_TXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + wr32(E1000_TXSWC, dtxswc); + break; + default: + /* Currently no other hardware supports loopback */ + break; + } + +} + +/** + * igb_vmdq_set_replication_pf - enable or disable vmdq replication + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables replication of packets across multiple pools. + **/ +void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) +{ + u32 vt_ctl = rd32(E1000_VT_CTL); + + if (enable) + vt_ctl |= E1000_VT_CTL_VM_REPL_EN; + else + vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; + + wr32(E1000_VT_CTL, vt_ctl); +} + +/** + * igb_read_phy_reg_82580 - Read 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_read_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_82580 - Write 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_write_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits + * @hw: pointer to the HW structure + * + * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on + * the values found in the EEPROM. This addresses an issue in which these + * bits are not restored from EEPROM after reset. + **/ +static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 mdicnfg; + u16 nvm_data = 0; + + if (hw->mac.type != e1000_82580) + goto out; + if (!igb_sgmii_active_82575(hw)) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + mdicnfg = rd32(E1000_MDICNFG); + if (nvm_data & NVM_WORD24_EXT_MDIO) + mdicnfg |= E1000_MDICNFG_EXT_MDIO; + if (nvm_data & NVM_WORD24_COM_MDIO) + mdicnfg |= E1000_MDICNFG_COM_MDIO; + wr32(E1000_MDICNFG, mdicnfg); +out: + return ret_val; +} + +/** + * igb_reset_hw_82580 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets function or entire device (all ports, etc.) + * to a known state. 
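+ *
+ * (Whether a global device reset (E1000_CTRL_DEV_RST) or a port-level reset
+ * (E1000_CTRL_RST) is issued depends on dev_spec._82575.global_device_reset;
+ * on 82580 silicon the global reset is always skipped because of a hardware
+ * erratum, as the body below shows.)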
+ **/ +static s32 igb_reset_hw_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + /* BH SW mailbox bit in SW_FW_SYNC */ + u16 swmbsw_mask = E1000_SW_SYNCH_MB; + u32 ctrl; + bool global_device_reset = hw->dev_spec._82575.global_device_reset; + + hw->dev_spec._82575.global_device_reset = false; + + /* due to hw errata, global device reset doesn't always + * work on 82580 + */ + if (hw->mac.type == e1000_82580) + global_device_reset = false; + + /* Get current control state. */ + ctrl = rd32(E1000_CTRL); + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igb_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed.\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(E1000_IMC, 0xffffffff); + wr32(E1000_RCTL, 0); + wr32(E1000_TCTL, E1000_TCTL_PSP); + wrfl(); + + usleep_range(10000, 11000); + + /* Determine whether or not a global dev reset is requested */ + if (global_device_reset && + hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask)) + global_device_reset = false; + + if (global_device_reset && + !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET)) + ctrl |= E1000_CTRL_DEV_RST; + else + ctrl |= E1000_CTRL_RST; + + wr32(E1000_CTRL, ctrl); + wrfl(); + + /* Add delay to insure DEV_RST has time to complete */ + if (global_device_reset) + usleep_range(5000, 6000); + + ret_val = igb_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* clear global device reset status bit */ + wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); + + /* Clear any pending interrupt events. */ + wr32(E1000_IMC, 0xffffffff); + rd32(E1000_ICR); + + ret_val = igb_reset_mdicnfg_82580(hw); + if (ret_val) + hw_dbg("Could not reset MDICNFG based on EEPROM\n"); + + /* Install any alternate MAC address into RAR0 */ + ret_val = igb_check_alt_mac_addr(hw); + + /* Release semaphore */ + if (global_device_reset) + hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); + + return ret_val; +} + +/** + * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size + * @data: data received by reading RXPBS register + * + * The 82580 uses a table based approach for packet buffer allocation sizes. + * This function converts the retrieved value into the correct table value + * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 + * 0x0 36 72 144 1 2 4 8 16 + * 0x8 35 70 140 rsv rsv rsv rsv rsv + */ +u16 igb_rxpbs_adjust_82580(u32 data) +{ + u16 ret_val = 0; + + if (data < ARRAY_SIZE(e1000_82580_rxpbs_table)) + ret_val = e1000_82580_rxpbs_table[data]; + + return ret_val; +} + +/** + * igb_validate_nvm_checksum_with_offset - Validate EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
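+ *
+ * Worked example (illustrative only): if the words of the protected region,
+ * excluding the checksum word itself, sum to 0x1234 modulo 0x10000, then the
+ * checksum word must hold 0xBABA - 0x1234 = 0xA886 so that the overall sum
+ * equals NVM_SUM (0xBABA).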
+ **/ +static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_with_offset - Update EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, + &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 eeprom_regions_count = 1; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { + /* if checksums compatibility bit is set validate checksums + * for all 4 ports. + */ + eeprom_regions_count = 4; + } + + for (j = 0; j < eeprom_regions_count; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_82580 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. 
+ **/ +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum compatibility bit.\n"); + goto out; + } + + if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { + /* set compatibility bit to validate checksums appropriately */ + nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; + ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, + &nvm_data); + if (ret_val) { + hw_dbg("NVM Write Error while updating checksum compatibility bit.\n"); + goto out; + } + } + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_i350 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. + **/ +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * __igb_access_emi_reg - Read/write EMI register + * @hw: pointer to the HW structure + * @address: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + **/ +static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val = 0; + + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); + + return ret_val; +} + +/** + * igb_read_emi_reg - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + **/ +s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) +{ + return __igb_access_emi_reg(hw, addr, data, true); +} + +/** + * igb_set_eee_i350 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. 
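+ *
+ * Illustrative call (sketch only): igb_set_eee_i350(hw, true, true) turns on
+ * EEE advertisement for both 1000 and 100 Mb/s when
+ * hw->dev_spec._82575.eee_disable is false; passing false for either speed
+ * clears the corresponding advertisement bit instead.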
+ * + **/ +s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + u32 ipcnfg, eeer; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + goto out; + ipcnfg = rd32(E1000_IPCNFG); + eeer = rd32(E1000_EEER); + + /* enable or disable per user setting */ + if (!(hw->dev_spec._82575.eee_disable)) { + u32 eee_su = rd32(E1000_EEE_SU); + + if (adv100M) + ipcnfg |= E1000_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= E1000_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN; + + eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & E1000_EEE_SU_LPI_CLK_STP) + hw_dbg("LPI Clock Stop Bit should not be set!\n"); + + } else { + ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | + E1000_IPCNFG_EEE_100M_AN); + eeer &= ~(E1000_EEER_TX_LPI_EN | + E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + } + wr32(E1000_IPCNFG, ipcnfg); + wr32(E1000_EEER, eeer); + rd32(E1000_IPCNFG); + rd32(E1000_EEER); +out: + + return 0; +} + +/** + * igb_set_eee_i354 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE legacy mode based on setting in dev_spec structure. + * + **/ +s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data; + + if ((hw->phy.media_type != e1000_media_type_copper) || + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) + goto out; + + if (!hw->dev_spec._82575.eee_disable) { + /* Switch to PHY page 18. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, + &phy_data); + if (ret_val) + goto out; + + phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, + phy_data); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + /* Turn on EEE advertisement. */ + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + if (adv100M) + phy_data |= E1000_EEE_ADV_100_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_100_SUPPORTED; + + if (adv1G) + phy_data |= E1000_EEE_ADV_1000_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED; + + ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } else { + /* Turn off EEE advertisement. */ + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | + E1000_EEE_ADV_1000_SUPPORTED); + ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } + +out: + return ret_val; +} + +/** + * igb_get_eee_status_i354 - Get EEE status + * @hw: pointer to the HW structure + * @status: EEE status + * + * Get EEE status by guessing based on whether Tx or Rx LPI indications have + * been received. + **/ +s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data; + + /* Check if EEE is supported on this device. 
*/ + if ((hw->phy.media_type != e1000_media_type_copper) || + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) + goto out; + + ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, + E1000_PCS_STATUS_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | + E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; + +out: + return ret_val; +} + +#ifdef CONFIG_IGB_HWMON +static const u8 e1000_emc_temp_data[4] = { + E1000_EMC_INTERNAL_DATA, + E1000_EMC_DIODE1_DATA, + E1000_EMC_DIODE2_DATA, + E1000_EMC_DIODE3_DATA +}; +static const u8 e1000_emc_therm_limit[4] = { + E1000_EMC_INTERNAL_THERM_LIMIT, + E1000_EMC_DIODE1_THERM_LIMIT, + E1000_EMC_DIODE2_THERM_LIMIT, + E1000_EMC_DIODE3_THERM_LIMIT +}; + +/** + * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the temperatures in mac.thermal_sensor_data + **/ +static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) +{ + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return 0; + + hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; + + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + if (num_sensors > E1000_MAX_SENSORS) + num_sensors = E1000_MAX_SENSORS; + + for (i = 1; i < num_sensors; i++) { + hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + + if (sensor_location != 0) + hw->phy.ops.read_i2c_byte(hw, + e1000_emc_temp_data[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + &data->sensor[i].temp); + } + return 0; +} + +/** + * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Sets the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) +{ + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 low_thresh_delta; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 therm_limit; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + memset(data, 0, sizeof(struct e1000_thermal_sensor_data)); + + data->sensor[0].location = 0x1; + data->sensor[0].caution_thresh = + (rd32(E1000_THHIGHTC) & 0xFF); + data->sensor[0].max_op_thresh = + (rd32(E1000_THLOWTC) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return 0; + + hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; 
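+
+ /* Each remaining word of the ETS section (parsed in the loop below)
+  * describes one external sensor: its EMC data-register index, its
+  * location and a high temperature threshold.  The threshold is programmed
+  * into the external EMC sensor over I2C, and the caution/max-operating
+  * thresholds derived from it are recorded in mac.thermal_sensor_data.
+  */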
+ + low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >> + NVM_ETS_LTHRES_DELTA_SHIFT); + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + + for (i = 1; i <= num_sensors; i++) { + hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK; + + hw->phy.ops.write_i2c_byte(hw, + e1000_emc_therm_limit[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + therm_limit); + + if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) { + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - + low_thresh_delta; + } + } + return 0; +} + +#endif +static struct e1000_mac_operations e1000_mac_ops_82575 = { + .init_hw = igb_init_hw_82575, + .check_for_link = igb_check_for_link_82575, + .rar_set = igb_rar_set, + .read_mac_addr = igb_read_mac_addr_82575, + .get_speed_and_duplex = igb_get_link_up_info_82575, +#ifdef CONFIG_IGB_HWMON + .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic, + .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic, +#endif +}; + +static const struct e1000_phy_operations e1000_phy_ops_82575 = { + .acquire = igb_acquire_phy_82575, + .get_cfg_done = igb_get_cfg_done_82575, + .release = igb_release_phy_82575, + .write_i2c_byte = igb_write_i2c_byte, + .read_i2c_byte = igb_read_i2c_byte, +}; + +static struct e1000_nvm_operations e1000_nvm_ops_82575 = { + .acquire = igb_acquire_nvm_82575, + .read = igb_read_nvm_eerd, + .release = igb_release_nvm_82575, + .write = igb_write_nvm_spi, +}; + +const struct e1000_info e1000_82575_info = { + .get_invariants = igb_get_invariants_82575, + .mac_ops = &e1000_mac_ops_82575, + .phy_ops = &e1000_phy_ops_82575, + .nvm_ops = &e1000_nvm_ops_82575, +}; diff --git a/devices/igb/e1000_82575-5.14-ethercat.h b/devices/igb/e1000_82575-5.14-ethercat.h new file mode 100644 index 00000000..63ec253a --- /dev/null +++ b/devices/igb/e1000_82575-5.14-ethercat.h @@ -0,0 +1,265 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_82575_H_ +#define _E1000_82575_H_ + +void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); +void igb_power_up_serdes_link_82575(struct e1000_hw *hw); +void igb_power_down_phy_copper_82575(struct e1000_hw *hw); +void igb_rx_fifo_flush_82575(struct e1000_hw *hw); +s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data); +s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data); + +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +#define E1000_RAR_ENTRIES_82575 16 +#define E1000_RAR_ENTRIES_82576 24 +#define E1000_RAR_ENTRIES_82580 24 +#define E1000_RAR_ENTRIES_I350 32 + +#define E1000_SW_SYNCH_MB 0x00000100 +#define E1000_STAT_DEV_RST_SET 0x00100000 +#define E1000_CTRL_DEV_RST 0x20000000 + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 +#define E1000_SRRCTL_TIMESTAMP 0x40000000 + + +#define E1000_MRQC_ENABLE_RSS_MQ 0x00000002 +#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_ENABLE_VMDQ_RSS_MQ 0x00000005 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 + +#define E1000_EICR_TX_QUEUE ( \ + E1000_EICR_TX_QUEUE0 | \ + E1000_EICR_TX_QUEUE1 | \ + E1000_EICR_TX_QUEUE2 | \ + E1000_EICR_TX_QUEUE3) + +#define E1000_EICR_RX_QUEUE ( \ + E1000_EICR_RX_QUEUE0 | \ + E1000_EICR_RX_QUEUE1 | \ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) + +/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + __le16 pkt_info; /* RSS type, Packet type */ + __le16 hdr_info; /* Split Head, buf len */ + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ +/* IPSec Encrypt Enable for ESP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +/* Adv ctxt IPSec ESP len mask */ + +/* Additional Transmit Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ +/* Tx Queue Arbitration Priority 0=low, 1=high */ + +/* Additional Receive Descriptor Control definitions */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */ +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */ +#define 
E1000_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */ +#define E1000_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */ + +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */ +#define E1000_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */ + +/* Additional DCA related definitions, note change in position of CPUID */ +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ +#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ + +/* ETQF register bit definitions */ +#define E1000_ETQF_FILTER_ENABLE BIT(26) +#define E1000_ETQF_1588 BIT(30) +#define E1000_ETQF_IMM_INT BIT(29) +#define E1000_ETQF_QUEUE_ENABLE BIT(31) +#define E1000_ETQF_QUEUE_SHIFT 16 +#define E1000_ETQF_QUEUE_MASK 0x00070000 +#define E1000_ETQF_ETYPE_MASK 0x0000FFFF + +/* FTQF register bit definitions */ +#define E1000_FTQF_VF_BP 0x00008000 +#define E1000_FTQF_1588_TIME_STAMP 0x08000000 +#define E1000_FTQF_MASK 0xF0000000 +#define E1000_FTQF_MASK_PROTO_BP 0x10000000 +#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 + +#define E1000_NVM_APME_82575 0x0400 +#define MAX_NUM_VFS 8 + +#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */ +#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ +#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN BIT(31) /* global VF LB enable */ + +/* Easy defines for setting default pool, would normally be left a zero */ +#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 +#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) + +/* Other useful VMD_CTL register defines */ +#define E1000_VT_CTL_IGNORE_MAC BIT(28) +#define E1000_VT_CTL_DISABLE_DEF_POOL BIT(29) +#define E1000_VT_CTL_VM_REPL_EN BIT(30) + +/* Per VM Offload register setup */ +#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ +#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ +#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ +#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ +#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ +#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ +#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ +#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */ +#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_VLVF_ARRAY_SIZE 32 +#define E1000_VLVF_VLANID_MASK 0x00000FFF +#define E1000_VLVF_POOLSEL_SHIFT 12 +#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) +#define E1000_VLVF_LVLAN 0x00100000 +#define E1000_VLVF_VLANID_ENABLE 0x80000000 + +#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* 
Never insert VLAN tag */ + +#define E1000_IOVCTL 0x05BBC +#define E1000_IOVCTL_REUSE_VFQ 0x00000001 + +#define E1000_RPLOLR_STRVLAN 0x40000000 +#define E1000_RPLOLR_STRCRC 0x80000000 + +#define E1000_DTXCTL_8023LL 0x0004 +#define E1000_DTXCTL_VLAN_ADDED 0x0008 +#define E1000_DTXCTL_OOS_ENABLE 0x0010 +#define E1000_DTXCTL_MDP_EN 0x0020 +#define E1000_DTXCTL_SPOOF_INT 0x0040 + +#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT BIT(14) + +#define ALL_QUEUES 0xFFFF + +/* RX packet buffer size defines */ +#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F +void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int); +void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); +void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); +u16 igb_rxpbs_adjust_82580(u32 data); +s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data); +s32 igb_set_eee_i350(struct e1000_hw *, bool adv1G, bool adv100M); +s32 igb_set_eee_i354(struct e1000_hw *, bool adv1G, bool adv100M); +s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status); + +#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define E1000_EMC_INTERNAL_DATA 0x00 +#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20 +#define E1000_EMC_DIODE1_DATA 0x01 +#define E1000_EMC_DIODE1_THERM_LIMIT 0x19 +#define E1000_EMC_DIODE2_DATA 0x23 +#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A +#define E1000_EMC_DIODE3_DATA 0x2A +#define E1000_EMC_DIODE3_THERM_LIMIT 0x30 +#endif diff --git a/devices/igb/e1000_82575-5.14-orig.c b/devices/igb/e1000_82575-5.14-orig.c new file mode 100644 index 00000000..cbe92fd2 --- /dev/null +++ b/devices/igb/e1000_82575-5.14-orig.c @@ -0,0 +1,2927 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +/* e1000_82575 + * e1000_82576 + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include + +#include "e1000_mac.h" +#include "e1000_82575.h" +#include "e1000_i210.h" +#include "igb.h" + +static s32 igb_get_invariants_82575(struct e1000_hw *); +static s32 igb_acquire_phy_82575(struct e1000_hw *); +static void igb_release_phy_82575(struct e1000_hw *); +static s32 igb_acquire_nvm_82575(struct e1000_hw *); +static void igb_release_nvm_82575(struct e1000_hw *); +static s32 igb_check_for_link_82575(struct e1000_hw *); +static s32 igb_get_cfg_done_82575(struct e1000_hw *); +static s32 igb_init_hw_82575(struct e1000_hw *); +static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); +static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); +static s32 igb_reset_hw_82575(struct e1000_hw *); +static s32 igb_reset_hw_82580(struct e1000_hw *); +static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); +static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool); +static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool); +static s32 igb_setup_copper_link_82575(struct e1000_hw *); +static s32 igb_setup_serdes_link_82575(struct e1000_hw *); +static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); +static void igb_clear_hw_cntrs_82575(struct e1000_hw *); +static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16); +static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *, + u16 *); +static s32 igb_get_phy_id_82575(struct e1000_hw *); +static void igb_release_swfw_sync_82575(struct e1000_hw *, u16); +static bool igb_sgmii_active_82575(struct e1000_hw *); +static s32 igb_reset_init_script_82575(struct e1000_hw *); +static s32 igb_read_mac_addr_82575(struct e1000_hw *); +static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); +static 
s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); +static const u16 e1000_82580_rxpbs_table[] = { + 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; + +/* Due to a hw errata, if the host tries to configure the VFTA register + * while performing queries from the BMC or DMA, then the VFTA in some + * cases won't be written. + */ + +/** + * igb_write_vfta_i350 - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +{ + struct igb_adapter *adapter = hw->back; + int i; + + for (i = 10; i--;) + array_wr32(E1000_VFTA, offset, value); + + wrfl(); + adapter->shadow_vfta[offset] = value; +} + +/** + * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO + * @hw: pointer to the HW structure + * + * Called to determine if the I2C pins are being used for I2C or as an + * external MDIO interface since the two options are mutually exclusive. + **/ +static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) +{ + u32 reg = 0; + bool ext_mdio = false; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + reg = rd32(E1000_MDIC); + ext_mdio = !!(reg & E1000_MDIC_DEST); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + reg = rd32(E1000_MDICNFG); + ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); + break; + default: + break; + } + return ext_mdio; +} + +/** + * igb_check_for_link_media_swap - Check which M88E1112 interface linked + * @hw: pointer to the HW structure + * + * Poll the M88E1112 interfaces to see which interface achieved link. + */ +static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + u8 port = 0; + + /* Check the copper medium. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_COPPER; + + /* Check the other medium. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_OTHER; + + /* Determine if a swap needs to happen. */ + if (port && (hw->dev_spec._82575.media_port != port)) { + hw->dev_spec._82575.media_port = port; + hw->dev_spec._82575.media_changed = true; + } + + if (port == E1000_MEDIA_PORT_COPPER) { + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + igb_check_for_link_82575(hw); + } else { + igb_check_for_link_82575(hw); + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + } + + return 0; +} + +/** + * igb_init_phy_params_82575 - Init PHY func ptrs. 
+ * @hw: pointer to the HW structure + **/ +static s32 igb_init_phy_params_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u32 ctrl_ext; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + ctrl_ext = rd32(E1000_CTRL_EXT); + + if (igb_sgmii_active_82575(hw)) { + phy->ops.reset = igb_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { + phy->ops.reset = igb_phy_hw_reset; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; + } + + wr32(E1000_CTRL_EXT, ctrl_ext); + igb_reset_mdicnfg_82580(hw); + + if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { + phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; + phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; + } else { + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + phy->ops.read_reg = igb_read_phy_reg_82580; + phy->ops.write_reg = igb_write_phy_reg_82580; + break; + default: + phy->ops.read_reg = igb_read_phy_reg_igp; + phy->ops.write_reg = igb_write_phy_reg_igp; + } + } + + /* set lan id */ + hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> + E1000_STATUS_FUNC_SHIFT; + + /* Set phy->phy_addr and phy->id. */ + ret_val = igb_get_phy_id_82575(hw); + if (ret_val) + return ret_val; + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1111_I_PHY_ID: + phy->type = e1000_phy_m88; + phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_phy_info = igb_get_phy_info_m88; + if (phy->id != M88E1111_I_PHY_ID) + phy->ops.get_cable_length = + igb_get_cable_length_m88_gen2; + else + phy->ops.get_cable_length = igb_get_cable_length_m88; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; + /* Check if this PHY is configured for media swap. 
*/ + if (phy->id == M88E1112_E_PHY_ID) { + u16 data; + + ret_val = phy->ops.write_reg(hw, + E1000_M88E1112_PAGE_ADDR, + 2); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, + E1000_M88E1112_MAC_CTRL_1, + &data); + if (ret_val) + goto out; + + data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >> + E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT; + if (data == E1000_M88E1112_AUTO_COPPER_SGMII || + data == E1000_M88E1112_AUTO_COPPER_BASEX) + hw->mac.ops.check_for_link = + igb_check_for_link_media_swap; + } + if (phy->id == M88E1512_E_PHY_ID) { + ret_val = igb_initialize_M88E1512_phy(hw); + if (ret_val) + goto out; + } + if (phy->id == M88E1543_E_PHY_ID) { + ret_val = igb_initialize_M88E1543_phy(hw); + if (ret_val) + goto out; + } + break; + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->ops.get_phy_info = igb_get_phy_info_igp; + phy->ops.get_cable_length = igb_get_cable_length_igp_2; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; + break; + case I82580_I_PHY_ID: + case I350_I_PHY_ID: + phy->type = e1000_phy_82580; + phy->ops.force_speed_duplex = + igb_phy_force_speed_duplex_82580; + phy->ops.get_cable_length = igb_get_cable_length_82580; + phy->ops.get_phy_info = igb_get_phy_info_82580; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; + break; + case I210_I_PHY_ID: + phy->type = e1000_phy_i210; + phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_cfg_done = igb_get_cfg_done_i210; + phy->ops.get_phy_info = igb_get_phy_info_m88; + phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; + break; + case BCM54616_E_PHY_ID: + phy->type = e1000_phy_bcm54616; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_init_nvm_params_82575 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + u16 size; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->word_size = BIT(size); + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 
+ 16 : 8; + break; + } + if (nvm->word_size == BIT(15)) + nvm->page_size = 128; + + nvm->type = e1000_nvm_eeprom_spi; + + /* NVM Function Pointers */ + nvm->ops.acquire = igb_acquire_nvm_82575; + nvm->ops.release = igb_release_nvm_82575; + nvm->ops.write = igb_write_nvm_spi; + nvm->ops.validate = igb_validate_nvm_checksum; + nvm->ops.update = igb_update_nvm_checksum; + if (nvm->word_size < BIT(15)) + nvm->ops.read = igb_read_nvm_eerd; + else + nvm->ops.read = igb_read_nvm_spi; + + /* override generic family function pointers for specific descendants */ + switch (hw->mac.type) { + case e1000_82580: + nvm->ops.validate = igb_validate_nvm_checksum_82580; + nvm->ops.update = igb_update_nvm_checksum_82580; + break; + case e1000_i354: + case e1000_i350: + nvm->ops.validate = igb_validate_nvm_checksum_i350; + nvm->ops.update = igb_update_nvm_checksum_i350; + break; + default: + break; + } + + return 0; +} + +/** + * igb_init_mac_params_82575 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igb_init_mac_params_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set uta register count */ + mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128; + /* Set rar entry count */ + switch (mac->type) { + case e1000_82576: + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; + break; + case e1000_82580: + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + break; + case e1000_i350: + case e1000_i354: + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; + break; + default: + mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + break; + } + /* reset */ + if (mac->type >= e1000_82580) + mac->ops.reset_hw = igb_reset_hw_82580; + else + mac->ops.reset_hw = igb_reset_hw_82575; + + if (mac->type >= e1000_i210) { + mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210; + mac->ops.release_swfw_sync = igb_release_swfw_sync_i210; + + } else { + mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575; + mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; + } + + if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) + mac->ops.write_vfta = igb_write_vfta_i350; + else + mac->ops.write_vfta = igb_write_vfta; + + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = + (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) + ? true : false; + /* enable EEE on i350 parts and later parts */ + if (mac->type >= e1000_i350) + dev_spec->eee_disable = false; + else + dev_spec->eee_disable = true; + /* Allow a single clear of the SW semaphore on I210 and newer */ + if (mac->type >= e1000_i210) + dev_spec->clear_semaphore_once = true; + /* physical interface link setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? igb_setup_copper_link_82575 + : igb_setup_serdes_link_82575; + + if (mac->type == e1000_82580 || mac->type == e1000_i350) { + switch (hw->device_id) { + /* feature not supported on these id's */ + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + break; + default: + hw->dev_spec._82575.mas_capable = true; + break; + } + } + return 0; +} + +/** + * igb_set_sfp_media_type_82575 - derives SFP module media type. + * @hw: pointer to the HW structure + * + * The media type is chosen based on SFP module. 
+ * compatibility flags retrieved from SFP ID EEPROM. + **/ +static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_ERR_CONFIG; + u32 ctrl_ext = 0; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; + u8 tranceiver_type = 0; + s32 timeout = 3; + + /* Turn I2C interface ON and power on sfp cage */ + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); + + wrfl(); + + /* Read SFP module data */ + while (timeout) { + ret_val = igb_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), + &tranceiver_type); + if (ret_val == 0) + break; + msleep(100); + timeout--; + } + if (ret_val != 0) + goto out; + + ret_val = igb_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), + (u8 *)eth_flags); + if (ret_val != 0) + goto out; + + /* Check if there is some SFP module plugged and powered */ + if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || + (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { + dev_spec->module_plugged = true; + if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e1000_base_t) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_copper; + } else { + hw->phy.media_type = e1000_media_type_unknown; + hw_dbg("PHY module has not been recognized\n"); + goto out; + } + } else { + hw->phy.media_type = e1000_media_type_unknown; + } + ret_val = 0; +out: + /* Restore I2C interface setting */ + wr32(E1000_CTRL_EXT, ctrl_ext); + return ret_val; +} + +static s32 igb_get_invariants_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + s32 ret_val; + u32 ctrl_ext = 0; + u32 link_mode = 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82575EB_COPPER: + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82575GB_QUAD_COPPER: + mac->type = e1000_82575; + break; + case E1000_DEV_ID_82576: + case E1000_DEV_ID_82576_NS: + case E1000_DEV_ID_82576_NS_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + case E1000_DEV_ID_82576_SERDES_QUAD: + mac->type = e1000_82576; + break; + case E1000_DEV_ID_82580_COPPER: + case E1000_DEV_ID_82580_FIBER: + case E1000_DEV_ID_82580_QUAD_FIBER: + case E1000_DEV_ID_82580_SERDES: + case E1000_DEV_ID_82580_SGMII: + case E1000_DEV_ID_82580_COPPER_DUAL: + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + mac->type = e1000_82580; + break; + case E1000_DEV_ID_I350_COPPER: + case E1000_DEV_ID_I350_FIBER: + case E1000_DEV_ID_I350_SERDES: + case E1000_DEV_ID_I350_SGMII: + mac->type = e1000_i350; + break; + case E1000_DEV_ID_I210_COPPER: + case E1000_DEV_ID_I210_FIBER: + case E1000_DEV_ID_I210_SERDES: + case E1000_DEV_ID_I210_SGMII: + case E1000_DEV_ID_I210_COPPER_FLASHLESS: + case E1000_DEV_ID_I210_SERDES_FLASHLESS: + mac->type = e1000_i210; + break; + case E1000_DEV_ID_I211_COPPER: + mac->type = e1000_i211; + break; + case E1000_DEV_ID_I354_BACKPLANE_1GBPS: + case E1000_DEV_ID_I354_SGMII: + case 
E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: + mac->type = e1000_i354; + break; + default: + return -E1000_ERR_MAC_INIT; + } + + /* Set media type */ + /* The 82575 uses bits 22:23 for link mode. The mode can be changed + * based on the EEPROM. We cannot rely upon device ID. There + * is no distinguishable difference between fiber and internal + * SerDes mode on the 82575. There can be an external PHY attached + * on the SGMII interface. For this, we'll set sgmii_active to true. + */ + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = false; + dev_spec->module_plugged = false; + + ctrl_ext = rd32(E1000_CTRL_EXT); + + link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; + switch (link_mode) { + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* Get phy control interface type set (MDIO vs. I2C)*/ + if (igb_sgmii_uses_mdio_82575(hw)) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + break; + } + fallthrough; /* for I2C based SGMII */ + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + /* read media type from SFP EEPROM */ + ret_val = igb_set_sfp_media_type_82575(hw); + if ((ret_val != 0) || + (hw->phy.media_type == e1000_media_type_unknown)) { + /* If media type was not identified then return media + * type defined by the CTRL_EXT settings. + */ + hw->phy.media_type = e1000_media_type_internal_serdes; + + if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + } + + break; + } + + /* change current link mode setting */ + ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + + if (dev_spec->sgmii_active) + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; + else + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + + wr32(E1000_CTRL_EXT, ctrl_ext); + + break; + default: + break; + } + + /* mac initialization and operations */ + ret_val = igb_init_mac_params_82575(hw); + if (ret_val) + goto out; + + /* NVM initialization */ + ret_val = igb_init_nvm_params_82575(hw); + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + ret_val = igb_init_nvm_params_i210(hw); + break; + default: + break; + } + + if (ret_val) + goto out; + + /* if part supports SR-IOV then initialize mailbox parameters */ + switch (mac->type) { + case e1000_82576: + case e1000_i350: + igb_init_mbx_params_pf(hw); + break; + default: + break; + } + + /* setup PHY parameters */ + ret_val = igb_init_phy_params_82575(hw); + +out: + return ret_val; +} + +/** + * igb_acquire_phy_82575 - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + **/ +static s32 igb_acquire_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * igb_release_phy_82575 - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. 
+ **/ +static void igb_release_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the serial gigabit media independent + * interface and stores the retrieved information in data. + **/ +static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + hw_dbg("PHY Address %u is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_read_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the serial gigabit + * media independent interface. + **/ +static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + hw_dbg("PHY Address %d is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_write_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_get_phy_id_82575 - Retrieve PHY addr and id + * @hw: pointer to the HW structure + * + * Retrieves the PHY address and ID for both PHY's which do and do not use + * sgmi interface. + **/ +static s32 igb_get_phy_id_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + u32 ctrl_ext; + u32 mdic; + + /* Extra read required for some PHY's on i354 */ + if (hw->mac.type == e1000_i354) + igb_get_phy_id(hw); + + /* For SGMII PHYs, we try the list of possible addresses until + * we find one that works. For non-SGMII PHYs + * (e.g. integrated copper PHYs), an address of 1 should + * work. The result of this function should mean phy->phy_addr + * and phy->id are set correctly. + */ + if (!(igb_sgmii_active_82575(hw))) { + phy->addr = 1; + ret_val = igb_get_phy_id(hw); + goto out; + } + + if (igb_sgmii_uses_mdio_82575(hw)) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + mdic = rd32(E1000_MDIC); + mdic &= E1000_MDIC_PHY_MASK; + phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + mdic = rd32(E1000_MDICNFG); + mdic &= E1000_MDICNFG_PHY_MASK; + phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + ret_val = igb_get_phy_id(hw); + goto out; + } + + /* Power on sgmii phy if it is disabled */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); + wrfl(); + msleep(300); + + /* The address field in the I2CCMD register is 3 bits and 0 is invalid. 
+ * Therefore, we need to test 1-7 + */ + for (phy->addr = 1; phy->addr < 8; phy->addr++) { + ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); + if (ret_val == 0) { + hw_dbg("Vendor ID 0x%08X read at address %u\n", + phy_id, phy->addr); + /* At the time of this writing, The M88 part is + * the only supported SGMII PHY product. + */ + if (phy_id == M88_VENDOR) + break; + } else { + hw_dbg("PHY address %u was unreadable\n", phy->addr); + } + } + + /* A valid PHY type couldn't be found. */ + if (phy->addr == 8) { + phy->addr = 0; + ret_val = -E1000_ERR_PHY; + goto out; + } else { + ret_val = igb_get_phy_id(hw); + } + + /* restore previous sfp cage power state */ + wr32(E1000_CTRL_EXT, ctrl_ext); + +out: + return ret_val; +} + +/** + * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY using the serial gigabit media independent interface. + **/ +static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + /* This isn't a true "hard" reset, but is the only reset + * available to us at this time. + */ + + hw_dbg("Soft resetting SGMII attached PHY...\n"); + + /* SFP documentation requires the following to configure the SPF module + * to work on SGMII. No further documentation is given. + */ + ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) + goto out; + + if (phy->id == M88E1512_E_PHY_ID) + ret_val = igb_initialize_M88E1512_phy(hw); + if (phy->id == M88E1543_E_PHY_ID) + ret_val = igb_initialize_M88E1543_phy(hw); +out: + return ret_val; +} + +/** + * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + goto out; + } + } + +out: + return ret_val; +} + +/** + * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u16 data; + + data = rd32(E1000_82580_PHY_POWER_MGMT); + + if (active) { + data |= E1000_82580_PM_D0_LPLU; + + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } else { + data &= ~E1000_82580_PM_D0_LPLU; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; } + + wr32(E1000_82580_PHY_POWER_MGMT, data); + return 0; +} + +/** + * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u16 data; + + data = rd32(E1000_82580_PHY_POWER_MGMT); + + if (!active) { + data &= ~E1000_82580_PM_D3_LPLU; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_82580_PM_D3_LPLU; + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } + + wr32(E1000_82580_PHY_POWER_MGMT, data); + return 0; +} + +/** + * igb_acquire_nvm_82575 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 igb_acquire_nvm_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM); + if (ret_val) + goto out; + + ret_val = igb_acquire_nvm(hw); + + if (ret_val) + hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); + +out: + return ret_val; +} + +/** + * igb_release_nvm_82575 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void igb_release_nvm_82575(struct e1000_hw *hw) +{ + igb_release_nvm(hw); + hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = 0; + s32 i = 0, timeout = 200; + + while (i < timeout) { + if (igb_get_hw_semaphore(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + igb_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); + +out: + return ret_val; +} + +/** + * igb_release_swfw_sync_82575 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igb_get_hw_semaphore(hw) != 0) + ; /* Empty */ + + swfw_sync = rd32(E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +} + +/** + * igb_get_cfg_done_82575 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. 
NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * 0. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_NVM_CFG_DONE_PORT_2; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_NVM_CFG_DONE_PORT_3; + + while (timeout) { + if (rd32(E1000_EEMNGCTL) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) + hw_dbg("MNG configuration cycle has not completed.\n"); + + /* If EEPROM is not marked present, init the PHY manually */ + if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && + (hw->phy.type == e1000_phy_igp_3)) + igb_phy_init_script_igp3(hw); + + return 0; +} + +/** + * igb_get_link_up_info_82575 - Get link speed/duplex info + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * This is a wrapper function, if using the serial gigabit media independent + * interface, use PCS to retrieve the link speed and duplex information. + * Otherwise, use the generic function to get the link speed and duplex info. + **/ +static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) + ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed, + duplex); + else + ret_val = igb_get_speed_and_duplex_copper(hw, speed, + duplex); + + return ret_val; +} + +/** + * igb_check_for_link_82575 - Check for link + * @hw: pointer to the HW structure + * + * If sgmii is enabled, then use the pcs register to determine link, otherwise + * use the generic interface for determining link. + **/ +static s32 igb_check_for_link_82575(struct e1000_hw *hw) +{ + s32 ret_val; + u16 speed, duplex; + + if (hw->phy.media_type != e1000_media_type_copper) { + ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, + &duplex); + /* Use this flag to determine if link needs to be checked or + * not. If we have link clear the flag so that we do not + * continue to check for link. + */ + hw->mac.get_link_status = !hw->mac.serdes_has_link; + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = igb_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + } else { + ret_val = igb_check_for_copper_link(hw); + } + + return ret_val; +} + +/** + * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown + * @hw: pointer to the HW structure + **/ +void igb_power_up_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !igb_sgmii_active_82575(hw)) + return; + + /* Enable PCS to turn on link */ + reg = rd32(E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_PCS_EN; + wr32(E1000_PCS_CFG0, reg); + + /* Power up the laser */ + reg = rd32(E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + wrfl(); + usleep_range(1000, 2000); +} + +/** + * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Using the physical coding sub-layer (PCS), retrieve the current speed and + * duplex, then store the values in the pointers provided. + **/ +static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 pcs, status; + + /* Set up defaults for the return values of this function */ + mac->serdes_has_link = false; + *speed = 0; + *duplex = 0; + + /* Read the PCS Status register for link state. For non-copper mode, + * the status register is not accurate. The PCS status register is + * used instead. + */ + pcs = rd32(E1000_PCS_LSTAT); + + /* The link up bit determines when link is up on autoneg. The sync ok + * gets set once both sides sync up and agree upon link. Stable link + * can be determined by checking for both link up and link sync ok + */ + if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { + mac->serdes_has_link = true; + + /* Detect and store PCS speed */ + if (pcs & E1000_PCS_LSTS_SPEED_1000) + *speed = SPEED_1000; + else if (pcs & E1000_PCS_LSTS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + /* Detect and store PCS duplex */ + if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + /* Check if it is an I354 2.5Gb backplane connection. */ + if (mac->type == e1000_i354) { + status = rd32(E1000_STATUS); + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + *speed = SPEED_2500; + *duplex = FULL_DUPLEX; + hw_dbg("2500 Mbs, "); + hw_dbg("Full Duplex\n"); + } + } + + } + + return 0; +} + +/** + * igb_shutdown_serdes_link_82575 - Remove link during power down + * @hw: pointer to the HW structure + * + * In the case of fiber serdes, shut down optics and PCS on driver unload + * when management pass thru is not enabled. 
+ **/ +void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + if (hw->phy.media_type != e1000_media_type_internal_serdes && + igb_sgmii_active_82575(hw)) + return; + + if (!igb_enable_mng_pass_thru(hw)) { + /* Disable PCS to turn off link */ + reg = rd32(E1000_PCS_CFG0); + reg &= ~E1000_PCS_CFG_PCS_EN; + wr32(E1000_PCS_CFG0, reg); + + /* shutdown the laser */ + reg = rd32(E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + wrfl(); + usleep_range(1000, 2000); + } +} + +/** + * igb_reset_hw_82575 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + **/ +static s32 igb_reset_hw_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igb_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed.\n"); + + /* set the completion timeout for interface */ + ret_val = igb_set_pcie_completion_timeout(hw); + if (ret_val) + hw_dbg("PCI-E Set completion timeout has failed.\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(E1000_IMC, 0xffffffff); + + wr32(E1000_RCTL, 0); + wr32(E1000_TCTL, E1000_TCTL_PSP); + wrfl(); + + usleep_range(10000, 20000); + + ctrl = rd32(E1000_CTRL); + + hw_dbg("Issuing a global reset to MAC\n"); + wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); + + ret_val = igb_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* If EEPROM is not present, run manual init scripts */ + if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) + igb_reset_init_script_82575(hw); + + /* Clear any pending interrupt events. */ + wr32(E1000_IMC, 0xffffffff); + rd32(E1000_ICR); + + /* Install any alternate MAC address into RAR0 */ + ret_val = igb_check_alt_mac_addr(hw); + + return ret_val; +} + +/** + * igb_init_hw_82575 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +static s32 igb_init_hw_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + if ((hw->mac.type >= e1000_i210) && + !(igb_get_flash_presence_i210(hw))) { + ret_val = igb_pll_workaround_i210(hw); + if (ret_val) + return ret_val; + } + + /* Initialize identification LED */ + ret_val = igb_id_led_init(hw); + if (ret_val) { + hw_dbg("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + hw_dbg("Initializing the IEEE VLAN\n"); + igb_clear_vfta(hw); + + /* Setup the receive address */ + igb_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + array_wr32(E1000_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(E1000_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = igb_setup_link(hw); + + /* Clear all of the statistics registers (clear on read). 
It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + igb_clear_hw_cntrs_82575(hw); + return ret_val; +} + +/** + * igb_setup_copper_link_82575 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u32 phpm_reg; + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + wr32(E1000_CTRL, ctrl); + + /* Clear Go Link Disconnect bit on supported devices */ + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i210: + case e1000_i211: + phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT); + phpm_reg &= ~E1000_82580_PM_GO_LINKD; + wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg); + break; + default: + break; + } + + ret_val = igb_setup_serdes_link_82575(hw); + if (ret_val) + goto out; + + if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { + /* allow time for SFP cage time to power up phy */ + msleep(300); + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + } + switch (hw->phy.type) { + case e1000_phy_i210: + case e1000_phy_m88: + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + ret_val = igb_copper_link_setup_m88_gen2(hw); + break; + default: + ret_val = igb_copper_link_setup_m88(hw); + break; + } + break; + case e1000_phy_igp_3: + ret_val = igb_copper_link_setup_igp(hw); + break; + case e1000_phy_82580: + ret_val = igb_copper_link_setup_82580(hw); + break; + case e1000_phy_bcm54616: + ret_val = 0; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + goto out; + + ret_val = igb_setup_copper_link(hw); +out: + return ret_val; +} + +/** + * igb_setup_serdes_link_82575 - Setup link for serdes + * @hw: pointer to the HW structure + * + * Configure the physical coding sub-layer (PCS) link. The PCS link is + * used on copper connections where the serialized gigabit media independent + * interface (sgmii), or serdes fiber is being used. Configures the link + * for auto-negotiation or forces speed/duplex. + **/ +static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) +{ + u32 ctrl_ext, ctrl_reg, reg, anadv_reg; + bool pcs_autoneg; + s32 ret_val = 0; + u16 data; + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !igb_sgmii_active_82575(hw)) + return ret_val; + + + /* On the 82575, SerDes loopback mode persists until it is + * explicitly turned off or a power cycle is performed. A read to + * the register does not indicate its status. Therefore, we ensure + * loopback mode is disabled during initialization. 
+ */ + wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + + /* power on the sfp cage if present and turn on I2C */ + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + ctrl_ext |= E1000_CTRL_I2C_ENA; + wr32(E1000_CTRL_EXT, ctrl_ext); + + ctrl_reg = rd32(E1000_CTRL); + ctrl_reg |= E1000_CTRL_SLU; + + if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { + /* set both sw defined pins */ + ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; + + /* Set switch control to serdes energy detect */ + reg = rd32(E1000_CONNSW); + reg |= E1000_CONNSW_ENRGSRC; + wr32(E1000_CONNSW, reg); + } + + reg = rd32(E1000_PCS_LCTL); + + /* default pcs_autoneg to the same setting as mac autoneg */ + pcs_autoneg = hw->mac.autoneg; + + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* sgmii mode lets the phy handle forcing speed/duplex */ + pcs_autoneg = true; + /* autoneg time out should be disabled for SGMII mode */ + reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); + break; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + /* disable PCS autoneg and support parallel detect only */ + pcs_autoneg = false; + fallthrough; + default: + if (hw->mac.type == e1000_82575 || + hw->mac.type == e1000_82576) { + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); + if (ret_val) { + hw_dbg(KERN_DEBUG "NVM Read Error\n\n"); + return ret_val; + } + + if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) + pcs_autoneg = false; + } + + /* non-SGMII modes only supports a speed of 1000/Full for the + * link so it is best to just force the MAC and let the pcs + * link either autoneg or be forced to 1000/Full + */ + ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | + E1000_CTRL_FD | E1000_CTRL_FRCDPX; + + /* set speed of 1000/Full if speed/duplex is forced */ + reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; + break; + } + + wr32(E1000_CTRL, ctrl_reg); + + /* New SerDes mode allows for forcing speed or autonegotiating speed + * at 1gb. Autoneg should be default set by most drivers. This is the + * mode that will be compatible with older link partners and switches. + * However, both are supported by the hardware and some drivers/tools. 
+ */ + reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | + E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); + + if (pcs_autoneg) { + /* Set PCS register for autoneg */ + reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ + E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ + + /* Disable force flow control for autoneg */ + reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; + + /* Configure flow control advertisement for autoneg */ + anadv_reg = rd32(E1000_PCS_ANADV); + anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); + switch (hw->fc.requested_mode) { + case e1000_fc_full: + case e1000_fc_rx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + anadv_reg |= E1000_TXCW_PAUSE; + break; + case e1000_fc_tx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + break; + default: + break; + } + wr32(E1000_PCS_ANADV, anadv_reg); + + hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); + } else { + /* Set PCS register for forced link */ + reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + + /* Force flow control for forced link */ + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + + hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); + } + + wr32(E1000_PCS_LCTL, reg); + + if (!pcs_autoneg && !igb_sgmii_active_82575(hw)) + igb_force_mac_fc(hw); + + return ret_val; +} + +/** + * igb_sgmii_active_82575 - Return sgmii state + * @hw: pointer to the HW structure + * + * 82575 silicon has a serialized gigabit media independent interface (sgmii) + * which can be enabled for use in the embedded applications. Simply + * return the current state of the sgmii interface. + **/ +static bool igb_sgmii_active_82575(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + return dev_spec->sgmii_active; +} + +/** + * igb_reset_init_script_82575 - Inits HW defaults after reset + * @hw: pointer to the HW structure + * + * Inits recommended HW defaults after a reset when there is no EEPROM + * detected. This is only for the 82575. + **/ +static s32 igb_reset_init_script_82575(struct e1000_hw *hw) +{ + if (hw->mac.type == e1000_82575) { + hw_dbg("Running reset init script for 82575\n"); + /* SerDes configuration via SERDESCTRL */ + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15); + + /* CCM configuration via CCMCTL register */ + igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00); + igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00); + + /* PCIe lanes configuration */ + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81); + + /* PCIe PLL Configuration */ + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47); + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00); + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00); + } + + return 0; +} + +/** + * igb_read_mac_addr_82575 - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. 
+ */ + ret_val = igb_check_alt_mac_addr(hw); + if (ret_val) + goto out; + + ret_val = igb_read_mac_addr(hw); + +out: + return ret_val; +} + +/** + * igb_power_down_phy_copper_82575 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +void igb_power_down_phy_copper_82575(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw))) + igb_power_down_phy_copper(hw); +} + +/** + * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) +{ + igb_clear_hw_cntrs_base(hw); + + rd32(E1000_PRC64); + rd32(E1000_PRC127); + rd32(E1000_PRC255); + rd32(E1000_PRC511); + rd32(E1000_PRC1023); + rd32(E1000_PRC1522); + rd32(E1000_PTC64); + rd32(E1000_PTC127); + rd32(E1000_PTC255); + rd32(E1000_PTC511); + rd32(E1000_PTC1023); + rd32(E1000_PTC1522); + + rd32(E1000_ALGNERRC); + rd32(E1000_RXERRC); + rd32(E1000_TNCRS); + rd32(E1000_CEXTERR); + rd32(E1000_TSCTC); + rd32(E1000_TSCTFC); + + rd32(E1000_MGTPRC); + rd32(E1000_MGTPDC); + rd32(E1000_MGTPTC); + + rd32(E1000_IAC); + rd32(E1000_ICRXOC); + + rd32(E1000_ICRXPTC); + rd32(E1000_ICRXATC); + rd32(E1000_ICTXPTC); + rd32(E1000_ICTXATC); + rd32(E1000_ICTXQEC); + rd32(E1000_ICTXQMTC); + rd32(E1000_ICRXDMTC); + + rd32(E1000_CBTMPC); + rd32(E1000_HTDPMC); + rd32(E1000_CBRMPC); + rd32(E1000_RPTHC); + rd32(E1000_HGPTC); + rd32(E1000_HTCBDPC); + rd32(E1000_HGORCL); + rd32(E1000_HGORCH); + rd32(E1000_HGOTCL); + rd32(E1000_HGOTCH); + rd32(E1000_LENERRS); + + /* This register should not be read in copper configurations */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + igb_sgmii_active_82575(hw)) + rd32(E1000_SCVPC); +} + +/** + * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable + * @hw: pointer to the HW structure + * + * After rx enable if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + **/ +void igb_rx_fifo_flush_82575(struct e1000_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + /* disable IPv6 options as per hardware errata */ + rfctl = rd32(E1000_RFCTL); + rfctl |= E1000_RFCTL_IPV6_EX_DIS; + wr32(E1000_RFCTL, rfctl); + + if (hw->mac.type != e1000_82575 || + !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) + return; + + /* Disable all RX queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = rd32(E1000_RXDCTL(i)); + wr32(E1000_RXDCTL(i), + rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + usleep_range(1000, 2000); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= rd32(E1000_RXDCTL(i)); + if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + hw_dbg("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); + + rlpml = rd32(E1000_RLPML); + wr32(E1000_RLPML, 0); + + rctl = rd32(E1000_RCTL); + temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); + temp_rctl |= E1000_RCTL_LPE; + + wr32(E1000_RCTL, temp_rctl); + wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); + wrfl(); + usleep_range(2000, 3000); + + /* Enable RX queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + wr32(E1000_RXDCTL(i), rxdctl[i]); + wr32(E1000_RCTL, rctl); + wrfl(); + + wr32(E1000_RLPML, rlpml); + wr32(E1000_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + rd32(E1000_ROC); + rd32(E1000_RNBC); + rd32(E1000_MPC); +} + +/** + * igb_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 200ms for capability version 1 config, + * or 16ms to 55ms for version 2. + **/ +static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) +{ + u32 gcr = rd32(E1000_GCR); + s32 ret_val = 0; + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & E1000_GCR_CMPL_TMOUT_MASK) + goto out; + + /* if capabilities version is type 1 we can write the + * timeout of 10ms to 200ms through the GCR register + */ + if (!(gcr & E1000_GCR_CAP_VER2)) { + gcr |= E1000_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); + if (ret_val) + goto out; + + pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; + + ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; + + wr32(E1000_GCR, gcr); + return ret_val; +} + +/** + * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * @pf: Physical Function pool - do not set anti-spoofing for the PF + * + * enables/disables L2 switch anti-spoofing functionality. + **/ +void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) +{ + u32 reg_val, reg_offset; + + switch (hw->mac.type) { + case e1000_82576: + reg_offset = E1000_DTXSWC; + break; + case e1000_i350: + case e1000_i354: + reg_offset = E1000_TXSWC; + break; + default: + return; + } + + reg_val = rd32(reg_offset); + if (enable) { + reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs + */ + reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS)); + } else { + reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } + wr32(reg_offset, reg_val); +} + +/** + * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables L2 switch loopback functionality. 
+ **/ +void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) +{ + u32 dtxswc; + + switch (hw->mac.type) { + case e1000_82576: + dtxswc = rd32(E1000_DTXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + wr32(E1000_DTXSWC, dtxswc); + break; + case e1000_i354: + case e1000_i350: + dtxswc = rd32(E1000_TXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + wr32(E1000_TXSWC, dtxswc); + break; + default: + /* Currently no other hardware supports loopback */ + break; + } + +} + +/** + * igb_vmdq_set_replication_pf - enable or disable vmdq replication + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables replication of packets across multiple pools. + **/ +void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) +{ + u32 vt_ctl = rd32(E1000_VT_CTL); + + if (enable) + vt_ctl |= E1000_VT_CTL_VM_REPL_EN; + else + vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; + + wr32(E1000_VT_CTL, vt_ctl); +} + +/** + * igb_read_phy_reg_82580 - Read 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_read_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_82580 - Write 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_write_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits + * @hw: pointer to the HW structure + * + * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on + * the values found in the EEPROM. This addresses an issue in which these + * bits are not restored from EEPROM after reset. + **/ +static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 mdicnfg; + u16 nvm_data = 0; + + if (hw->mac.type != e1000_82580) + goto out; + if (!igb_sgmii_active_82575(hw)) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + mdicnfg = rd32(E1000_MDICNFG); + if (nvm_data & NVM_WORD24_EXT_MDIO) + mdicnfg |= E1000_MDICNFG_EXT_MDIO; + if (nvm_data & NVM_WORD24_COM_MDIO) + mdicnfg |= E1000_MDICNFG_COM_MDIO; + wr32(E1000_MDICNFG, mdicnfg); +out: + return ret_val; +} + +/** + * igb_reset_hw_82580 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets function or entire device (all ports, etc.) + * to a known state. 
+ **/ +static s32 igb_reset_hw_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + /* BH SW mailbox bit in SW_FW_SYNC */ + u16 swmbsw_mask = E1000_SW_SYNCH_MB; + u32 ctrl; + bool global_device_reset = hw->dev_spec._82575.global_device_reset; + + hw->dev_spec._82575.global_device_reset = false; + + /* due to hw errata, global device reset doesn't always + * work on 82580 + */ + if (hw->mac.type == e1000_82580) + global_device_reset = false; + + /* Get current control state. */ + ctrl = rd32(E1000_CTRL); + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igb_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed.\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(E1000_IMC, 0xffffffff); + wr32(E1000_RCTL, 0); + wr32(E1000_TCTL, E1000_TCTL_PSP); + wrfl(); + + usleep_range(10000, 11000); + + /* Determine whether or not a global dev reset is requested */ + if (global_device_reset && + hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask)) + global_device_reset = false; + + if (global_device_reset && + !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET)) + ctrl |= E1000_CTRL_DEV_RST; + else + ctrl |= E1000_CTRL_RST; + + wr32(E1000_CTRL, ctrl); + wrfl(); + + /* Add delay to insure DEV_RST has time to complete */ + if (global_device_reset) + usleep_range(5000, 6000); + + ret_val = igb_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* clear global device reset status bit */ + wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); + + /* Clear any pending interrupt events. */ + wr32(E1000_IMC, 0xffffffff); + rd32(E1000_ICR); + + ret_val = igb_reset_mdicnfg_82580(hw); + if (ret_val) + hw_dbg("Could not reset MDICNFG based on EEPROM\n"); + + /* Install any alternate MAC address into RAR0 */ + ret_val = igb_check_alt_mac_addr(hw); + + /* Release semaphore */ + if (global_device_reset) + hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); + + return ret_val; +} + +/** + * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size + * @data: data received by reading RXPBS register + * + * The 82580 uses a table based approach for packet buffer allocation sizes. + * This function converts the retrieved value into the correct table value + * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 + * 0x0 36 72 144 1 2 4 8 16 + * 0x8 35 70 140 rsv rsv rsv rsv rsv + */ +u16 igb_rxpbs_adjust_82580(u32 data) +{ + u16 ret_val = 0; + + if (data < ARRAY_SIZE(e1000_82580_rxpbs_table)) + ret_val = e1000_82580_rxpbs_table[data]; + + return ret_val; +} + +/** + * igb_validate_nvm_checksum_with_offset - Validate EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
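+ * The checksum word stored at NVM_CHECKSUM_REG is included in the sum, so
+ * a correctly programmed image makes the protected words add up (modulo
+ * 2^16) to exactly 0xBABA (NVM_SUM).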
+ **/ +static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_with_offset - Update EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, + &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 eeprom_regions_count = 1; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { + /* if checksums compatibility bit is set validate checksums + * for all 4 ports. + */ + eeprom_regions_count = 4; + } + + for (j = 0; j < eeprom_regions_count; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_82580 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. 
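+ * The checksum compatibility bit in NVM_COMPATIBILITY_REG_3 is set first
+ * (if not already set), then the checksum of each per-port region
+ * (starting at NVM_82580_LAN_FUNC_OFFSET(port)) is recalculated and
+ * written back.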
+ **/ +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum compatibility bit.\n"); + goto out; + } + + if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { + /* set compatibility bit to validate checksums appropriately */ + nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; + ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, + &nvm_data); + if (ret_val) { + hw_dbg("NVM Write Error while updating checksum compatibility bit.\n"); + goto out; + } + } + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_i350 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. + **/ +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * __igb_access_emi_reg - Read/write EMI register + * @hw: pointer to the HW structure + * @address: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + **/ +static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val = 0; + + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); + + return ret_val; +} + +/** + * igb_read_emi_reg - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + **/ +s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) +{ + return __igb_access_emi_reg(hw, addr, data, true); +} + +/** + * igb_set_eee_i350 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. 
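+ * The controlling flag is dev_spec._82575.eee_disable; the adv1G and
+ * adv100M arguments select which speeds are advertised as EEE-capable.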
+ * + **/ +s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + u32 ipcnfg, eeer; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + goto out; + ipcnfg = rd32(E1000_IPCNFG); + eeer = rd32(E1000_EEER); + + /* enable or disable per user setting */ + if (!(hw->dev_spec._82575.eee_disable)) { + u32 eee_su = rd32(E1000_EEE_SU); + + if (adv100M) + ipcnfg |= E1000_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= E1000_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN; + + eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & E1000_EEE_SU_LPI_CLK_STP) + hw_dbg("LPI Clock Stop Bit should not be set!\n"); + + } else { + ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | + E1000_IPCNFG_EEE_100M_AN); + eeer &= ~(E1000_EEER_TX_LPI_EN | + E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + } + wr32(E1000_IPCNFG, ipcnfg); + wr32(E1000_EEER, eeer); + rd32(E1000_IPCNFG); + rd32(E1000_EEER); +out: + + return 0; +} + +/** + * igb_set_eee_i354 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE legacy mode based on setting in dev_spec structure. + * + **/ +s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data; + + if ((hw->phy.media_type != e1000_media_type_copper) || + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) + goto out; + + if (!hw->dev_spec._82575.eee_disable) { + /* Switch to PHY page 18. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, + &phy_data); + if (ret_val) + goto out; + + phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, + phy_data); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + /* Turn on EEE advertisement. */ + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + if (adv100M) + phy_data |= E1000_EEE_ADV_100_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_100_SUPPORTED; + + if (adv1G) + phy_data |= E1000_EEE_ADV_1000_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED; + + ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } else { + /* Turn off EEE advertisement. */ + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | + E1000_EEE_ADV_1000_SUPPORTED); + ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } + +out: + return ret_val; +} + +/** + * igb_get_eee_status_i354 - Get EEE status + * @hw: pointer to the HW structure + * @status: EEE status + * + * Get EEE status by guessing based on whether Tx or Rx LPI indications have + * been received. + **/ +s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data; + + /* Check if EEE is supported on this device. 
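+ * Only copper links driven by a Marvell 88E1543 or 88E1512 PHY are
+ * handled; for anything else the function returns without touching *status.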
*/ + if ((hw->phy.media_type != e1000_media_type_copper) || + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) + goto out; + + ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, + E1000_PCS_STATUS_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | + E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; + +out: + return ret_val; +} + +#ifdef CONFIG_IGB_HWMON +static const u8 e1000_emc_temp_data[4] = { + E1000_EMC_INTERNAL_DATA, + E1000_EMC_DIODE1_DATA, + E1000_EMC_DIODE2_DATA, + E1000_EMC_DIODE3_DATA +}; +static const u8 e1000_emc_therm_limit[4] = { + E1000_EMC_INTERNAL_THERM_LIMIT, + E1000_EMC_DIODE1_THERM_LIMIT, + E1000_EMC_DIODE2_THERM_LIMIT, + E1000_EMC_DIODE3_THERM_LIMIT +}; + +/** + * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the temperatures in mac.thermal_sensor_data + **/ +static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) +{ + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return 0; + + hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; + + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + if (num_sensors > E1000_MAX_SENSORS) + num_sensors = E1000_MAX_SENSORS; + + for (i = 1; i < num_sensors; i++) { + hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + + if (sensor_location != 0) + hw->phy.ops.read_i2c_byte(hw, + e1000_emc_temp_data[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + &data->sensor[i].temp); + } + return 0; +} + +/** + * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Sets the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) +{ + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 low_thresh_delta; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 therm_limit; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + memset(data, 0, sizeof(struct e1000_thermal_sensor_data)); + + data->sensor[0].location = 0x1; + data->sensor[0].caution_thresh = + (rd32(E1000_THHIGHTC) & 0xFF); + data->sensor[0].max_op_thresh = + (rd32(E1000_THLOWTC) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return 0; + + hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; 
+ + low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >> + NVM_ETS_LTHRES_DELTA_SHIFT); + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + + for (i = 1; i <= num_sensors; i++) { + hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK; + + hw->phy.ops.write_i2c_byte(hw, + e1000_emc_therm_limit[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + therm_limit); + + if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) { + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - + low_thresh_delta; + } + } + return 0; +} + +#endif +static struct e1000_mac_operations e1000_mac_ops_82575 = { + .init_hw = igb_init_hw_82575, + .check_for_link = igb_check_for_link_82575, + .rar_set = igb_rar_set, + .read_mac_addr = igb_read_mac_addr_82575, + .get_speed_and_duplex = igb_get_link_up_info_82575, +#ifdef CONFIG_IGB_HWMON + .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic, + .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic, +#endif +}; + +static const struct e1000_phy_operations e1000_phy_ops_82575 = { + .acquire = igb_acquire_phy_82575, + .get_cfg_done = igb_get_cfg_done_82575, + .release = igb_release_phy_82575, + .write_i2c_byte = igb_write_i2c_byte, + .read_i2c_byte = igb_read_i2c_byte, +}; + +static struct e1000_nvm_operations e1000_nvm_ops_82575 = { + .acquire = igb_acquire_nvm_82575, + .read = igb_read_nvm_eerd, + .release = igb_release_nvm_82575, + .write = igb_write_nvm_spi, +}; + +const struct e1000_info e1000_82575_info = { + .get_invariants = igb_get_invariants_82575, + .mac_ops = &e1000_mac_ops_82575, + .phy_ops = &e1000_phy_ops_82575, + .nvm_ops = &e1000_nvm_ops_82575, +}; + diff --git a/devices/igb/e1000_82575-5.14-orig.h b/devices/igb/e1000_82575-5.14-orig.h new file mode 100644 index 00000000..63ec253a --- /dev/null +++ b/devices/igb/e1000_82575-5.14-orig.h @@ -0,0 +1,265 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_82575_H_ +#define _E1000_82575_H_ + +void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); +void igb_power_up_serdes_link_82575(struct e1000_hw *hw); +void igb_power_down_phy_copper_82575(struct e1000_hw *hw); +void igb_rx_fifo_flush_82575(struct e1000_hw *hw); +s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data); +s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data); + +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +#define E1000_RAR_ENTRIES_82575 16 +#define E1000_RAR_ENTRIES_82576 24 +#define E1000_RAR_ENTRIES_82580 24 +#define E1000_RAR_ENTRIES_I350 32 + +#define E1000_SW_SYNCH_MB 0x00000100 +#define E1000_STAT_DEV_RST_SET 0x00100000 +#define E1000_CTRL_DEV_RST 0x20000000 + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 +#define E1000_SRRCTL_TIMESTAMP 0x40000000 + + +#define E1000_MRQC_ENABLE_RSS_MQ 0x00000002 +#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_ENABLE_VMDQ_RSS_MQ 0x00000005 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 + +#define E1000_EICR_TX_QUEUE ( \ + E1000_EICR_TX_QUEUE0 | \ + E1000_EICR_TX_QUEUE1 | \ + E1000_EICR_TX_QUEUE2 | \ + E1000_EICR_TX_QUEUE3) + +#define E1000_EICR_RX_QUEUE ( \ + E1000_EICR_RX_QUEUE0 | \ + E1000_EICR_RX_QUEUE1 | \ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) + +/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + __le16 pkt_info; /* RSS type, Packet type */ + __le16 hdr_info; /* Split Head, buf len */ + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ +/* IPSec Encrypt Enable for ESP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +/* Adv ctxt IPSec ESP len mask */ + +/* Additional Transmit Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ +/* Tx Queue Arbitration Priority 0=low, 1=high */ + +/* Additional Receive Descriptor Control definitions */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */ +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */ +#define 
E1000_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */ +#define E1000_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */ + +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */ +#define E1000_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */ + +/* Additional DCA related definitions, note change in position of CPUID */ +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ +#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ + +/* ETQF register bit definitions */ +#define E1000_ETQF_FILTER_ENABLE BIT(26) +#define E1000_ETQF_1588 BIT(30) +#define E1000_ETQF_IMM_INT BIT(29) +#define E1000_ETQF_QUEUE_ENABLE BIT(31) +#define E1000_ETQF_QUEUE_SHIFT 16 +#define E1000_ETQF_QUEUE_MASK 0x00070000 +#define E1000_ETQF_ETYPE_MASK 0x0000FFFF + +/* FTQF register bit definitions */ +#define E1000_FTQF_VF_BP 0x00008000 +#define E1000_FTQF_1588_TIME_STAMP 0x08000000 +#define E1000_FTQF_MASK 0xF0000000 +#define E1000_FTQF_MASK_PROTO_BP 0x10000000 +#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 + +#define E1000_NVM_APME_82575 0x0400 +#define MAX_NUM_VFS 8 + +#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */ +#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ +#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN BIT(31) /* global VF LB enable */ + +/* Easy defines for setting default pool, would normally be left a zero */ +#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 +#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) + +/* Other useful VMD_CTL register defines */ +#define E1000_VT_CTL_IGNORE_MAC BIT(28) +#define E1000_VT_CTL_DISABLE_DEF_POOL BIT(29) +#define E1000_VT_CTL_VM_REPL_EN BIT(30) + +/* Per VM Offload register setup */ +#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ +#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ +#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ +#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ +#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ +#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ +#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ +#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */ +#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_VLVF_ARRAY_SIZE 32 +#define E1000_VLVF_VLANID_MASK 0x00000FFF +#define E1000_VLVF_POOLSEL_SHIFT 12 +#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) +#define E1000_VLVF_LVLAN 0x00100000 +#define E1000_VLVF_VLANID_ENABLE 0x80000000 + +#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* 
Never insert VLAN tag */ + +#define E1000_IOVCTL 0x05BBC +#define E1000_IOVCTL_REUSE_VFQ 0x00000001 + +#define E1000_RPLOLR_STRVLAN 0x40000000 +#define E1000_RPLOLR_STRCRC 0x80000000 + +#define E1000_DTXCTL_8023LL 0x0004 +#define E1000_DTXCTL_VLAN_ADDED 0x0008 +#define E1000_DTXCTL_OOS_ENABLE 0x0010 +#define E1000_DTXCTL_MDP_EN 0x0020 +#define E1000_DTXCTL_SPOOF_INT 0x0040 + +#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT BIT(14) + +#define ALL_QUEUES 0xFFFF + +/* RX packet buffer size defines */ +#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F +void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int); +void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); +void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); +u16 igb_rxpbs_adjust_82580(u32 data); +s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data); +s32 igb_set_eee_i350(struct e1000_hw *, bool adv1G, bool adv100M); +s32 igb_set_eee_i354(struct e1000_hw *, bool adv1G, bool adv100M); +s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status); + +#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define E1000_EMC_INTERNAL_DATA 0x00 +#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20 +#define E1000_EMC_DIODE1_DATA 0x01 +#define E1000_EMC_DIODE1_THERM_LIMIT 0x19 +#define E1000_EMC_DIODE2_DATA 0x23 +#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A +#define E1000_EMC_DIODE3_DATA 0x2A +#define E1000_EMC_DIODE3_THERM_LIMIT 0x30 +#endif diff --git a/devices/igb/e1000_82575-6.1-ethercat.c b/devices/igb/e1000_82575-6.1-ethercat.c new file mode 100644 index 00000000..9628c7a0 --- /dev/null +++ b/devices/igb/e1000_82575-6.1-ethercat.c @@ -0,0 +1,2926 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +/* e1000_82575 + * e1000_82576 + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include + +#include "e1000_mac-6.1-ethercat.h" +#include "e1000_82575-6.1-ethercat.h" +#include "e1000_i210-6.1-ethercat.h" +#include "igb-6.1-ethercat.h" + +static s32 igb_get_invariants_82575(struct e1000_hw *); +static s32 igb_acquire_phy_82575(struct e1000_hw *); +static void igb_release_phy_82575(struct e1000_hw *); +static s32 igb_acquire_nvm_82575(struct e1000_hw *); +static void igb_release_nvm_82575(struct e1000_hw *); +static s32 igb_check_for_link_82575(struct e1000_hw *); +static s32 igb_get_cfg_done_82575(struct e1000_hw *); +static s32 igb_init_hw_82575(struct e1000_hw *); +static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); +static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); +static s32 igb_reset_hw_82575(struct e1000_hw *); +static s32 igb_reset_hw_82580(struct e1000_hw *); +static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); +static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool); +static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool); +static s32 igb_setup_copper_link_82575(struct e1000_hw *); +static s32 igb_setup_serdes_link_82575(struct e1000_hw *); +static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); +static void igb_clear_hw_cntrs_82575(struct e1000_hw *); +static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16); +static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *, + u16 *); +static s32 igb_get_phy_id_82575(struct e1000_hw *); +static void igb_release_swfw_sync_82575(struct e1000_hw *, u16); +static bool igb_sgmii_active_82575(struct e1000_hw *); +static s32 igb_reset_init_script_82575(struct e1000_hw *); +static s32 igb_read_mac_addr_82575(struct e1000_hw *); +static s32 
igb_set_pcie_completion_timeout(struct e1000_hw *hw); +static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); +static const u16 e1000_82580_rxpbs_table[] = { + 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; + +/* Due to a hw errata, if the host tries to configure the VFTA register + * while performing queries from the BMC or DMA, then the VFTA in some + * cases won't be written. + */ + +/** + * igb_write_vfta_i350 - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +{ + struct igb_adapter *adapter = hw->back; + int i; + + for (i = 10; i--;) + array_wr32(E1000_VFTA, offset, value); + + wrfl(); + adapter->shadow_vfta[offset] = value; +} + +/** + * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO + * @hw: pointer to the HW structure + * + * Called to determine if the I2C pins are being used for I2C or as an + * external MDIO interface since the two options are mutually exclusive. + **/ +static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) +{ + u32 reg = 0; + bool ext_mdio = false; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + reg = rd32(E1000_MDIC); + ext_mdio = !!(reg & E1000_MDIC_DEST); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + reg = rd32(E1000_MDICNFG); + ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); + break; + default: + break; + } + return ext_mdio; +} + +/** + * igb_check_for_link_media_swap - Check which M88E1112 interface linked + * @hw: pointer to the HW structure + * + * Poll the M88E1112 interfaces to see which interface achieved link. + */ +static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + u8 port = 0; + + /* Check the copper medium. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_COPPER; + + /* Check the other medium. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_OTHER; + + /* Determine if a swap needs to happen. 
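+ * A swap is needed when the interface that currently reports link differs
+ * from the media port recorded in dev_spec._82575.media_port.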
*/ + if (port && (hw->dev_spec._82575.media_port != port)) { + hw->dev_spec._82575.media_port = port; + hw->dev_spec._82575.media_changed = true; + } + + if (port == E1000_MEDIA_PORT_COPPER) { + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + igb_check_for_link_82575(hw); + } else { + igb_check_for_link_82575(hw); + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + } + + return 0; +} + +/** + * igb_init_phy_params_82575 - Init PHY func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igb_init_phy_params_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u32 ctrl_ext; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + ctrl_ext = rd32(E1000_CTRL_EXT); + + if (igb_sgmii_active_82575(hw)) { + phy->ops.reset = igb_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { + phy->ops.reset = igb_phy_hw_reset; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; + } + + wr32(E1000_CTRL_EXT, ctrl_ext); + igb_reset_mdicnfg_82580(hw); + + if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { + phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; + phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; + } else { + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + phy->ops.read_reg = igb_read_phy_reg_82580; + phy->ops.write_reg = igb_write_phy_reg_82580; + break; + default: + phy->ops.read_reg = igb_read_phy_reg_igp; + phy->ops.write_reg = igb_write_phy_reg_igp; + } + } + + /* set lan id */ + hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> + E1000_STATUS_FUNC_SHIFT; + + /* Set phy->phy_addr and phy->id. */ + ret_val = igb_get_phy_id_82575(hw); + if (ret_val) + return ret_val; + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1111_I_PHY_ID: + phy->type = e1000_phy_m88; + phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_phy_info = igb_get_phy_info_m88; + if (phy->id != M88E1111_I_PHY_ID) + phy->ops.get_cable_length = + igb_get_cable_length_m88_gen2; + else + phy->ops.get_cable_length = igb_get_cable_length_m88; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; + /* Check if this PHY is configured for media swap. 
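+ * An M88E1112 whose MAC control register selects automatic copper/SGMII or
+ * copper/BASE-X operation gets igb_check_for_link_media_swap() as its
+ * link-check routine.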
*/ + if (phy->id == M88E1112_E_PHY_ID) { + u16 data; + + ret_val = phy->ops.write_reg(hw, + E1000_M88E1112_PAGE_ADDR, + 2); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, + E1000_M88E1112_MAC_CTRL_1, + &data); + if (ret_val) + goto out; + + data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >> + E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT; + if (data == E1000_M88E1112_AUTO_COPPER_SGMII || + data == E1000_M88E1112_AUTO_COPPER_BASEX) + hw->mac.ops.check_for_link = + igb_check_for_link_media_swap; + } + if (phy->id == M88E1512_E_PHY_ID) { + ret_val = igb_initialize_M88E1512_phy(hw); + if (ret_val) + goto out; + } + if (phy->id == M88E1543_E_PHY_ID) { + ret_val = igb_initialize_M88E1543_phy(hw); + if (ret_val) + goto out; + } + break; + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->ops.get_phy_info = igb_get_phy_info_igp; + phy->ops.get_cable_length = igb_get_cable_length_igp_2; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; + break; + case I82580_I_PHY_ID: + case I350_I_PHY_ID: + phy->type = e1000_phy_82580; + phy->ops.force_speed_duplex = + igb_phy_force_speed_duplex_82580; + phy->ops.get_cable_length = igb_get_cable_length_82580; + phy->ops.get_phy_info = igb_get_phy_info_82580; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; + break; + case I210_I_PHY_ID: + phy->type = e1000_phy_i210; + phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_cfg_done = igb_get_cfg_done_i210; + phy->ops.get_phy_info = igb_get_phy_info_m88; + phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; + break; + case BCM54616_E_PHY_ID: + phy->type = e1000_phy_bcm54616; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_init_nvm_params_82575 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + u16 size; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->word_size = BIT(size); + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 
+ 16 : 8; + break; + } + if (nvm->word_size == BIT(15)) + nvm->page_size = 128; + + nvm->type = e1000_nvm_eeprom_spi; + + /* NVM Function Pointers */ + nvm->ops.acquire = igb_acquire_nvm_82575; + nvm->ops.release = igb_release_nvm_82575; + nvm->ops.write = igb_write_nvm_spi; + nvm->ops.validate = igb_validate_nvm_checksum; + nvm->ops.update = igb_update_nvm_checksum; + if (nvm->word_size < BIT(15)) + nvm->ops.read = igb_read_nvm_eerd; + else + nvm->ops.read = igb_read_nvm_spi; + + /* override generic family function pointers for specific descendants */ + switch (hw->mac.type) { + case e1000_82580: + nvm->ops.validate = igb_validate_nvm_checksum_82580; + nvm->ops.update = igb_update_nvm_checksum_82580; + break; + case e1000_i354: + case e1000_i350: + nvm->ops.validate = igb_validate_nvm_checksum_i350; + nvm->ops.update = igb_update_nvm_checksum_i350; + break; + default: + break; + } + + return 0; +} + +/** + * igb_init_mac_params_82575 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igb_init_mac_params_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set uta register count */ + mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128; + /* Set rar entry count */ + switch (mac->type) { + case e1000_82576: + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; + break; + case e1000_82580: + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + break; + case e1000_i350: + case e1000_i354: + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; + break; + default: + mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + break; + } + /* reset */ + if (mac->type >= e1000_82580) + mac->ops.reset_hw = igb_reset_hw_82580; + else + mac->ops.reset_hw = igb_reset_hw_82575; + + if (mac->type >= e1000_i210) { + mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210; + mac->ops.release_swfw_sync = igb_release_swfw_sync_i210; + + } else { + mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575; + mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; + } + + if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) + mac->ops.write_vfta = igb_write_vfta_i350; + else + mac->ops.write_vfta = igb_write_vfta; + + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = + (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) + ? true : false; + /* enable EEE on i350 parts and later parts */ + if (mac->type >= e1000_i350) + dev_spec->eee_disable = false; + else + dev_spec->eee_disable = true; + /* Allow a single clear of the SW semaphore on I210 and newer */ + if (mac->type >= e1000_i210) + dev_spec->clear_semaphore_once = true; + /* physical interface link setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? igb_setup_copper_link_82575 + : igb_setup_serdes_link_82575; + + if (mac->type == e1000_82580 || mac->type == e1000_i350) { + switch (hw->device_id) { + /* feature not supported on these id's */ + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + break; + default: + hw->dev_spec._82575.mas_capable = true; + break; + } + } + return 0; +} + +/** + * igb_set_sfp_media_type_82575 - derives SFP module media type. + * @hw: pointer to the HW structure + * + * The media type is chosen based on SFP module. 
+ * compatibility flags retrieved from SFP ID EEPROM. + **/ +static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_ERR_CONFIG; + u32 ctrl_ext = 0; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; + u8 tranceiver_type = 0; + s32 timeout = 3; + + /* Turn I2C interface ON and power on sfp cage */ + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); + + wrfl(); + + /* Read SFP module data */ + while (timeout) { + ret_val = igb_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), + &tranceiver_type); + if (ret_val == 0) + break; + msleep(100); + timeout--; + } + if (ret_val != 0) + goto out; + + ret_val = igb_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), + (u8 *)eth_flags); + if (ret_val != 0) + goto out; + + /* Check if there is some SFP module plugged and powered */ + if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || + (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { + dev_spec->module_plugged = true; + if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e1000_base_t) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_copper; + } else { + hw->phy.media_type = e1000_media_type_unknown; + hw_dbg("PHY module has not been recognized\n"); + goto out; + } + } else { + hw->phy.media_type = e1000_media_type_unknown; + } + ret_val = 0; +out: + /* Restore I2C interface setting */ + wr32(E1000_CTRL_EXT, ctrl_ext); + return ret_val; +} + +static s32 igb_get_invariants_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + s32 ret_val; + u32 ctrl_ext = 0; + u32 link_mode = 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82575EB_COPPER: + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82575GB_QUAD_COPPER: + mac->type = e1000_82575; + break; + case E1000_DEV_ID_82576: + case E1000_DEV_ID_82576_NS: + case E1000_DEV_ID_82576_NS_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + case E1000_DEV_ID_82576_SERDES_QUAD: + mac->type = e1000_82576; + break; + case E1000_DEV_ID_82580_COPPER: + case E1000_DEV_ID_82580_FIBER: + case E1000_DEV_ID_82580_QUAD_FIBER: + case E1000_DEV_ID_82580_SERDES: + case E1000_DEV_ID_82580_SGMII: + case E1000_DEV_ID_82580_COPPER_DUAL: + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + mac->type = e1000_82580; + break; + case E1000_DEV_ID_I350_COPPER: + case E1000_DEV_ID_I350_FIBER: + case E1000_DEV_ID_I350_SERDES: + case E1000_DEV_ID_I350_SGMII: + mac->type = e1000_i350; + break; + case E1000_DEV_ID_I210_COPPER: + case E1000_DEV_ID_I210_FIBER: + case E1000_DEV_ID_I210_SERDES: + case E1000_DEV_ID_I210_SGMII: + case E1000_DEV_ID_I210_COPPER_FLASHLESS: + case E1000_DEV_ID_I210_SERDES_FLASHLESS: + mac->type = e1000_i210; + break; + case E1000_DEV_ID_I211_COPPER: + mac->type = e1000_i211; + break; + case E1000_DEV_ID_I354_BACKPLANE_1GBPS: + case E1000_DEV_ID_I354_SGMII: + case 
E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: + mac->type = e1000_i354; + break; + default: + return -E1000_ERR_MAC_INIT; + } + + /* Set media type */ + /* The 82575 uses bits 22:23 for link mode. The mode can be changed + * based on the EEPROM. We cannot rely upon device ID. There + * is no distinguishable difference between fiber and internal + * SerDes mode on the 82575. There can be an external PHY attached + * on the SGMII interface. For this, we'll set sgmii_active to true. + */ + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = false; + dev_spec->module_plugged = false; + + ctrl_ext = rd32(E1000_CTRL_EXT); + + link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; + switch (link_mode) { + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* Get phy control interface type set (MDIO vs. I2C)*/ + if (igb_sgmii_uses_mdio_82575(hw)) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + break; + } + fallthrough; /* for I2C based SGMII */ + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + /* read media type from SFP EEPROM */ + ret_val = igb_set_sfp_media_type_82575(hw); + if ((ret_val != 0) || + (hw->phy.media_type == e1000_media_type_unknown)) { + /* If media type was not identified then return media + * type defined by the CTRL_EXT settings. + */ + hw->phy.media_type = e1000_media_type_internal_serdes; + + if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + } + + break; + } + + /* change current link mode setting */ + ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + + if (dev_spec->sgmii_active) + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; + else + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + + wr32(E1000_CTRL_EXT, ctrl_ext); + + break; + default: + break; + } + + /* mac initialization and operations */ + ret_val = igb_init_mac_params_82575(hw); + if (ret_val) + goto out; + + /* NVM initialization */ + ret_val = igb_init_nvm_params_82575(hw); + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + ret_val = igb_init_nvm_params_i210(hw); + break; + default: + break; + } + + if (ret_val) + goto out; + + /* if part supports SR-IOV then initialize mailbox parameters */ + switch (mac->type) { + case e1000_82576: + case e1000_i350: + igb_init_mbx_params_pf(hw); + break; + default: + break; + } + + /* setup PHY parameters */ + ret_val = igb_init_phy_params_82575(hw); + +out: + return ret_val; +} + +/** + * igb_acquire_phy_82575 - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + **/ +static s32 igb_acquire_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * igb_release_phy_82575 - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. 
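+ * The SW/FW semaphore mask (PHY0..PHY3) is chosen from the PCI function
+ * number, mirroring igb_acquire_phy_82575() above.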
+ **/ +static void igb_release_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the serial gigabit media independent + * interface and stores the retrieved information in data. + **/ +static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + hw_dbg("PHY Address %u is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_read_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the serial gigabit + * media independent interface. + **/ +static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + hw_dbg("PHY Address %d is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_write_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_get_phy_id_82575 - Retrieve PHY addr and id + * @hw: pointer to the HW structure + * + * Retrieves the PHY address and ID for both PHY's which do and do not use + * sgmi interface. + **/ +static s32 igb_get_phy_id_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + u32 ctrl_ext; + u32 mdic; + + /* Extra read required for some PHY's on i354 */ + if (hw->mac.type == e1000_i354) + igb_get_phy_id(hw); + + /* For SGMII PHYs, we try the list of possible addresses until + * we find one that works. For non-SGMII PHYs + * (e.g. integrated copper PHYs), an address of 1 should + * work. The result of this function should mean phy->phy_addr + * and phy->id are set correctly. + */ + if (!(igb_sgmii_active_82575(hw))) { + phy->addr = 1; + ret_val = igb_get_phy_id(hw); + goto out; + } + + if (igb_sgmii_uses_mdio_82575(hw)) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + mdic = rd32(E1000_MDIC); + mdic &= E1000_MDIC_PHY_MASK; + phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + mdic = rd32(E1000_MDICNFG); + mdic &= E1000_MDICNFG_PHY_MASK; + phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + ret_val = igb_get_phy_id(hw); + goto out; + } + + /* Power on sgmii phy if it is disabled */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); + wrfl(); + msleep(300); + + /* The address field in the I2CCMD register is 3 bits and 0 is invalid. 
+ * Therefore, we need to test 1-7 + */ + for (phy->addr = 1; phy->addr < 8; phy->addr++) { + ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); + if (ret_val == 0) { + hw_dbg("Vendor ID 0x%08X read at address %u\n", + phy_id, phy->addr); + /* At the time of this writing, The M88 part is + * the only supported SGMII PHY product. + */ + if (phy_id == M88_VENDOR) + break; + } else { + hw_dbg("PHY address %u was unreadable\n", phy->addr); + } + } + + /* A valid PHY type couldn't be found. */ + if (phy->addr == 8) { + phy->addr = 0; + ret_val = -E1000_ERR_PHY; + goto out; + } else { + ret_val = igb_get_phy_id(hw); + } + + /* restore previous sfp cage power state */ + wr32(E1000_CTRL_EXT, ctrl_ext); + +out: + return ret_val; +} + +/** + * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY using the serial gigabit media independent interface. + **/ +static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + /* This isn't a true "hard" reset, but is the only reset + * available to us at this time. + */ + + hw_dbg("Soft resetting SGMII attached PHY...\n"); + + /* SFP documentation requires the following to configure the SPF module + * to work on SGMII. No further documentation is given. + */ + ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) + goto out; + + if (phy->id == M88E1512_E_PHY_ID) + ret_val = igb_initialize_M88E1512_phy(hw); + if (phy->id == M88E1543_E_PHY_ID) + ret_val = igb_initialize_M88E1543_phy(hw); +out: + return ret_val; +} + +/** + * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
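+ * (SmartSpeed is the PHY's automatic downshift to a lower speed after
+ * repeated failed attempts to establish a gigabit link.)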
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + goto out; + } + } + +out: + return ret_val; +} + +/** + * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u16 data; + + data = rd32(E1000_82580_PHY_POWER_MGMT); + + if (active) { + data |= E1000_82580_PM_D0_LPLU; + + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } else { + data &= ~E1000_82580_PM_D0_LPLU; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; } + + wr32(E1000_82580_PHY_POWER_MGMT, data); + return 0; +} + +/** + * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u16 data; + + data = rd32(E1000_82580_PHY_POWER_MGMT); + + if (!active) { + data &= ~E1000_82580_PM_D3_LPLU; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_82580_PM_D3_LPLU; + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } + + wr32(E1000_82580_PHY_POWER_MGMT, data); + return 0; +} + +/** + * igb_acquire_nvm_82575 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 igb_acquire_nvm_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM); + if (ret_val) + goto out; + + ret_val = igb_acquire_nvm(hw); + + if (ret_val) + hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); + +out: + return ret_val; +} + +/** + * igb_release_nvm_82575 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void igb_release_nvm_82575(struct e1000_hw *hw) +{ + igb_release_nvm(hw); + hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = 0; + s32 i = 0, timeout = 200; + + while (i < timeout) { + if (igb_get_hw_semaphore(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + igb_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); + +out: + return ret_val; +} + +/** + * igb_release_swfw_sync_82575 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igb_get_hw_semaphore(hw) != 0) + ; /* Empty */ + + swfw_sync = rd32(E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +} + +/** + * igb_get_cfg_done_82575 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. 
NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * 0. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_NVM_CFG_DONE_PORT_2; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_NVM_CFG_DONE_PORT_3; + + while (timeout) { + if (rd32(E1000_EEMNGCTL) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) + hw_dbg("MNG configuration cycle has not completed.\n"); + + /* If EEPROM is not marked present, init the PHY manually */ + if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && + (hw->phy.type == e1000_phy_igp_3)) + igb_phy_init_script_igp3(hw); + + return 0; +} + +/** + * igb_get_link_up_info_82575 - Get link speed/duplex info + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * This is a wrapper function, if using the serial gigabit media independent + * interface, use PCS to retrieve the link speed and duplex information. + * Otherwise, use the generic function to get the link speed and duplex info. + **/ +static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) + ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed, + duplex); + else + ret_val = igb_get_speed_and_duplex_copper(hw, speed, + duplex); + + return ret_val; +} + +/** + * igb_check_for_link_82575 - Check for link + * @hw: pointer to the HW structure + * + * If sgmii is enabled, then use the pcs register to determine link, otherwise + * use the generic interface for determining link. + **/ +static s32 igb_check_for_link_82575(struct e1000_hw *hw) +{ + s32 ret_val; + u16 speed, duplex; + + if (hw->phy.media_type != e1000_media_type_copper) { + ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, + &duplex); + /* Use this flag to determine if link needs to be checked or + * not. If we have link clear the flag so that we do not + * continue to check for link. + */ + hw->mac.get_link_status = !hw->mac.serdes_has_link; + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = igb_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + } else { + ret_val = igb_check_for_copper_link(hw); + } + + return ret_val; +} + +/** + * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown + * @hw: pointer to the HW structure + **/ +void igb_power_up_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !igb_sgmii_active_82575(hw)) + return; + + /* Enable PCS to turn on link */ + reg = rd32(E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_PCS_EN; + wr32(E1000_PCS_CFG0, reg); + + /* Power up the laser */ + reg = rd32(E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + wrfl(); + usleep_range(1000, 2000); +} + +/** + * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Using the physical coding sub-layer (PCS), retrieve the current speed and + * duplex, then store the values in the pointers provided. + **/ +static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 pcs, status; + + /* Set up defaults for the return values of this function */ + mac->serdes_has_link = false; + *speed = 0; + *duplex = 0; + + /* Read the PCS Status register for link state. For non-copper mode, + * the status register is not accurate. The PCS status register is + * used instead. + */ + pcs = rd32(E1000_PCS_LSTAT); + + /* The link up bit determines when link is up on autoneg. The sync ok + * gets set once both sides sync up and agree upon link. Stable link + * can be determined by checking for both link up and link sync ok + */ + if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { + mac->serdes_has_link = true; + + /* Detect and store PCS speed */ + if (pcs & E1000_PCS_LSTS_SPEED_1000) + *speed = SPEED_1000; + else if (pcs & E1000_PCS_LSTS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + /* Detect and store PCS duplex */ + if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + /* Check if it is an I354 2.5Gb backplane connection. */ + if (mac->type == e1000_i354) { + status = rd32(E1000_STATUS); + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + *speed = SPEED_2500; + *duplex = FULL_DUPLEX; + hw_dbg("2500 Mbs, "); + hw_dbg("Full Duplex\n"); + } + } + + } + + return 0; +} + +/** + * igb_shutdown_serdes_link_82575 - Remove link during power down + * @hw: pointer to the HW structure + * + * In the case of fiber serdes, shut down optics and PCS on driver unload + * when management pass thru is not enabled. 
+ **/ +void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + if (hw->phy.media_type != e1000_media_type_internal_serdes && + igb_sgmii_active_82575(hw)) + return; + + if (!igb_enable_mng_pass_thru(hw)) { + /* Disable PCS to turn off link */ + reg = rd32(E1000_PCS_CFG0); + reg &= ~E1000_PCS_CFG_PCS_EN; + wr32(E1000_PCS_CFG0, reg); + + /* shutdown the laser */ + reg = rd32(E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + wrfl(); + usleep_range(1000, 2000); + } +} + +/** + * igb_reset_hw_82575 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + **/ +static s32 igb_reset_hw_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igb_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed.\n"); + + /* set the completion timeout for interface */ + ret_val = igb_set_pcie_completion_timeout(hw); + if (ret_val) + hw_dbg("PCI-E Set completion timeout has failed.\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(E1000_IMC, 0xffffffff); + + wr32(E1000_RCTL, 0); + wr32(E1000_TCTL, E1000_TCTL_PSP); + wrfl(); + + usleep_range(10000, 20000); + + ctrl = rd32(E1000_CTRL); + + hw_dbg("Issuing a global reset to MAC\n"); + wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); + + ret_val = igb_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* If EEPROM is not present, run manual init scripts */ + if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) + igb_reset_init_script_82575(hw); + + /* Clear any pending interrupt events. */ + wr32(E1000_IMC, 0xffffffff); + rd32(E1000_ICR); + + /* Install any alternate MAC address into RAR0 */ + ret_val = igb_check_alt_mac_addr(hw); + + return ret_val; +} + +/** + * igb_init_hw_82575 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +static s32 igb_init_hw_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + if ((hw->mac.type >= e1000_i210) && + !(igb_get_flash_presence_i210(hw))) { + ret_val = igb_pll_workaround_i210(hw); + if (ret_val) + return ret_val; + } + + /* Initialize identification LED */ + ret_val = igb_id_led_init(hw); + if (ret_val) { + hw_dbg("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + hw_dbg("Initializing the IEEE VLAN\n"); + igb_clear_vfta(hw); + + /* Setup the receive address */ + igb_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + array_wr32(E1000_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(E1000_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = igb_setup_link(hw); + + /* Clear all of the statistics registers (clear on read). 
It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + igb_clear_hw_cntrs_82575(hw); + return ret_val; +} + +/** + * igb_setup_copper_link_82575 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u32 phpm_reg; + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + wr32(E1000_CTRL, ctrl); + + /* Clear Go Link Disconnect bit on supported devices */ + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i210: + case e1000_i211: + phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT); + phpm_reg &= ~E1000_82580_PM_GO_LINKD; + wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg); + break; + default: + break; + } + + ret_val = igb_setup_serdes_link_82575(hw); + if (ret_val) + goto out; + + if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { + /* allow time for SFP cage time to power up phy */ + msleep(300); + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + } + switch (hw->phy.type) { + case e1000_phy_i210: + case e1000_phy_m88: + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + ret_val = igb_copper_link_setup_m88_gen2(hw); + break; + default: + ret_val = igb_copper_link_setup_m88(hw); + break; + } + break; + case e1000_phy_igp_3: + ret_val = igb_copper_link_setup_igp(hw); + break; + case e1000_phy_82580: + ret_val = igb_copper_link_setup_82580(hw); + break; + case e1000_phy_bcm54616: + ret_val = 0; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + goto out; + + ret_val = igb_setup_copper_link(hw); +out: + return ret_val; +} + +/** + * igb_setup_serdes_link_82575 - Setup link for serdes + * @hw: pointer to the HW structure + * + * Configure the physical coding sub-layer (PCS) link. The PCS link is + * used on copper connections where the serialized gigabit media independent + * interface (sgmii), or serdes fiber is being used. Configures the link + * for auto-negotiation or forces speed/duplex. + **/ +static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) +{ + u32 ctrl_ext, ctrl_reg, reg, anadv_reg; + bool pcs_autoneg; + s32 ret_val = 0; + u16 data; + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !igb_sgmii_active_82575(hw)) + return ret_val; + + + /* On the 82575, SerDes loopback mode persists until it is + * explicitly turned off or a power cycle is performed. A read to + * the register does not indicate its status. Therefore, we ensure + * loopback mode is disabled during initialization. 
+ */ + wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + + /* power on the sfp cage if present and turn on I2C */ + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + ctrl_ext |= E1000_CTRL_I2C_ENA; + wr32(E1000_CTRL_EXT, ctrl_ext); + + ctrl_reg = rd32(E1000_CTRL); + ctrl_reg |= E1000_CTRL_SLU; + + if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { + /* set both sw defined pins */ + ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; + + /* Set switch control to serdes energy detect */ + reg = rd32(E1000_CONNSW); + reg |= E1000_CONNSW_ENRGSRC; + wr32(E1000_CONNSW, reg); + } + + reg = rd32(E1000_PCS_LCTL); + + /* default pcs_autoneg to the same setting as mac autoneg */ + pcs_autoneg = hw->mac.autoneg; + + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* sgmii mode lets the phy handle forcing speed/duplex */ + pcs_autoneg = true; + /* autoneg time out should be disabled for SGMII mode */ + reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); + break; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + /* disable PCS autoneg and support parallel detect only */ + pcs_autoneg = false; + fallthrough; + default: + if (hw->mac.type == e1000_82575 || + hw->mac.type == e1000_82576) { + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); + if (ret_val) { + hw_dbg(KERN_DEBUG "NVM Read Error\n\n"); + return ret_val; + } + + if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) + pcs_autoneg = false; + } + + /* non-SGMII modes only supports a speed of 1000/Full for the + * link so it is best to just force the MAC and let the pcs + * link either autoneg or be forced to 1000/Full + */ + ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | + E1000_CTRL_FD | E1000_CTRL_FRCDPX; + + /* set speed of 1000/Full if speed/duplex is forced */ + reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; + break; + } + + wr32(E1000_CTRL, ctrl_reg); + + /* New SerDes mode allows for forcing speed or autonegotiating speed + * at 1gb. Autoneg should be default set by most drivers. This is the + * mode that will be compatible with older link partners and switches. + * However, both are supported by the hardware and some drivers/tools. 
+ */ + reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | + E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); + + if (pcs_autoneg) { + /* Set PCS register for autoneg */ + reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ + E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ + + /* Disable force flow control for autoneg */ + reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; + + /* Configure flow control advertisement for autoneg */ + anadv_reg = rd32(E1000_PCS_ANADV); + anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); + switch (hw->fc.requested_mode) { + case e1000_fc_full: + case e1000_fc_rx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + anadv_reg |= E1000_TXCW_PAUSE; + break; + case e1000_fc_tx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + break; + default: + break; + } + wr32(E1000_PCS_ANADV, anadv_reg); + + hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); + } else { + /* Set PCS register for forced link */ + reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + + /* Force flow control for forced link */ + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + + hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); + } + + wr32(E1000_PCS_LCTL, reg); + + if (!pcs_autoneg && !igb_sgmii_active_82575(hw)) + igb_force_mac_fc(hw); + + return ret_val; +} + +/** + * igb_sgmii_active_82575 - Return sgmii state + * @hw: pointer to the HW structure + * + * 82575 silicon has a serialized gigabit media independent interface (sgmii) + * which can be enabled for use in the embedded applications. Simply + * return the current state of the sgmii interface. + **/ +static bool igb_sgmii_active_82575(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + return dev_spec->sgmii_active; +} + +/** + * igb_reset_init_script_82575 - Inits HW defaults after reset + * @hw: pointer to the HW structure + * + * Inits recommended HW defaults after a reset when there is no EEPROM + * detected. This is only for the 82575. + **/ +static s32 igb_reset_init_script_82575(struct e1000_hw *hw) +{ + if (hw->mac.type == e1000_82575) { + hw_dbg("Running reset init script for 82575\n"); + /* SerDes configuration via SERDESCTRL */ + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15); + + /* CCM configuration via CCMCTL register */ + igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00); + igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00); + + /* PCIe lanes configuration */ + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81); + + /* PCIe PLL Configuration */ + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47); + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00); + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00); + } + + return 0; +} + +/** + * igb_read_mac_addr_82575 - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. 
+ */ + ret_val = igb_check_alt_mac_addr(hw); + if (ret_val) + goto out; + + ret_val = igb_read_mac_addr(hw); + +out: + return ret_val; +} + +/** + * igb_power_down_phy_copper_82575 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +void igb_power_down_phy_copper_82575(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw))) + igb_power_down_phy_copper(hw); +} + +/** + * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) +{ + igb_clear_hw_cntrs_base(hw); + + rd32(E1000_PRC64); + rd32(E1000_PRC127); + rd32(E1000_PRC255); + rd32(E1000_PRC511); + rd32(E1000_PRC1023); + rd32(E1000_PRC1522); + rd32(E1000_PTC64); + rd32(E1000_PTC127); + rd32(E1000_PTC255); + rd32(E1000_PTC511); + rd32(E1000_PTC1023); + rd32(E1000_PTC1522); + + rd32(E1000_ALGNERRC); + rd32(E1000_RXERRC); + rd32(E1000_TNCRS); + rd32(E1000_CEXTERR); + rd32(E1000_TSCTC); + rd32(E1000_TSCTFC); + + rd32(E1000_MGTPRC); + rd32(E1000_MGTPDC); + rd32(E1000_MGTPTC); + + rd32(E1000_IAC); + rd32(E1000_ICRXOC); + + rd32(E1000_ICRXPTC); + rd32(E1000_ICRXATC); + rd32(E1000_ICTXPTC); + rd32(E1000_ICTXATC); + rd32(E1000_ICTXQEC); + rd32(E1000_ICTXQMTC); + rd32(E1000_ICRXDMTC); + + rd32(E1000_CBTMPC); + rd32(E1000_HTDPMC); + rd32(E1000_CBRMPC); + rd32(E1000_RPTHC); + rd32(E1000_HGPTC); + rd32(E1000_HTCBDPC); + rd32(E1000_HGORCL); + rd32(E1000_HGORCH); + rd32(E1000_HGOTCL); + rd32(E1000_HGOTCH); + rd32(E1000_LENERRS); + + /* This register should not be read in copper configurations */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + igb_sgmii_active_82575(hw)) + rd32(E1000_SCVPC); +} + +/** + * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable + * @hw: pointer to the HW structure + * + * After rx enable if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + **/ +void igb_rx_fifo_flush_82575(struct e1000_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + /* disable IPv6 options as per hardware errata */ + rfctl = rd32(E1000_RFCTL); + rfctl |= E1000_RFCTL_IPV6_EX_DIS; + wr32(E1000_RFCTL, rfctl); + + if (hw->mac.type != e1000_82575 || + !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) + return; + + /* Disable all RX queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = rd32(E1000_RXDCTL(i)); + wr32(E1000_RXDCTL(i), + rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + usleep_range(1000, 2000); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= rd32(E1000_RXDCTL(i)); + if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + hw_dbg("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); + + rlpml = rd32(E1000_RLPML); + wr32(E1000_RLPML, 0); + + rctl = rd32(E1000_RCTL); + temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); + temp_rctl |= E1000_RCTL_LPE; + + wr32(E1000_RCTL, temp_rctl); + wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); + wrfl(); + usleep_range(2000, 3000); + + /* Enable RX queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + wr32(E1000_RXDCTL(i), rxdctl[i]); + wr32(E1000_RCTL, rctl); + wrfl(); + + wr32(E1000_RLPML, rlpml); + wr32(E1000_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + rd32(E1000_ROC); + rd32(E1000_RNBC); + rd32(E1000_MPC); +} + +/** + * igb_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 200ms for capability version 1 config, + * or 16ms to 55ms for version 2. + **/ +static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) +{ + u32 gcr = rd32(E1000_GCR); + s32 ret_val = 0; + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & E1000_GCR_CMPL_TMOUT_MASK) + goto out; + + /* if capabilities version is type 1 we can write the + * timeout of 10ms to 200ms through the GCR register + */ + if (!(gcr & E1000_GCR_CAP_VER2)) { + gcr |= E1000_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); + if (ret_val) + goto out; + + pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; + + ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; + + wr32(E1000_GCR, gcr); + return ret_val; +} + +/** + * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * @pf: Physical Function pool - do not set anti-spoofing for the PF + * + * enables/disables L2 switch anti-spoofing functionality. + **/ +void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) +{ + u32 reg_val, reg_offset; + + switch (hw->mac.type) { + case e1000_82576: + reg_offset = E1000_DTXSWC; + break; + case e1000_i350: + case e1000_i354: + reg_offset = E1000_TXSWC; + break; + default: + return; + } + + reg_val = rd32(reg_offset); + if (enable) { + reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs + */ + reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS)); + } else { + reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } + wr32(reg_offset, reg_val); +} + +/** + * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables L2 switch loopback functionality. 
+ **/ +void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) +{ + u32 dtxswc; + + switch (hw->mac.type) { + case e1000_82576: + dtxswc = rd32(E1000_DTXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + wr32(E1000_DTXSWC, dtxswc); + break; + case e1000_i354: + case e1000_i350: + dtxswc = rd32(E1000_TXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + wr32(E1000_TXSWC, dtxswc); + break; + default: + /* Currently no other hardware supports loopback */ + break; + } + +} + +/** + * igb_vmdq_set_replication_pf - enable or disable vmdq replication + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables replication of packets across multiple pools. + **/ +void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) +{ + u32 vt_ctl = rd32(E1000_VT_CTL); + + if (enable) + vt_ctl |= E1000_VT_CTL_VM_REPL_EN; + else + vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; + + wr32(E1000_VT_CTL, vt_ctl); +} + +/** + * igb_read_phy_reg_82580 - Read 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_read_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_82580 - Write 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_write_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits + * @hw: pointer to the HW structure + * + * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on + * the values found in the EEPROM. This addresses an issue in which these + * bits are not restored from EEPROM after reset. + **/ +static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 mdicnfg; + u16 nvm_data = 0; + + if (hw->mac.type != e1000_82580) + goto out; + if (!igb_sgmii_active_82575(hw)) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + mdicnfg = rd32(E1000_MDICNFG); + if (nvm_data & NVM_WORD24_EXT_MDIO) + mdicnfg |= E1000_MDICNFG_EXT_MDIO; + if (nvm_data & NVM_WORD24_COM_MDIO) + mdicnfg |= E1000_MDICNFG_COM_MDIO; + wr32(E1000_MDICNFG, mdicnfg); +out: + return ret_val; +} + +/** + * igb_reset_hw_82580 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets function or entire device (all ports, etc.) + * to a known state. 
+ **/ +static s32 igb_reset_hw_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + /* BH SW mailbox bit in SW_FW_SYNC */ + u16 swmbsw_mask = E1000_SW_SYNCH_MB; + u32 ctrl; + bool global_device_reset = hw->dev_spec._82575.global_device_reset; + + hw->dev_spec._82575.global_device_reset = false; + + /* due to hw errata, global device reset doesn't always + * work on 82580 + */ + if (hw->mac.type == e1000_82580) + global_device_reset = false; + + /* Get current control state. */ + ctrl = rd32(E1000_CTRL); + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igb_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed.\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(E1000_IMC, 0xffffffff); + wr32(E1000_RCTL, 0); + wr32(E1000_TCTL, E1000_TCTL_PSP); + wrfl(); + + usleep_range(10000, 11000); + + /* Determine whether or not a global dev reset is requested */ + if (global_device_reset && + hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask)) + global_device_reset = false; + + if (global_device_reset && + !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET)) + ctrl |= E1000_CTRL_DEV_RST; + else + ctrl |= E1000_CTRL_RST; + + wr32(E1000_CTRL, ctrl); + wrfl(); + + /* Add delay to insure DEV_RST has time to complete */ + if (global_device_reset) + usleep_range(5000, 6000); + + ret_val = igb_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* clear global device reset status bit */ + wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); + + /* Clear any pending interrupt events. */ + wr32(E1000_IMC, 0xffffffff); + rd32(E1000_ICR); + + ret_val = igb_reset_mdicnfg_82580(hw); + if (ret_val) + hw_dbg("Could not reset MDICNFG based on EEPROM\n"); + + /* Install any alternate MAC address into RAR0 */ + ret_val = igb_check_alt_mac_addr(hw); + + /* Release semaphore */ + if (global_device_reset) + hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); + + return ret_val; +} + +/** + * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size + * @data: data received by reading RXPBS register + * + * The 82580 uses a table based approach for packet buffer allocation sizes. + * This function converts the retrieved value into the correct table value + * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 + * 0x0 36 72 144 1 2 4 8 16 + * 0x8 35 70 140 rsv rsv rsv rsv rsv + */ +u16 igb_rxpbs_adjust_82580(u32 data) +{ + u16 ret_val = 0; + + if (data < ARRAY_SIZE(e1000_82580_rxpbs_table)) + ret_val = e1000_82580_rxpbs_table[data]; + + return ret_val; +} + +/** + * igb_validate_nvm_checksum_with_offset - Validate EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
+ **/ +static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_with_offset - Update EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, + &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 eeprom_regions_count = 1; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { + /* if checksums compatibility bit is set validate checksums + * for all 4 ports. + */ + eeprom_regions_count = 4; + } + + for (j = 0; j < eeprom_regions_count; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_82580 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. 
+ **/ +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum compatibility bit.\n"); + goto out; + } + + if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { + /* set compatibility bit to validate checksums appropriately */ + nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; + ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, + &nvm_data); + if (ret_val) { + hw_dbg("NVM Write Error while updating checksum compatibility bit.\n"); + goto out; + } + } + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_i350 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. + **/ +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * __igb_access_emi_reg - Read/write EMI register + * @hw: pointer to the HW structure + * @address: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + **/ +static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val = 0; + + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); + + return ret_val; +} + +/** + * igb_read_emi_reg - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + **/ +s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) +{ + return __igb_access_emi_reg(hw, addr, data, true); +} + +/** + * igb_set_eee_i350 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. 
+ * + **/ +s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + u32 ipcnfg, eeer; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + goto out; + ipcnfg = rd32(E1000_IPCNFG); + eeer = rd32(E1000_EEER); + + /* enable or disable per user setting */ + if (!(hw->dev_spec._82575.eee_disable)) { + u32 eee_su = rd32(E1000_EEE_SU); + + if (adv100M) + ipcnfg |= E1000_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= E1000_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN; + + eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & E1000_EEE_SU_LPI_CLK_STP) + hw_dbg("LPI Clock Stop Bit should not be set!\n"); + + } else { + ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | + E1000_IPCNFG_EEE_100M_AN); + eeer &= ~(E1000_EEER_TX_LPI_EN | + E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + } + wr32(E1000_IPCNFG, ipcnfg); + wr32(E1000_EEER, eeer); + rd32(E1000_IPCNFG); + rd32(E1000_EEER); +out: + + return 0; +} + +/** + * igb_set_eee_i354 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE legacy mode based on setting in dev_spec structure. + * + **/ +s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data; + + if ((hw->phy.media_type != e1000_media_type_copper) || + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) + goto out; + + if (!hw->dev_spec._82575.eee_disable) { + /* Switch to PHY page 18. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, + &phy_data); + if (ret_val) + goto out; + + phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, + phy_data); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + /* Turn on EEE advertisement. */ + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + if (adv100M) + phy_data |= E1000_EEE_ADV_100_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_100_SUPPORTED; + + if (adv1G) + phy_data |= E1000_EEE_ADV_1000_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED; + + ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } else { + /* Turn off EEE advertisement. */ + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | + E1000_EEE_ADV_1000_SUPPORTED); + ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } + +out: + return ret_val; +} + +/** + * igb_get_eee_status_i354 - Get EEE status + * @hw: pointer to the HW structure + * @status: EEE status + * + * Get EEE status by guessing based on whether Tx or Rx LPI indications have + * been received. + **/ +s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data; + + /* Check if EEE is supported on this device. 
*/ + if ((hw->phy.media_type != e1000_media_type_copper) || + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) + goto out; + + ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, + E1000_PCS_STATUS_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | + E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; + +out: + return ret_val; +} + +#ifdef CONFIG_IGB_HWMON +static const u8 e1000_emc_temp_data[4] = { + E1000_EMC_INTERNAL_DATA, + E1000_EMC_DIODE1_DATA, + E1000_EMC_DIODE2_DATA, + E1000_EMC_DIODE3_DATA +}; +static const u8 e1000_emc_therm_limit[4] = { + E1000_EMC_INTERNAL_THERM_LIMIT, + E1000_EMC_DIODE1_THERM_LIMIT, + E1000_EMC_DIODE2_THERM_LIMIT, + E1000_EMC_DIODE3_THERM_LIMIT +}; + +/** + * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the temperatures in mac.thermal_sensor_data + **/ +static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) +{ + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return 0; + + hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; + + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + if (num_sensors > E1000_MAX_SENSORS) + num_sensors = E1000_MAX_SENSORS; + + for (i = 1; i < num_sensors; i++) { + hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + + if (sensor_location != 0) + hw->phy.ops.read_i2c_byte(hw, + e1000_emc_temp_data[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + &data->sensor[i].temp); + } + return 0; +} + +/** + * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Sets the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) +{ + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 low_thresh_delta; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 therm_limit; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + memset(data, 0, sizeof(struct e1000_thermal_sensor_data)); + + data->sensor[0].location = 0x1; + data->sensor[0].caution_thresh = + (rd32(E1000_THHIGHTC) & 0xFF); + data->sensor[0].max_op_thresh = + (rd32(E1000_THLOWTC) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return 0; + + hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; 
+ + low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >> + NVM_ETS_LTHRES_DELTA_SHIFT); + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + + for (i = 1; i <= num_sensors; i++) { + hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK; + + hw->phy.ops.write_i2c_byte(hw, + e1000_emc_therm_limit[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + therm_limit); + + if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) { + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - + low_thresh_delta; + } + } + return 0; +} + +#endif +static struct e1000_mac_operations e1000_mac_ops_82575 = { + .init_hw = igb_init_hw_82575, + .check_for_link = igb_check_for_link_82575, + .rar_set = igb_rar_set, + .read_mac_addr = igb_read_mac_addr_82575, + .get_speed_and_duplex = igb_get_link_up_info_82575, +#ifdef CONFIG_IGB_HWMON + .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic, + .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic, +#endif +}; + +static const struct e1000_phy_operations e1000_phy_ops_82575 = { + .acquire = igb_acquire_phy_82575, + .get_cfg_done = igb_get_cfg_done_82575, + .release = igb_release_phy_82575, + .write_i2c_byte = igb_write_i2c_byte, + .read_i2c_byte = igb_read_i2c_byte, +}; + +static struct e1000_nvm_operations e1000_nvm_ops_82575 = { + .acquire = igb_acquire_nvm_82575, + .read = igb_read_nvm_eerd, + .release = igb_release_nvm_82575, + .write = igb_write_nvm_spi, +}; + +const struct e1000_info e1000_82575_info = { + .get_invariants = igb_get_invariants_82575, + .mac_ops = &e1000_mac_ops_82575, + .phy_ops = &e1000_phy_ops_82575, + .nvm_ops = &e1000_nvm_ops_82575, +}; diff --git a/devices/igb/e1000_82575-6.1-ethercat.h b/devices/igb/e1000_82575-6.1-ethercat.h new file mode 100644 index 00000000..63ec253a --- /dev/null +++ b/devices/igb/e1000_82575-6.1-ethercat.h @@ -0,0 +1,265 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_82575_H_ +#define _E1000_82575_H_ + +void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); +void igb_power_up_serdes_link_82575(struct e1000_hw *hw); +void igb_power_down_phy_copper_82575(struct e1000_hw *hw); +void igb_rx_fifo_flush_82575(struct e1000_hw *hw); +s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data); +s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data); + +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +#define E1000_RAR_ENTRIES_82575 16 +#define E1000_RAR_ENTRIES_82576 24 +#define E1000_RAR_ENTRIES_82580 24 +#define E1000_RAR_ENTRIES_I350 32 + +#define E1000_SW_SYNCH_MB 0x00000100 +#define E1000_STAT_DEV_RST_SET 0x00100000 +#define E1000_CTRL_DEV_RST 0x20000000 + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 +#define E1000_SRRCTL_TIMESTAMP 0x40000000 + + +#define E1000_MRQC_ENABLE_RSS_MQ 0x00000002 +#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_ENABLE_VMDQ_RSS_MQ 0x00000005 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 + +#define E1000_EICR_TX_QUEUE ( \ + E1000_EICR_TX_QUEUE0 | \ + E1000_EICR_TX_QUEUE1 | \ + E1000_EICR_TX_QUEUE2 | \ + E1000_EICR_TX_QUEUE3) + +#define E1000_EICR_RX_QUEUE ( \ + E1000_EICR_RX_QUEUE0 | \ + E1000_EICR_RX_QUEUE1 | \ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) + +/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + __le16 pkt_info; /* RSS type, Packet type */ + __le16 hdr_info; /* Split Head, buf len */ + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ +/* IPSec Encrypt Enable for ESP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +/* Adv ctxt IPSec ESP len mask */ + +/* Additional Transmit Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ +/* Tx Queue Arbitration Priority 0=low, 1=high */ + +/* Additional Receive Descriptor Control definitions */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */ +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */ +#define 
E1000_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */ +#define E1000_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */ + +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */ +#define E1000_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */ + +/* Additional DCA related definitions, note change in position of CPUID */ +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ +#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ + +/* ETQF register bit definitions */ +#define E1000_ETQF_FILTER_ENABLE BIT(26) +#define E1000_ETQF_1588 BIT(30) +#define E1000_ETQF_IMM_INT BIT(29) +#define E1000_ETQF_QUEUE_ENABLE BIT(31) +#define E1000_ETQF_QUEUE_SHIFT 16 +#define E1000_ETQF_QUEUE_MASK 0x00070000 +#define E1000_ETQF_ETYPE_MASK 0x0000FFFF + +/* FTQF register bit definitions */ +#define E1000_FTQF_VF_BP 0x00008000 +#define E1000_FTQF_1588_TIME_STAMP 0x08000000 +#define E1000_FTQF_MASK 0xF0000000 +#define E1000_FTQF_MASK_PROTO_BP 0x10000000 +#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 + +#define E1000_NVM_APME_82575 0x0400 +#define MAX_NUM_VFS 8 + +#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */ +#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ +#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN BIT(31) /* global VF LB enable */ + +/* Easy defines for setting default pool, would normally be left a zero */ +#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 +#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) + +/* Other useful VMD_CTL register defines */ +#define E1000_VT_CTL_IGNORE_MAC BIT(28) +#define E1000_VT_CTL_DISABLE_DEF_POOL BIT(29) +#define E1000_VT_CTL_VM_REPL_EN BIT(30) + +/* Per VM Offload register setup */ +#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ +#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ +#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ +#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ +#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ +#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ +#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ +#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */ +#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_VLVF_ARRAY_SIZE 32 +#define E1000_VLVF_VLANID_MASK 0x00000FFF +#define E1000_VLVF_POOLSEL_SHIFT 12 +#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) +#define E1000_VLVF_LVLAN 0x00100000 +#define E1000_VLVF_VLANID_ENABLE 0x80000000 + +#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* 
Never insert VLAN tag */ + +#define E1000_IOVCTL 0x05BBC +#define E1000_IOVCTL_REUSE_VFQ 0x00000001 + +#define E1000_RPLOLR_STRVLAN 0x40000000 +#define E1000_RPLOLR_STRCRC 0x80000000 + +#define E1000_DTXCTL_8023LL 0x0004 +#define E1000_DTXCTL_VLAN_ADDED 0x0008 +#define E1000_DTXCTL_OOS_ENABLE 0x0010 +#define E1000_DTXCTL_MDP_EN 0x0020 +#define E1000_DTXCTL_SPOOF_INT 0x0040 + +#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT BIT(14) + +#define ALL_QUEUES 0xFFFF + +/* RX packet buffer size defines */ +#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F +void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int); +void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); +void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); +u16 igb_rxpbs_adjust_82580(u32 data); +s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data); +s32 igb_set_eee_i350(struct e1000_hw *, bool adv1G, bool adv100M); +s32 igb_set_eee_i354(struct e1000_hw *, bool adv1G, bool adv100M); +s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status); + +#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define E1000_EMC_INTERNAL_DATA 0x00 +#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20 +#define E1000_EMC_DIODE1_DATA 0x01 +#define E1000_EMC_DIODE1_THERM_LIMIT 0x19 +#define E1000_EMC_DIODE2_DATA 0x23 +#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A +#define E1000_EMC_DIODE3_DATA 0x2A +#define E1000_EMC_DIODE3_THERM_LIMIT 0x30 +#endif diff --git a/devices/igb/e1000_82575-6.1-orig.c b/devices/igb/e1000_82575-6.1-orig.c new file mode 100644 index 00000000..8d6e44ee --- /dev/null +++ b/devices/igb/e1000_82575-6.1-orig.c @@ -0,0 +1,2927 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +/* e1000_82575 + * e1000_82576 + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include + +#include "e1000_mac.h" +#include "e1000_82575.h" +#include "e1000_i210.h" +#include "igb.h" + +static s32 igb_get_invariants_82575(struct e1000_hw *); +static s32 igb_acquire_phy_82575(struct e1000_hw *); +static void igb_release_phy_82575(struct e1000_hw *); +static s32 igb_acquire_nvm_82575(struct e1000_hw *); +static void igb_release_nvm_82575(struct e1000_hw *); +static s32 igb_check_for_link_82575(struct e1000_hw *); +static s32 igb_get_cfg_done_82575(struct e1000_hw *); +static s32 igb_init_hw_82575(struct e1000_hw *); +static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); +static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); +static s32 igb_reset_hw_82575(struct e1000_hw *); +static s32 igb_reset_hw_82580(struct e1000_hw *); +static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); +static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool); +static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool); +static s32 igb_setup_copper_link_82575(struct e1000_hw *); +static s32 igb_setup_serdes_link_82575(struct e1000_hw *); +static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); +static void igb_clear_hw_cntrs_82575(struct e1000_hw *); +static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16); +static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *, + u16 *); +static s32 igb_get_phy_id_82575(struct e1000_hw *); +static void igb_release_swfw_sync_82575(struct e1000_hw *, u16); +static bool igb_sgmii_active_82575(struct e1000_hw *); +static s32 igb_reset_init_script_82575(struct e1000_hw *); +static s32 igb_read_mac_addr_82575(struct e1000_hw *); +static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); +static s32 
igb_reset_mdicnfg_82580(struct e1000_hw *hw); +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); +static const u16 e1000_82580_rxpbs_table[] = { + 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; + +/* Due to a hw errata, if the host tries to configure the VFTA register + * while performing queries from the BMC or DMA, then the VFTA in some + * cases won't be written. + */ + +/** + * igb_write_vfta_i350 - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +{ + struct igb_adapter *adapter = hw->back; + int i; + + for (i = 10; i--;) + array_wr32(E1000_VFTA, offset, value); + + wrfl(); + adapter->shadow_vfta[offset] = value; +} + +/** + * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO + * @hw: pointer to the HW structure + * + * Called to determine if the I2C pins are being used for I2C or as an + * external MDIO interface since the two options are mutually exclusive. + **/ +static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) +{ + u32 reg = 0; + bool ext_mdio = false; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + reg = rd32(E1000_MDIC); + ext_mdio = !!(reg & E1000_MDIC_DEST); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + reg = rd32(E1000_MDICNFG); + ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); + break; + default: + break; + } + return ext_mdio; +} + +/** + * igb_check_for_link_media_swap - Check which M88E1112 interface linked + * @hw: pointer to the HW structure + * + * Poll the M88E1112 interfaces to see which interface achieved link. + */ +static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + u8 port = 0; + + /* Check the copper medium. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_COPPER; + + /* Check the other medium. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_OTHER; + + /* Determine if a swap needs to happen. */ + if (port && (hw->dev_spec._82575.media_port != port)) { + hw->dev_spec._82575.media_port = port; + hw->dev_spec._82575.media_changed = true; + } + + if (port == E1000_MEDIA_PORT_COPPER) { + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + igb_check_for_link_82575(hw); + } else { + igb_check_for_link_82575(hw); + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + } + + return 0; +} + +/** + * igb_init_phy_params_82575 - Init PHY func ptrs. 
+ * @hw: pointer to the HW structure + **/ +static s32 igb_init_phy_params_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u32 ctrl_ext; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + ctrl_ext = rd32(E1000_CTRL_EXT); + + if (igb_sgmii_active_82575(hw)) { + phy->ops.reset = igb_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { + phy->ops.reset = igb_phy_hw_reset; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; + } + + wr32(E1000_CTRL_EXT, ctrl_ext); + igb_reset_mdicnfg_82580(hw); + + if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { + phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; + phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; + } else { + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + phy->ops.read_reg = igb_read_phy_reg_82580; + phy->ops.write_reg = igb_write_phy_reg_82580; + break; + default: + phy->ops.read_reg = igb_read_phy_reg_igp; + phy->ops.write_reg = igb_write_phy_reg_igp; + } + } + + /* set lan id */ + hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> + E1000_STATUS_FUNC_SHIFT; + + /* Set phy->phy_addr and phy->id. */ + ret_val = igb_get_phy_id_82575(hw); + if (ret_val) + return ret_val; + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1111_I_PHY_ID: + phy->type = e1000_phy_m88; + phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_phy_info = igb_get_phy_info_m88; + if (phy->id != M88E1111_I_PHY_ID) + phy->ops.get_cable_length = + igb_get_cable_length_m88_gen2; + else + phy->ops.get_cable_length = igb_get_cable_length_m88; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; + /* Check if this PHY is configured for media swap. 
*/ + if (phy->id == M88E1112_E_PHY_ID) { + u16 data; + + ret_val = phy->ops.write_reg(hw, + E1000_M88E1112_PAGE_ADDR, + 2); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, + E1000_M88E1112_MAC_CTRL_1, + &data); + if (ret_val) + goto out; + + data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >> + E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT; + if (data == E1000_M88E1112_AUTO_COPPER_SGMII || + data == E1000_M88E1112_AUTO_COPPER_BASEX) + hw->mac.ops.check_for_link = + igb_check_for_link_media_swap; + } + if (phy->id == M88E1512_E_PHY_ID) { + ret_val = igb_initialize_M88E1512_phy(hw); + if (ret_val) + goto out; + } + if (phy->id == M88E1543_E_PHY_ID) { + ret_val = igb_initialize_M88E1543_phy(hw); + if (ret_val) + goto out; + } + break; + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->ops.get_phy_info = igb_get_phy_info_igp; + phy->ops.get_cable_length = igb_get_cable_length_igp_2; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; + break; + case I82580_I_PHY_ID: + case I350_I_PHY_ID: + phy->type = e1000_phy_82580; + phy->ops.force_speed_duplex = + igb_phy_force_speed_duplex_82580; + phy->ops.get_cable_length = igb_get_cable_length_82580; + phy->ops.get_phy_info = igb_get_phy_info_82580; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; + break; + case I210_I_PHY_ID: + phy->type = e1000_phy_i210; + phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_cfg_done = igb_get_cfg_done_i210; + phy->ops.get_phy_info = igb_get_phy_info_m88; + phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; + break; + case BCM54616_E_PHY_ID: + phy->type = e1000_phy_bcm54616; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_init_nvm_params_82575 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + u16 size; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->word_size = BIT(size); + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 
+ 16 : 8; + break; + } + if (nvm->word_size == BIT(15)) + nvm->page_size = 128; + + nvm->type = e1000_nvm_eeprom_spi; + + /* NVM Function Pointers */ + nvm->ops.acquire = igb_acquire_nvm_82575; + nvm->ops.release = igb_release_nvm_82575; + nvm->ops.write = igb_write_nvm_spi; + nvm->ops.validate = igb_validate_nvm_checksum; + nvm->ops.update = igb_update_nvm_checksum; + if (nvm->word_size < BIT(15)) + nvm->ops.read = igb_read_nvm_eerd; + else + nvm->ops.read = igb_read_nvm_spi; + + /* override generic family function pointers for specific descendants */ + switch (hw->mac.type) { + case e1000_82580: + nvm->ops.validate = igb_validate_nvm_checksum_82580; + nvm->ops.update = igb_update_nvm_checksum_82580; + break; + case e1000_i354: + case e1000_i350: + nvm->ops.validate = igb_validate_nvm_checksum_i350; + nvm->ops.update = igb_update_nvm_checksum_i350; + break; + default: + break; + } + + return 0; +} + +/** + * igb_init_mac_params_82575 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igb_init_mac_params_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set uta register count */ + mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128; + /* Set rar entry count */ + switch (mac->type) { + case e1000_82576: + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; + break; + case e1000_82580: + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + break; + case e1000_i350: + case e1000_i354: + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; + break; + default: + mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + break; + } + /* reset */ + if (mac->type >= e1000_82580) + mac->ops.reset_hw = igb_reset_hw_82580; + else + mac->ops.reset_hw = igb_reset_hw_82575; + + if (mac->type >= e1000_i210) { + mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210; + mac->ops.release_swfw_sync = igb_release_swfw_sync_i210; + + } else { + mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575; + mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; + } + + if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) + mac->ops.write_vfta = igb_write_vfta_i350; + else + mac->ops.write_vfta = igb_write_vfta; + + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = + (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) + ? true : false; + /* enable EEE on i350 parts and later parts */ + if (mac->type >= e1000_i350) + dev_spec->eee_disable = false; + else + dev_spec->eee_disable = true; + /* Allow a single clear of the SW semaphore on I210 and newer */ + if (mac->type >= e1000_i210) + dev_spec->clear_semaphore_once = true; + /* physical interface link setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? igb_setup_copper_link_82575 + : igb_setup_serdes_link_82575; + + if (mac->type == e1000_82580 || mac->type == e1000_i350) { + switch (hw->device_id) { + /* feature not supported on these id's */ + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + break; + default: + hw->dev_spec._82575.mas_capable = true; + break; + } + } + return 0; +} + +/** + * igb_set_sfp_media_type_82575 - derives SFP module media type. + * @hw: pointer to the HW structure + * + * The media type is chosen based on SFP module. 
+ * compatibility flags retrieved from SFP ID EEPROM. + **/ +static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_ERR_CONFIG; + u32 ctrl_ext = 0; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; + u8 tranceiver_type = 0; + s32 timeout = 3; + + /* Turn I2C interface ON and power on sfp cage */ + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); + + wrfl(); + + /* Read SFP module data */ + while (timeout) { + ret_val = igb_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), + &tranceiver_type); + if (ret_val == 0) + break; + msleep(100); + timeout--; + } + if (ret_val != 0) + goto out; + + ret_val = igb_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), + (u8 *)eth_flags); + if (ret_val != 0) + goto out; + + /* Check if there is some SFP module plugged and powered */ + if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || + (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { + dev_spec->module_plugged = true; + if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e1000_base_t) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_copper; + } else { + hw->phy.media_type = e1000_media_type_unknown; + hw_dbg("PHY module has not been recognized\n"); + goto out; + } + } else { + hw->phy.media_type = e1000_media_type_unknown; + } + ret_val = 0; +out: + /* Restore I2C interface setting */ + wr32(E1000_CTRL_EXT, ctrl_ext); + return ret_val; +} + +static s32 igb_get_invariants_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + s32 ret_val; + u32 ctrl_ext = 0; + u32 link_mode = 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82575EB_COPPER: + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82575GB_QUAD_COPPER: + mac->type = e1000_82575; + break; + case E1000_DEV_ID_82576: + case E1000_DEV_ID_82576_NS: + case E1000_DEV_ID_82576_NS_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + case E1000_DEV_ID_82576_SERDES_QUAD: + mac->type = e1000_82576; + break; + case E1000_DEV_ID_82580_COPPER: + case E1000_DEV_ID_82580_FIBER: + case E1000_DEV_ID_82580_QUAD_FIBER: + case E1000_DEV_ID_82580_SERDES: + case E1000_DEV_ID_82580_SGMII: + case E1000_DEV_ID_82580_COPPER_DUAL: + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + mac->type = e1000_82580; + break; + case E1000_DEV_ID_I350_COPPER: + case E1000_DEV_ID_I350_FIBER: + case E1000_DEV_ID_I350_SERDES: + case E1000_DEV_ID_I350_SGMII: + mac->type = e1000_i350; + break; + case E1000_DEV_ID_I210_COPPER: + case E1000_DEV_ID_I210_FIBER: + case E1000_DEV_ID_I210_SERDES: + case E1000_DEV_ID_I210_SGMII: + case E1000_DEV_ID_I210_COPPER_FLASHLESS: + case E1000_DEV_ID_I210_SERDES_FLASHLESS: + mac->type = e1000_i210; + break; + case E1000_DEV_ID_I211_COPPER: + mac->type = e1000_i211; + break; + case E1000_DEV_ID_I354_BACKPLANE_1GBPS: + case E1000_DEV_ID_I354_SGMII: + case 
E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: + mac->type = e1000_i354; + break; + default: + return -E1000_ERR_MAC_INIT; + } + + /* Set media type */ + /* The 82575 uses bits 22:23 for link mode. The mode can be changed + * based on the EEPROM. We cannot rely upon device ID. There + * is no distinguishable difference between fiber and internal + * SerDes mode on the 82575. There can be an external PHY attached + * on the SGMII interface. For this, we'll set sgmii_active to true. + */ + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = false; + dev_spec->module_plugged = false; + + ctrl_ext = rd32(E1000_CTRL_EXT); + + link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; + switch (link_mode) { + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* Get phy control interface type set (MDIO vs. I2C)*/ + if (igb_sgmii_uses_mdio_82575(hw)) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + break; + } + fallthrough; /* for I2C based SGMII */ + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + /* read media type from SFP EEPROM */ + ret_val = igb_set_sfp_media_type_82575(hw); + if ((ret_val != 0) || + (hw->phy.media_type == e1000_media_type_unknown)) { + /* If media type was not identified then return media + * type defined by the CTRL_EXT settings. + */ + hw->phy.media_type = e1000_media_type_internal_serdes; + + if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + } + + break; + } + + /* change current link mode setting */ + ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + + if (dev_spec->sgmii_active) + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; + else + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + + wr32(E1000_CTRL_EXT, ctrl_ext); + + break; + default: + break; + } + + /* mac initialization and operations */ + ret_val = igb_init_mac_params_82575(hw); + if (ret_val) + goto out; + + /* NVM initialization */ + ret_val = igb_init_nvm_params_82575(hw); + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + ret_val = igb_init_nvm_params_i210(hw); + break; + default: + break; + } + + if (ret_val) + goto out; + + /* if part supports SR-IOV then initialize mailbox parameters */ + switch (mac->type) { + case e1000_82576: + case e1000_i350: + igb_init_mbx_params_pf(hw); + break; + default: + break; + } + + /* setup PHY parameters */ + ret_val = igb_init_phy_params_82575(hw); + +out: + return ret_val; +} + +/** + * igb_acquire_phy_82575 - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + **/ +static s32 igb_acquire_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * igb_release_phy_82575 - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. 
+ **/ +static void igb_release_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the serial gigabit media independent + * interface and stores the retrieved information in data. + **/ +static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + hw_dbg("PHY Address %u is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_read_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the serial gigabit + * media independent interface. + **/ +static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + hw_dbg("PHY Address %d is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_write_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_get_phy_id_82575 - Retrieve PHY addr and id + * @hw: pointer to the HW structure + * + * Retrieves the PHY address and ID for both PHY's which do and do not use + * sgmi interface. + **/ +static s32 igb_get_phy_id_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + u32 ctrl_ext; + u32 mdic; + + /* Extra read required for some PHY's on i354 */ + if (hw->mac.type == e1000_i354) + igb_get_phy_id(hw); + + /* For SGMII PHYs, we try the list of possible addresses until + * we find one that works. For non-SGMII PHYs + * (e.g. integrated copper PHYs), an address of 1 should + * work. The result of this function should mean phy->phy_addr + * and phy->id are set correctly. + */ + if (!(igb_sgmii_active_82575(hw))) { + phy->addr = 1; + ret_val = igb_get_phy_id(hw); + goto out; + } + + if (igb_sgmii_uses_mdio_82575(hw)) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + mdic = rd32(E1000_MDIC); + mdic &= E1000_MDIC_PHY_MASK; + phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + mdic = rd32(E1000_MDICNFG); + mdic &= E1000_MDICNFG_PHY_MASK; + phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + ret_val = igb_get_phy_id(hw); + goto out; + } + + /* Power on sgmii phy if it is disabled */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); + wrfl(); + msleep(300); + + /* The address field in the I2CCMD register is 3 bits and 0 is invalid. 
+ * Therefore, we need to test 1-7 + */ + for (phy->addr = 1; phy->addr < 8; phy->addr++) { + ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); + if (ret_val == 0) { + hw_dbg("Vendor ID 0x%08X read at address %u\n", + phy_id, phy->addr); + /* At the time of this writing, The M88 part is + * the only supported SGMII PHY product. + */ + if (phy_id == M88_VENDOR) + break; + } else { + hw_dbg("PHY address %u was unreadable\n", phy->addr); + } + } + + /* A valid PHY type couldn't be found. */ + if (phy->addr == 8) { + phy->addr = 0; + ret_val = -E1000_ERR_PHY; + goto out; + } else { + ret_val = igb_get_phy_id(hw); + } + + /* restore previous sfp cage power state */ + wr32(E1000_CTRL_EXT, ctrl_ext); + +out: + return ret_val; +} + +/** + * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY using the serial gigabit media independent interface. + **/ +static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + /* This isn't a true "hard" reset, but is the only reset + * available to us at this time. + */ + + hw_dbg("Soft resetting SGMII attached PHY...\n"); + + /* SFP documentation requires the following to configure the SPF module + * to work on SGMII. No further documentation is given. + */ + ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) + goto out; + + if (phy->id == M88E1512_E_PHY_ID) + ret_val = igb_initialize_M88E1512_phy(hw); + if (phy->id == M88E1543_E_PHY_ID) + ret_val = igb_initialize_M88E1543_phy(hw); +out: + return ret_val; +} + +/** + * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + goto out; + } + } + +out: + return ret_val; +} + +/** + * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u16 data; + + data = rd32(E1000_82580_PHY_POWER_MGMT); + + if (active) { + data |= E1000_82580_PM_D0_LPLU; + + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } else { + data &= ~E1000_82580_PM_D0_LPLU; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; } + + wr32(E1000_82580_PHY_POWER_MGMT, data); + return 0; +} + +/** + * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u16 data; + + data = rd32(E1000_82580_PHY_POWER_MGMT); + + if (!active) { + data &= ~E1000_82580_PM_D3_LPLU; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_82580_PM_D3_LPLU; + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } + + wr32(E1000_82580_PHY_POWER_MGMT, data); + return 0; +} + +/** + * igb_acquire_nvm_82575 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 igb_acquire_nvm_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM); + if (ret_val) + goto out; + + ret_val = igb_acquire_nvm(hw); + + if (ret_val) + hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); + +out: + return ret_val; +} + +/** + * igb_release_nvm_82575 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void igb_release_nvm_82575(struct e1000_hw *hw) +{ + igb_release_nvm(hw); + hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = 0; + s32 i = 0, timeout = 200; + + while (i < timeout) { + if (igb_get_hw_semaphore(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + igb_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); + +out: + return ret_val; +} + +/** + * igb_release_swfw_sync_82575 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igb_get_hw_semaphore(hw) != 0) + ; /* Empty */ + + swfw_sync = rd32(E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +} + +/** + * igb_get_cfg_done_82575 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. 
NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * 0. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_NVM_CFG_DONE_PORT_2; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_NVM_CFG_DONE_PORT_3; + + while (timeout) { + if (rd32(E1000_EEMNGCTL) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) + hw_dbg("MNG configuration cycle has not completed.\n"); + + /* If EEPROM is not marked present, init the PHY manually */ + if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && + (hw->phy.type == e1000_phy_igp_3)) + igb_phy_init_script_igp3(hw); + + return 0; +} + +/** + * igb_get_link_up_info_82575 - Get link speed/duplex info + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * This is a wrapper function, if using the serial gigabit media independent + * interface, use PCS to retrieve the link speed and duplex information. + * Otherwise, use the generic function to get the link speed and duplex info. + **/ +static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) + ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed, + duplex); + else + ret_val = igb_get_speed_and_duplex_copper(hw, speed, + duplex); + + return ret_val; +} + +/** + * igb_check_for_link_82575 - Check for link + * @hw: pointer to the HW structure + * + * If sgmii is enabled, then use the pcs register to determine link, otherwise + * use the generic interface for determining link. + **/ +static s32 igb_check_for_link_82575(struct e1000_hw *hw) +{ + s32 ret_val; + u16 speed, duplex; + + if (hw->phy.media_type != e1000_media_type_copper) { + ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, + &duplex); + /* Use this flag to determine if link needs to be checked or + * not. If we have link clear the flag so that we do not + * continue to check for link. + */ + hw->mac.get_link_status = !hw->mac.serdes_has_link; + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = igb_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + } else { + ret_val = igb_check_for_copper_link(hw); + } + + return ret_val; +} + +/** + * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown + * @hw: pointer to the HW structure + **/ +void igb_power_up_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !igb_sgmii_active_82575(hw)) + return; + + /* Enable PCS to turn on link */ + reg = rd32(E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_PCS_EN; + wr32(E1000_PCS_CFG0, reg); + + /* Power up the laser */ + reg = rd32(E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + wrfl(); + usleep_range(1000, 2000); +} + +/** + * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Using the physical coding sub-layer (PCS), retrieve the current speed and + * duplex, then store the values in the pointers provided. + **/ +static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 pcs, status; + + /* Set up defaults for the return values of this function */ + mac->serdes_has_link = false; + *speed = 0; + *duplex = 0; + + /* Read the PCS Status register for link state. For non-copper mode, + * the status register is not accurate. The PCS status register is + * used instead. + */ + pcs = rd32(E1000_PCS_LSTAT); + + /* The link up bit determines when link is up on autoneg. The sync ok + * gets set once both sides sync up and agree upon link. Stable link + * can be determined by checking for both link up and link sync ok + */ + if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { + mac->serdes_has_link = true; + + /* Detect and store PCS speed */ + if (pcs & E1000_PCS_LSTS_SPEED_1000) + *speed = SPEED_1000; + else if (pcs & E1000_PCS_LSTS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + /* Detect and store PCS duplex */ + if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + /* Check if it is an I354 2.5Gb backplane connection. */ + if (mac->type == e1000_i354) { + status = rd32(E1000_STATUS); + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + *speed = SPEED_2500; + *duplex = FULL_DUPLEX; + hw_dbg("2500 Mbs, "); + hw_dbg("Full Duplex\n"); + } + } + + } + + return 0; +} + +/** + * igb_shutdown_serdes_link_82575 - Remove link during power down + * @hw: pointer to the HW structure + * + * In the case of fiber serdes, shut down optics and PCS on driver unload + * when management pass thru is not enabled. 
+ **/ +void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + if (hw->phy.media_type != e1000_media_type_internal_serdes && + igb_sgmii_active_82575(hw)) + return; + + if (!igb_enable_mng_pass_thru(hw)) { + /* Disable PCS to turn off link */ + reg = rd32(E1000_PCS_CFG0); + reg &= ~E1000_PCS_CFG_PCS_EN; + wr32(E1000_PCS_CFG0, reg); + + /* shutdown the laser */ + reg = rd32(E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + wrfl(); + usleep_range(1000, 2000); + } +} + +/** + * igb_reset_hw_82575 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + **/ +static s32 igb_reset_hw_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igb_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed.\n"); + + /* set the completion timeout for interface */ + ret_val = igb_set_pcie_completion_timeout(hw); + if (ret_val) + hw_dbg("PCI-E Set completion timeout has failed.\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(E1000_IMC, 0xffffffff); + + wr32(E1000_RCTL, 0); + wr32(E1000_TCTL, E1000_TCTL_PSP); + wrfl(); + + usleep_range(10000, 20000); + + ctrl = rd32(E1000_CTRL); + + hw_dbg("Issuing a global reset to MAC\n"); + wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); + + ret_val = igb_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* If EEPROM is not present, run manual init scripts */ + if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) + igb_reset_init_script_82575(hw); + + /* Clear any pending interrupt events. */ + wr32(E1000_IMC, 0xffffffff); + rd32(E1000_ICR); + + /* Install any alternate MAC address into RAR0 */ + ret_val = igb_check_alt_mac_addr(hw); + + return ret_val; +} + +/** + * igb_init_hw_82575 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +static s32 igb_init_hw_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + if ((hw->mac.type >= e1000_i210) && + !(igb_get_flash_presence_i210(hw))) { + ret_val = igb_pll_workaround_i210(hw); + if (ret_val) + return ret_val; + } + + /* Initialize identification LED */ + ret_val = igb_id_led_init(hw); + if (ret_val) { + hw_dbg("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + hw_dbg("Initializing the IEEE VLAN\n"); + igb_clear_vfta(hw); + + /* Setup the receive address */ + igb_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + array_wr32(E1000_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(E1000_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = igb_setup_link(hw); + + /* Clear all of the statistics registers (clear on read). 
It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + igb_clear_hw_cntrs_82575(hw); + return ret_val; +} + +/** + * igb_setup_copper_link_82575 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u32 phpm_reg; + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + wr32(E1000_CTRL, ctrl); + + /* Clear Go Link Disconnect bit on supported devices */ + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i210: + case e1000_i211: + phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT); + phpm_reg &= ~E1000_82580_PM_GO_LINKD; + wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg); + break; + default: + break; + } + + ret_val = igb_setup_serdes_link_82575(hw); + if (ret_val) + goto out; + + if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { + /* allow time for SFP cage time to power up phy */ + msleep(300); + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + } + switch (hw->phy.type) { + case e1000_phy_i210: + case e1000_phy_m88: + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + ret_val = igb_copper_link_setup_m88_gen2(hw); + break; + default: + ret_val = igb_copper_link_setup_m88(hw); + break; + } + break; + case e1000_phy_igp_3: + ret_val = igb_copper_link_setup_igp(hw); + break; + case e1000_phy_82580: + ret_val = igb_copper_link_setup_82580(hw); + break; + case e1000_phy_bcm54616: + ret_val = 0; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + goto out; + + ret_val = igb_setup_copper_link(hw); +out: + return ret_val; +} + +/** + * igb_setup_serdes_link_82575 - Setup link for serdes + * @hw: pointer to the HW structure + * + * Configure the physical coding sub-layer (PCS) link. The PCS link is + * used on copper connections where the serialized gigabit media independent + * interface (sgmii), or serdes fiber is being used. Configures the link + * for auto-negotiation or forces speed/duplex. + **/ +static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) +{ + u32 ctrl_ext, ctrl_reg, reg, anadv_reg; + bool pcs_autoneg; + s32 ret_val = 0; + u16 data; + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !igb_sgmii_active_82575(hw)) + return ret_val; + + + /* On the 82575, SerDes loopback mode persists until it is + * explicitly turned off or a power cycle is performed. A read to + * the register does not indicate its status. Therefore, we ensure + * loopback mode is disabled during initialization. 
+ */ + wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + + /* power on the sfp cage if present and turn on I2C */ + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + ctrl_ext |= E1000_CTRL_I2C_ENA; + wr32(E1000_CTRL_EXT, ctrl_ext); + + ctrl_reg = rd32(E1000_CTRL); + ctrl_reg |= E1000_CTRL_SLU; + + if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { + /* set both sw defined pins */ + ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; + + /* Set switch control to serdes energy detect */ + reg = rd32(E1000_CONNSW); + reg |= E1000_CONNSW_ENRGSRC; + wr32(E1000_CONNSW, reg); + } + + reg = rd32(E1000_PCS_LCTL); + + /* default pcs_autoneg to the same setting as mac autoneg */ + pcs_autoneg = hw->mac.autoneg; + + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* sgmii mode lets the phy handle forcing speed/duplex */ + pcs_autoneg = true; + /* autoneg time out should be disabled for SGMII mode */ + reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); + break; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + /* disable PCS autoneg and support parallel detect only */ + pcs_autoneg = false; + fallthrough; + default: + if (hw->mac.type == e1000_82575 || + hw->mac.type == e1000_82576) { + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); + if (ret_val) { + hw_dbg(KERN_DEBUG "NVM Read Error\n\n"); + return ret_val; + } + + if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) + pcs_autoneg = false; + } + + /* non-SGMII modes only supports a speed of 1000/Full for the + * link so it is best to just force the MAC and let the pcs + * link either autoneg or be forced to 1000/Full + */ + ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | + E1000_CTRL_FD | E1000_CTRL_FRCDPX; + + /* set speed of 1000/Full if speed/duplex is forced */ + reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; + break; + } + + wr32(E1000_CTRL, ctrl_reg); + + /* New SerDes mode allows for forcing speed or autonegotiating speed + * at 1gb. Autoneg should be default set by most drivers. This is the + * mode that will be compatible with older link partners and switches. + * However, both are supported by the hardware and some drivers/tools. 
+ */ + reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | + E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); + + if (pcs_autoneg) { + /* Set PCS register for autoneg */ + reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ + E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ + + /* Disable force flow control for autoneg */ + reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; + + /* Configure flow control advertisement for autoneg */ + anadv_reg = rd32(E1000_PCS_ANADV); + anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); + switch (hw->fc.requested_mode) { + case e1000_fc_full: + case e1000_fc_rx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + anadv_reg |= E1000_TXCW_PAUSE; + break; + case e1000_fc_tx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + break; + default: + break; + } + wr32(E1000_PCS_ANADV, anadv_reg); + + hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); + } else { + /* Set PCS register for forced link */ + reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + + /* Force flow control for forced link */ + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + + hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); + } + + wr32(E1000_PCS_LCTL, reg); + + if (!pcs_autoneg && !igb_sgmii_active_82575(hw)) + igb_force_mac_fc(hw); + + return ret_val; +} + +/** + * igb_sgmii_active_82575 - Return sgmii state + * @hw: pointer to the HW structure + * + * 82575 silicon has a serialized gigabit media independent interface (sgmii) + * which can be enabled for use in the embedded applications. Simply + * return the current state of the sgmii interface. + **/ +static bool igb_sgmii_active_82575(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + return dev_spec->sgmii_active; +} + +/** + * igb_reset_init_script_82575 - Inits HW defaults after reset + * @hw: pointer to the HW structure + * + * Inits recommended HW defaults after a reset when there is no EEPROM + * detected. This is only for the 82575. + **/ +static s32 igb_reset_init_script_82575(struct e1000_hw *hw) +{ + if (hw->mac.type == e1000_82575) { + hw_dbg("Running reset init script for 82575\n"); + /* SerDes configuration via SERDESCTRL */ + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15); + + /* CCM configuration via CCMCTL register */ + igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00); + igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00); + + /* PCIe lanes configuration */ + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81); + + /* PCIe PLL Configuration */ + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47); + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00); + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00); + } + + return 0; +} + +/** + * igb_read_mac_addr_82575 - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. 
+ */ + ret_val = igb_check_alt_mac_addr(hw); + if (ret_val) + goto out; + + ret_val = igb_read_mac_addr(hw); + +out: + return ret_val; +} + +/** + * igb_power_down_phy_copper_82575 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +void igb_power_down_phy_copper_82575(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw))) + igb_power_down_phy_copper(hw); +} + +/** + * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) +{ + igb_clear_hw_cntrs_base(hw); + + rd32(E1000_PRC64); + rd32(E1000_PRC127); + rd32(E1000_PRC255); + rd32(E1000_PRC511); + rd32(E1000_PRC1023); + rd32(E1000_PRC1522); + rd32(E1000_PTC64); + rd32(E1000_PTC127); + rd32(E1000_PTC255); + rd32(E1000_PTC511); + rd32(E1000_PTC1023); + rd32(E1000_PTC1522); + + rd32(E1000_ALGNERRC); + rd32(E1000_RXERRC); + rd32(E1000_TNCRS); + rd32(E1000_CEXTERR); + rd32(E1000_TSCTC); + rd32(E1000_TSCTFC); + + rd32(E1000_MGTPRC); + rd32(E1000_MGTPDC); + rd32(E1000_MGTPTC); + + rd32(E1000_IAC); + rd32(E1000_ICRXOC); + + rd32(E1000_ICRXPTC); + rd32(E1000_ICRXATC); + rd32(E1000_ICTXPTC); + rd32(E1000_ICTXATC); + rd32(E1000_ICTXQEC); + rd32(E1000_ICTXQMTC); + rd32(E1000_ICRXDMTC); + + rd32(E1000_CBTMPC); + rd32(E1000_HTDPMC); + rd32(E1000_CBRMPC); + rd32(E1000_RPTHC); + rd32(E1000_HGPTC); + rd32(E1000_HTCBDPC); + rd32(E1000_HGORCL); + rd32(E1000_HGORCH); + rd32(E1000_HGOTCL); + rd32(E1000_HGOTCH); + rd32(E1000_LENERRS); + + /* This register should not be read in copper configurations */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + igb_sgmii_active_82575(hw)) + rd32(E1000_SCVPC); +} + +/** + * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable + * @hw: pointer to the HW structure + * + * After rx enable if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + **/ +void igb_rx_fifo_flush_82575(struct e1000_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + /* disable IPv6 options as per hardware errata */ + rfctl = rd32(E1000_RFCTL); + rfctl |= E1000_RFCTL_IPV6_EX_DIS; + wr32(E1000_RFCTL, rfctl); + + if (hw->mac.type != e1000_82575 || + !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) + return; + + /* Disable all RX queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = rd32(E1000_RXDCTL(i)); + wr32(E1000_RXDCTL(i), + rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + usleep_range(1000, 2000); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= rd32(E1000_RXDCTL(i)); + if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + hw_dbg("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); + + rlpml = rd32(E1000_RLPML); + wr32(E1000_RLPML, 0); + + rctl = rd32(E1000_RCTL); + temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); + temp_rctl |= E1000_RCTL_LPE; + + wr32(E1000_RCTL, temp_rctl); + wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); + wrfl(); + usleep_range(2000, 3000); + + /* Enable RX queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + wr32(E1000_RXDCTL(i), rxdctl[i]); + wr32(E1000_RCTL, rctl); + wrfl(); + + wr32(E1000_RLPML, rlpml); + wr32(E1000_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + rd32(E1000_ROC); + rd32(E1000_RNBC); + rd32(E1000_MPC); +} + +/** + * igb_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 200ms for capability version 1 config, + * or 16ms to 55ms for version 2. + **/ +static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) +{ + u32 gcr = rd32(E1000_GCR); + s32 ret_val = 0; + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & E1000_GCR_CMPL_TMOUT_MASK) + goto out; + + /* if capabilities version is type 1 we can write the + * timeout of 10ms to 200ms through the GCR register + */ + if (!(gcr & E1000_GCR_CAP_VER2)) { + gcr |= E1000_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); + if (ret_val) + goto out; + + pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; + + ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; + + wr32(E1000_GCR, gcr); + return ret_val; +} + +/** + * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * @pf: Physical Function pool - do not set anti-spoofing for the PF + * + * enables/disables L2 switch anti-spoofing functionality. + **/ +void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) +{ + u32 reg_val, reg_offset; + + switch (hw->mac.type) { + case e1000_82576: + reg_offset = E1000_DTXSWC; + break; + case e1000_i350: + case e1000_i354: + reg_offset = E1000_TXSWC; + break; + default: + return; + } + + reg_val = rd32(reg_offset); + if (enable) { + reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs + */ + reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS)); + } else { + reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } + wr32(reg_offset, reg_val); +} + +/** + * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables L2 switch loopback functionality. 
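+ * Only the 82576 (via DTXSWC) and the i350/i354 (via TXSWC) expose this control; for any other MAC type the function is a no-op.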
+ **/ +void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) +{ + u32 dtxswc; + + switch (hw->mac.type) { + case e1000_82576: + dtxswc = rd32(E1000_DTXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + wr32(E1000_DTXSWC, dtxswc); + break; + case e1000_i354: + case e1000_i350: + dtxswc = rd32(E1000_TXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + wr32(E1000_TXSWC, dtxswc); + break; + default: + /* Currently no other hardware supports loopback */ + break; + } + +} + +/** + * igb_vmdq_set_replication_pf - enable or disable vmdq replication + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables replication of packets across multiple pools. + **/ +void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) +{ + u32 vt_ctl = rd32(E1000_VT_CTL); + + if (enable) + vt_ctl |= E1000_VT_CTL_VM_REPL_EN; + else + vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; + + wr32(E1000_VT_CTL, vt_ctl); +} + +/** + * igb_read_phy_reg_82580 - Read 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_read_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_82580 - Write 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_write_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits + * @hw: pointer to the HW structure + * + * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on + * the values found in the EEPROM. This addresses an issue in which these + * bits are not restored from EEPROM after reset. + **/ +static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 mdicnfg; + u16 nvm_data = 0; + + if (hw->mac.type != e1000_82580) + goto out; + if (!igb_sgmii_active_82575(hw)) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + mdicnfg = rd32(E1000_MDICNFG); + if (nvm_data & NVM_WORD24_EXT_MDIO) + mdicnfg |= E1000_MDICNFG_EXT_MDIO; + if (nvm_data & NVM_WORD24_COM_MDIO) + mdicnfg |= E1000_MDICNFG_COM_MDIO; + wr32(E1000_MDICNFG, mdicnfg); +out: + return ret_val; +} + +/** + * igb_reset_hw_82580 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets function or entire device (all ports, etc.) + * to a known state. 
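+ * On the 82580 itself a global device reset is never attempted (see the errata check below); only a port-level reset via CTRL.RST is issued.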
+ **/ +static s32 igb_reset_hw_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + /* BH SW mailbox bit in SW_FW_SYNC */ + u16 swmbsw_mask = E1000_SW_SYNCH_MB; + u32 ctrl; + bool global_device_reset = hw->dev_spec._82575.global_device_reset; + + hw->dev_spec._82575.global_device_reset = false; + + /* due to hw errata, global device reset doesn't always + * work on 82580 + */ + if (hw->mac.type == e1000_82580) + global_device_reset = false; + + /* Get current control state. */ + ctrl = rd32(E1000_CTRL); + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igb_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed.\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(E1000_IMC, 0xffffffff); + wr32(E1000_RCTL, 0); + wr32(E1000_TCTL, E1000_TCTL_PSP); + wrfl(); + + usleep_range(10000, 11000); + + /* Determine whether or not a global dev reset is requested */ + if (global_device_reset && + hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask)) + global_device_reset = false; + + if (global_device_reset && + !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET)) + ctrl |= E1000_CTRL_DEV_RST; + else + ctrl |= E1000_CTRL_RST; + + wr32(E1000_CTRL, ctrl); + wrfl(); + + /* Add delay to insure DEV_RST has time to complete */ + if (global_device_reset) + usleep_range(5000, 6000); + + ret_val = igb_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* clear global device reset status bit */ + wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); + + /* Clear any pending interrupt events. */ + wr32(E1000_IMC, 0xffffffff); + rd32(E1000_ICR); + + ret_val = igb_reset_mdicnfg_82580(hw); + if (ret_val) + hw_dbg("Could not reset MDICNFG based on EEPROM\n"); + + /* Install any alternate MAC address into RAR0 */ + ret_val = igb_check_alt_mac_addr(hw); + + /* Release semaphore */ + if (global_device_reset) + hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); + + return ret_val; +} + +/** + * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size + * @data: data received by reading RXPBS register + * + * The 82580 uses a table based approach for packet buffer allocation sizes. + * This function converts the retrieved value into the correct table value + * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 + * 0x0 36 72 144 1 2 4 8 16 + * 0x8 35 70 140 rsv rsv rsv rsv rsv + */ +u16 igb_rxpbs_adjust_82580(u32 data) +{ + u16 ret_val = 0; + + if (data < ARRAY_SIZE(e1000_82580_rxpbs_table)) + ret_val = e1000_82580_rxpbs_table[data]; + + return ret_val; +} + +/** + * igb_validate_nvm_checksum_with_offset - Validate EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
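+ * The checksum word itself is included in the sum, so the value stored at NVM_CHECKSUM_REG is NVM_SUM (0xBABA) minus the sum of all preceding words in the protected region.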
+ **/ +static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_with_offset - Update EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, + &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 eeprom_regions_count = 1; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { + /* if checksums compatibility bit is set validate checksums + * for all 4 ports. + */ + eeprom_regions_count = 4; + } + + for (j = 0; j < eeprom_regions_count; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_82580 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. 
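+ * The compatibility bit in NVM_COMPATIBILITY_REG_3 is set first (if it is not already) so that subsequent validation checks all four per-port checksums.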
+ **/ +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum compatibility bit.\n"); + goto out; + } + + if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { + /* set compatibility bit to validate checksums appropriately */ + nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; + ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, + &nvm_data); + if (ret_val) { + hw_dbg("NVM Write Error while updating checksum compatibility bit.\n"); + goto out; + } + } + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_i350 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. + **/ +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * __igb_access_emi_reg - Read/write EMI register + * @hw: pointer to the HW structure + * @address: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + **/ +static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val = 0; + + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); + + return ret_val; +} + +/** + * igb_read_emi_reg - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + **/ +s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) +{ + return __igb_access_emi_reg(hw, addr, data, true); +} + +/** + * igb_set_eee_i350 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. 
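+ * Only copper ports on i350 or later MACs are touched; for anything else the IPCNFG and EEER registers are left unchanged.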
+ * + **/ +s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + u32 ipcnfg, eeer; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + goto out; + ipcnfg = rd32(E1000_IPCNFG); + eeer = rd32(E1000_EEER); + + /* enable or disable per user setting */ + if (!(hw->dev_spec._82575.eee_disable)) { + u32 eee_su = rd32(E1000_EEE_SU); + + if (adv100M) + ipcnfg |= E1000_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= E1000_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN; + + eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & E1000_EEE_SU_LPI_CLK_STP) + hw_dbg("LPI Clock Stop Bit should not be set!\n"); + + } else { + ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | + E1000_IPCNFG_EEE_100M_AN); + eeer &= ~(E1000_EEER_TX_LPI_EN | + E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + } + wr32(E1000_IPCNFG, ipcnfg); + wr32(E1000_EEER, eeer); + rd32(E1000_IPCNFG); + rd32(E1000_EEER); +out: + + return 0; +} + +/** + * igb_set_eee_i354 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE legacy mode based on setting in dev_spec structure. + * + **/ +s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data; + + if ((hw->phy.media_type != e1000_media_type_copper) || + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) + goto out; + + if (!hw->dev_spec._82575.eee_disable) { + /* Switch to PHY page 18. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, + &phy_data); + if (ret_val) + goto out; + + phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, + phy_data); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + /* Turn on EEE advertisement. */ + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + if (adv100M) + phy_data |= E1000_EEE_ADV_100_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_100_SUPPORTED; + + if (adv1G) + phy_data |= E1000_EEE_ADV_1000_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED; + + ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } else { + /* Turn off EEE advertisement. */ + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | + E1000_EEE_ADV_1000_SUPPORTED); + ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } + +out: + return ret_val; +} + +/** + * igb_get_eee_status_i354 - Get EEE status + * @hw: pointer to the HW structure + * @status: EEE status + * + * Get EEE status by guessing based on whether Tx or Rx LPI indications have + * been received. + **/ +s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data; + + /* Check if EEE is supported on this device. 
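Only copper links behind a Marvell 88E1543 or 88E1512 PHY report an EEE status here.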
*/ + if ((hw->phy.media_type != e1000_media_type_copper) || + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) + goto out; + + ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, + E1000_PCS_STATUS_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | + E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; + +out: + return ret_val; +} + +#ifdef CONFIG_IGB_HWMON +static const u8 e1000_emc_temp_data[4] = { + E1000_EMC_INTERNAL_DATA, + E1000_EMC_DIODE1_DATA, + E1000_EMC_DIODE2_DATA, + E1000_EMC_DIODE3_DATA +}; +static const u8 e1000_emc_therm_limit[4] = { + E1000_EMC_INTERNAL_THERM_LIMIT, + E1000_EMC_DIODE1_THERM_LIMIT, + E1000_EMC_DIODE2_THERM_LIMIT, + E1000_EMC_DIODE3_THERM_LIMIT +}; + +/** + * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the temperatures in mac.thermal_sensor_data + **/ +static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) +{ + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return 0; + + hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; + + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + if (num_sensors > E1000_MAX_SENSORS) + num_sensors = E1000_MAX_SENSORS; + + for (i = 1; i < num_sensors; i++) { + hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + + if (sensor_location != 0) + hw->phy.ops.read_i2c_byte(hw, + e1000_emc_temp_data[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + &data->sensor[i].temp); + } + return 0; +} + +/** + * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Sets the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) +{ + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 low_thresh_delta; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 therm_limit; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + memset(data, 0, sizeof(struct e1000_thermal_sensor_data)); + + data->sensor[0].location = 0x1; + data->sensor[0].caution_thresh = + (rd32(E1000_THHIGHTC) & 0xFF); + data->sensor[0].max_op_thresh = + (rd32(E1000_THLOWTC) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return 0; + + hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; 
+ + low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >> + NVM_ETS_LTHRES_DELTA_SHIFT); + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + + for (i = 1; i <= num_sensors; i++) { + hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK; + + hw->phy.ops.write_i2c_byte(hw, + e1000_emc_therm_limit[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + therm_limit); + + if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) { + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - + low_thresh_delta; + } + } + return 0; +} + +#endif +static struct e1000_mac_operations e1000_mac_ops_82575 = { + .init_hw = igb_init_hw_82575, + .check_for_link = igb_check_for_link_82575, + .rar_set = igb_rar_set, + .read_mac_addr = igb_read_mac_addr_82575, + .get_speed_and_duplex = igb_get_link_up_info_82575, +#ifdef CONFIG_IGB_HWMON + .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic, + .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic, +#endif +}; + +static const struct e1000_phy_operations e1000_phy_ops_82575 = { + .acquire = igb_acquire_phy_82575, + .get_cfg_done = igb_get_cfg_done_82575, + .release = igb_release_phy_82575, + .write_i2c_byte = igb_write_i2c_byte, + .read_i2c_byte = igb_read_i2c_byte, +}; + +static struct e1000_nvm_operations e1000_nvm_ops_82575 = { + .acquire = igb_acquire_nvm_82575, + .read = igb_read_nvm_eerd, + .release = igb_release_nvm_82575, + .write = igb_write_nvm_spi, +}; + +const struct e1000_info e1000_82575_info = { + .get_invariants = igb_get_invariants_82575, + .mac_ops = &e1000_mac_ops_82575, + .phy_ops = &e1000_phy_ops_82575, + .nvm_ops = &e1000_nvm_ops_82575, +}; + diff --git a/devices/igb/e1000_82575-6.1-orig.h b/devices/igb/e1000_82575-6.1-orig.h new file mode 100644 index 00000000..63ec253a --- /dev/null +++ b/devices/igb/e1000_82575-6.1-orig.h @@ -0,0 +1,265 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_82575_H_ +#define _E1000_82575_H_ + +void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); +void igb_power_up_serdes_link_82575(struct e1000_hw *hw); +void igb_power_down_phy_copper_82575(struct e1000_hw *hw); +void igb_rx_fifo_flush_82575(struct e1000_hw *hw); +s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data); +s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data); + +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +#define E1000_RAR_ENTRIES_82575 16 +#define E1000_RAR_ENTRIES_82576 24 +#define E1000_RAR_ENTRIES_82580 24 +#define E1000_RAR_ENTRIES_I350 32 + +#define E1000_SW_SYNCH_MB 0x00000100 +#define E1000_STAT_DEV_RST_SET 0x00100000 +#define E1000_CTRL_DEV_RST 0x20000000 + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 +#define E1000_SRRCTL_TIMESTAMP 0x40000000 + + +#define E1000_MRQC_ENABLE_RSS_MQ 0x00000002 +#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_ENABLE_VMDQ_RSS_MQ 0x00000005 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 + +#define E1000_EICR_TX_QUEUE ( \ + E1000_EICR_TX_QUEUE0 | \ + E1000_EICR_TX_QUEUE1 | \ + E1000_EICR_TX_QUEUE2 | \ + E1000_EICR_TX_QUEUE3) + +#define E1000_EICR_RX_QUEUE ( \ + E1000_EICR_RX_QUEUE0 | \ + E1000_EICR_RX_QUEUE1 | \ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) + +/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + __le16 pkt_info; /* RSS type, Packet type */ + __le16 hdr_info; /* Split Head, buf len */ + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ +/* IPSec Encrypt Enable for ESP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +/* Adv ctxt IPSec ESP len mask */ + +/* Additional Transmit Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ +/* Tx Queue Arbitration Priority 0=low, 1=high */ + +/* Additional Receive Descriptor Control definitions */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */ +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */ +#define 
E1000_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */ +#define E1000_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */ + +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */ +#define E1000_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */ + +/* Additional DCA related definitions, note change in position of CPUID */ +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ +#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ + +/* ETQF register bit definitions */ +#define E1000_ETQF_FILTER_ENABLE BIT(26) +#define E1000_ETQF_1588 BIT(30) +#define E1000_ETQF_IMM_INT BIT(29) +#define E1000_ETQF_QUEUE_ENABLE BIT(31) +#define E1000_ETQF_QUEUE_SHIFT 16 +#define E1000_ETQF_QUEUE_MASK 0x00070000 +#define E1000_ETQF_ETYPE_MASK 0x0000FFFF + +/* FTQF register bit definitions */ +#define E1000_FTQF_VF_BP 0x00008000 +#define E1000_FTQF_1588_TIME_STAMP 0x08000000 +#define E1000_FTQF_MASK 0xF0000000 +#define E1000_FTQF_MASK_PROTO_BP 0x10000000 +#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 + +#define E1000_NVM_APME_82575 0x0400 +#define MAX_NUM_VFS 8 + +#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */ +#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ +#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN BIT(31) /* global VF LB enable */ + +/* Easy defines for setting default pool, would normally be left a zero */ +#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 +#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) + +/* Other useful VMD_CTL register defines */ +#define E1000_VT_CTL_IGNORE_MAC BIT(28) +#define E1000_VT_CTL_DISABLE_DEF_POOL BIT(29) +#define E1000_VT_CTL_VM_REPL_EN BIT(30) + +/* Per VM Offload register setup */ +#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ +#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ +#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ +#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ +#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ +#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ +#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ +#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */ +#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_VLVF_ARRAY_SIZE 32 +#define E1000_VLVF_VLANID_MASK 0x00000FFF +#define E1000_VLVF_POOLSEL_SHIFT 12 +#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) +#define E1000_VLVF_LVLAN 0x00100000 +#define E1000_VLVF_VLANID_ENABLE 0x80000000 + +#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* 
Never insert VLAN tag */ + +#define E1000_IOVCTL 0x05BBC +#define E1000_IOVCTL_REUSE_VFQ 0x00000001 + +#define E1000_RPLOLR_STRVLAN 0x40000000 +#define E1000_RPLOLR_STRCRC 0x80000000 + +#define E1000_DTXCTL_8023LL 0x0004 +#define E1000_DTXCTL_VLAN_ADDED 0x0008 +#define E1000_DTXCTL_OOS_ENABLE 0x0010 +#define E1000_DTXCTL_MDP_EN 0x0020 +#define E1000_DTXCTL_SPOOF_INT 0x0040 + +#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT BIT(14) + +#define ALL_QUEUES 0xFFFF + +/* RX packet buffer size defines */ +#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F +void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int); +void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); +void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); +u16 igb_rxpbs_adjust_82580(u32 data); +s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data); +s32 igb_set_eee_i350(struct e1000_hw *, bool adv1G, bool adv100M); +s32 igb_set_eee_i354(struct e1000_hw *, bool adv1G, bool adv100M); +s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status); + +#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define E1000_EMC_INTERNAL_DATA 0x00 +#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20 +#define E1000_EMC_DIODE1_DATA 0x01 +#define E1000_EMC_DIODE1_THERM_LIMIT 0x19 +#define E1000_EMC_DIODE2_DATA 0x23 +#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A +#define E1000_EMC_DIODE3_DATA 0x2A +#define E1000_EMC_DIODE3_THERM_LIMIT 0x30 +#endif diff --git a/devices/igb/e1000_defines-5.14-ethercat.h b/devices/igb/e1000_defines-5.14-ethercat.h new file mode 100644 index 00000000..ca542977 --- /dev/null +++ b/devices/igb/e1000_defines-5.14-ethercat.h @@ -0,0 +1,1078 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +/* Wake Up Status */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact */ +#define E1000_WUS_ARPD 0x00000020 /* Directed ARP Request */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 */ +#define E1000_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */ + +/* Packet types that are enabled for wake packet delivery */ +#define WAKE_PKT_WUS ( \ + E1000_WUS_EX | \ + E1000_WUS_ARPD | \ + E1000_WUS_IPV4 | \ + E1000_WUS_IPV6 | \ + E1000_WUS_NSD) + +/* Wake Up Packet Length */ +#define E1000_WUPL_MASK 0x00000FFF + +/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ +#define E1000_WUPM_BYTES 128 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ +#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ + +/* Physical Func Reset Done Indication */ +#define 
E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 +/* Interrupt delay cancellation */ +/* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 +/* Interrupt acknowledge Auto-mask */ +/* Clear Interrupt timers after IMS clear */ +/* packet buffer parity error detection enabled */ +/* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) +#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */ + +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */ +/* Enable Neighbor Discovery Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 + +/* Receive Control */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab 
*/ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x1 +#define E1000_SWFW_PHY0_SM 0x2 +#define E1000_SWFW_PHY1_SM 0x4 +#define E1000_SWFW_PHY2_SM 0x20 +#define E1000_SWFW_PHY3_SM 0x40 + +/* FACTPS Definitions */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 
0=normal,1=reset */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +/* Defined polarity of Dock/Undock indication in SDP[0] */ +/* Reset both PHY ports, through PHYRST_N pin */ +/* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ +#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +/* Initiate an interrupt to manageability engine */ +#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. + */ + +#define E1000_CONNSW_ENRGSRC 0x4 +#define E1000_CONNSW_PHYSD 0x400 +#define E1000_CONNSW_PHY_PDN 0x800 +#define E1000_CONNSW_SERDESD 0x200 +#define E1000_CONNSW_AUTOSENSE_CONF 0x2 +#define E1000_CONNSW_AUTOSENSE_EN 0x1 +#define E1000_PCS_CFG_PCS_EN 8 +#define E1000_PCS_LCTL_FLV_LINK_UP 1 +#define E1000_PCS_LCTL_FSV_100 2 +#define E1000_PCS_LCTL_FSV_1000 4 +#define E1000_PCS_LCTL_FDV_FULL 8 +#define E1000_PCS_LCTL_FSD 0x10 +#define E1000_PCS_LCTL_FORCE_LINK 0x20 +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +#define E1000_PCS_LCTL_AN_RESTART 0x20000 +#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 + +#define E1000_PCS_LSTS_LINK_OK 1 +#define E1000_PCS_LSTS_SPEED_100 2 +#define E1000_PCS_LSTS_SPEED_1000 4 +#define E1000_PCS_LSTS_DUPLEX_FULL 8 +#define E1000_PCS_LSTS_SYNK_OK 0x10 + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +/* Change in Dock/Undock state. Clear on write '0'. */ +/* Status of Master requests. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 +/* BMC external code execution disabled */ + +#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ +#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ +/* Constants used to intrepret the masked PCI-X bus speed. 
*/ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. */ +#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_IVRT 0x00000040 + +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +/* Extended desc bits for Linksec and timesync */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ + +/* DMA Coalescing register fields */ +#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */ +#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */ +#define E1000_DMACR_DMACTHR_SHIFT 16 +#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */ +#define E1000_DMACR_DMAC_LX_SHIFT 28 +#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +/* DMA Coalescing BMC-to-OS Watchdog Enable */ +#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 + +#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */ + +#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ + +#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */ +#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */ + +#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */ + +#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */ +#define E1000_FCRTC_RTH_COAL_SHIFT 4 +#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ + +/* Timestamp in Rx buffer */ +#define E1000_RXPBS_CFG_TS_EN 0x80000000 + +#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I210_RXPBSIZE_MASK 0x0000003F +#define I210_RXPBSIZE_PB_30KB 0x0000001E +#define I210_RXPBSIZE_PB_32KB 0x00000020 +#define 
I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +#define I210_TXPBSIZE_MASK 0xC0FFFFFF +#define I210_TXPBSIZE_PB0_6KB (6 << 0) +#define I210_TXPBSIZE_PB1_6KB (6 << 6) +#define I210_TXPBSIZE_PB2_6KB (6 << 12) +#define I210_TXPBSIZE_PB3_6KB (6 << 18) + +#define I210_DTXMXPKTSZ_DEFAULT 0x00000098 + +#define I210_SR_QUEUES_NUM 2 + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Header split receive */ +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_LEF 0x00040000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ + +/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ +#define MAX_JUMBO_FRAME_SIZE 0x2600 +#define MAX_STD_JUMBO_FRAME_SIZE 9216 + +/* PBA constants */ +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_64K 0x0040 /* 64KB */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ +#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 +/* LAN connected device generates an interrupt */ +#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ + +/* Extended Interrupt Cause Read */ +#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +/* TCP Timer */ + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. 
Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC | \ + E1000_IMS_DOUTSYNC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ +#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ +#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ + +/* Extended Interrupt Mask Set */ +#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ +#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */ + +/* Extended Interrupt Cause Set */ +/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ +#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ + + +/* Transmit Descriptor Control */ +/* Enable the counting of descriptors still to be processed. */ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* Transmit Config Word */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address */ +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. 
+ */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAH_ASEL_SRC_ADDR 0x00010000 +#define E1000_RAH_QSEL_ENABLE 0x10000000 +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 +#define E1000_RAH_POOL_MASK 0x03FC0000 +#define E1000_RAH_POOL_1 0x00040000 + +/* Error Codes */ +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_MBX 15 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 +#define E1000_ERR_INVM_VALUE_NOT_FOUND 19 +#define E1000_ERR_I2C 20 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestampping */ + +#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 +#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 +#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 + +#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 +#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 +#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 +#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 +#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 +#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 + +#define E1000_TIMINCA_16NS_SHIFT 24 + +/* Time Sync Interrupt Cause/Mask Register Bits */ + +#define TSINTR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */ +#define TSINTR_TXTS BIT(1) /* Transmit Timestamp. */ +#define TSINTR_RXTS BIT(2) /* Receive Timestamp. */ +#define TSINTR_TT0 BIT(3) /* Target Time 0 Trigger. */ +#define TSINTR_TT1 BIT(4) /* Target Time 1 Trigger. */ +#define TSINTR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */ +#define TSINTR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. 
*/ +#define TSINTR_TADJ BIT(7) /* Time Adjust Done. */ + +#define TSYNC_INTERRUPTS TSINTR_TXTS +#define E1000_TSICR_TXTS TSINTR_TXTS + +/* TSAUXC Configuration Bits */ +#define TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ +#define TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ +#define TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ +#define TSAUXC_SAMP_AUT0 BIT(3) /* Latch SYSTIML/H into AUXSTMPL/0. */ +#define TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */ +#define TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */ +#define TSAUXC_SAMP_AUT1 BIT(6) /* Latch SYSTIML/H into AUXSTMPL/1. */ +#define TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */ +#define TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_PLSG BIT(17) /* Generate a pulse. */ +#define TSAUXC_DISABLE BIT(31) /* Disable SYSTIM Count Operation. */ + +/* SDP Configuration Bits */ +#define AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ +#define TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ +#define TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ +#define TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ +#define TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. 
*/ + +#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ +#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ +#define E1000_MDICNFG_PHY_MASK 0x03E00000 +#define E1000_MDICNFG_PHY_SHIFT 21 + +#define E1000_MEDIA_PORT_COPPER 1 +#define E1000_MEDIA_PORT_OTHER 2 +#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2 +#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3 +#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */ +#define E1000_M88E1112_MAC_CTRL_1 0x10 +#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */ +#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 +#define E1000_M88E1112_PAGE_ADDR 0x16 +#define E1000_M88E1112_STATUS 0x01 +#define E1000_M88E1512_CFG_REG_1 0x0010 +#define E1000_M88E1512_CFG_REG_2 0x0011 +#define E1000_M88E1512_CFG_REG_3 0x0007 +#define E1000_M88E1512_MODE 0x0014 + +/* PCI Express Control */ +#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 +#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define E1000_GCR_CAP_VER2 0x00040000 + +/* mPHY Address Control and Data Registers */ +#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */ +#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 +#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */ + +/* mPHY PCS CLK Register */ +#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */ +/* mPHY Near End Digital Loopback Override Bit */ +#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 + +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 + +/* PHY Control Register */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* Autoneg Expansion Register */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ + + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define 
PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ +#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 +#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ +/* Secure FLASH mode requires removing MSb */ +#define E1000_I210_FW_PTR_MASK 0x7FFF +/* Firmware code revision field word offset*/ +#define E1000_I210_FW_VER_OFFSET 328 +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 + + +/* Offset to data in NVM read/write registers */ +#define E1000_NVM_RW_REG_DATA 16 +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */ +#define NVM_VERSION 0x0005 +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 +#define NVM_MAC_ADDR 0x0000 +#define NVM_SUB_DEV_ID 0x000B +#define NVM_SUB_VEN_ID 0x000C +#define NVM_DEV_ID 0x000D +#define NVM_VEN_ID 0x000E +#define NVM_INIT_CTRL_2 0x000F +#define NVM_INIT_CTRL_4 0x0013 +#define NVM_LED_1_CFG 0x001C +#define NVM_LED_0_2_CFG 0x001F +#define NVM_ETRACK_WORD 0x0042 +#define NVM_ETRACK_HIWORD 
0x0043 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* NVM version defines */ +#define NVM_MAJOR_MASK 0xF000 +#define NVM_MINOR_MASK 0x0FF0 +#define NVM_IMAGE_ID_MASK 0x000F +#define NVM_COMB_VER_MASK 0x00FF +#define NVM_MAJOR_SHIFT 12 +#define NVM_MINOR_SHIFT 4 +#define NVM_COMB_VER_SHFT 8 +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETRACK_SHIFT 16 +#define NVM_ETRACK_VALID 0x8000 +#define NVM_NEW_DEC_MASK 0x0F00 +#define NVM_HEX_CONV 16 +#define NVM_HEX_TENS 10 + +#define NVM_ETS_CFG 0x003E +#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 +#define NVM_ETS_LTHRES_DELTA_SHIFT 6 +#define NVM_ETS_TYPE_MASK 0x0038 +#define NVM_ETS_TYPE_SHIFT 3 +#define NVM_ETS_TYPE_EMC 0x000 +#define NVM_ETS_NUM_SENSORS_MASK 0x0007 +#define NVM_ETS_DATA_LOC_MASK 0x3C00 +#define NVM_ETS_DATA_LOC_SHIFT 10 +#define NVM_ETS_DATA_INDEX_MASK 0x0300 +#define NVM_ETS_DATA_INDEX_SHIFT 8 +#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF + +#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0) + +/* Mask bits for fields in Word 0x24 of the NVM */ +#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */ + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_ASM_DIR 0x2000 + +/* Mask bits for fields in Word 0x1a of the NVM */ + +/* length of string needed to store part num */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA + +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_RESERVED_WORD 0xFFFF +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* NVM Commands - Microwire */ + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCIE_DEVICE_CONTROL2 0x28 +#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. 
*/ +/* I = Integrated + * E = External + */ +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1112_E_PHY_ID 0x01410C90 +#define I347AT4_E_PHY_ID 0x01410DC0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define I82580_I_PHY_ID 0x015403A0 +#define I350_I_PHY_ID 0x015403B0 +#define M88_VENDOR 0x0141 +#define I210_I_PHY_ID 0x01410C00 +#define M88E1543_E_PHY_ID 0x01410EA0 +#define M88E1512_E_PHY_ID 0x01410DD0 +#define BCM54616_E_PHY_ID 0x03625D10 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +/* 1=CLK125 low, 0=CLK125 toggling */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold + * 0=Normal 10BASE-T Rx Threshold + */ +/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 110-140M + * 4 = >140M + */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +/* 1 = Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* Intel i347-AT4 Registers */ + +#define I347AT4_PCDL0 0x10 /* Pair 0 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL1 0x11 /* Pair 1 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL2 0x12 /* Pair 2 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL3 0x13 /* Pair 3 PHY Cable Diagnostics Length */ +#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +#define I347AT4_PAGE_SELECT 0x16 + +/* i347-AT4 Extended PHY Specific Control Register */ + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 +#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +/* i347-AT4 PHY Cable Diagnostics Control */ +#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +/* Marvell 1112 only registers */ +#define M88E1112_VCT_DSP_DISTANCE 0x001A + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 +#define E1000_MDIC_DEST 0x80000000 + +/* Thermal Sensor */ +#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */ + +/* Energy Efficient Ethernet */ +#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */ +#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ +#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ +#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ +#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */ +#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ +#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */ +#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ +#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ +#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ +#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ +#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ +#define E1000_M88E1543_EEE_CTRL_1 0x0 +#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define 
E1000_M88E1543_FIBER_CTRL 0x0 +#define E1000_EEE_ADV_DEV_I354 7 +#define E1000_EEE_ADV_ADDR_I354 60 +#define E1000_EEE_ADV_100_SUPPORTED BIT(1) /* 100BaseTx EEE Supported */ +#define E1000_EEE_ADV_1000_SUPPORTED BIT(2) /* 1000BaseT EEE Supported */ +#define E1000_PCS_STATUS_DEV_I354 3 +#define E1000_PCS_STATUS_ADDR_I354 1 +#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */ +#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 +#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 + +/* SerDes Control */ +#define E1000_GEN_CTL_READY 0x80000000 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +#define E1000_GEN_POLL_TIMEOUT 640 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +/* DMA Coalescing register fields */ +#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */ + +/* Tx Rate-Scheduler Config fields */ +#define E1000_RTTBCNRC_RS_ENA 0x80000000 +#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define E1000_RTTBCNRC_RF_INT_SHIFT 14 +#define E1000_RTTBCNRC_RF_INT_MASK \ + (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) + +#define E1000_VLAPQF_QUEUE_SEL(_n, q_idx) (q_idx << ((_n) * 4)) +#define E1000_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define E1000_VLAPQF_QUEUE_MASK 0x03 + +/* TX Qav Control fields */ +#define E1000_TQAVCTRL_XMIT_MODE BIT(0) +#define E1000_TQAVCTRL_DATAFETCHARB BIT(4) +#define E1000_TQAVCTRL_DATATRANARB BIT(8) +#define E1000_TQAVCTRL_DATATRANTIM BIT(9) +#define E1000_TQAVCTRL_SP_WAIT_SR BIT(10) +/* Fetch Time Delta - bits 31:16 + * + * This field holds the value to be reduced from the launch time for + * fetch time decision. The FetchTimeDelta value is defined in 32 ns + * granularity. + * + * This field is 16 bits wide, and so the maximum value is: + * + * 65535 * 32 = 2097120 ~= 2.1 msec + * + * XXX: We are configuring the max value here since we couldn't come up + * with a reason for not doing so. + */ +#define E1000_TQAVCTRL_FETCHTIME_DELTA (0xFFFF << 16) + +/* TX Qav Credit Control fields */ +#define E1000_TQAVCC_IDLESLOPE_MASK 0xFFFF +#define E1000_TQAVCC_QUEUEMODE BIT(31) + +/* Transmit Descriptor Control fields */ +#define E1000_TXDCTL_PRIORITY BIT(27) + +#endif diff --git a/devices/igb/e1000_defines-5.14-orig.h b/devices/igb/e1000_defines-5.14-orig.h new file mode 100644 index 00000000..ca542977 --- /dev/null +++ b/devices/igb/e1000_defines-5.14-orig.h @@ -0,0 +1,1078 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +/* Wake Up Status */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact */ +#define E1000_WUS_ARPD 0x00000020 /* Directed ARP Request */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 */ +#define E1000_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */ + +/* Packet types that are enabled for wake packet delivery */ +#define WAKE_PKT_WUS ( \ + E1000_WUS_EX | \ + E1000_WUS_ARPD | \ + E1000_WUS_IPV4 | \ + E1000_WUS_IPV6 | \ + E1000_WUS_NSD) + +/* Wake Up Packet Length */ +#define E1000_WUPL_MASK 0x00000FFF + +/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ +#define E1000_WUPM_BYTES 128 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ +#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ + +/* Physical Func Reset Done Indication */ +#define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 +/* Interrupt delay cancellation */ +/* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 +/* Interrupt acknowledge Auto-mask */ +/* Clear Interrupt timers after IMS clear */ +/* packet buffer parity error detection enabled */ +/* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) +#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define 
E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */ + +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */ +/* Enable Neighbor Discovery Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 + +/* Receive Control */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ 
+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x1 +#define E1000_SWFW_PHY0_SM 0x2 +#define E1000_SWFW_PHY1_SM 0x4 +#define E1000_SWFW_PHY2_SM 0x20 +#define E1000_SWFW_PHY3_SM 0x40 + +/* FACTPS Definitions */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +/* Defined polarity of Dock/Undock indication in SDP[0] */ +/* Reset both PHY ports, through PHYRST_N pin */ +/* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ +#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +/* Initiate an interrupt to manageability engine */ +#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ + +#define E1000_CONNSW_ENRGSRC 0x4 +#define E1000_CONNSW_PHYSD 0x400 +#define E1000_CONNSW_PHY_PDN 0x800 +#define E1000_CONNSW_SERDESD 0x200 +#define E1000_CONNSW_AUTOSENSE_CONF 0x2 +#define E1000_CONNSW_AUTOSENSE_EN 0x1 +#define E1000_PCS_CFG_PCS_EN 8 +#define E1000_PCS_LCTL_FLV_LINK_UP 1 +#define E1000_PCS_LCTL_FSV_100 2 +#define E1000_PCS_LCTL_FSV_1000 4 +#define E1000_PCS_LCTL_FDV_FULL 8 +#define E1000_PCS_LCTL_FSD 0x10 +#define E1000_PCS_LCTL_FORCE_LINK 0x20 +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +#define E1000_PCS_LCTL_AN_RESTART 0x20000 +#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 + +#define E1000_PCS_LSTS_LINK_OK 1 +#define E1000_PCS_LSTS_SPEED_100 2 +#define E1000_PCS_LSTS_SPEED_1000 4 +#define E1000_PCS_LSTS_DUPLEX_FULL 8 +#define E1000_PCS_LSTS_SYNK_OK 0x10 + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +/* Change in Dock/Undock state. Clear on write '0'. */ +/* Status of Master requests. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 +/* BMC external code execution disabled */ + +#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ +#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ +/* Constants used to intrepret the masked PCI-X bus speed. */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. 
*/ +#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_IVRT 0x00000040 + +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +/* Extended desc bits for Linksec and timesync */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ + +/* DMA Coalescing register fields */ +#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */ +#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */ +#define E1000_DMACR_DMACTHR_SHIFT 16 +#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */ +#define E1000_DMACR_DMAC_LX_SHIFT 28 +#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +/* DMA Coalescing BMC-to-OS Watchdog Enable */ +#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 + +#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */ + +#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ + +#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */ +#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */ + +#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */ + +#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */ +#define E1000_FCRTC_RTH_COAL_SHIFT 4 +#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ + +/* Timestamp in Rx buffer */ +#define E1000_RXPBS_CFG_TS_EN 0x80000000 + +#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I210_RXPBSIZE_MASK 0x0000003F +#define I210_RXPBSIZE_PB_30KB 0x0000001E +#define I210_RXPBSIZE_PB_32KB 0x00000020 +#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +#define I210_TXPBSIZE_MASK 0xC0FFFFFF +#define I210_TXPBSIZE_PB0_6KB (6 << 0) +#define I210_TXPBSIZE_PB1_6KB (6 << 6) +#define I210_TXPBSIZE_PB2_6KB (6 << 12) +#define I210_TXPBSIZE_PB3_6KB (6 << 18) + +#define I210_DTXMXPKTSZ_DEFAULT 0x00000098 + +#define I210_SR_QUEUES_NUM 2 + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Receive Checksum Control 
*/ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Header split receive */ +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_LEF 0x00040000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ + +/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ +#define MAX_JUMBO_FRAME_SIZE 0x2600 +#define MAX_STD_JUMBO_FRAME_SIZE 9216 + +/* PBA constants */ +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_64K 0x0040 /* 64KB */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ +#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 +/* LAN connected device generates an interrupt */ +#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ + +/* Extended Interrupt Cause Read */ +#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +/* TCP Timer */ + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC | \ + E1000_IMS_DOUTSYNC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ +#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ +#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ + +/* Extended Interrupt Mask Set */ +#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ +#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */ + +/* Extended Interrupt Cause Set */ +/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ +#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ + + +/* Transmit Descriptor Control */ +/* Enable the counting of descriptors still to be processed. */ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* Transmit Config Word */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address */ +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAH_ASEL_SRC_ADDR 0x00010000 +#define E1000_RAH_QSEL_ENABLE 0x10000000 +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 +#define E1000_RAH_POOL_MASK 0x03FC0000 +#define E1000_RAH_POOL_1 0x00040000 + +/* Error Codes */ +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_MBX 15 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 +#define E1000_ERR_INVM_VALUE_NOT_FOUND 19 +#define E1000_ERR_I2C 20 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +/* Number of milliseconds for NVM auto read done after MAC reset. 
*/ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestampping */ + +#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 +#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 +#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 + +#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 +#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 +#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 +#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 +#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 +#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 + +#define E1000_TIMINCA_16NS_SHIFT 24 + +/* Time Sync Interrupt Cause/Mask Register Bits */ + +#define TSINTR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */ +#define TSINTR_TXTS BIT(1) /* Transmit Timestamp. */ +#define TSINTR_RXTS BIT(2) /* Receive Timestamp. */ +#define TSINTR_TT0 BIT(3) /* Target Time 0 Trigger. */ +#define TSINTR_TT1 BIT(4) /* Target Time 1 Trigger. */ +#define TSINTR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */ +#define TSINTR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. */ +#define TSINTR_TADJ BIT(7) /* Time Adjust Done. */ + +#define TSYNC_INTERRUPTS TSINTR_TXTS +#define E1000_TSICR_TXTS TSINTR_TXTS + +/* TSAUXC Configuration Bits */ +#define TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ +#define TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ +#define TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ +#define TSAUXC_SAMP_AUT0 BIT(3) /* Latch SYSTIML/H into AUXSTMPL/0. */ +#define TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */ +#define TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */ +#define TSAUXC_SAMP_AUT1 BIT(6) /* Latch SYSTIML/H into AUXSTMPL/1. */ +#define TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */ +#define TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_PLSG BIT(17) /* Generate a pulse. */ +#define TSAUXC_DISABLE BIT(31) /* Disable SYSTIM Count Operation. */ + +/* SDP Configuration Bits */ +#define AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. 
*/ +#define AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ +#define TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ +#define TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ +#define TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ +#define TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. 
*/ + +#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ +#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ +#define E1000_MDICNFG_PHY_MASK 0x03E00000 +#define E1000_MDICNFG_PHY_SHIFT 21 + +#define E1000_MEDIA_PORT_COPPER 1 +#define E1000_MEDIA_PORT_OTHER 2 +#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2 +#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3 +#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */ +#define E1000_M88E1112_MAC_CTRL_1 0x10 +#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */ +#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 +#define E1000_M88E1112_PAGE_ADDR 0x16 +#define E1000_M88E1112_STATUS 0x01 +#define E1000_M88E1512_CFG_REG_1 0x0010 +#define E1000_M88E1512_CFG_REG_2 0x0011 +#define E1000_M88E1512_CFG_REG_3 0x0007 +#define E1000_M88E1512_MODE 0x0014 + +/* PCI Express Control */ +#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 +#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define E1000_GCR_CAP_VER2 0x00040000 + +/* mPHY Address Control and Data Registers */ +#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */ +#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 +#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */ + +/* mPHY PCS CLK Register */ +#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */ +/* mPHY Near End Digital Loopback Override Bit */ +#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 + +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 + +/* PHY Control Register */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* Autoneg Expansion Register */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ + + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define 
PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ +#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 +#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ +/* Secure FLASH mode requires removing MSb */ +#define E1000_I210_FW_PTR_MASK 0x7FFF +/* Firmware code revision field word offset*/ +#define E1000_I210_FW_VER_OFFSET 328 +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 + + +/* Offset to data in NVM read/write registers */ +#define E1000_NVM_RW_REG_DATA 16 +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */ +#define NVM_VERSION 0x0005 +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 +#define NVM_MAC_ADDR 0x0000 +#define NVM_SUB_DEV_ID 0x000B +#define NVM_SUB_VEN_ID 0x000C +#define NVM_DEV_ID 0x000D +#define NVM_VEN_ID 0x000E +#define NVM_INIT_CTRL_2 0x000F +#define NVM_INIT_CTRL_4 0x0013 +#define NVM_LED_1_CFG 0x001C +#define NVM_LED_0_2_CFG 0x001F +#define NVM_ETRACK_WORD 0x0042 +#define NVM_ETRACK_HIWORD 
0x0043 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* NVM version defines */ +#define NVM_MAJOR_MASK 0xF000 +#define NVM_MINOR_MASK 0x0FF0 +#define NVM_IMAGE_ID_MASK 0x000F +#define NVM_COMB_VER_MASK 0x00FF +#define NVM_MAJOR_SHIFT 12 +#define NVM_MINOR_SHIFT 4 +#define NVM_COMB_VER_SHFT 8 +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETRACK_SHIFT 16 +#define NVM_ETRACK_VALID 0x8000 +#define NVM_NEW_DEC_MASK 0x0F00 +#define NVM_HEX_CONV 16 +#define NVM_HEX_TENS 10 + +#define NVM_ETS_CFG 0x003E +#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 +#define NVM_ETS_LTHRES_DELTA_SHIFT 6 +#define NVM_ETS_TYPE_MASK 0x0038 +#define NVM_ETS_TYPE_SHIFT 3 +#define NVM_ETS_TYPE_EMC 0x000 +#define NVM_ETS_NUM_SENSORS_MASK 0x0007 +#define NVM_ETS_DATA_LOC_MASK 0x3C00 +#define NVM_ETS_DATA_LOC_SHIFT 10 +#define NVM_ETS_DATA_INDEX_MASK 0x0300 +#define NVM_ETS_DATA_INDEX_SHIFT 8 +#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF + +#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0) + +/* Mask bits for fields in Word 0x24 of the NVM */ +#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */ + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_ASM_DIR 0x2000 + +/* Mask bits for fields in Word 0x1a of the NVM */ + +/* length of string needed to store part num */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA + +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_RESERVED_WORD 0xFFFF +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* NVM Commands - Microwire */ + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCIE_DEVICE_CONTROL2 0x28 +#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. 
*/ +/* I = Integrated + * E = External + */ +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1112_E_PHY_ID 0x01410C90 +#define I347AT4_E_PHY_ID 0x01410DC0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define I82580_I_PHY_ID 0x015403A0 +#define I350_I_PHY_ID 0x015403B0 +#define M88_VENDOR 0x0141 +#define I210_I_PHY_ID 0x01410C00 +#define M88E1543_E_PHY_ID 0x01410EA0 +#define M88E1512_E_PHY_ID 0x01410DD0 +#define BCM54616_E_PHY_ID 0x03625D10 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +/* 1=CLK125 low, 0=CLK125 toggling */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold + * 0=Normal 10BASE-T Rx Threshold + */ +/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 110-140M + * 4 = >140M + */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +/* 1 = Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* Intel i347-AT4 Registers */ + +#define I347AT4_PCDL0 0x10 /* Pair 0 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL1 0x11 /* Pair 1 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL2 0x12 /* Pair 2 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL3 0x13 /* Pair 3 PHY Cable Diagnostics Length */ +#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +#define I347AT4_PAGE_SELECT 0x16 + +/* i347-AT4 Extended PHY Specific Control Register */ + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 +#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +/* i347-AT4 PHY Cable Diagnostics Control */ +#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +/* Marvell 1112 only registers */ +#define M88E1112_VCT_DSP_DISTANCE 0x001A + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 +#define E1000_MDIC_DEST 0x80000000 + +/* Thermal Sensor */ +#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */ + +/* Energy Efficient Ethernet */ +#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */ +#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ +#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ +#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ +#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */ +#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ +#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */ +#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ +#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ +#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ +#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ +#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ +#define E1000_M88E1543_EEE_CTRL_1 0x0 +#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define 
E1000_M88E1543_FIBER_CTRL 0x0 +#define E1000_EEE_ADV_DEV_I354 7 +#define E1000_EEE_ADV_ADDR_I354 60 +#define E1000_EEE_ADV_100_SUPPORTED BIT(1) /* 100BaseTx EEE Supported */ +#define E1000_EEE_ADV_1000_SUPPORTED BIT(2) /* 1000BaseT EEE Supported */ +#define E1000_PCS_STATUS_DEV_I354 3 +#define E1000_PCS_STATUS_ADDR_I354 1 +#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */ +#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 +#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 + +/* SerDes Control */ +#define E1000_GEN_CTL_READY 0x80000000 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +#define E1000_GEN_POLL_TIMEOUT 640 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +/* DMA Coalescing register fields */ +#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */ + +/* Tx Rate-Scheduler Config fields */ +#define E1000_RTTBCNRC_RS_ENA 0x80000000 +#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define E1000_RTTBCNRC_RF_INT_SHIFT 14 +#define E1000_RTTBCNRC_RF_INT_MASK \ + (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) + +#define E1000_VLAPQF_QUEUE_SEL(_n, q_idx) (q_idx << ((_n) * 4)) +#define E1000_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define E1000_VLAPQF_QUEUE_MASK 0x03 + +/* TX Qav Control fields */ +#define E1000_TQAVCTRL_XMIT_MODE BIT(0) +#define E1000_TQAVCTRL_DATAFETCHARB BIT(4) +#define E1000_TQAVCTRL_DATATRANARB BIT(8) +#define E1000_TQAVCTRL_DATATRANTIM BIT(9) +#define E1000_TQAVCTRL_SP_WAIT_SR BIT(10) +/* Fetch Time Delta - bits 31:16 + * + * This field holds the value to be reduced from the launch time for + * fetch time decision. The FetchTimeDelta value is defined in 32 ns + * granularity. + * + * This field is 16 bits wide, and so the maximum value is: + * + * 65535 * 32 = 2097120 ~= 2.1 msec + * + * XXX: We are configuring the max value here since we couldn't come up + * with a reason for not doing so. + */ +#define E1000_TQAVCTRL_FETCHTIME_DELTA (0xFFFF << 16) + +/* TX Qav Credit Control fields */ +#define E1000_TQAVCC_IDLESLOPE_MASK 0xFFFF +#define E1000_TQAVCC_QUEUEMODE BIT(31) + +/* Transmit Descriptor Control fields */ +#define E1000_TXDCTL_PRIORITY BIT(27) + +#endif diff --git a/devices/igb/e1000_defines-6.1-ethercat.h b/devices/igb/e1000_defines-6.1-ethercat.h new file mode 100644 index 00000000..fa028928 --- /dev/null +++ b/devices/igb/e1000_defines-6.1-ethercat.h @@ -0,0 +1,1075 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +/* Wake Up Status */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact */ +#define E1000_WUS_ARPD 0x00000020 /* Directed ARP Request */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 */ +#define E1000_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */ + +/* Packet types that are enabled for wake packet delivery */ +#define WAKE_PKT_WUS ( \ + E1000_WUS_EX | \ + E1000_WUS_ARPD | \ + E1000_WUS_IPV4 | \ + E1000_WUS_IPV6 | \ + E1000_WUS_NSD) + +/* Wake Up Packet Length */ +#define E1000_WUPL_MASK 0x00000FFF + +/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ +#define E1000_WUPM_BYTES 128 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ +#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ + +/* Physical Func Reset Done Indication */ +#define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 +/* Interrupt delay cancellation */ +/* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 +/* Interrupt acknowledge Auto-mask */ +/* Clear Interrupt timers after IMS clear */ +/* packet buffer parity error detection enabled */ +/* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) +#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define 
E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */ + +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */ +/* Enable Neighbor Discovery Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 + +/* Receive Control */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ 
+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x1 +#define E1000_SWFW_PHY0_SM 0x2 +#define E1000_SWFW_PHY1_SM 0x4 +#define E1000_SWFW_PHY2_SM 0x20 +#define E1000_SWFW_PHY3_SM 0x40 + +/* FACTPS Definitions */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +/* Defined polarity of Dock/Undock indication in SDP[0] */ +/* Reset both PHY ports, through PHYRST_N pin */ +/* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ +#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +/* Initiate an interrupt to manageability engine */ +#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ + +#define E1000_CONNSW_ENRGSRC 0x4 +#define E1000_CONNSW_PHYSD 0x400 +#define E1000_CONNSW_PHY_PDN 0x800 +#define E1000_CONNSW_SERDESD 0x200 +#define E1000_CONNSW_AUTOSENSE_CONF 0x2 +#define E1000_CONNSW_AUTOSENSE_EN 0x1 +#define E1000_PCS_CFG_PCS_EN 8 +#define E1000_PCS_LCTL_FLV_LINK_UP 1 +#define E1000_PCS_LCTL_FSV_100 2 +#define E1000_PCS_LCTL_FSV_1000 4 +#define E1000_PCS_LCTL_FDV_FULL 8 +#define E1000_PCS_LCTL_FSD 0x10 +#define E1000_PCS_LCTL_FORCE_LINK 0x20 +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +#define E1000_PCS_LCTL_AN_RESTART 0x20000 +#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 + +#define E1000_PCS_LSTS_LINK_OK 1 +#define E1000_PCS_LSTS_SPEED_100 2 +#define E1000_PCS_LSTS_SPEED_1000 4 +#define E1000_PCS_LSTS_DUPLEX_FULL 8 +#define E1000_PCS_LSTS_SYNK_OK 0x10 + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +/* Change in Dock/Undock state. Clear on write '0'. */ +/* Status of Master requests. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 +/* BMC external code execution disabled */ + +#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ +#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ +/* Constants used to intrepret the masked PCI-X bus speed. */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. 
*/ +#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_IVRT 0x00000040 + +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +/* Extended desc bits for Linksec and timesync */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ + +/* DMA Coalescing register fields */ +#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */ +#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */ +#define E1000_DMACR_DMACTHR_SHIFT 16 +#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */ +#define E1000_DMACR_DMAC_LX_SHIFT 28 +#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +/* DMA Coalescing BMC-to-OS Watchdog Enable */ +#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 + +#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */ + +#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ + +#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */ +#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */ + +#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */ + +#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */ +#define E1000_FCRTC_RTH_COAL_SHIFT 4 +#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ + +/* Timestamp in Rx buffer */ +#define E1000_RXPBS_CFG_TS_EN 0x80000000 + +#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I210_RXPBSIZE_MASK 0x0000003F +#define I210_RXPBSIZE_PB_30KB 0x0000001E +#define I210_RXPBSIZE_PB_32KB 0x00000020 +#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +#define I210_TXPBSIZE_MASK 0xC0FFFFFF +#define I210_TXPBSIZE_PB0_6KB (6 << 0) +#define I210_TXPBSIZE_PB1_6KB (6 << 6) +#define I210_TXPBSIZE_PB2_6KB (6 << 12) +#define I210_TXPBSIZE_PB3_6KB (6 << 18) + +#define I210_DTXMXPKTSZ_DEFAULT 0x00000098 + +#define I210_SR_QUEUES_NUM 2 + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Receive Checksum Control 
*/ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Header split receive */ +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_LEF 0x00040000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ + +/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ +#define MAX_JUMBO_FRAME_SIZE 0x2600 +#define MAX_STD_JUMBO_FRAME_SIZE 9216 + +/* PBA constants */ +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_64K 0x0040 /* 64KB */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ +#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 +/* LAN connected device generates an interrupt */ +#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ + +/* Extended Interrupt Cause Read */ +#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +/* TCP Timer */ + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC | \ + E1000_IMS_DOUTSYNC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ +#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ +#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ + +/* Extended Interrupt Mask Set */ +#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ +#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */ + +/* Extended Interrupt Cause Set */ +/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ +#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ + + +/* Transmit Descriptor Control */ +/* Enable the counting of descriptors still to be processed. */ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* Transmit Config Word */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address */ +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAH_ASEL_SRC_ADDR 0x00010000 +#define E1000_RAH_QSEL_ENABLE 0x10000000 +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 +#define E1000_RAH_POOL_MASK 0x03FC0000 +#define E1000_RAH_POOL_1 0x00040000 + +/* Error Codes */ +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_MBX 15 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 +#define E1000_ERR_INVM_VALUE_NOT_FOUND 19 +#define E1000_ERR_I2C 20 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +/* Number of milliseconds for NVM auto read done after MAC reset. 
*/ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestampping */ + +#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 +#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 +#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 + +#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 +#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 +#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 +#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 +#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 +#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 + +#define E1000_TIMINCA_16NS_SHIFT 24 + +/* Time Sync Interrupt Cause/Mask Register Bits */ + +#define TSINTR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */ +#define TSINTR_TXTS BIT(1) /* Transmit Timestamp. */ +#define TSINTR_RXTS BIT(2) /* Receive Timestamp. */ +#define TSINTR_TT0 BIT(3) /* Target Time 0 Trigger. */ +#define TSINTR_TT1 BIT(4) /* Target Time 1 Trigger. */ +#define TSINTR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */ +#define TSINTR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. */ +#define TSINTR_TADJ BIT(7) /* Time Adjust Done. */ + +#define TSYNC_INTERRUPTS TSINTR_TXTS +#define E1000_TSICR_TXTS TSINTR_TXTS + +/* TSAUXC Configuration Bits */ +#define TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ +#define TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ +#define TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ +#define TSAUXC_SAMP_AUT0 BIT(3) /* Latch SYSTIML/H into AUXSTMPL/0. */ +#define TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */ +#define TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */ +#define TSAUXC_SAMP_AUT1 BIT(6) /* Latch SYSTIML/H into AUXSTMPL/1. */ +#define TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */ +#define TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_PLSG BIT(17) /* Generate a pulse. */ +#define TSAUXC_DISABLE BIT(31) /* Disable SYSTIM Count Operation. */ + +/* SDP Configuration Bits */ +#define AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. 
*/ +#define AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ +#define TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ +#define TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ +#define TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ +#define TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. 
*/ + +#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ +#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ +#define E1000_MDICNFG_PHY_MASK 0x03E00000 +#define E1000_MDICNFG_PHY_SHIFT 21 + +#define E1000_MEDIA_PORT_COPPER 1 +#define E1000_MEDIA_PORT_OTHER 2 +#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2 +#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3 +#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */ +#define E1000_M88E1112_MAC_CTRL_1 0x10 +#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */ +#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 +#define E1000_M88E1112_PAGE_ADDR 0x16 +#define E1000_M88E1112_STATUS 0x01 +#define E1000_M88E1512_CFG_REG_1 0x0010 +#define E1000_M88E1512_CFG_REG_2 0x0011 +#define E1000_M88E1512_CFG_REG_3 0x0007 +#define E1000_M88E1512_MODE 0x0014 + +/* PCI Express Control */ +#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 +#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define E1000_GCR_CAP_VER2 0x00040000 + +/* mPHY Address Control and Data Registers */ +#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */ +#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 +#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */ + +/* mPHY PCS CLK Register */ +#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */ +/* mPHY Near End Digital Loopback Override Bit */ +#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 + +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 + +/* PHY Control Register */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* Autoneg Expansion Register */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ + + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define 
PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ +#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 +#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ +/* Secure FLASH mode requires removing MSb */ +#define E1000_I210_FW_PTR_MASK 0x7FFF +/* Firmware code revision field word offset*/ +#define E1000_I210_FW_VER_OFFSET 328 +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 + + +/* Offset to data in NVM read/write registers */ +#define E1000_NVM_RW_REG_DATA 16 +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */ +#define NVM_VERSION 0x0005 +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 +#define NVM_MAC_ADDR 0x0000 +#define NVM_SUB_DEV_ID 0x000B +#define NVM_SUB_VEN_ID 0x000C +#define NVM_DEV_ID 0x000D +#define NVM_VEN_ID 0x000E +#define NVM_INIT_CTRL_2 0x000F +#define NVM_INIT_CTRL_4 0x0013 +#define NVM_LED_1_CFG 0x001C +#define NVM_LED_0_2_CFG 0x001F +#define NVM_ETRACK_WORD 0x0042 +#define NVM_ETRACK_HIWORD 
0x0043 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* NVM version defines */ +#define NVM_MAJOR_MASK 0xF000 +#define NVM_MINOR_MASK 0x0FF0 +#define NVM_IMAGE_ID_MASK 0x000F +#define NVM_COMB_VER_MASK 0x00FF +#define NVM_MAJOR_SHIFT 12 +#define NVM_MINOR_SHIFT 4 +#define NVM_COMB_VER_SHFT 8 +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETRACK_SHIFT 16 +#define NVM_ETRACK_VALID 0x8000 +#define NVM_NEW_DEC_MASK 0x0F00 +#define NVM_HEX_CONV 16 +#define NVM_HEX_TENS 10 + +#define NVM_ETS_CFG 0x003E +#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 +#define NVM_ETS_LTHRES_DELTA_SHIFT 6 +#define NVM_ETS_TYPE_MASK 0x0038 +#define NVM_ETS_TYPE_SHIFT 3 +#define NVM_ETS_TYPE_EMC 0x000 +#define NVM_ETS_NUM_SENSORS_MASK 0x0007 +#define NVM_ETS_DATA_LOC_MASK 0x3C00 +#define NVM_ETS_DATA_LOC_SHIFT 10 +#define NVM_ETS_DATA_INDEX_MASK 0x0300 +#define NVM_ETS_DATA_INDEX_SHIFT 8 +#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF + +#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0) + +/* Mask bits for fields in Word 0x24 of the NVM */ +#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */ + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_ASM_DIR 0x2000 + +/* Mask bits for fields in Word 0x1a of the NVM */ + +/* length of string needed to store part num */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA + +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_RESERVED_WORD 0xFFFF +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* NVM Commands - Microwire */ + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCIE_DEVICE_CONTROL2 0x28 +#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. 
*/ +/* I = Integrated + * E = External + */ +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1112_E_PHY_ID 0x01410C90 +#define I347AT4_E_PHY_ID 0x01410DC0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define I82580_I_PHY_ID 0x015403A0 +#define I350_I_PHY_ID 0x015403B0 +#define M88_VENDOR 0x0141 +#define I210_I_PHY_ID 0x01410C00 +#define M88E1543_E_PHY_ID 0x01410EA0 +#define M88E1512_E_PHY_ID 0x01410DD0 +#define BCM54616_E_PHY_ID 0x03625D10 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +/* 1=CLK125 low, 0=CLK125 toggling */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold + * 0=Normal 10BASE-T Rx Threshold + */ +/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 110-140M + * 4 = >140M + */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +/* 1 = Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* Intel i347-AT4 Registers */ + +#define I347AT4_PCDL0 0x10 /* Pair 0 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL1 0x11 /* Pair 1 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL2 0x12 /* Pair 2 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL3 0x13 /* Pair 3 PHY Cable Diagnostics Length */ +#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +#define I347AT4_PAGE_SELECT 0x16 + +/* i347-AT4 Extended PHY Specific Control Register */ + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 +#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +/* i347-AT4 PHY Cable Diagnostics Control */ +#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +/* Marvell 1112 only registers */ +#define M88E1112_VCT_DSP_DISTANCE 0x001A + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 +#define E1000_MDIC_DEST 0x80000000 + +/* Thermal Sensor */ +#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */ + +/* Energy Efficient Ethernet */ +#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */ +#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ +#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ +#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ +#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */ +#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ +#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */ +#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ +#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ +#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ +#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ +#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ +#define E1000_M88E1543_EEE_CTRL_1 0x0 +#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define 
E1000_M88E1543_FIBER_CTRL 0x0 +#define E1000_EEE_ADV_DEV_I354 7 +#define E1000_EEE_ADV_ADDR_I354 60 +#define E1000_EEE_ADV_100_SUPPORTED BIT(1) /* 100BaseTx EEE Supported */ +#define E1000_EEE_ADV_1000_SUPPORTED BIT(2) /* 1000BaseT EEE Supported */ +#define E1000_PCS_STATUS_DEV_I354 3 +#define E1000_PCS_STATUS_ADDR_I354 1 +#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */ +#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 +#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 + +/* SerDes Control */ +#define E1000_GEN_CTL_READY 0x80000000 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +#define E1000_GEN_POLL_TIMEOUT 640 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +/* Tx Rate-Scheduler Config fields */ +#define E1000_RTTBCNRC_RS_ENA 0x80000000 +#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define E1000_RTTBCNRC_RF_INT_SHIFT 14 +#define E1000_RTTBCNRC_RF_INT_MASK \ + (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) + +#define E1000_VLAPQF_QUEUE_SEL(_n, q_idx) (q_idx << ((_n) * 4)) +#define E1000_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define E1000_VLAPQF_QUEUE_MASK 0x03 + +/* TX Qav Control fields */ +#define E1000_TQAVCTRL_XMIT_MODE BIT(0) +#define E1000_TQAVCTRL_DATAFETCHARB BIT(4) +#define E1000_TQAVCTRL_DATATRANARB BIT(8) +#define E1000_TQAVCTRL_DATATRANTIM BIT(9) +#define E1000_TQAVCTRL_SP_WAIT_SR BIT(10) +/* Fetch Time Delta - bits 31:16 + * + * This field holds the value to be reduced from the launch time for + * fetch time decision. The FetchTimeDelta value is defined in 32 ns + * granularity. + * + * This field is 16 bits wide, and so the maximum value is: + * + * 65535 * 32 = 2097120 ~= 2.1 msec + * + * XXX: We are configuring the max value here since we couldn't come up + * with a reason for not doing so. + */ +#define E1000_TQAVCTRL_FETCHTIME_DELTA (0xFFFF << 16) + +/* TX Qav Credit Control fields */ +#define E1000_TQAVCC_IDLESLOPE_MASK 0xFFFF +#define E1000_TQAVCC_QUEUEMODE BIT(31) + +/* Transmit Descriptor Control fields */ +#define E1000_TXDCTL_PRIORITY BIT(27) + +#endif diff --git a/devices/igb/e1000_defines-6.1-orig.h b/devices/igb/e1000_defines-6.1-orig.h new file mode 100644 index 00000000..fa028928 --- /dev/null +++ b/devices/igb/e1000_defines-6.1-orig.h @@ -0,0 +1,1075 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +/* Wake Up Status */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact */ +#define E1000_WUS_ARPD 0x00000020 /* Directed ARP Request */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 */ +#define E1000_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */ + +/* Packet types that are enabled for wake packet delivery */ +#define WAKE_PKT_WUS ( \ + E1000_WUS_EX | \ + E1000_WUS_ARPD | \ + E1000_WUS_IPV4 | \ + E1000_WUS_IPV6 | \ + E1000_WUS_NSD) + +/* Wake Up Packet Length */ +#define E1000_WUPL_MASK 0x00000FFF + +/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ +#define E1000_WUPM_BYTES 128 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ +#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ + +/* Physical Func Reset Done Indication */ +#define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 +/* Interrupt delay cancellation */ +/* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 +/* Interrupt acknowledge Auto-mask */ +/* Clear Interrupt timers after IMS clear */ +/* packet buffer parity error detection enabled */ +/* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) +#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define 
E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */ + +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */ +/* Enable Neighbor Discovery Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 + +/* Receive Control */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ 
+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x1 +#define E1000_SWFW_PHY0_SM 0x2 +#define E1000_SWFW_PHY1_SM 0x4 +#define E1000_SWFW_PHY2_SM 0x20 +#define E1000_SWFW_PHY3_SM 0x40 + +/* FACTPS Definitions */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +/* Defined polarity of Dock/Undock indication in SDP[0] */ +/* Reset both PHY ports, through PHYRST_N pin */ +/* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ +#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +/* Initiate an interrupt to manageability engine */ +#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ + +#define E1000_CONNSW_ENRGSRC 0x4 +#define E1000_CONNSW_PHYSD 0x400 +#define E1000_CONNSW_PHY_PDN 0x800 +#define E1000_CONNSW_SERDESD 0x200 +#define E1000_CONNSW_AUTOSENSE_CONF 0x2 +#define E1000_CONNSW_AUTOSENSE_EN 0x1 +#define E1000_PCS_CFG_PCS_EN 8 +#define E1000_PCS_LCTL_FLV_LINK_UP 1 +#define E1000_PCS_LCTL_FSV_100 2 +#define E1000_PCS_LCTL_FSV_1000 4 +#define E1000_PCS_LCTL_FDV_FULL 8 +#define E1000_PCS_LCTL_FSD 0x10 +#define E1000_PCS_LCTL_FORCE_LINK 0x20 +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +#define E1000_PCS_LCTL_AN_RESTART 0x20000 +#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 + +#define E1000_PCS_LSTS_LINK_OK 1 +#define E1000_PCS_LSTS_SPEED_100 2 +#define E1000_PCS_LSTS_SPEED_1000 4 +#define E1000_PCS_LSTS_DUPLEX_FULL 8 +#define E1000_PCS_LSTS_SYNK_OK 0x10 + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +/* Change in Dock/Undock state. Clear on write '0'. */ +/* Status of Master requests. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 +/* BMC external code execution disabled */ + +#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ +#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ +/* Constants used to intrepret the masked PCI-X bus speed. */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. 
*/ +#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_IVRT 0x00000040 + +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +/* Extended desc bits for Linksec and timesync */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ + +/* DMA Coalescing register fields */ +#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */ +#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */ +#define E1000_DMACR_DMACTHR_SHIFT 16 +#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */ +#define E1000_DMACR_DMAC_LX_SHIFT 28 +#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +/* DMA Coalescing BMC-to-OS Watchdog Enable */ +#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 + +#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */ + +#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ + +#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */ +#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */ + +#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */ + +#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */ +#define E1000_FCRTC_RTH_COAL_SHIFT 4 +#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ + +/* Timestamp in Rx buffer */ +#define E1000_RXPBS_CFG_TS_EN 0x80000000 + +#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I210_RXPBSIZE_MASK 0x0000003F +#define I210_RXPBSIZE_PB_30KB 0x0000001E +#define I210_RXPBSIZE_PB_32KB 0x00000020 +#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +#define I210_TXPBSIZE_MASK 0xC0FFFFFF +#define I210_TXPBSIZE_PB0_6KB (6 << 0) +#define I210_TXPBSIZE_PB1_6KB (6 << 6) +#define I210_TXPBSIZE_PB2_6KB (6 << 12) +#define I210_TXPBSIZE_PB3_6KB (6 << 18) + +#define I210_DTXMXPKTSZ_DEFAULT 0x00000098 + +#define I210_SR_QUEUES_NUM 2 + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Receive Checksum Control 
*/ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Header split receive */ +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_LEF 0x00040000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ + +/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ +#define MAX_JUMBO_FRAME_SIZE 0x2600 +#define MAX_STD_JUMBO_FRAME_SIZE 9216 + +/* PBA constants */ +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_64K 0x0040 /* 64KB */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ +#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 +/* LAN connected device generates an interrupt */ +#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ + +/* Extended Interrupt Cause Read */ +#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +/* TCP Timer */ + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC | \ + E1000_IMS_DOUTSYNC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ +#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ +#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ + +/* Extended Interrupt Mask Set */ +#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ +#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */ + +/* Extended Interrupt Cause Set */ +/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ +#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ + + +/* Transmit Descriptor Control */ +/* Enable the counting of descriptors still to be processed. */ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* Transmit Config Word */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address */ +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAH_ASEL_SRC_ADDR 0x00010000 +#define E1000_RAH_QSEL_ENABLE 0x10000000 +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 +#define E1000_RAH_POOL_MASK 0x03FC0000 +#define E1000_RAH_POOL_1 0x00040000 + +/* Error Codes */ +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_MBX 15 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 +#define E1000_ERR_INVM_VALUE_NOT_FOUND 19 +#define E1000_ERR_I2C 20 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +/* Number of milliseconds for NVM auto read done after MAC reset. 
*/ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestampping */ + +#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 +#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 +#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 + +#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 +#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 +#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 +#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 +#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 +#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 + +#define E1000_TIMINCA_16NS_SHIFT 24 + +/* Time Sync Interrupt Cause/Mask Register Bits */ + +#define TSINTR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */ +#define TSINTR_TXTS BIT(1) /* Transmit Timestamp. */ +#define TSINTR_RXTS BIT(2) /* Receive Timestamp. */ +#define TSINTR_TT0 BIT(3) /* Target Time 0 Trigger. */ +#define TSINTR_TT1 BIT(4) /* Target Time 1 Trigger. */ +#define TSINTR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */ +#define TSINTR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. */ +#define TSINTR_TADJ BIT(7) /* Time Adjust Done. */ + +#define TSYNC_INTERRUPTS TSINTR_TXTS +#define E1000_TSICR_TXTS TSINTR_TXTS + +/* TSAUXC Configuration Bits */ +#define TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ +#define TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ +#define TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ +#define TSAUXC_SAMP_AUT0 BIT(3) /* Latch SYSTIML/H into AUXSTMPL/0. */ +#define TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */ +#define TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */ +#define TSAUXC_SAMP_AUT1 BIT(6) /* Latch SYSTIML/H into AUXSTMPL/1. */ +#define TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */ +#define TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_PLSG BIT(17) /* Generate a pulse. */ +#define TSAUXC_DISABLE BIT(31) /* Disable SYSTIM Count Operation. */ + +/* SDP Configuration Bits */ +#define AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. 
*/ +#define AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ +#define TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ +#define TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ +#define TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ +#define TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. 
*/ + +#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ +#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ +#define E1000_MDICNFG_PHY_MASK 0x03E00000 +#define E1000_MDICNFG_PHY_SHIFT 21 + +#define E1000_MEDIA_PORT_COPPER 1 +#define E1000_MEDIA_PORT_OTHER 2 +#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2 +#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3 +#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */ +#define E1000_M88E1112_MAC_CTRL_1 0x10 +#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */ +#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 +#define E1000_M88E1112_PAGE_ADDR 0x16 +#define E1000_M88E1112_STATUS 0x01 +#define E1000_M88E1512_CFG_REG_1 0x0010 +#define E1000_M88E1512_CFG_REG_2 0x0011 +#define E1000_M88E1512_CFG_REG_3 0x0007 +#define E1000_M88E1512_MODE 0x0014 + +/* PCI Express Control */ +#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 +#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define E1000_GCR_CAP_VER2 0x00040000 + +/* mPHY Address Control and Data Registers */ +#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */ +#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 +#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */ + +/* mPHY PCS CLK Register */ +#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */ +/* mPHY Near End Digital Loopback Override Bit */ +#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 + +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 + +/* PHY Control Register */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* Autoneg Expansion Register */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ + + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define 
PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ +#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 +#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ +/* Secure FLASH mode requires removing MSb */ +#define E1000_I210_FW_PTR_MASK 0x7FFF +/* Firmware code revision field word offset*/ +#define E1000_I210_FW_VER_OFFSET 328 +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 + + +/* Offset to data in NVM read/write registers */ +#define E1000_NVM_RW_REG_DATA 16 +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */ +#define NVM_VERSION 0x0005 +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 +#define NVM_MAC_ADDR 0x0000 +#define NVM_SUB_DEV_ID 0x000B +#define NVM_SUB_VEN_ID 0x000C +#define NVM_DEV_ID 0x000D +#define NVM_VEN_ID 0x000E +#define NVM_INIT_CTRL_2 0x000F +#define NVM_INIT_CTRL_4 0x0013 +#define NVM_LED_1_CFG 0x001C +#define NVM_LED_0_2_CFG 0x001F +#define NVM_ETRACK_WORD 0x0042 +#define NVM_ETRACK_HIWORD 
0x0043 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* NVM version defines */ +#define NVM_MAJOR_MASK 0xF000 +#define NVM_MINOR_MASK 0x0FF0 +#define NVM_IMAGE_ID_MASK 0x000F +#define NVM_COMB_VER_MASK 0x00FF +#define NVM_MAJOR_SHIFT 12 +#define NVM_MINOR_SHIFT 4 +#define NVM_COMB_VER_SHFT 8 +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETRACK_SHIFT 16 +#define NVM_ETRACK_VALID 0x8000 +#define NVM_NEW_DEC_MASK 0x0F00 +#define NVM_HEX_CONV 16 +#define NVM_HEX_TENS 10 + +#define NVM_ETS_CFG 0x003E +#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 +#define NVM_ETS_LTHRES_DELTA_SHIFT 6 +#define NVM_ETS_TYPE_MASK 0x0038 +#define NVM_ETS_TYPE_SHIFT 3 +#define NVM_ETS_TYPE_EMC 0x000 +#define NVM_ETS_NUM_SENSORS_MASK 0x0007 +#define NVM_ETS_DATA_LOC_MASK 0x3C00 +#define NVM_ETS_DATA_LOC_SHIFT 10 +#define NVM_ETS_DATA_INDEX_MASK 0x0300 +#define NVM_ETS_DATA_INDEX_SHIFT 8 +#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF + +#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0) + +/* Mask bits for fields in Word 0x24 of the NVM */ +#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */ + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_ASM_DIR 0x2000 + +/* Mask bits for fields in Word 0x1a of the NVM */ + +/* length of string needed to store part num */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA + +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_RESERVED_WORD 0xFFFF +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* NVM Commands - Microwire */ + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCIE_DEVICE_CONTROL2 0x28 +#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. 
*/ +/* I = Integrated + * E = External + */ +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1112_E_PHY_ID 0x01410C90 +#define I347AT4_E_PHY_ID 0x01410DC0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define I82580_I_PHY_ID 0x015403A0 +#define I350_I_PHY_ID 0x015403B0 +#define M88_VENDOR 0x0141 +#define I210_I_PHY_ID 0x01410C00 +#define M88E1543_E_PHY_ID 0x01410EA0 +#define M88E1512_E_PHY_ID 0x01410DD0 +#define BCM54616_E_PHY_ID 0x03625D10 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +/* 1=CLK125 low, 0=CLK125 toggling */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold + * 0=Normal 10BASE-T Rx Threshold + */ +/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 110-140M + * 4 = >140M + */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +/* 1 = Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* Intel i347-AT4 Registers */ + +#define I347AT4_PCDL0 0x10 /* Pair 0 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL1 0x11 /* Pair 1 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL2 0x12 /* Pair 2 PHY Cable Diagnostics Length */ +#define I347AT4_PCDL3 0x13 /* Pair 3 PHY Cable Diagnostics Length */ +#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +#define I347AT4_PAGE_SELECT 0x16 + +/* i347-AT4 Extended PHY Specific Control Register */ + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 +#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +/* i347-AT4 PHY Cable Diagnostics Control */ +#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +/* Marvell 1112 only registers */ +#define M88E1112_VCT_DSP_DISTANCE 0x001A + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 +#define E1000_MDIC_DEST 0x80000000 + +/* Thermal Sensor */ +#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */ + +/* Energy Efficient Ethernet */ +#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */ +#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ +#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ +#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ +#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */ +#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ +#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */ +#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ +#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ +#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ +#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ +#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ +#define E1000_M88E1543_EEE_CTRL_1 0x0 +#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define 
E1000_M88E1543_FIBER_CTRL 0x0 +#define E1000_EEE_ADV_DEV_I354 7 +#define E1000_EEE_ADV_ADDR_I354 60 +#define E1000_EEE_ADV_100_SUPPORTED BIT(1) /* 100BaseTx EEE Supported */ +#define E1000_EEE_ADV_1000_SUPPORTED BIT(2) /* 1000BaseT EEE Supported */ +#define E1000_PCS_STATUS_DEV_I354 3 +#define E1000_PCS_STATUS_ADDR_I354 1 +#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */ +#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 +#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 + +/* SerDes Control */ +#define E1000_GEN_CTL_READY 0x80000000 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +#define E1000_GEN_POLL_TIMEOUT 640 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +/* Tx Rate-Scheduler Config fields */ +#define E1000_RTTBCNRC_RS_ENA 0x80000000 +#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define E1000_RTTBCNRC_RF_INT_SHIFT 14 +#define E1000_RTTBCNRC_RF_INT_MASK \ + (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) + +#define E1000_VLAPQF_QUEUE_SEL(_n, q_idx) (q_idx << ((_n) * 4)) +#define E1000_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define E1000_VLAPQF_QUEUE_MASK 0x03 + +/* TX Qav Control fields */ +#define E1000_TQAVCTRL_XMIT_MODE BIT(0) +#define E1000_TQAVCTRL_DATAFETCHARB BIT(4) +#define E1000_TQAVCTRL_DATATRANARB BIT(8) +#define E1000_TQAVCTRL_DATATRANTIM BIT(9) +#define E1000_TQAVCTRL_SP_WAIT_SR BIT(10) +/* Fetch Time Delta - bits 31:16 + * + * This field holds the value to be reduced from the launch time for + * fetch time decision. The FetchTimeDelta value is defined in 32 ns + * granularity. + * + * This field is 16 bits wide, and so the maximum value is: + * + * 65535 * 32 = 2097120 ~= 2.1 msec + * + * XXX: We are configuring the max value here since we couldn't come up + * with a reason for not doing so. + */ +#define E1000_TQAVCTRL_FETCHTIME_DELTA (0xFFFF << 16) + +/* TX Qav Credit Control fields */ +#define E1000_TQAVCC_IDLESLOPE_MASK 0xFFFF +#define E1000_TQAVCC_QUEUEMODE BIT(31) + +/* Transmit Descriptor Control fields */ +#define E1000_TXDCTL_PRIORITY BIT(27) + +#endif diff --git a/devices/igb/e1000_hw-5.14-ethercat.h b/devices/igb/e1000_hw-5.14-ethercat.h new file mode 100644 index 00000000..b193c9e6 --- /dev/null +++ b/devices/igb/e1000_hw-5.14-ethercat.h @@ -0,0 +1,554 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_IGB_HW_H_ +#define _E1000_IGB_HW_H_ + +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/netdevice.h> + +#include "e1000_regs-5.14-ethercat.h" +#include "e1000_defines-5.14-ethercat.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define E1000_DEV_ID_I350_SGMII 0x1524 +#define E1000_DEV_ID_I210_COPPER 0x1533 +#define E1000_DEV_ID_I210_FIBER 0x1536 +#define E1000_DEV_ID_I210_SERDES 0x1537 +#define E1000_DEV_ID_I210_SGMII 0x1538 +#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B +#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C +#define E1000_DEV_ID_I211_COPPER 0x1539 +#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 +#define E1000_DEV_ID_I354_SGMII 0x1F41 +#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 + +#define E1000_REVISION_2 2 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 +#define E1000_FUNC_2 2 +#define E1000_FUNC_3 3 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_82575, + e1000_82576, + e1000_82580, + e1000_i350, + e1000_i354, + e1000_i210, + e1000_i211, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. 
*/ +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_invm, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large, +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_82580, + e1000_phy_i210, + e1000_phy_bcm54616, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_pci_express, + e1000_bus_type_reserved +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_2500, + e1000_bus_speed_5000, + e1000_bus_speed_reserved +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 
reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "e1000_mac-5.14-ethercat.h" +#include "e1000_phy-5.14-ethercat.h" +#include "e1000_nvm-5.14-ethercat.h" +#include "e1000_mbx-5.14-ethercat.h" + +struct e1000_mac_operations { + s32 (*check_for_link)(struct e1000_hw *); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); +#ifdef CONFIG_IGB_HWMON + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); +#endif + void (*write_vfta)(struct e1000_hw *, u32, u32); +}; + +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_phy_info)(struct e1000_hw *); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); +}; + +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + s32 (*update)(struct e1000_hw *); + s32 (*validate)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); +}; + +#define E1000_MAX_SENSORS 3 + +struct e1000_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct e1000_thermal_sensor_data { + struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS]; +}; + +struct e1000_info { + s32 (*get_invariants)(struct e1000_hw *); + struct e1000_mac_operations *mac_ops; + const struct e1000_phy_operations *phy_ops; + struct e1000_nvm_operations *nvm_ops; +}; + +extern const struct e1000_info e1000_82575_info; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 txcw; + + u16 mta_reg_count; + u16 uta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ + #define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool disable_hw_init_bits; + bool get_link_status; + bool ifs_params_forced; + bool in_ifs_mode; + bool report_tx_early; + bool serdes_has_link; + bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + + enum e1000_phy_type 
type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + u16 pair_length[4]; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + + u32 snoop; + + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* Type of flow control */ + enum e1000_fc_mode requested_mode; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock); + s32 (*write)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*read_posted)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*write_posted)(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id); + s32 (*check_for_msg)(struct e1000_hw *hw, u16 mbx_id); + s32 (*check_for_ack)(struct e1000_hw *hw, u16 mbx_id); + s32 (*check_for_rst)(struct e1000_hw *hw, u16 mbx_id); + s32 (*unlock)(struct e1000_hw *hw, u16 mbx_id); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; + bool clear_semaphore_once; + struct e1000_sfp_flags eth_flags; + bool module_plugged; + u8 media_port; + bool media_changed; + bool mas_capable; +}; + +struct e1000_hw { + void *back; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82575 _82575; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +struct net_device *igb_get_hw_dev(struct e1000_hw *hw); +#define hw_dbg(format, arg...) 
\ + netdev_dbg(igb_get_hw_dev(hw), format, ##arg) + +/* These functions must be implemented by drivers */ +s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); + +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +#endif /* _E1000_IGB_HW_H_ */ diff --git a/devices/igb/e1000_hw-5.14-orig.h b/devices/igb/e1000_hw-5.14-orig.h new file mode 100644 index 00000000..44111f65 --- /dev/null +++ b/devices/igb/e1000_hw-5.14-orig.h @@ -0,0 +1,554 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#ifndef _E1000_IGB_HW_H_ +#define _E1000_IGB_HW_H_ + +#include +#include +#include +#include + +#include "e1000_regs.h" +#include "e1000_defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define E1000_DEV_ID_I350_SGMII 0x1524 +#define E1000_DEV_ID_I210_COPPER 0x1533 +#define E1000_DEV_ID_I210_FIBER 0x1536 +#define E1000_DEV_ID_I210_SERDES 0x1537 +#define E1000_DEV_ID_I210_SGMII 0x1538 +#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B +#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C +#define E1000_DEV_ID_I211_COPPER 0x1539 +#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 +#define E1000_DEV_ID_I354_SGMII 0x1F41 +#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 + +#define E1000_REVISION_2 2 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 +#define E1000_FUNC_2 2 +#define E1000_FUNC_3 3 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_82575, + e1000_82576, + e1000_82580, + e1000_i350, + e1000_i354, + e1000_i210, + e1000_i211, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. 
*/ +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_invm, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large, +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_82580, + e1000_phy_i210, + e1000_phy_bcm54616, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_pci_express, + e1000_bus_type_reserved +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_2500, + e1000_bus_speed_5000, + e1000_bus_speed_reserved +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 
reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "e1000_mac.h" +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_mbx.h" + +struct e1000_mac_operations { + s32 (*check_for_link)(struct e1000_hw *); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); +#ifdef CONFIG_IGB_HWMON + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); +#endif + void (*write_vfta)(struct e1000_hw *, u32, u32); +}; + +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_phy_info)(struct e1000_hw *); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); +}; + +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + s32 (*update)(struct e1000_hw *); + s32 (*validate)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); +}; + +#define E1000_MAX_SENSORS 3 + +struct e1000_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct e1000_thermal_sensor_data { + struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS]; +}; + +struct e1000_info { + s32 (*get_invariants)(struct e1000_hw *); + struct e1000_mac_operations *mac_ops; + const struct e1000_phy_operations *phy_ops; + struct e1000_nvm_operations *nvm_ops; +}; + +extern const struct e1000_info e1000_82575_info; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 txcw; + + u16 mta_reg_count; + u16 uta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ + #define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool disable_hw_init_bits; + bool get_link_status; + bool ifs_params_forced; + bool in_ifs_mode; + bool report_tx_early; + bool serdes_has_link; + bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum 
e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + u16 pair_length[4]; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + + u32 snoop; + + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* Type of flow control */ + enum e1000_fc_mode requested_mode; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock); + s32 (*write)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*read_posted)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*write_posted)(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id); + s32 (*check_for_msg)(struct e1000_hw *hw, u16 mbx_id); + s32 (*check_for_ack)(struct e1000_hw *hw, u16 mbx_id); + s32 (*check_for_rst)(struct e1000_hw *hw, u16 mbx_id); + s32 (*unlock)(struct e1000_hw *hw, u16 mbx_id); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; + bool clear_semaphore_once; + struct e1000_sfp_flags eth_flags; + bool module_plugged; + u8 media_port; + bool media_changed; + bool mas_capable; +}; + +struct e1000_hw { + void *back; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82575 _82575; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +struct net_device *igb_get_hw_dev(struct e1000_hw *hw); +#define hw_dbg(format, arg...) 
\ + netdev_dbg(igb_get_hw_dev(hw), format, ##arg) + +/* These functions must be implemented by drivers */ +s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); + +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +#endif /* _E1000_IGB_HW_H_ */ diff --git a/devices/igb/e1000_hw-6.1-ethercat.h b/devices/igb/e1000_hw-6.1-ethercat.h new file mode 100644 index 00000000..6ce51b3a --- /dev/null +++ b/devices/igb/e1000_hw-6.1-ethercat.h @@ -0,0 +1,554 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#ifndef _E1000_IGB_HW_H_ +#define _E1000_IGB_HW_H_ + +#include +#include +#include +#include + +#include "e1000_regs-6.1-ethercat.h" +#include "e1000_defines-6.1-ethercat.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define E1000_DEV_ID_I350_SGMII 0x1524 +#define E1000_DEV_ID_I210_COPPER 0x1533 +#define E1000_DEV_ID_I210_FIBER 0x1536 +#define E1000_DEV_ID_I210_SERDES 0x1537 +#define E1000_DEV_ID_I210_SGMII 0x1538 +#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B +#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C +#define E1000_DEV_ID_I211_COPPER 0x1539 +#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 +#define E1000_DEV_ID_I354_SGMII 0x1F41 +#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 + +#define E1000_REVISION_2 2 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 +#define E1000_FUNC_2 2 +#define E1000_FUNC_3 3 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_82575, + e1000_82576, + e1000_82580, + e1000_i350, + e1000_i354, + e1000_i210, + e1000_i211, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. 
*/ +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_invm, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large, +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_82580, + e1000_phy_i210, + e1000_phy_bcm54616, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_pci_express, + e1000_bus_type_reserved +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_2500, + e1000_bus_speed_5000, + e1000_bus_speed_reserved +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 
reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "e1000_mac-6.1-ethercat.h" +#include "e1000_phy-6.1-ethercat.h" +#include "e1000_nvm-6.1-ethercat.h" +#include "e1000_mbx-6.1-ethercat.h" + +struct e1000_mac_operations { + s32 (*check_for_link)(struct e1000_hw *); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); +#ifdef CONFIG_IGB_HWMON + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); +#endif + void (*write_vfta)(struct e1000_hw *, u32, u32); +}; + +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_phy_info)(struct e1000_hw *); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); +}; + +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + s32 (*update)(struct e1000_hw *); + s32 (*validate)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); +}; + +#define E1000_MAX_SENSORS 3 + +struct e1000_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct e1000_thermal_sensor_data { + struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS]; +}; + +struct e1000_info { + s32 (*get_invariants)(struct e1000_hw *); + struct e1000_mac_operations *mac_ops; + const struct e1000_phy_operations *phy_ops; + struct e1000_nvm_operations *nvm_ops; +}; + +extern const struct e1000_info e1000_82575_info; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 txcw; + + u16 mta_reg_count; + u16 uta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ + #define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool disable_hw_init_bits; + bool get_link_status; + bool ifs_params_forced; + bool in_ifs_mode; + bool report_tx_early; + bool serdes_has_link; + bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + + enum e1000_phy_type type; + 
+ enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + u16 pair_length[4]; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + + u32 snoop; + + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* Type of flow control */ + enum e1000_fc_mode requested_mode; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock); + s32 (*write)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*read_posted)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*write_posted)(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id); + s32 (*check_for_msg)(struct e1000_hw *hw, u16 mbx_id); + s32 (*check_for_ack)(struct e1000_hw *hw, u16 mbx_id); + s32 (*check_for_rst)(struct e1000_hw *hw, u16 mbx_id); + s32 (*unlock)(struct e1000_hw *hw, u16 mbx_id); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; + bool clear_semaphore_once; + struct e1000_sfp_flags eth_flags; + bool module_plugged; + u8 media_port; + bool media_changed; + bool mas_capable; +}; + +struct e1000_hw { + void *back; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82575 _82575; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +struct net_device *igb_get_hw_dev(struct e1000_hw *hw); +#define hw_dbg(format, arg...) 
\ + netdev_dbg(igb_get_hw_dev(hw), format, ##arg) + +/* These functions must be implemented by drivers */ +s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); + +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +#endif /* _E1000_IGB_HW_H_ */ diff --git a/devices/igb/e1000_hw-6.1-orig.h b/devices/igb/e1000_hw-6.1-orig.h new file mode 100644 index 00000000..44111f65 --- /dev/null +++ b/devices/igb/e1000_hw-6.1-orig.h @@ -0,0 +1,554 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#ifndef _E1000_IGB_HW_H_ +#define _E1000_IGB_HW_H_ + +#include +#include +#include +#include + +#include "e1000_regs.h" +#include "e1000_defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define E1000_DEV_ID_I350_SGMII 0x1524 +#define E1000_DEV_ID_I210_COPPER 0x1533 +#define E1000_DEV_ID_I210_FIBER 0x1536 +#define E1000_DEV_ID_I210_SERDES 0x1537 +#define E1000_DEV_ID_I210_SGMII 0x1538 +#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B +#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C +#define E1000_DEV_ID_I211_COPPER 0x1539 +#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 +#define E1000_DEV_ID_I354_SGMII 0x1F41 +#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 + +#define E1000_REVISION_2 2 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 +#define E1000_FUNC_2 2 +#define E1000_FUNC_3 3 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_82575, + e1000_82576, + e1000_82580, + e1000_i350, + e1000_i354, + e1000_i210, + e1000_i211, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. 
*/ +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_invm, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large, +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_82580, + e1000_phy_i210, + e1000_phy_bcm54616, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_pci_express, + e1000_bus_type_reserved +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_2500, + e1000_bus_speed_5000, + e1000_bus_speed_reserved +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 
reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "e1000_mac.h" +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_mbx.h" + +struct e1000_mac_operations { + s32 (*check_for_link)(struct e1000_hw *); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); +#ifdef CONFIG_IGB_HWMON + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); +#endif + void (*write_vfta)(struct e1000_hw *, u32, u32); +}; + +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_phy_info)(struct e1000_hw *); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); +}; + +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + s32 (*update)(struct e1000_hw *); + s32 (*validate)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); +}; + +#define E1000_MAX_SENSORS 3 + +struct e1000_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct e1000_thermal_sensor_data { + struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS]; +}; + +struct e1000_info { + s32 (*get_invariants)(struct e1000_hw *); + struct e1000_mac_operations *mac_ops; + const struct e1000_phy_operations *phy_ops; + struct e1000_nvm_operations *nvm_ops; +}; + +extern const struct e1000_info e1000_82575_info; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 txcw; + + u16 mta_reg_count; + u16 uta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ + #define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool disable_hw_init_bits; + bool get_link_status; + bool ifs_params_forced; + bool in_ifs_mode; + bool report_tx_early; + bool serdes_has_link; + bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum 
e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + u16 pair_length[4]; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + + u32 snoop; + + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* Type of flow control */ + enum e1000_fc_mode requested_mode; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock); + s32 (*write)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*read_posted)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*write_posted)(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id); + s32 (*check_for_msg)(struct e1000_hw *hw, u16 mbx_id); + s32 (*check_for_ack)(struct e1000_hw *hw, u16 mbx_id); + s32 (*check_for_rst)(struct e1000_hw *hw, u16 mbx_id); + s32 (*unlock)(struct e1000_hw *hw, u16 mbx_id); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; + bool clear_semaphore_once; + struct e1000_sfp_flags eth_flags; + bool module_plugged; + u8 media_port; + bool media_changed; + bool mas_capable; +}; + +struct e1000_hw { + void *back; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82575 _82575; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +struct net_device *igb_get_hw_dev(struct e1000_hw *hw); +#define hw_dbg(format, arg...) 
\ + netdev_dbg(igb_get_hw_dev(hw), format, ##arg) + +/* These functions must be implemented by drivers */ +s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); + +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +#endif /* _E1000_IGB_HW_H_ */ diff --git a/devices/igb/e1000_i210-5.14-ethercat.c b/devices/igb/e1000_i210-5.14-ethercat.c new file mode 100644 index 00000000..b5228a60 --- /dev/null +++ b/devices/igb/e1000_i210-5.14-ethercat.c @@ -0,0 +1,912 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +/* e1000_i210 + * e1000_i211 + */ + +#include +#include + +#include "e1000_hw-5.14-ethercat.h" +#include "e1000_i210-5.14-ethercat.h" + +static s32 igb_update_flash_i210(struct e1000_hw *hw); + +/** + * igb_get_hw_semaphore_i210 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + */ +static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._82575.clear_semaphore_once) { + hw->dev_spec._82575.clear_semaphore_once = false; + igb_put_hw_semaphore(hw); + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igb_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * igb_acquire_nvm_i210 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 igb_acquire_nvm_i210(struct e1000_hw *hw) +{ + return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_release_nvm_i210 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void igb_release_nvm_i210(struct e1000_hw *hw) +{ + igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. 
+ **/ +s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = 0; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + + while (i < timeout) { + if (igb_get_hw_semaphore_i210(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) */ + igb_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +out: + return ret_val; +} + +/** + * igb_release_swfw_sync_i210 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igb_get_hw_semaphore_i210(hw)) + ; /* Empty */ + + swfw_sync = rd32(E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +} + +/** + * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + **/ +static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (!(hw->nvm.ops.acquire(hw))) { + status = igb_read_nvm_eerd(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status) + break; + } + + return status; +} + +/** + * igb_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igb_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + **/ +static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, k, eewr = 0; + u32 attempts = 100000; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + (data[i] << E1000_NVM_RW_REG_DATA) | + E1000_NVM_RW_REG_START; + + wr32(E1000_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (E1000_NVM_RW_REG_DONE & + rd32(E1000_SRWR)) { + ret_val = 0; + break; + } + udelay(5); + } + + if (ret_val) { + hw_dbg("Shadow RAM write EEWR timed out\n"); + break; + } + } + +out: + return ret_val; +} + +/** + * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If e1000_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + **/ +static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (!(hw->nvm.ops.acquire(hw))) { + status = igb_write_nvm_srwr(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status) + break; + } + + return status; +} + +/** + * igb_read_invm_word_i210 - Reads OTP + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Reads 16-bit words from the OTP. Return error when the word is not + * stored in OTP. + **/ +static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) +{ + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u32 invm_dword; + u16 i; + u8 record_type, word_address; + + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = rd32(E1000_INVM_DATA_REG(i)); + /* Get record type */ + record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); + if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) + break; + if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) + i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) + i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { + word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); + if (word_address == address) { + *data = INVM_DWORD_TO_WORD_DATA(invm_dword); + hw_dbg("Read INVM Word 0x%02x = %x\n", + address, *data); + status = 0; + break; + } + } + } + if (status) + hw_dbg("Requested word 0x%02x not found in OTP\n", address); + return status; +} + +/** + * igb_read_invm_i210 - Read invm wrapper function for I210/I211 + * @hw: pointer to the HW structure + * @offset: offset to read from + * @words: number of words to read (unused) + * @data: pointer to the data read + * + * Wrapper function to return data formerly found in the NVM. 
+ **/ +static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset, + u16 __always_unused words, u16 *data) +{ + s32 ret_val = 0; + + /* Only the MAC addr is required to be present in the iNVM */ + switch (offset) { + case NVM_MAC_ADDR: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]); + ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1, + &data[1]); + ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2, + &data[2]); + if (ret_val) + hw_dbg("MAC Addr not found in iNVM\n"); + break; + case NVM_INIT_CTRL_2: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_INIT_CTRL_2_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_INIT_CTRL_4: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_INIT_CTRL_4_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_LED_1_CFG: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_LED_1_CFG_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_LED_0_2_CFG: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_LED_0_2_CFG_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_ID_LED_SETTINGS: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = ID_LED_RESERVED_FFFF; + ret_val = 0; + } + break; + case NVM_SUB_DEV_ID: + *data = hw->subsystem_device_id; + break; + case NVM_SUB_VEN_ID: + *data = hw->subsystem_vendor_id; + break; + case NVM_DEV_ID: + *data = hw->device_id; + break; + case NVM_VEN_ID: + *data = hw->vendor_id; + break; + default: + hw_dbg("NVM word 0x%02x is not mapped.\n", offset); + *data = NVM_RESERVED_WORD; + break; + } + return ret_val; +} + +/** + * igb_read_invm_version - Reads iNVM version and image type + * @hw: pointer to the HW structure + * @invm_ver: version structure for the version read + * + * Reads iNVM version and image type. 
+ **/ +s32 igb_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver) { + u32 *record = NULL; + u32 *next_record = NULL; + u32 i = 0; + u32 invm_dword = 0; + u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE / + E1000_INVM_RECORD_SIZE_IN_BYTES); + u32 buffer[E1000_INVM_SIZE]; + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u16 version = 0; + + /* Read iNVM memory */ + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = rd32(E1000_INVM_DATA_REG(i)); + buffer[i] = invm_dword; + } + + /* Read version number */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have first version location used */ + if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) { + version = 0; + status = 0; + break; + } + /* Check if we have second version location used */ + else if ((i == 1) && + ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = 0; + break; + } + /* Check if we have odd version location + * used and it is the last one used + */ + else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) && + ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) && + (i != 1))) { + version = (*next_record & E1000_INVM_VER_FIELD_TWO) + >> 13; + status = 0; + break; + } + /* Check if we have even version location + * used and it is the last one used + */ + else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) && + ((*record & 0x3) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = 0; + break; + } + } + + if (!status) { + invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK) + >> E1000_INVM_MAJOR_SHIFT; + invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK; + } + /* Read Image Type */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have image type in first location used */ + if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) { + invm_ver->invm_img_type = 0; + status = 0; + break; + } + /* Check if we have image type in first location used */ + else if ((((*record & 0x3) == 0) && + ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) || + ((((*record & 0x3) != 0) && (i != 1)))) { + invm_ver->invm_img_type = + (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23; + status = 0; + break; + } + } + return status; +} + +/** + * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 status = 0; + s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); + + if (!(hw->nvm.ops.acquire(hw))) { + + /* Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igb_read_nvm_eerd; + + status = igb_validate_nvm_checksum(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * igb_update_nvm_checksum_i210 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. 
Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + **/ +static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val) { + hw_dbg("EEPROM read failed\n"); + goto out; + } + + if (!(hw->nvm.ops.acquire(hw))) { + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. + */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igb_update_flash_i210(hw); + } else { + ret_val = -E1000_ERR_SWFW_SYNC; + } +out: + return ret_val; +} + +/** + * igb_pool_flash_update_done_i210 - Pool FLUDONE status. + * @hw: pointer to the HW structure + * + **/ +static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_NVM; + u32 i, reg; + + for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { + reg = rd32(E1000_EECD); + if (reg & E1000_EECD_FLUDONE_I210) { + ret_val = 0; + break; + } + udelay(5); + } + + return ret_val; +} + +/** + * igb_get_flash_presence_i210 - Check if flash device is detected. + * @hw: pointer to the HW structure + * + **/ +bool igb_get_flash_presence_i210(struct e1000_hw *hw) +{ + u32 eec = 0; + bool ret_val = false; + + eec = rd32(E1000_EECD); + if (eec & E1000_EECD_FLASH_DETECTED_I210) + ret_val = true; + + return ret_val; +} + +/** + * igb_update_flash_i210 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + * + **/ +static s32 igb_update_flash_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 flup; + + ret_val = igb_pool_flash_update_done_i210(hw); + if (ret_val == -E1000_ERR_NVM) { + hw_dbg("Flash update time out\n"); + goto out; + } + + flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210; + wr32(E1000_EECD, flup); + + ret_val = igb_pool_flash_update_done_i210(hw); + if (ret_val) + hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); + +out: + return ret_val; +} + +/** + * igb_valid_led_default_i210 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_I210_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT_I210; + break; + } + } +out: + return ret_val; +} + +/** + * __igb_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + **/ +static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val = 0; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * igb_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + **/ +s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) +{ + return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * igb_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + **/ +s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) +{ + return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * igb_init_nvm_params_i210 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +s32 igb_init_nvm_params_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + struct e1000_nvm_info *nvm = &hw->nvm; + + nvm->ops.acquire = igb_acquire_nvm_i210; + nvm->ops.release = igb_release_nvm_i210; + nvm->ops.valid_led_default = igb_valid_led_default_i210; + + /* NVM Function Pointers */ + if (igb_get_flash_presence_i210(hw)) { + hw->nvm.type = e1000_nvm_flash_hw; + nvm->ops.read = igb_read_nvm_srrd_i210; + nvm->ops.write = igb_write_nvm_srwr_i210; + nvm->ops.validate = igb_validate_nvm_checksum_i210; + nvm->ops.update = igb_update_nvm_checksum_i210; + } else { + hw->nvm.type = e1000_nvm_invm; + nvm->ops.read = igb_read_invm_i210; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return ret_val; +} + +/** + * igb_pll_workaround_i210 + * @hw: pointer to the HW structure + * + * Works around an errata in the PLL circuit where it occasionally + * provides the wrong clock frequency after power up. 
+ **/ +s32 igb_pll_workaround_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; + u16 nvm_word, phy_word, pci_word, tmp_nvm; + int i; + + /* Get and set needed register values */ + wuc = rd32(E1000_WUC); + mdicnfg = rd32(E1000_MDICNFG); + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; + wr32(E1000_MDICNFG, reg_val); + + /* Get data from NVM, or set default */ + ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, + &nvm_word); + if (ret_val) + nvm_word = E1000_INVM_DEFAULT_AL; + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE); + phy_word = E1000_PHY_PLL_UNCONF; + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { + /* check current state directly from internal PHY */ + igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word); + if ((phy_word & E1000_PHY_PLL_UNCONF) + != E1000_PHY_PLL_UNCONF) { + ret_val = 0; + break; + } else { + ret_val = -E1000_ERR_PHY; + } + /* directly reset the internal PHY */ + ctrl = rd32(E1000_CTRL); + wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); + + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); + wr32(E1000_CTRL_EXT, ctrl_ext); + + wr32(E1000_WUC, 0); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); + wr32(E1000_EEARBC_I210, reg_val); + + igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + pci_word |= E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + usleep_range(1000, 2000); + pci_word &= ~E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); + wr32(E1000_EEARBC_I210, reg_val); + + /* restore WUC register */ + wr32(E1000_WUC, wuc); + } + igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0); + /* restore MDICNFG setting */ + wr32(E1000_MDICNFG, mdicnfg); + return ret_val; +} + +/** + * igb_get_cfg_done_i210 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * 0. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +s32 igb_get_cfg_done_i210(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + while (timeout) { + if (rd32(E1000_EEMNGCTL_I210) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) + hw_dbg("MNG configuration cycle has not completed.\n"); + + return 0; +} diff --git a/devices/igb/e1000_i210-5.14-ethercat.h b/devices/igb/e1000_i210-5.14-ethercat.h new file mode 100644 index 00000000..5c437fdc --- /dev/null +++ b/devices/igb/e1000_i210-5.14-ethercat.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_I210_H_ +#define _E1000_I210_H_ + +s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); +s32 igb_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver); +s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data); +s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); +s32 igb_init_nvm_params_i210(struct e1000_hw *hw); +bool igb_get_flash_presence_i210(struct e1000_hw *hw); +s32 igb_pll_workaround_i210(struct e1000_hw *hw); +s32 igb_get_cfg_done_i210(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 +#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 + +#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \ + (u8)((invm_dword) & 0x7) +#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \ + (u8)(((invm_dword) & 0x0000FE00) >> 9) +#define INVM_DWORD_TO_WORD_DATA(invm_dword) \ + (u16)(((invm_dword) & 0xFFFF0000) >> 16) + +enum E1000_INVM_STRUCTURE_TYPE { + E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00, + E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01, + E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02, + E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03, + E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04, + E1000_INVM_INVALIDATED_STRUCTURE = 0x0F, +}; + +#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 +#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 +#define E1000_INVM_ULT_BYTES_SIZE 8 +#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 +#define E1000_INVM_VER_FIELD_ONE 0x1FF8 +#define E1000_INVM_VER_FIELD_TWO 0x7FE000 +#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 + +#define E1000_INVM_MAJOR_MASK 0x3F0 +#define E1000_INVM_MINOR_MASK 0xF +#define E1000_INVM_MAJOR_SHIFT 4 + +#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_OFF2)) +#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +/* NVM offset defaults for i211 device */ +#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 +#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1 +#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 +#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C + +/* PLL Defines */ +#define E1000_PCI_PMCSR 0x44 +#define E1000_PCI_PMCSR_D3 0x03 +#define E1000_MAX_PLL_TRIES 5 +#define E1000_PHY_PLL_UNCONF 0xFF +#define E1000_PHY_PLL_FREQ_PAGE 0xFC +#define E1000_PHY_PLL_FREQ_REG 0x000E +#define E1000_INVM_DEFAULT_AL 0x202F +#define E1000_INVM_AUTOLOAD 0x0A +#define E1000_INVM_PLL_WO_VAL 0x0010 + +#endif diff --git a/devices/igb/e1000_i210-5.14-orig.c b/devices/igb/e1000_i210-5.14-orig.c new file mode 100644 index 00000000..92659014 --- /dev/null +++ b/devices/igb/e1000_i210-5.14-orig.c @@ -0,0 +1,912 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +/* e1000_i210 + * e1000_i211 + */ + +#include +#include + +#include "e1000_hw.h" +#include "e1000_i210.h" + +static s32 igb_update_flash_i210(struct e1000_hw *hw); + +/** + * igb_get_hw_semaphore_i210 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + */ +static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._82575.clear_semaphore_once) { + hw->dev_spec._82575.clear_semaphore_once = false; + igb_put_hw_semaphore(hw); + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igb_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * igb_acquire_nvm_i210 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 igb_acquire_nvm_i210(struct e1000_hw *hw) +{ + return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_release_nvm_i210 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void igb_release_nvm_i210(struct e1000_hw *hw) +{ + igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. 
+ **/ +s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = 0; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + + while (i < timeout) { + if (igb_get_hw_semaphore_i210(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) */ + igb_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +out: + return ret_val; +} + +/** + * igb_release_swfw_sync_i210 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igb_get_hw_semaphore_i210(hw)) + ; /* Empty */ + + swfw_sync = rd32(E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +} + +/** + * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + **/ +static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (!(hw->nvm.ops.acquire(hw))) { + status = igb_read_nvm_eerd(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status) + break; + } + + return status; +} + +/** + * igb_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igb_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + **/ +static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, k, eewr = 0; + u32 attempts = 100000; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + (data[i] << E1000_NVM_RW_REG_DATA) | + E1000_NVM_RW_REG_START; + + wr32(E1000_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (E1000_NVM_RW_REG_DONE & + rd32(E1000_SRWR)) { + ret_val = 0; + break; + } + udelay(5); + } + + if (ret_val) { + hw_dbg("Shadow RAM write EEWR timed out\n"); + break; + } + } + +out: + return ret_val; +} + +/** + * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If e1000_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + **/ +static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (!(hw->nvm.ops.acquire(hw))) { + status = igb_write_nvm_srwr(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status) + break; + } + + return status; +} + +/** + * igb_read_invm_word_i210 - Reads OTP + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Reads 16-bit words from the OTP. Return error when the word is not + * stored in OTP. + **/ +static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) +{ + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u32 invm_dword; + u16 i; + u8 record_type, word_address; + + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = rd32(E1000_INVM_DATA_REG(i)); + /* Get record type */ + record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); + if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) + break; + if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) + i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) + i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { + word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); + if (word_address == address) { + *data = INVM_DWORD_TO_WORD_DATA(invm_dword); + hw_dbg("Read INVM Word 0x%02x = %x\n", + address, *data); + status = 0; + break; + } + } + } + if (status) + hw_dbg("Requested word 0x%02x not found in OTP\n", address); + return status; +} + +/** + * igb_read_invm_i210 - Read invm wrapper function for I210/I211 + * @hw: pointer to the HW structure + * @offset: offset to read from + * @words: number of words to read (unused) + * @data: pointer to the data read + * + * Wrapper function to return data formerly found in the NVM. 
+ **/ +static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset, + u16 __always_unused words, u16 *data) +{ + s32 ret_val = 0; + + /* Only the MAC addr is required to be present in the iNVM */ + switch (offset) { + case NVM_MAC_ADDR: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]); + ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1, + &data[1]); + ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2, + &data[2]); + if (ret_val) + hw_dbg("MAC Addr not found in iNVM\n"); + break; + case NVM_INIT_CTRL_2: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_INIT_CTRL_2_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_INIT_CTRL_4: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_INIT_CTRL_4_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_LED_1_CFG: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_LED_1_CFG_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_LED_0_2_CFG: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_LED_0_2_CFG_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_ID_LED_SETTINGS: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = ID_LED_RESERVED_FFFF; + ret_val = 0; + } + break; + case NVM_SUB_DEV_ID: + *data = hw->subsystem_device_id; + break; + case NVM_SUB_VEN_ID: + *data = hw->subsystem_vendor_id; + break; + case NVM_DEV_ID: + *data = hw->device_id; + break; + case NVM_VEN_ID: + *data = hw->vendor_id; + break; + default: + hw_dbg("NVM word 0x%02x is not mapped.\n", offset); + *data = NVM_RESERVED_WORD; + break; + } + return ret_val; +} + +/** + * igb_read_invm_version - Reads iNVM version and image type + * @hw: pointer to the HW structure + * @invm_ver: version structure for the version read + * + * Reads iNVM version and image type. 
+ **/ +s32 igb_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver) { + u32 *record = NULL; + u32 *next_record = NULL; + u32 i = 0; + u32 invm_dword = 0; + u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE / + E1000_INVM_RECORD_SIZE_IN_BYTES); + u32 buffer[E1000_INVM_SIZE]; + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u16 version = 0; + + /* Read iNVM memory */ + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = rd32(E1000_INVM_DATA_REG(i)); + buffer[i] = invm_dword; + } + + /* Read version number */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have first version location used */ + if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) { + version = 0; + status = 0; + break; + } + /* Check if we have second version location used */ + else if ((i == 1) && + ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = 0; + break; + } + /* Check if we have odd version location + * used and it is the last one used + */ + else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) && + ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) && + (i != 1))) { + version = (*next_record & E1000_INVM_VER_FIELD_TWO) + >> 13; + status = 0; + break; + } + /* Check if we have even version location + * used and it is the last one used + */ + else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) && + ((*record & 0x3) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = 0; + break; + } + } + + if (!status) { + invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK) + >> E1000_INVM_MAJOR_SHIFT; + invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK; + } + /* Read Image Type */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have image type in first location used */ + if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) { + invm_ver->invm_img_type = 0; + status = 0; + break; + } + /* Check if we have image type in first location used */ + else if ((((*record & 0x3) == 0) && + ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) || + ((((*record & 0x3) != 0) && (i != 1)))) { + invm_ver->invm_img_type = + (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23; + status = 0; + break; + } + } + return status; +} + +/** + * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 status = 0; + s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); + + if (!(hw->nvm.ops.acquire(hw))) { + + /* Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igb_read_nvm_eerd; + + status = igb_validate_nvm_checksum(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * igb_update_nvm_checksum_i210 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. 
Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + **/ +static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val) { + hw_dbg("EEPROM read failed\n"); + goto out; + } + + if (!(hw->nvm.ops.acquire(hw))) { + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. + */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igb_update_flash_i210(hw); + } else { + ret_val = -E1000_ERR_SWFW_SYNC; + } +out: + return ret_val; +} + +/** + * igb_pool_flash_update_done_i210 - Pool FLUDONE status. + * @hw: pointer to the HW structure + * + **/ +static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_NVM; + u32 i, reg; + + for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { + reg = rd32(E1000_EECD); + if (reg & E1000_EECD_FLUDONE_I210) { + ret_val = 0; + break; + } + udelay(5); + } + + return ret_val; +} + +/** + * igb_get_flash_presence_i210 - Check if flash device is detected. + * @hw: pointer to the HW structure + * + **/ +bool igb_get_flash_presence_i210(struct e1000_hw *hw) +{ + u32 eec = 0; + bool ret_val = false; + + eec = rd32(E1000_EECD); + if (eec & E1000_EECD_FLASH_DETECTED_I210) + ret_val = true; + + return ret_val; +} + +/** + * igb_update_flash_i210 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + * + **/ +static s32 igb_update_flash_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 flup; + + ret_val = igb_pool_flash_update_done_i210(hw); + if (ret_val == -E1000_ERR_NVM) { + hw_dbg("Flash update time out\n"); + goto out; + } + + flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210; + wr32(E1000_EECD, flup); + + ret_val = igb_pool_flash_update_done_i210(hw); + if (ret_val) + hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); + +out: + return ret_val; +} + +/** + * igb_valid_led_default_i210 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_I210_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT_I210; + break; + } + } +out: + return ret_val; +} + +/** + * __igb_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + **/ +static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val = 0; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * igb_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + **/ +s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) +{ + return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * igb_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + **/ +s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) +{ + return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * igb_init_nvm_params_i210 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +s32 igb_init_nvm_params_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + struct e1000_nvm_info *nvm = &hw->nvm; + + nvm->ops.acquire = igb_acquire_nvm_i210; + nvm->ops.release = igb_release_nvm_i210; + nvm->ops.valid_led_default = igb_valid_led_default_i210; + + /* NVM Function Pointers */ + if (igb_get_flash_presence_i210(hw)) { + hw->nvm.type = e1000_nvm_flash_hw; + nvm->ops.read = igb_read_nvm_srrd_i210; + nvm->ops.write = igb_write_nvm_srwr_i210; + nvm->ops.validate = igb_validate_nvm_checksum_i210; + nvm->ops.update = igb_update_nvm_checksum_i210; + } else { + hw->nvm.type = e1000_nvm_invm; + nvm->ops.read = igb_read_invm_i210; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return ret_val; +} + +/** + * igb_pll_workaround_i210 + * @hw: pointer to the HW structure + * + * Works around an errata in the PLL circuit where it occasionally + * provides the wrong clock frequency after power up. 
+ **/ +s32 igb_pll_workaround_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; + u16 nvm_word, phy_word, pci_word, tmp_nvm; + int i; + + /* Get and set needed register values */ + wuc = rd32(E1000_WUC); + mdicnfg = rd32(E1000_MDICNFG); + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; + wr32(E1000_MDICNFG, reg_val); + + /* Get data from NVM, or set default */ + ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, + &nvm_word); + if (ret_val) + nvm_word = E1000_INVM_DEFAULT_AL; + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE); + phy_word = E1000_PHY_PLL_UNCONF; + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { + /* check current state directly from internal PHY */ + igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word); + if ((phy_word & E1000_PHY_PLL_UNCONF) + != E1000_PHY_PLL_UNCONF) { + ret_val = 0; + break; + } else { + ret_val = -E1000_ERR_PHY; + } + /* directly reset the internal PHY */ + ctrl = rd32(E1000_CTRL); + wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); + + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); + wr32(E1000_CTRL_EXT, ctrl_ext); + + wr32(E1000_WUC, 0); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); + wr32(E1000_EEARBC_I210, reg_val); + + igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + pci_word |= E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + usleep_range(1000, 2000); + pci_word &= ~E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); + wr32(E1000_EEARBC_I210, reg_val); + + /* restore WUC register */ + wr32(E1000_WUC, wuc); + } + igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0); + /* restore MDICNFG setting */ + wr32(E1000_MDICNFG, mdicnfg); + return ret_val; +} + +/** + * igb_get_cfg_done_i210 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * 0. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +s32 igb_get_cfg_done_i210(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + while (timeout) { + if (rd32(E1000_EEMNGCTL_I210) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) + hw_dbg("MNG configuration cycle has not completed.\n"); + + return 0; +} diff --git a/devices/igb/e1000_i210-5.14-orig.h b/devices/igb/e1000_i210-5.14-orig.h new file mode 100644 index 00000000..5c437fdc --- /dev/null +++ b/devices/igb/e1000_i210-5.14-orig.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_I210_H_ +#define _E1000_I210_H_ + +s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); +s32 igb_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver); +s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data); +s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); +s32 igb_init_nvm_params_i210(struct e1000_hw *hw); +bool igb_get_flash_presence_i210(struct e1000_hw *hw); +s32 igb_pll_workaround_i210(struct e1000_hw *hw); +s32 igb_get_cfg_done_i210(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 +#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 + +#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \ + (u8)((invm_dword) & 0x7) +#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \ + (u8)(((invm_dword) & 0x0000FE00) >> 9) +#define INVM_DWORD_TO_WORD_DATA(invm_dword) \ + (u16)(((invm_dword) & 0xFFFF0000) >> 16) + +enum E1000_INVM_STRUCTURE_TYPE { + E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00, + E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01, + E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02, + E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03, + E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04, + E1000_INVM_INVALIDATED_STRUCTURE = 0x0F, +}; + +#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 +#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 +#define E1000_INVM_ULT_BYTES_SIZE 8 +#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 +#define E1000_INVM_VER_FIELD_ONE 0x1FF8 +#define E1000_INVM_VER_FIELD_TWO 0x7FE000 +#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 + +#define E1000_INVM_MAJOR_MASK 0x3F0 +#define E1000_INVM_MINOR_MASK 0xF +#define E1000_INVM_MAJOR_SHIFT 4 + +#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_OFF2)) +#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +/* NVM offset defaults for i211 device */ +#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 +#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1 +#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 +#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C + +/* PLL Defines */ +#define E1000_PCI_PMCSR 0x44 +#define E1000_PCI_PMCSR_D3 0x03 +#define E1000_MAX_PLL_TRIES 5 +#define E1000_PHY_PLL_UNCONF 0xFF +#define E1000_PHY_PLL_FREQ_PAGE 0xFC +#define E1000_PHY_PLL_FREQ_REG 0x000E +#define E1000_INVM_DEFAULT_AL 0x202F +#define E1000_INVM_AUTOLOAD 0x0A +#define E1000_INVM_PLL_WO_VAL 0x0010 + +#endif diff --git a/devices/igb/e1000_i210-6.1-ethercat.c b/devices/igb/e1000_i210-6.1-ethercat.c new file mode 100644 index 00000000..a1ef702e --- /dev/null +++ b/devices/igb/e1000_i210-6.1-ethercat.c @@ -0,0 +1,911 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +/* e1000_i210 + * e1000_i211 + */ + +#include +#include + +#include "e1000_hw-6.1-ethercat.h" +#include "e1000_i210-6.1-ethercat.h" + +static s32 igb_update_flash_i210(struct e1000_hw *hw); + +/** + * igb_get_hw_semaphore_i210 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + */ +static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._82575.clear_semaphore_once) { + hw->dev_spec._82575.clear_semaphore_once = false; + igb_put_hw_semaphore(hw); + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igb_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * igb_acquire_nvm_i210 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 igb_acquire_nvm_i210(struct e1000_hw *hw) +{ + return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_release_nvm_i210 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void igb_release_nvm_i210(struct e1000_hw *hw) +{ + igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. 
+ **/ +s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = 0; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + + while (i < timeout) { + if (igb_get_hw_semaphore_i210(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) */ + igb_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +out: + return ret_val; +} + +/** + * igb_release_swfw_sync_i210 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igb_get_hw_semaphore_i210(hw)) + ; /* Empty */ + + swfw_sync = rd32(E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +} + +/** + * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + **/ +static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (!(hw->nvm.ops.acquire(hw))) { + status = igb_read_nvm_eerd(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status) + break; + } + + return status; +} + +/** + * igb_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igb_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + **/ +static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, k, eewr = 0; + u32 attempts = 100000; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + (data[i] << E1000_NVM_RW_REG_DATA) | + E1000_NVM_RW_REG_START; + + wr32(E1000_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (E1000_NVM_RW_REG_DONE & + rd32(E1000_SRWR)) { + ret_val = 0; + break; + } + udelay(5); + } + + if (ret_val) { + hw_dbg("Shadow RAM write EEWR timed out\n"); + break; + } + } + +out: + return ret_val; +} + +/** + * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If e1000_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + **/ +static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (!(hw->nvm.ops.acquire(hw))) { + status = igb_write_nvm_srwr(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status) + break; + } + + return status; +} + +/** + * igb_read_invm_word_i210 - Reads OTP + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Reads 16-bit words from the OTP. Return error when the word is not + * stored in OTP. + **/ +static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) +{ + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u32 invm_dword; + u16 i; + u8 record_type, word_address; + + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = rd32(E1000_INVM_DATA_REG(i)); + /* Get record type */ + record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); + if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) + break; + if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) + i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) + i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { + word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); + if (word_address == address) { + *data = INVM_DWORD_TO_WORD_DATA(invm_dword); + hw_dbg("Read INVM Word 0x%02x = %x\n", + address, *data); + status = 0; + break; + } + } + } + if (status) + hw_dbg("Requested word 0x%02x not found in OTP\n", address); + return status; +} + +/** + * igb_read_invm_i210 - Read invm wrapper function for I210/I211 + * @hw: pointer to the HW structure + * @offset: offset to read from + * @words: number of words to read (unused) + * @data: pointer to the data read + * + * Wrapper function to return data formerly found in the NVM. 
+ **/ +static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset, + u16 __always_unused words, u16 *data) +{ + s32 ret_val = 0; + + /* Only the MAC addr is required to be present in the iNVM */ + switch (offset) { + case NVM_MAC_ADDR: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]); + ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1, + &data[1]); + ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2, + &data[2]); + if (ret_val) + hw_dbg("MAC Addr not found in iNVM\n"); + break; + case NVM_INIT_CTRL_2: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_INIT_CTRL_2_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_INIT_CTRL_4: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_INIT_CTRL_4_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_LED_1_CFG: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_LED_1_CFG_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_LED_0_2_CFG: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_LED_0_2_CFG_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_ID_LED_SETTINGS: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = ID_LED_RESERVED_FFFF; + ret_val = 0; + } + break; + case NVM_SUB_DEV_ID: + *data = hw->subsystem_device_id; + break; + case NVM_SUB_VEN_ID: + *data = hw->subsystem_vendor_id; + break; + case NVM_DEV_ID: + *data = hw->device_id; + break; + case NVM_VEN_ID: + *data = hw->vendor_id; + break; + default: + hw_dbg("NVM word 0x%02x is not mapped.\n", offset); + *data = NVM_RESERVED_WORD; + break; + } + return ret_val; +} + +/** + * igb_read_invm_version - Reads iNVM version and image type + * @hw: pointer to the HW structure + * @invm_ver: version structure for the version read + * + * Reads iNVM version and image type. 
+ **/ +s32 igb_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver) { + u32 *record = NULL; + u32 *next_record = NULL; + u32 i = 0; + u32 invm_dword = 0; + u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE / + E1000_INVM_RECORD_SIZE_IN_BYTES); + u32 buffer[E1000_INVM_SIZE]; + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u16 version = 0; + + /* Read iNVM memory */ + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = rd32(E1000_INVM_DATA_REG(i)); + buffer[i] = invm_dword; + } + + /* Read version number */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have first version location used */ + if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) { + version = 0; + status = 0; + break; + } + /* Check if we have second version location used */ + else if ((i == 1) && + ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = 0; + break; + } + /* Check if we have odd version location + * used and it is the last one used + */ + else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) && + ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) && + (i != 1))) { + version = (*next_record & E1000_INVM_VER_FIELD_TWO) + >> 13; + status = 0; + break; + } + /* Check if we have even version location + * used and it is the last one used + */ + else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) && + ((*record & 0x3) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = 0; + break; + } + } + + if (!status) { + invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK) + >> E1000_INVM_MAJOR_SHIFT; + invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK; + } + /* Read Image Type */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have image type in first location used */ + if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) { + invm_ver->invm_img_type = 0; + status = 0; + break; + } + /* Check if we have image type in first location used */ + else if ((((*record & 0x3) == 0) && + ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) || + ((((*record & 0x3) != 0) && (i != 1)))) { + invm_ver->invm_img_type = + (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23; + status = 0; + break; + } + } + return status; +} + +/** + * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 status = 0; + s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); + + if (!(hw->nvm.ops.acquire(hw))) { + + /* Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igb_read_nvm_eerd; + + status = igb_validate_nvm_checksum(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * igb_update_nvm_checksum_i210 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. 
Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + **/ +static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val) { + hw_dbg("EEPROM read failed\n"); + goto out; + } + + if (!(hw->nvm.ops.acquire(hw))) { + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. + */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igb_update_flash_i210(hw); + } else { + ret_val = -E1000_ERR_SWFW_SYNC; + } +out: + return ret_val; +} + +/** + * igb_pool_flash_update_done_i210 - Pool FLUDONE status. + * @hw: pointer to the HW structure + * + **/ +static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_NVM; + u32 i, reg; + + for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { + reg = rd32(E1000_EECD); + if (reg & E1000_EECD_FLUDONE_I210) { + ret_val = 0; + break; + } + udelay(5); + } + + return ret_val; +} + +/** + * igb_get_flash_presence_i210 - Check if flash device is detected. + * @hw: pointer to the HW structure + * + **/ +bool igb_get_flash_presence_i210(struct e1000_hw *hw) +{ + u32 eec = 0; + bool ret_val = false; + + eec = rd32(E1000_EECD); + if (eec & E1000_EECD_FLASH_DETECTED_I210) + ret_val = true; + + return ret_val; +} + +/** + * igb_update_flash_i210 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + * + **/ +static s32 igb_update_flash_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 flup; + + ret_val = igb_pool_flash_update_done_i210(hw); + if (ret_val == -E1000_ERR_NVM) { + hw_dbg("Flash update time out\n"); + goto out; + } + + flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210; + wr32(E1000_EECD, flup); + + ret_val = igb_pool_flash_update_done_i210(hw); + if (ret_val) + hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); + +out: + return ret_val; +} + +/** + * igb_valid_led_default_i210 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_I210_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT_I210; + break; + } + } +out: + return ret_val; +} + +/** + * __igb_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + **/ +static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val = 0; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * igb_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + **/ +s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) +{ + return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * igb_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + **/ +s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) +{ + return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * igb_init_nvm_params_i210 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +s32 igb_init_nvm_params_i210(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + + nvm->ops.acquire = igb_acquire_nvm_i210; + nvm->ops.release = igb_release_nvm_i210; + nvm->ops.valid_led_default = igb_valid_led_default_i210; + + /* NVM Function Pointers */ + if (igb_get_flash_presence_i210(hw)) { + hw->nvm.type = e1000_nvm_flash_hw; + nvm->ops.read = igb_read_nvm_srrd_i210; + nvm->ops.write = igb_write_nvm_srwr_i210; + nvm->ops.validate = igb_validate_nvm_checksum_i210; + nvm->ops.update = igb_update_nvm_checksum_i210; + } else { + hw->nvm.type = e1000_nvm_invm; + nvm->ops.read = igb_read_invm_i210; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return 0; +} + +/** + * igb_pll_workaround_i210 + * @hw: pointer to the HW structure + * + * Works around an errata in the PLL circuit where it occasionally + * provides the wrong clock frequency after power up. 
+ **/ +s32 igb_pll_workaround_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; + u16 nvm_word, phy_word, pci_word, tmp_nvm; + int i; + + /* Get and set needed register values */ + wuc = rd32(E1000_WUC); + mdicnfg = rd32(E1000_MDICNFG); + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; + wr32(E1000_MDICNFG, reg_val); + + /* Get data from NVM, or set default */ + ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, + &nvm_word); + if (ret_val) + nvm_word = E1000_INVM_DEFAULT_AL; + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE); + phy_word = E1000_PHY_PLL_UNCONF; + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { + /* check current state directly from internal PHY */ + igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word); + if ((phy_word & E1000_PHY_PLL_UNCONF) + != E1000_PHY_PLL_UNCONF) { + ret_val = 0; + break; + } else { + ret_val = -E1000_ERR_PHY; + } + /* directly reset the internal PHY */ + ctrl = rd32(E1000_CTRL); + wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); + + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); + wr32(E1000_CTRL_EXT, ctrl_ext); + + wr32(E1000_WUC, 0); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); + wr32(E1000_EEARBC_I210, reg_val); + + igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + pci_word |= E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + usleep_range(1000, 2000); + pci_word &= ~E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); + wr32(E1000_EEARBC_I210, reg_val); + + /* restore WUC register */ + wr32(E1000_WUC, wuc); + } + igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0); + /* restore MDICNFG setting */ + wr32(E1000_MDICNFG, mdicnfg); + return ret_val; +} + +/** + * igb_get_cfg_done_i210 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * 0. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +s32 igb_get_cfg_done_i210(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + while (timeout) { + if (rd32(E1000_EEMNGCTL_I210) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) + hw_dbg("MNG configuration cycle has not completed.\n"); + + return 0; +} diff --git a/devices/igb/e1000_i210-6.1-ethercat.h b/devices/igb/e1000_i210-6.1-ethercat.h new file mode 100644 index 00000000..5c437fdc --- /dev/null +++ b/devices/igb/e1000_i210-6.1-ethercat.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_I210_H_ +#define _E1000_I210_H_ + +s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); +s32 igb_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver); +s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data); +s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); +s32 igb_init_nvm_params_i210(struct e1000_hw *hw); +bool igb_get_flash_presence_i210(struct e1000_hw *hw); +s32 igb_pll_workaround_i210(struct e1000_hw *hw); +s32 igb_get_cfg_done_i210(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 +#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 + +#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \ + (u8)((invm_dword) & 0x7) +#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \ + (u8)(((invm_dword) & 0x0000FE00) >> 9) +#define INVM_DWORD_TO_WORD_DATA(invm_dword) \ + (u16)(((invm_dword) & 0xFFFF0000) >> 16) + +enum E1000_INVM_STRUCTURE_TYPE { + E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00, + E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01, + E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02, + E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03, + E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04, + E1000_INVM_INVALIDATED_STRUCTURE = 0x0F, +}; + +#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 +#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 +#define E1000_INVM_ULT_BYTES_SIZE 8 +#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 +#define E1000_INVM_VER_FIELD_ONE 0x1FF8 +#define E1000_INVM_VER_FIELD_TWO 0x7FE000 +#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 + +#define E1000_INVM_MAJOR_MASK 0x3F0 +#define E1000_INVM_MINOR_MASK 0xF +#define E1000_INVM_MAJOR_SHIFT 4 + +#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_OFF2)) +#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +/* NVM offset defaults for i211 device */ +#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 +#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1 +#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 +#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C + +/* PLL Defines */ +#define E1000_PCI_PMCSR 0x44 +#define E1000_PCI_PMCSR_D3 0x03 +#define E1000_MAX_PLL_TRIES 5 +#define E1000_PHY_PLL_UNCONF 0xFF +#define E1000_PHY_PLL_FREQ_PAGE 0xFC +#define E1000_PHY_PLL_FREQ_REG 0x000E +#define E1000_INVM_DEFAULT_AL 0x202F +#define E1000_INVM_AUTOLOAD 0x0A +#define E1000_INVM_PLL_WO_VAL 0x0010 + +#endif diff --git a/devices/igb/e1000_i210-6.1-orig.c b/devices/igb/e1000_i210-6.1-orig.c new file mode 100644 index 00000000..b9b9d354 --- /dev/null +++ b/devices/igb/e1000_i210-6.1-orig.c @@ -0,0 +1,911 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +/* e1000_i210 + * e1000_i211 + */ + +#include +#include + +#include "e1000_hw.h" +#include "e1000_i210.h" + +static s32 igb_update_flash_i210(struct e1000_hw *hw); + +/** + * igb_get_hw_semaphore_i210 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + */ +static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._82575.clear_semaphore_once) { + hw->dev_spec._82575.clear_semaphore_once = false; + igb_put_hw_semaphore(hw); + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igb_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * igb_acquire_nvm_i210 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 igb_acquire_nvm_i210(struct e1000_hw *hw) +{ + return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_release_nvm_i210 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void igb_release_nvm_i210(struct e1000_hw *hw) +{ + igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. 
+ **/ +s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = 0; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + + while (i < timeout) { + if (igb_get_hw_semaphore_i210(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) */ + igb_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +out: + return ret_val; +} + +/** + * igb_release_swfw_sync_i210 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igb_get_hw_semaphore_i210(hw)) + ; /* Empty */ + + swfw_sync = rd32(E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +} + +/** + * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + **/ +static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (!(hw->nvm.ops.acquire(hw))) { + status = igb_read_nvm_eerd(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status) + break; + } + + return status; +} + +/** + * igb_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igb_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + **/ +static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, k, eewr = 0; + u32 attempts = 100000; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + (data[i] << E1000_NVM_RW_REG_DATA) | + E1000_NVM_RW_REG_START; + + wr32(E1000_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (E1000_NVM_RW_REG_DONE & + rd32(E1000_SRWR)) { + ret_val = 0; + break; + } + udelay(5); + } + + if (ret_val) { + hw_dbg("Shadow RAM write EEWR timed out\n"); + break; + } + } + +out: + return ret_val; +} + +/** + * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If e1000_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + **/ +static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (!(hw->nvm.ops.acquire(hw))) { + status = igb_write_nvm_srwr(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status) + break; + } + + return status; +} + +/** + * igb_read_invm_word_i210 - Reads OTP + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Reads 16-bit words from the OTP. Return error when the word is not + * stored in OTP. + **/ +static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) +{ + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u32 invm_dword; + u16 i; + u8 record_type, word_address; + + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = rd32(E1000_INVM_DATA_REG(i)); + /* Get record type */ + record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); + if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) + break; + if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) + i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) + i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { + word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); + if (word_address == address) { + *data = INVM_DWORD_TO_WORD_DATA(invm_dword); + hw_dbg("Read INVM Word 0x%02x = %x\n", + address, *data); + status = 0; + break; + } + } + } + if (status) + hw_dbg("Requested word 0x%02x not found in OTP\n", address); + return status; +} + +/** + * igb_read_invm_i210 - Read invm wrapper function for I210/I211 + * @hw: pointer to the HW structure + * @offset: offset to read from + * @words: number of words to read (unused) + * @data: pointer to the data read + * + * Wrapper function to return data formerly found in the NVM. 
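For reference, each 32-bit word-autoload record parsed by igb_read_invm_word_i210() packs the record type into bits 2:0, the target word address into bits 15:9 and the 16-bit payload into bits 31:16, which is exactly what the INVM_DWORD_TO_* macros in e1000_i210-6.1-ethercat.h extract. A standalone sketch of that decoding (illustrative only, not part of the patch; the macros are duplicated locally and the sample dword is made up):

#include <stdint.h>
#include <stdio.h>

/* Local copies of the INVM_DWORD_TO_* macros, for illustration only. */
#define RECORD_TYPE(dw)  ((uint8_t)((dw) & 0x7))
#define WORD_ADDRESS(dw) ((uint8_t)(((dw) & 0x0000FE00) >> 9))
#define WORD_DATA(dw)    ((uint16_t)(((dw) & 0xFFFF0000) >> 16))

int main(void)
{
        uint32_t dw = 0xA0CD0201; /* hypothetical word-autoload record */

        /* prints: type=1 addr=0x01 data=0xa0cd */
        printf("type=%u addr=0x%02x data=0x%04x\n",
               RECORD_TYPE(dw), WORD_ADDRESS(dw), WORD_DATA(dw));
        return 0;
}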
+ **/ +static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset, + u16 __always_unused words, u16 *data) +{ + s32 ret_val = 0; + + /* Only the MAC addr is required to be present in the iNVM */ + switch (offset) { + case NVM_MAC_ADDR: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]); + ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1, + &data[1]); + ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2, + &data[2]); + if (ret_val) + hw_dbg("MAC Addr not found in iNVM\n"); + break; + case NVM_INIT_CTRL_2: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_INIT_CTRL_2_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_INIT_CTRL_4: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_INIT_CTRL_4_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_LED_1_CFG: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_LED_1_CFG_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_LED_0_2_CFG: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_LED_0_2_CFG_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_ID_LED_SETTINGS: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = ID_LED_RESERVED_FFFF; + ret_val = 0; + } + break; + case NVM_SUB_DEV_ID: + *data = hw->subsystem_device_id; + break; + case NVM_SUB_VEN_ID: + *data = hw->subsystem_vendor_id; + break; + case NVM_DEV_ID: + *data = hw->device_id; + break; + case NVM_VEN_ID: + *data = hw->vendor_id; + break; + default: + hw_dbg("NVM word 0x%02x is not mapped.\n", offset); + *data = NVM_RESERVED_WORD; + break; + } + return ret_val; +} + +/** + * igb_read_invm_version - Reads iNVM version and image type + * @hw: pointer to the HW structure + * @invm_ver: version structure for the version read + * + * Reads iNVM version and image type. 
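The "first version location" branch of igb_read_invm_version() extracts a 10-bit version number from bits 12:3 of a record (E1000_INVM_VER_FIELD_ONE) and then splits it into major/minor parts using E1000_INVM_MAJOR_MASK and E1000_INVM_MINOR_MASK from the header above. A standalone sketch of that decoding with a made-up record value (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define VER_FIELD_ONE 0x1FF8  /* E1000_INVM_VER_FIELD_ONE: bits 12:3 */
#define MAJOR_MASK    0x3F0   /* E1000_INVM_MAJOR_MASK */
#define MAJOR_SHIFT   4       /* E1000_INVM_MAJOR_SHIFT */
#define MINOR_MASK    0xF     /* E1000_INVM_MINOR_MASK */

int main(void)
{
        uint32_t record = 0x90; /* hypothetical record encoding version 1.2 */
        uint16_t version = (record & VER_FIELD_ONE) >> 3;

        /* prints: iNVM version 1.2 */
        printf("iNVM version %u.%u\n",
               (version & MAJOR_MASK) >> MAJOR_SHIFT, version & MINOR_MASK);
        return 0;
}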
+ **/ +s32 igb_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver) { + u32 *record = NULL; + u32 *next_record = NULL; + u32 i = 0; + u32 invm_dword = 0; + u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE / + E1000_INVM_RECORD_SIZE_IN_BYTES); + u32 buffer[E1000_INVM_SIZE]; + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u16 version = 0; + + /* Read iNVM memory */ + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = rd32(E1000_INVM_DATA_REG(i)); + buffer[i] = invm_dword; + } + + /* Read version number */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have first version location used */ + if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) { + version = 0; + status = 0; + break; + } + /* Check if we have second version location used */ + else if ((i == 1) && + ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = 0; + break; + } + /* Check if we have odd version location + * used and it is the last one used + */ + else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) && + ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) && + (i != 1))) { + version = (*next_record & E1000_INVM_VER_FIELD_TWO) + >> 13; + status = 0; + break; + } + /* Check if we have even version location + * used and it is the last one used + */ + else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) && + ((*record & 0x3) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = 0; + break; + } + } + + if (!status) { + invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK) + >> E1000_INVM_MAJOR_SHIFT; + invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK; + } + /* Read Image Type */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have image type in first location used */ + if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) { + invm_ver->invm_img_type = 0; + status = 0; + break; + } + /* Check if we have image type in first location used */ + else if ((((*record & 0x3) == 0) && + ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) || + ((((*record & 0x3) != 0) && (i != 1)))) { + invm_ver->invm_img_type = + (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23; + status = 0; + break; + } + } + return status; +} + +/** + * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 status = 0; + s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); + + if (!(hw->nvm.ops.acquire(hw))) { + + /* Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igb_read_nvm_eerd; + + status = igb_validate_nvm_checksum(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * igb_update_nvm_checksum_i210 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. 
Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + **/ +static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val) { + hw_dbg("EEPROM read failed\n"); + goto out; + } + + if (!(hw->nvm.ops.acquire(hw))) { + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. + */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igb_update_flash_i210(hw); + } else { + ret_val = -E1000_ERR_SWFW_SYNC; + } +out: + return ret_val; +} + +/** + * igb_pool_flash_update_done_i210 - Pool FLUDONE status. + * @hw: pointer to the HW structure + * + **/ +static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_NVM; + u32 i, reg; + + for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { + reg = rd32(E1000_EECD); + if (reg & E1000_EECD_FLUDONE_I210) { + ret_val = 0; + break; + } + udelay(5); + } + + return ret_val; +} + +/** + * igb_get_flash_presence_i210 - Check if flash device is detected. + * @hw: pointer to the HW structure + * + **/ +bool igb_get_flash_presence_i210(struct e1000_hw *hw) +{ + u32 eec = 0; + bool ret_val = false; + + eec = rd32(E1000_EECD); + if (eec & E1000_EECD_FLASH_DETECTED_I210) + ret_val = true; + + return ret_val; +} + +/** + * igb_update_flash_i210 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + * + **/ +static s32 igb_update_flash_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 flup; + + ret_val = igb_pool_flash_update_done_i210(hw); + if (ret_val == -E1000_ERR_NVM) { + hw_dbg("Flash update time out\n"); + goto out; + } + + flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210; + wr32(E1000_EECD, flup); + + ret_val = igb_pool_flash_update_done_i210(hw); + if (ret_val) + hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); + +out: + return ret_val; +} + +/** + * igb_valid_led_default_i210 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
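The checksum convention behind igb_validate_nvm_checksum_i210() and igb_update_nvm_checksum_i210() above is the usual e1000 one: the 16-bit words up to and including the checksum word must sum, modulo 2^16, to NVM_SUM (0xBABA, as stated in the validate comment). A minimal host-side sketch of how the checksum word is derived (NVM_CHECKSUM_REG assumed to be 0x3F as in the common e1000 defines; illustrative only, not part of the patch):

#include <stdint.h>
#include <stddef.h>

#define NVM_SUM          0xBABA
#define NVM_CHECKSUM_REG 0x003F /* assumed offset of the checksum word */

/* Return the checksum word so that words 0..NVM_CHECKSUM_REG sum to NVM_SUM. */
static uint16_t nvm_checksum(const uint16_t words[NVM_CHECKSUM_REG])
{
        uint16_t sum = 0;
        size_t i;

        for (i = 0; i < NVM_CHECKSUM_REG; i++)
                sum += words[i];

        return (uint16_t)(NVM_SUM - sum);
}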
+ **/ +s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_I210_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT_I210; + break; + } + } +out: + return ret_val; +} + +/** + * __igb_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + **/ +static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val = 0; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * igb_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + **/ +s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) +{ + return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * igb_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + **/ +s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) +{ + return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * igb_init_nvm_params_i210 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +s32 igb_init_nvm_params_i210(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + + nvm->ops.acquire = igb_acquire_nvm_i210; + nvm->ops.release = igb_release_nvm_i210; + nvm->ops.valid_led_default = igb_valid_led_default_i210; + + /* NVM Function Pointers */ + if (igb_get_flash_presence_i210(hw)) { + hw->nvm.type = e1000_nvm_flash_hw; + nvm->ops.read = igb_read_nvm_srrd_i210; + nvm->ops.write = igb_write_nvm_srwr_i210; + nvm->ops.validate = igb_validate_nvm_checksum_i210; + nvm->ops.update = igb_update_nvm_checksum_i210; + } else { + hw->nvm.type = e1000_nvm_invm; + nvm->ops.read = igb_read_invm_i210; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return 0; +} + +/** + * igb_pll_workaround_i210 + * @hw: pointer to the HW structure + * + * Works around an errata in the PLL circuit where it occasionally + * provides the wrong clock frequency after power up. 
+ **/ +s32 igb_pll_workaround_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; + u16 nvm_word, phy_word, pci_word, tmp_nvm; + int i; + + /* Get and set needed register values */ + wuc = rd32(E1000_WUC); + mdicnfg = rd32(E1000_MDICNFG); + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; + wr32(E1000_MDICNFG, reg_val); + + /* Get data from NVM, or set default */ + ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, + &nvm_word); + if (ret_val) + nvm_word = E1000_INVM_DEFAULT_AL; + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE); + phy_word = E1000_PHY_PLL_UNCONF; + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { + /* check current state directly from internal PHY */ + igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word); + if ((phy_word & E1000_PHY_PLL_UNCONF) + != E1000_PHY_PLL_UNCONF) { + ret_val = 0; + break; + } else { + ret_val = -E1000_ERR_PHY; + } + /* directly reset the internal PHY */ + ctrl = rd32(E1000_CTRL); + wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); + + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); + wr32(E1000_CTRL_EXT, ctrl_ext); + + wr32(E1000_WUC, 0); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); + wr32(E1000_EEARBC_I210, reg_val); + + igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + pci_word |= E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + usleep_range(1000, 2000); + pci_word &= ~E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); + wr32(E1000_EEARBC_I210, reg_val); + + /* restore WUC register */ + wr32(E1000_WUC, wuc); + } + igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0); + /* restore MDICNFG setting */ + wr32(E1000_MDICNFG, mdicnfg); + return ret_val; +} + +/** + * igb_get_cfg_done_i210 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * 0. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +s32 igb_get_cfg_done_i210(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + while (timeout) { + if (rd32(E1000_EEMNGCTL_I210) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) + hw_dbg("MNG configuration cycle has not completed.\n"); + + return 0; +} diff --git a/devices/igb/e1000_i210-6.1-orig.h b/devices/igb/e1000_i210-6.1-orig.h new file mode 100644 index 00000000..5c437fdc --- /dev/null +++ b/devices/igb/e1000_i210-6.1-orig.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_I210_H_ +#define _E1000_I210_H_ + +s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); +s32 igb_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver); +s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data); +s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); +s32 igb_init_nvm_params_i210(struct e1000_hw *hw); +bool igb_get_flash_presence_i210(struct e1000_hw *hw); +s32 igb_pll_workaround_i210(struct e1000_hw *hw); +s32 igb_get_cfg_done_i210(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 +#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 + +#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \ + (u8)((invm_dword) & 0x7) +#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \ + (u8)(((invm_dword) & 0x0000FE00) >> 9) +#define INVM_DWORD_TO_WORD_DATA(invm_dword) \ + (u16)(((invm_dword) & 0xFFFF0000) >> 16) + +enum E1000_INVM_STRUCTURE_TYPE { + E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00, + E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01, + E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02, + E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03, + E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04, + E1000_INVM_INVALIDATED_STRUCTURE = 0x0F, +}; + +#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 +#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 +#define E1000_INVM_ULT_BYTES_SIZE 8 +#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 +#define E1000_INVM_VER_FIELD_ONE 0x1FF8 +#define E1000_INVM_VER_FIELD_TWO 0x7FE000 +#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 + +#define E1000_INVM_MAJOR_MASK 0x3F0 +#define E1000_INVM_MINOR_MASK 0xF +#define E1000_INVM_MAJOR_SHIFT 4 + +#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_OFF2)) +#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +/* NVM offset defaults for i211 device */ +#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 +#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1 +#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 +#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C + +/* PLL Defines */ +#define E1000_PCI_PMCSR 0x44 +#define E1000_PCI_PMCSR_D3 0x03 +#define E1000_MAX_PLL_TRIES 5 +#define E1000_PHY_PLL_UNCONF 0xFF +#define E1000_PHY_PLL_FREQ_PAGE 0xFC +#define E1000_PHY_PLL_FREQ_REG 0x000E +#define E1000_INVM_DEFAULT_AL 0x202F +#define E1000_INVM_AUTOLOAD 0x0A +#define E1000_INVM_PLL_WO_VAL 0x0010 + +#endif diff --git a/devices/igb/e1000_m-5.14-orig.c.c b/devices/igb/e1000_m-5.14-orig.c.c new file mode 100644 index 00000000..e63ee3cc --- /dev/null +++ b/devices/igb/e1000_m-5.14-orig.c.c @@ -0,0 +1,1681 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#include +#include +#include +#include +#include + +#include "e1000_mac.h" + +#include "igb.h" + +static s32 igb_set_default_fc(struct e1000_hw *hw); +static void igb_set_fc_watermarks(struct e1000_hw *hw); + +/** + * igb_get_bus_info_pcie - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. 
+ **/ +s32 igb_get_bus_info_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + u32 reg; + u16 pcie_link_status; + + bus->type = e1000_bus_type_pci_express; + + ret_val = igb_read_pcie_cap_reg(hw, + PCI_EXP_LNKSTA, + &pcie_link_status); + if (ret_val) { + bus->width = e1000_bus_width_unknown; + bus->speed = e1000_bus_speed_unknown; + } else { + switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) { + case PCI_EXP_LNKSTA_CLS_2_5GB: + bus->speed = e1000_bus_speed_2500; + break; + case PCI_EXP_LNKSTA_CLS_5_0GB: + bus->speed = e1000_bus_speed_5000; + break; + default: + bus->speed = e1000_bus_speed_unknown; + break; + } + + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT); + } + + reg = rd32(E1000_STATUS); + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; + + return 0; +} + +/** + * igb_clear_vfta - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void igb_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;) + hw->mac.ops.write_vfta(hw, offset, 0); +} + +/** + * igb_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + struct igb_adapter *adapter = hw->back; + + array_wr32(E1000_VFTA, offset, value); + wrfl(); + + adapter->shadow_vfta[offset] = value; +} + +/** + * igb_init_rx_addrs - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setups the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + **/ +void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ALEN] = {0}; + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igb_find_vlvf_slot - find the VLAN id or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vlvf_bypass: skip VLVF if no match is found + * + * return the VLVF index where this VLAN id should be placed + * + **/ +static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass) +{ + s32 regindex, first_empty_slot; + u32 bits; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* if vlvf_bypass is set we don't want to use an empty slot, we + * will simply bypass the VLVF if there are no entries present in the + * VLVF that contain our VLAN + */ + first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0; + + /* Search for the VLAN id in the VLVF entries. Save off the first empty + * slot found along the way. + * + * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 
1 + */ + for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) { + bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK; + if (bits == vlan) + return regindex; + if (!first_empty_slot && !bits) + first_empty_slot = regindex; + } + + return first_empty_slot ? : -E1000_ERR_NO_SPACE; +} + +/** + * igb_vfta_set - enable or disable vlan in VLAN filter table + * @hw: pointer to the HW structure + * @vlan: VLAN id to add or remove + * @vind: VMDq output index that maps queue to VLAN id + * @vlan_on: if true add filter, if false remove + * @vlvf_bypass: skip VLVF if no match is found + * + * Sets or clears a bit in the VLAN filter table array based on VLAN id + * and if we are adding or removing the filter + **/ +s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) +{ + struct igb_adapter *adapter = hw->back; + u32 regidx, vfta_delta, vfta, bits; + s32 vlvf_index; + + if ((vlan > 4095) || (vind > 7)) + return -E1000_ERR_PARAM; + + /* this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. + */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regidx = vlan / 32; + vfta_delta = BIT(vlan % 32); + vfta = adapter->shadow_vfta[regidx]; + + /* vfta_delta represents the difference between the current value + * of vfta and the value we want in the register. Since the diff + * is an XOR mask we can just update vfta using an XOR. + */ + vfta_delta &= vlan_on ? ~vfta : vfta; + vfta ^= vfta_delta; + + /* Part 2 + * If VT Mode is set + * Either vlan_on + * make sure the VLAN is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + if (!adapter->vfs_allocated_count) + goto vfta_update; + + vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass); + if (vlvf_index < 0) { + if (vlvf_bypass) + goto vfta_update; + return vlvf_index; + } + + bits = rd32(E1000_VLVF(vlvf_index)); + + /* set the pool bit */ + bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind); + if (vlan_on) + goto vlvf_update; + + /* clear the pool bit */ + bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind); + + if (!(bits & E1000_VLVF_POOLSEL_MASK)) { + /* Clear VFTA first, then disable VLVF. Otherwise + * we run the risk of stray packets leaking into + * the PF via the default pool + */ + if (vfta_delta) + hw->mac.ops.write_vfta(hw, regidx, vfta); + + /* disable VLVF and clear remaining bit from pool */ + wr32(E1000_VLVF(vlvf_index), 0); + + return 0; + } + + /* If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. 
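The VFTA addressing used by igb_vfta_set() — bits 11:5 of the VLAN id pick one of the 128 32-bit filter registers, bits 4:0 pick the bit inside it — can be checked with a tiny standalone helper (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* VLAN id -> (VFTA register index, bit position), mirroring igb_vfta_set(). */
static void vfta_slot(uint16_t vlan, uint32_t *regidx, uint32_t *bit)
{
        *regidx = vlan / 32; /* bits 11:5 of the VLAN id */
        *bit = vlan % 32;    /* bits 4:0 of the VLAN id  */
}

int main(void)
{
        uint32_t regidx, bit;

        vfta_slot(100, &regidx, &bit);
        printf("VLAN 100 -> VFTA[%u], bit %u\n", regidx, bit); /* VFTA[3], bit 4 */
        return 0;
}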
+ */ + vfta_delta = 0; + +vlvf_update: + /* record pool change and enable VLAN ID if not already enabled */ + wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE); + +vfta_update: + /* bit was set/cleared before we started */ + if (vfta_delta) + hw->mac.ops.write_vfta(hw, regidx, vfta); + + return 0; +} + +/** + * igb_check_alt_mac_addr - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is saved in the hw struct and + * programmed into RAR0 and the function returns success, otherwise the + * function returns an error. + **/ +s32 igb_check_alt_mac_addr(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val = 0; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ALEN]; + + /* Alternate MAC address is handled by the option ROM for 82580 + * and newer. SW support not required. + */ + if (hw->mac.type >= e1000_82580) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) + /* There is no Alternate MAC Address */ + goto out; + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; + if (hw->bus.func == E1000_FUNC_2) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2; + + if (hw->bus.func == E1000_FUNC_3) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; + for (i = 0; i < ETH_ALEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (is_multicast_ether_addr(alt_mac_addr)) { + hw_dbg("Ignoring Alternate Mac Address with MC bit set\n"); + goto out; + } + + /* We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0. + */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + +out: + return ret_val; +} + +/** + * igb_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. 
+ */ + wr32(E1000_RAL(index), rar_low); + wrfl(); + wr32(E1000_RAH(index), rar_high); + wrfl(); +} + +/** + * igb_mta_set - Set multicast filter table address + * @hw: pointer to the HW structure + * @hash_value: determines the MTA register and bit to set + * + * The multicast table address is a register array of 32-bit registers. + * The hash_value is used to determine what register the bit is in, the + * current value is read, the new bit is OR'd in and the new value is + * written back into the register. + **/ +void igb_mta_set(struct e1000_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg, mta; + + /* The MTA is a register array of 32-bit registers. It is + * treated like an array of (32*mta_reg_count) bits. We want to + * set bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The (hw->mac.mta_reg_count - 1) serves as a + * mask to bits 31:5 of the hash value which gives us the + * register we're modifying. The hash bit within that register + * is determined by the lower 5 bits of the hash value. + */ + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + mta = array_rd32(E1000_MTA, hash_reg); + + mta |= BIT(hash_bit); + + array_wr32(E1000_MTA, hash_reg, mta); + wrfl(); +} + +/** + * igb_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * igb_mta_set() + **/ +static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... 
+ * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * igb_i21x_hw_doublecheck - double checks potential HW issue in i21X + * @hw: pointer to the HW structure + * + * Checks if multicast array is wrote correctly + * If not then rewrites again to register + **/ +static void igb_i21x_hw_doublecheck(struct e1000_hw *hw) +{ + bool is_failed; + int i; + + do { + is_failed = false; + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) { + if (array_rd32(E1000_MTA, i) != hw->mac.mta_shadow[i]) { + is_failed = true; + array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); + break; + } + } + } while (is_failed); +} + +/** + * igb_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void igb_update_mc_addr_list(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { + hash_value = igb_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); + mc_addr_list += (ETH_ALEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) + igb_i21x_hw_doublecheck(hw); +} + +/** + * igb_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. + **/ +void igb_clear_hw_cntrs_base(struct e1000_hw *hw) +{ + rd32(E1000_CRCERRS); + rd32(E1000_SYMERRS); + rd32(E1000_MPC); + rd32(E1000_SCC); + rd32(E1000_ECOL); + rd32(E1000_MCC); + rd32(E1000_LATECOL); + rd32(E1000_COLC); + rd32(E1000_DC); + rd32(E1000_SEC); + rd32(E1000_RLEC); + rd32(E1000_XONRXC); + rd32(E1000_XONTXC); + rd32(E1000_XOFFRXC); + rd32(E1000_XOFFTXC); + rd32(E1000_FCRUC); + rd32(E1000_GPRC); + rd32(E1000_BPRC); + rd32(E1000_MPRC); + rd32(E1000_GPTC); + rd32(E1000_GORCL); + rd32(E1000_GORCH); + rd32(E1000_GOTCL); + rd32(E1000_GOTCH); + rd32(E1000_RNBC); + rd32(E1000_RUC); + rd32(E1000_RFC); + rd32(E1000_ROC); + rd32(E1000_RJC); + rd32(E1000_TORL); + rd32(E1000_TORH); + rd32(E1000_TOTL); + rd32(E1000_TOTH); + rd32(E1000_TPR); + rd32(E1000_TPT); + rd32(E1000_MPTC); + rd32(E1000_BPTC); +} + +/** + * igb_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. 
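The worked example in the igb_hash_mc_addr() comment above can be reproduced with a few lines of standalone C. This only checks the bit arithmetic for the 128-register (4096-bit, 0xFFF mask) MTA configuration and mc_filter_type 0; it is not part of the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint8_t mc_addr[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
        uint32_t hash_mask = (128 * 32) - 1; /* 0xFFF for 128 MTA registers */
        uint8_t bit_shift = 0;
        uint32_t hash;

        while ((hash_mask >> bit_shift) != 0xFF)
                bit_shift++; /* 4 for a 0xFFF mask */

        /* mc_filter_type 0: no additional shift */
        hash = hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
                            ((uint16_t)mc_addr[5] << bit_shift));

        printf("hash = 0x%03X\n", hash); /* 0x563, matching the comment */
        return 0;
}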
If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 igb_check_for_copper_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igb_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igb_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igb_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + return ret_val; +} + +/** + * igb_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 igb_setup_link(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (igb_check_reset_block(hw)) + goto out; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = igb_set_default_fc(hw); + if (ret_val) + goto out; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. 
+ */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(E1000_FCT, FLOW_CONTROL_TYPE); + wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(E1000_FCTTV, hw->fc.pause_time); + + igb_set_fc_watermarks(hw); + +out: + + return ret_val; +} + +/** + * igb_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void igb_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl; + + tctl = rd32(E1000_TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + wr32(E1000_TCTL, tctl); + wrfl(); +} + +/** + * igb_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * tansmission as well. + **/ +static void igb_set_fc_watermarks(struct e1000_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & e1000_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= E1000_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(E1000_FCRTL, fcrtl); + wr32(E1000_FCRTH, fcrth); +} + +/** + * igb_set_default_fc - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 igb_set_default_fc(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 lan_offset; + u16 nvm_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->mac.type == e1000_i350) + lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); + else + lan_offset = 0; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset, + 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + +out: + return ret_val; +} + +/** + * igb_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. 
TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +s32 igb_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val = 0; + + ctrl = rd32(E1000_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * frames but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case e1000_fc_none: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case e1000_fc_rx_pause: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case e1000_fc_tx_pause: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case e1000_fc_full: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + wr32(E1000_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * igb_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 igb_config_fc_after_link_up(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = igb_force_mac_fc(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = igb_force_mac_fc(hw); + } + + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. 
+ */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); + goto out; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == e1000_fc_none) || + (hw->fc.requested_mode == e1000_fc_tx_pause) || + (hw->fc.strict_ieee)) { + hw->fc.current_mode = e1000_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igb_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) + && mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. + */ + pcs_status_reg = rd32(E1000_PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { + hw_dbg("PCS Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (PCS_ANADV) and the Auto_Negotiation Base + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. 
+ */ + pcs_adv_reg = rd32(E1000_PCS_ANADV); + pcs_lp_ability_reg = rd32(E1000_PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base + * Page Ability Register (PCS_LPAB) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + hw_dbg("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. 
+ */ + pcs_ctrl_reg = rd32(E1000_PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; + wr32(E1000_PCS_LCTL, pcs_ctrl_reg); + + ret_val = igb_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + +out: + return ret_val; +} + +/** + * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igb_get_hw_semaphore - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 igb_get_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + s32 ret_val = 0; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igb_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void igb_put_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = rd32(E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + + wr32(E1000_SWSM, swsm); +} + +/** + * igb_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +s32 igb_get_auto_rd_done(struct e1000_hw *hw) +{ + s32 i = 0; + s32 ret_val = 0; + + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -E1000_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_valid_led_default - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_82575_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT; + break; + } + } +out: + return ret_val; +} + +/** + * igb_id_led_init - + * @hw: pointer to the HW structure + * + **/ +s32 igb_id_led_init(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + /* i210 and i211 devices have different LED mechanism */ + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) + ret_val = igb_valid_led_default_i210(hw, &data); + else + ret_val = igb_valid_led_default(hw, &data); + + if (ret_val) + goto out; + + mac->ledctl_default = rd32(E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + +out: + return ret_val; +} + +/** + * igb_cleanup_led - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 igb_cleanup_led(struct e1000_hw *hw) +{ + wr32(E1000_LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * igb_blink_led - Blink LED + * @hw: pointer to the HW structure + * + * Blink the led's which are set to be on. + **/ +s32 igb_blink_led(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. 
+ */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } + } + + wr32(E1000_LEDCTL, ledctl_blink); + + return 0; +} + +/** + * igb_led_off - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 igb_led_off(struct e1000_hw *hw) +{ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + wr32(E1000_LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return 0; +} + +/** + * igb_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (0) if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 igb_disable_pcie_master(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = 0; + + if (hw->bus.type != e1000_bus_type_pci_express) + goto out; + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + wr32(E1000_CTRL, ctrl); + + while (timeout) { + if (!(rd32(E1000_STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE)) + break; + udelay(100); + timeout--; + } + + if (!timeout) { + hw_dbg("Master requests are pending.\n"); + ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_validate_mdi_setting - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Verify that when not using auto-negotitation that MDI/MDIx is correctly + * set, which is forced to MDI mode only. + **/ +s32 igb_validate_mdi_setting(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* All MDI settings are supported on 82580 and newer. */ + if (hw->mac.type >= e1000_82580) + goto out; + + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { + hw_dbg("Invalid MDI setting detected\n"); + hw->phy.mdix = 1; + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_write_8bit_ctrl_reg - Write a 8bit CTRL register + * @hw: pointer to the HW structure + * @reg: 32bit register offset such as E1000_SCTL + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes an address/data control type register. There are several of these + * and they all have the format address << 8 | data and bit 31 is polled for + * completion. 
+ **/
+s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+			    u32 offset, u8 data)
+{
+	u32 i, regvalue = 0;
+	s32 ret_val = 0;
+
+	/* Set up the address and data */
+	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
+	wr32(reg, regvalue);
+
+	/* Poll the ready bit to see if the MDI read completed */
+	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
+		udelay(5);
+		regvalue = rd32(reg);
+		if (regvalue & E1000_GEN_CTL_READY)
+			break;
+	}
+	if (!(regvalue & E1000_GEN_CTL_READY)) {
+		hw_dbg("Reg %08x did not indicate ready\n", reg);
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_enable_mng_pass_thru - Enable processing of ARP's
+ * @hw: pointer to the HW structure
+ *
+ * Verifies the hardware needs to leave interface enabled so that frames can
+ * be directed to and from the management interface.
+ **/
+bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+	u32 manc;
+	u32 fwsm, factps;
+	bool ret_val = false;
+
+	if (!hw->mac.asf_firmware_present)
+		goto out;
+
+	manc = rd32(E1000_MANC);
+
+	if (!(manc & E1000_MANC_RCV_TCO_EN))
+		goto out;
+
+	if (hw->mac.arc_subsystem_valid) {
+		fwsm = rd32(E1000_FWSM);
+		factps = rd32(E1000_FACTPS);
+
+		if (!(factps & E1000_FACTPS_MNGCG) &&
+		    ((fwsm & E1000_FWSM_MODE_MASK) ==
+		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
+			ret_val = true;
+			goto out;
+		}
+	} else {
+		if ((manc & E1000_MANC_SMBUS_EN) &&
+		    !(manc & E1000_MANC_ASF_EN)) {
+			ret_val = true;
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
diff --git a/devices/igb/e1000_mac-5.14-ethercat.c b/devices/igb/e1000_mac-5.14-ethercat.c
new file mode 100644
index 00000000..529a78d3
--- /dev/null
+++ b/devices/igb/e1000_mac-5.14-ethercat.c
@@ -0,0 +1,1681 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "e1000_mac-5.14-ethercat.h"
+
+#include "igb-5.14-ethercat.h"
+
+static s32 igb_set_default_fc(struct e1000_hw *hw);
+static void igb_set_fc_watermarks(struct e1000_hw *hw);
+
+/**
+ * igb_get_bus_info_pcie - Get PCIe bus information
+ * @hw: pointer to the HW structure
+ *
+ * Determines and stores the system bus information for a particular
+ * network interface. The following bus information is determined and stored:
+ * bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val;
+	u32 reg;
+	u16 pcie_link_status;
+
+	bus->type = e1000_bus_type_pci_express;
+
+	ret_val = igb_read_pcie_cap_reg(hw,
+					PCI_EXP_LNKSTA,
+					&pcie_link_status);
+	if (ret_val) {
+		bus->width = e1000_bus_width_unknown;
+		bus->speed = e1000_bus_speed_unknown;
+	} else {
+		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
+		case PCI_EXP_LNKSTA_CLS_2_5GB:
+			bus->speed = e1000_bus_speed_2500;
+			break;
+		case PCI_EXP_LNKSTA_CLS_5_0GB:
+			bus->speed = e1000_bus_speed_5000;
+			break;
+		default:
+			bus->speed = e1000_bus_speed_unknown;
+			break;
+		}
+
+		bus->width = (enum e1000_bus_width)((pcie_link_status &
+						     PCI_EXP_LNKSTA_NLW) >>
+						     PCI_EXP_LNKSTA_NLW_SHIFT);
+	}
+
+	reg = rd32(E1000_STATUS);
+	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+
+	return 0;
+}
+
+/**
+ * igb_clear_vfta - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/ +void igb_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;) + hw->mac.ops.write_vfta(hw, offset, 0); +} + +/** + * igb_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + struct igb_adapter *adapter = hw->back; + + array_wr32(E1000_VFTA, offset, value); + wrfl(); + + adapter->shadow_vfta[offset] = value; +} + +/** + * igb_init_rx_addrs - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setups the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + **/ +void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ALEN] = {0}; + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igb_find_vlvf_slot - find the VLAN id or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vlvf_bypass: skip VLVF if no match is found + * + * return the VLVF index where this VLAN id should be placed + * + **/ +static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass) +{ + s32 regindex, first_empty_slot; + u32 bits; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* if vlvf_bypass is set we don't want to use an empty slot, we + * will simply bypass the VLVF if there are no entries present in the + * VLVF that contain our VLAN + */ + first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0; + + /* Search for the VLAN id in the VLVF entries. Save off the first empty + * slot found along the way. + * + * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1 + */ + for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) { + bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK; + if (bits == vlan) + return regindex; + if (!first_empty_slot && !bits) + first_empty_slot = regindex; + } + + return first_empty_slot ? : -E1000_ERR_NO_SPACE; +} + +/** + * igb_vfta_set - enable or disable vlan in VLAN filter table + * @hw: pointer to the HW structure + * @vlan: VLAN id to add or remove + * @vind: VMDq output index that maps queue to VLAN id + * @vlan_on: if true add filter, if false remove + * @vlvf_bypass: skip VLVF if no match is found + * + * Sets or clears a bit in the VLAN filter table array based on VLAN id + * and if we are adding or removing the filter + **/ +s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) +{ + struct igb_adapter *adapter = hw->back; + u32 regidx, vfta_delta, vfta, bits; + s32 vlvf_index; + + if ((vlan > 4095) || (vind > 7)) + return -E1000_ERR_PARAM; + + /* this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. 
+ */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regidx = vlan / 32; + vfta_delta = BIT(vlan % 32); + vfta = adapter->shadow_vfta[regidx]; + + /* vfta_delta represents the difference between the current value + * of vfta and the value we want in the register. Since the diff + * is an XOR mask we can just update vfta using an XOR. + */ + vfta_delta &= vlan_on ? ~vfta : vfta; + vfta ^= vfta_delta; + + /* Part 2 + * If VT Mode is set + * Either vlan_on + * make sure the VLAN is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + if (!adapter->vfs_allocated_count) + goto vfta_update; + + vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass); + if (vlvf_index < 0) { + if (vlvf_bypass) + goto vfta_update; + return vlvf_index; + } + + bits = rd32(E1000_VLVF(vlvf_index)); + + /* set the pool bit */ + bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind); + if (vlan_on) + goto vlvf_update; + + /* clear the pool bit */ + bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind); + + if (!(bits & E1000_VLVF_POOLSEL_MASK)) { + /* Clear VFTA first, then disable VLVF. Otherwise + * we run the risk of stray packets leaking into + * the PF via the default pool + */ + if (vfta_delta) + hw->mac.ops.write_vfta(hw, regidx, vfta); + + /* disable VLVF and clear remaining bit from pool */ + wr32(E1000_VLVF(vlvf_index), 0); + + return 0; + } + + /* If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + vfta_delta = 0; + +vlvf_update: + /* record pool change and enable VLAN ID if not already enabled */ + wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE); + +vfta_update: + /* bit was set/cleared before we started */ + if (vfta_delta) + hw->mac.ops.write_vfta(hw, regidx, vfta); + + return 0; +} + +/** + * igb_check_alt_mac_addr - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is saved in the hw struct and + * programmed into RAR0 and the function returns success, otherwise the + * function returns an error. + **/ +s32 igb_check_alt_mac_addr(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val = 0; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ALEN]; + + /* Alternate MAC address is handled by the option ROM for 82580 + * and newer. SW support not required. 
+ */ + if (hw->mac.type >= e1000_82580) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) + /* There is no Alternate MAC Address */ + goto out; + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; + if (hw->bus.func == E1000_FUNC_2) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2; + + if (hw->bus.func == E1000_FUNC_3) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; + for (i = 0; i < ETH_ALEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (is_multicast_ether_addr(alt_mac_addr)) { + hw_dbg("Ignoring Alternate Mac Address with MC bit set\n"); + goto out; + } + + /* We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0. + */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + +out: + return ret_val; +} + +/** + * igb_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + wr32(E1000_RAL(index), rar_low); + wrfl(); + wr32(E1000_RAH(index), rar_high); + wrfl(); +} + +/** + * igb_mta_set - Set multicast filter table address + * @hw: pointer to the HW structure + * @hash_value: determines the MTA register and bit to set + * + * The multicast table address is a register array of 32-bit registers. + * The hash_value is used to determine what register the bit is in, the + * current value is read, the new bit is OR'd in and the new value is + * written back into the register. + **/ +void igb_mta_set(struct e1000_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg, mta; + + /* The MTA is a register array of 32-bit registers. It is + * treated like an array of (32*mta_reg_count) bits. We want to + * set bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The (hw->mac.mta_reg_count - 1) serves as a + * mask to bits 31:5 of the hash value which gives us the + * register we're modifying. The hash bit within that register + * is determined by the lower 5 bits of the hash value. 
+ */ + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + mta = array_rd32(E1000_MTA, hash_reg); + + mta |= BIT(hash_bit); + + array_wr32(E1000_MTA, hash_reg, mta); + wrfl(); +} + +/** + * igb_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * igb_mta_set() + **/ +static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * igb_i21x_hw_doublecheck - double checks potential HW issue in i21X + * @hw: pointer to the HW structure + * + * Checks if multicast array is wrote correctly + * If not then rewrites again to register + **/ +static void igb_i21x_hw_doublecheck(struct e1000_hw *hw) +{ + bool is_failed; + int i; + + do { + is_failed = false; + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) { + if (array_rd32(E1000_MTA, i) != hw->mac.mta_shadow[i]) { + is_failed = true; + array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); + break; + } + } + } while (is_failed); +} + +/** + * igb_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. 
+ **/ +void igb_update_mc_addr_list(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { + hash_value = igb_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); + mc_addr_list += (ETH_ALEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) + igb_i21x_hw_doublecheck(hw); +} + +/** + * igb_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. + **/ +void igb_clear_hw_cntrs_base(struct e1000_hw *hw) +{ + rd32(E1000_CRCERRS); + rd32(E1000_SYMERRS); + rd32(E1000_MPC); + rd32(E1000_SCC); + rd32(E1000_ECOL); + rd32(E1000_MCC); + rd32(E1000_LATECOL); + rd32(E1000_COLC); + rd32(E1000_DC); + rd32(E1000_SEC); + rd32(E1000_RLEC); + rd32(E1000_XONRXC); + rd32(E1000_XONTXC); + rd32(E1000_XOFFRXC); + rd32(E1000_XOFFTXC); + rd32(E1000_FCRUC); + rd32(E1000_GPRC); + rd32(E1000_BPRC); + rd32(E1000_MPRC); + rd32(E1000_GPTC); + rd32(E1000_GORCL); + rd32(E1000_GORCH); + rd32(E1000_GOTCL); + rd32(E1000_GOTCH); + rd32(E1000_RNBC); + rd32(E1000_RUC); + rd32(E1000_RFC); + rd32(E1000_ROC); + rd32(E1000_RJC); + rd32(E1000_TORL); + rd32(E1000_TORH); + rd32(E1000_TOTL); + rd32(E1000_TOTH); + rd32(E1000_TPR); + rd32(E1000_TPT); + rd32(E1000_MPTC); + rd32(E1000_BPTC); +} + +/** + * igb_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 igb_check_for_copper_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igb_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igb_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. 
+ * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igb_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + return ret_val; +} + +/** + * igb_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 igb_setup_link(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (igb_check_reset_block(hw)) + goto out; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = igb_set_default_fc(hw); + if (ret_val) + goto out; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(E1000_FCT, FLOW_CONTROL_TYPE); + wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(E1000_FCTTV, hw->fc.pause_time); + + igb_set_fc_watermarks(hw); + +out: + + return ret_val; +} + +/** + * igb_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void igb_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl; + + tctl = rd32(E1000_TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + wr32(E1000_TCTL, tctl); + wrfl(); +} + +/** + * igb_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * tansmission as well. + **/ +static void igb_set_fc_watermarks(struct e1000_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. 
+ */ + if (hw->fc.current_mode & e1000_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= E1000_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(E1000_FCRTL, fcrtl); + wr32(E1000_FCRTH, fcrth); +} + +/** + * igb_set_default_fc - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 igb_set_default_fc(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 lan_offset; + u16 nvm_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->mac.type == e1000_i350) + lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); + else + lan_offset = 0; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset, + 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + +out: + return ret_val; +} + +/** + * igb_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +s32 igb_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val = 0; + + ctrl = rd32(E1000_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * frames but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. 
+ */ + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case e1000_fc_none: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case e1000_fc_rx_pause: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case e1000_fc_tx_pause: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case e1000_fc_full: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + wr32(E1000_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * igb_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 igb_config_fc_after_link_up(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = igb_force_mac_fc(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = igb_force_mac_fc(hw); + } + + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); + goto out; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. 
+ * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. 
+ * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == e1000_fc_none) || + (hw->fc.requested_mode == e1000_fc_tx_pause) || + (hw->fc.strict_ieee)) { + hw->fc.current_mode = e1000_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igb_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) + && mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. + */ + pcs_status_reg = rd32(E1000_PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { + hw_dbg("PCS Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (PCS_ANADV) and the Auto_Negotiation Base + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. + */ + pcs_adv_reg = rd32(E1000_PCS_ANADV); + pcs_lp_ability_reg = rd32(E1000_PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base + * Page Ability Register (PCS_LPAB) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. 
In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + hw_dbg("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + pcs_ctrl_reg = rd32(E1000_PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; + wr32(E1000_PCS_LCTL, pcs_ctrl_reg); + + ret_val = igb_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + +out: + return ret_val; +} + +/** + * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igb_get_hw_semaphore - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 igb_get_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + s32 ret_val = 0; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + /* Get the FW semaphore. 
*/ + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igb_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void igb_put_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = rd32(E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + + wr32(E1000_SWSM, swsm); +} + +/** + * igb_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +s32 igb_get_auto_rd_done(struct e1000_hw *hw) +{ + s32 i = 0; + s32 ret_val = 0; + + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -E1000_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_valid_led_default - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_82575_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT; + break; + } + } +out: + return ret_val; +} + +/** + * igb_id_led_init - + * @hw: pointer to the HW structure + * + **/ +s32 igb_id_led_init(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + /* i210 and i211 devices have different LED mechanism */ + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) + ret_val = igb_valid_led_default_i210(hw, &data); + else + ret_val = igb_valid_led_default(hw, &data); + + if (ret_val) + goto out; + + mac->ledctl_default = rd32(E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on 
<< (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + +out: + return ret_val; +} + +/** + * igb_cleanup_led - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 igb_cleanup_led(struct e1000_hw *hw) +{ + wr32(E1000_LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * igb_blink_led - Blink LED + * @hw: pointer to the HW structure + * + * Blink the led's which are set to be on. + **/ +s32 igb_blink_led(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } + } + + wr32(E1000_LEDCTL, ledctl_blink); + + return 0; +} + +/** + * igb_led_off - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 igb_led_off(struct e1000_hw *hw) +{ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + wr32(E1000_LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return 0; +} + +/** + * igb_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (0) if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 igb_disable_pcie_master(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = 0; + + if (hw->bus.type != e1000_bus_type_pci_express) + goto out; + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + wr32(E1000_CTRL, ctrl); + + while (timeout) { + if (!(rd32(E1000_STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE)) + break; + udelay(100); + timeout--; + } + + if (!timeout) { + hw_dbg("Master requests are pending.\n"); + ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_validate_mdi_setting - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Verify that when not using auto-negotitation that MDI/MDIx is correctly + * set, which is forced to MDI mode only. + **/ +s32 igb_validate_mdi_setting(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* All MDI settings are supported on 82580 and newer. 
*/ + if (hw->mac.type >= e1000_82580) + goto out; + + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { + hw_dbg("Invalid MDI setting detected\n"); + hw->phy.mdix = 1; + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_write_8bit_ctrl_reg - Write a 8bit CTRL register + * @hw: pointer to the HW structure + * @reg: 32bit register offset such as E1000_SCTL + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes an address/data control type register. There are several of these + * and they all have the format address << 8 | data and bit 31 is polled for + * completion. + **/ +s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data) +{ + u32 i, regvalue = 0; + s32 ret_val = 0; + + /* Set up the address and data */ + regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); + wr32(reg, regvalue); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { + udelay(5); + regvalue = rd32(reg); + if (regvalue & E1000_GEN_CTL_READY) + break; + } + if (!(regvalue & E1000_GEN_CTL_READY)) { + hw_dbg("Reg %08x did not indicate ready\n", reg); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + **/ +bool igb_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + bool ret_val = false; + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = rd32(E1000_MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = rd32(E1000_FWSM); + factps = rd32(E1000_FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else { + if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + } + +out: + return ret_val; +} diff --git a/devices/igb/e1000_mac-5.14-ethercat.h b/devices/igb/e1000_mac-5.14-ethercat.h new file mode 100644 index 00000000..15d5a8f8 --- /dev/null +++ b/devices/igb/e1000_mac-5.14-ethercat.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_MAC_H_ +#define _E1000_MAC_H_ + +#include "e1000_hw-5.14-ethercat.h" + +#include "e1000_phy-5.14-ethercat.h" +#include "e1000_nvm-5.14-ethercat.h" +#include "e1000_defines-5.14-ethercat.h" +#include "e1000_i210-5.14-ethercat.h" + +/* Functions that should not be called directly from drivers but can be used + * by other files in this 'shared code' + */ +s32 igb_blink_led(struct e1000_hw *hw); +s32 igb_check_for_copper_link(struct e1000_hw *hw); +s32 igb_cleanup_led(struct e1000_hw *hw); +s32 igb_config_fc_after_link_up(struct e1000_hw *hw); +s32 igb_disable_pcie_master(struct e1000_hw *hw); +s32 igb_force_mac_fc(struct e1000_hw *hw); +s32 igb_get_auto_rd_done(struct e1000_hw *hw); +s32 igb_get_bus_info_pcie(struct e1000_hw *hw); +s32 igb_get_hw_semaphore(struct e1000_hw *hw); +s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 igb_id_led_init(struct e1000_hw *hw); +s32 igb_led_off(struct e1000_hw *hw); +void igb_update_mc_addr_list(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 igb_setup_link(struct e1000_hw *hw); +s32 igb_validate_mdi_setting(struct e1000_hw *hw); +s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data); + +void igb_clear_hw_cntrs_base(struct e1000_hw *hw); +void igb_clear_vfta(struct e1000_hw *hw); +void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); +s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, u32 vind, + bool vlan_on, bool vlvf_bypass); +void igb_config_collision_dist(struct e1000_hw *hw); +void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +void igb_mta_set(struct e1000_hw *hw, u32 hash_value); +void igb_put_hw_semaphore(struct e1000_hw *hw); +void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +s32 igb_check_alt_mac_addr(struct e1000_hw *hw); + +bool igb_enable_mng_pass_thru(struct e1000_hw *hw); + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +void e1000_init_function_pointers_82575(struct e1000_hw *hw); + +#endif diff --git a/devices/igb/e1000_mac-5.14-orig.c b/devices/igb/e1000_mac-5.14-orig.c new file mode 100644 index 00000000..e63ee3cc --- /dev/null +++ b/devices/igb/e1000_mac-5.14-orig.c @@ -0,0 +1,1681 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#include +#include +#include +#include +#include + +#include "e1000_mac.h" + +#include "igb.h" + +static s32 igb_set_default_fc(struct e1000_hw *hw); +static void igb_set_fc_watermarks(struct e1000_hw *hw); + +/** + * igb_get_bus_info_pcie - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. 
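igb_get_bus_info_pcie() below only needs two fields of the PCIe Link Status register. A minimal standalone decode, assuming the usual PCI_EXP_LNKSTA field layout (the macro names here are local stand-ins, not the kernel's):

#include <stdio.h>

#define LNKSTA_CLS        0x000F    /* current link speed: 1 = 2.5 GT/s, 2 = 5 GT/s */
#define LNKSTA_NLW        0x03F0    /* negotiated link width */
#define LNKSTA_NLW_SHIFT  4

/* Decode the two fields the driver cares about; anything else is "unknown". */
static void decode_lnksta(unsigned int lnksta)
{
        static const char *speed[] = { "unknown", "2.5 GT/s", "5.0 GT/s" };
        unsigned int cls   = lnksta & LNKSTA_CLS;
        unsigned int width = (lnksta & LNKSTA_NLW) >> LNKSTA_NLW_SHIFT;

        printf("speed %s, x%u\n", cls < 3 ? speed[cls] : "unknown", width);
}

int main(void)
{
        decode_lnksta(0x0042);      /* CLS = 2, NLW = 4 -> "speed 5.0 GT/s, x4" */
        return 0;
}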
+ **/ +s32 igb_get_bus_info_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + u32 reg; + u16 pcie_link_status; + + bus->type = e1000_bus_type_pci_express; + + ret_val = igb_read_pcie_cap_reg(hw, + PCI_EXP_LNKSTA, + &pcie_link_status); + if (ret_val) { + bus->width = e1000_bus_width_unknown; + bus->speed = e1000_bus_speed_unknown; + } else { + switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) { + case PCI_EXP_LNKSTA_CLS_2_5GB: + bus->speed = e1000_bus_speed_2500; + break; + case PCI_EXP_LNKSTA_CLS_5_0GB: + bus->speed = e1000_bus_speed_5000; + break; + default: + bus->speed = e1000_bus_speed_unknown; + break; + } + + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT); + } + + reg = rd32(E1000_STATUS); + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; + + return 0; +} + +/** + * igb_clear_vfta - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void igb_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;) + hw->mac.ops.write_vfta(hw, offset, 0); +} + +/** + * igb_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + struct igb_adapter *adapter = hw->back; + + array_wr32(E1000_VFTA, offset, value); + wrfl(); + + adapter->shadow_vfta[offset] = value; +} + +/** + * igb_init_rx_addrs - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setups the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + **/ +void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ALEN] = {0}; + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igb_find_vlvf_slot - find the VLAN id or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vlvf_bypass: skip VLVF if no match is found + * + * return the VLVF index where this VLAN id should be placed + * + **/ +static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass) +{ + s32 regindex, first_empty_slot; + u32 bits; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* if vlvf_bypass is set we don't want to use an empty slot, we + * will simply bypass the VLVF if there are no entries present in the + * VLVF that contain our VLAN + */ + first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0; + + /* Search for the VLAN id in the VLVF entries. Save off the first empty + * slot found along the way. + * + * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 
1 + */ + for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) { + bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK; + if (bits == vlan) + return regindex; + if (!first_empty_slot && !bits) + first_empty_slot = regindex; + } + + return first_empty_slot ? : -E1000_ERR_NO_SPACE; +} + +/** + * igb_vfta_set - enable or disable vlan in VLAN filter table + * @hw: pointer to the HW structure + * @vlan: VLAN id to add or remove + * @vind: VMDq output index that maps queue to VLAN id + * @vlan_on: if true add filter, if false remove + * @vlvf_bypass: skip VLVF if no match is found + * + * Sets or clears a bit in the VLAN filter table array based on VLAN id + * and if we are adding or removing the filter + **/ +s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) +{ + struct igb_adapter *adapter = hw->back; + u32 regidx, vfta_delta, vfta, bits; + s32 vlvf_index; + + if ((vlan > 4095) || (vind > 7)) + return -E1000_ERR_PARAM; + + /* this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. + */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regidx = vlan / 32; + vfta_delta = BIT(vlan % 32); + vfta = adapter->shadow_vfta[regidx]; + + /* vfta_delta represents the difference between the current value + * of vfta and the value we want in the register. Since the diff + * is an XOR mask we can just update vfta using an XOR. + */ + vfta_delta &= vlan_on ? ~vfta : vfta; + vfta ^= vfta_delta; + + /* Part 2 + * If VT Mode is set + * Either vlan_on + * make sure the VLAN is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + if (!adapter->vfs_allocated_count) + goto vfta_update; + + vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass); + if (vlvf_index < 0) { + if (vlvf_bypass) + goto vfta_update; + return vlvf_index; + } + + bits = rd32(E1000_VLVF(vlvf_index)); + + /* set the pool bit */ + bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind); + if (vlan_on) + goto vlvf_update; + + /* clear the pool bit */ + bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind); + + if (!(bits & E1000_VLVF_POOLSEL_MASK)) { + /* Clear VFTA first, then disable VLVF. Otherwise + * we run the risk of stray packets leaking into + * the PF via the default pool + */ + if (vfta_delta) + hw->mac.ops.write_vfta(hw, regidx, vfta); + + /* disable VLVF and clear remaining bit from pool */ + wr32(E1000_VLVF(vlvf_index), 0); + + return 0; + } + + /* If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. 
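The register/bit arithmetic and the XOR-delta update used above are easy to check in isolation. A standalone sketch against a shadow table only (no hardware access, not part of the driver):

#include <stdio.h>

#define VFTA_ENTRIES 128            /* 128 x 32-bit registers cover VIDs 0..4095 */

static unsigned int shadow_vfta[VFTA_ENTRIES];

/* Locate the VFTA register/bit for a VLAN id and apply the change as an XOR
 * delta, so a bit that is already in the requested state is left untouched. */
static void vfta_set(unsigned int vid, int vlan_on)
{
        unsigned int regidx = vid / 32;           /* bits 11:5 of the VID */
        unsigned int delta  = 1u << (vid % 32);   /* bits 4:0 of the VID  */
        unsigned int vfta   = shadow_vfta[regidx];

        delta &= vlan_on ? ~vfta : vfta;          /* zero if nothing to change */
        shadow_vfta[regidx] = vfta ^ delta;
}

int main(void)
{
        vfta_set(100, 1);                         /* VID 100 -> register 3, bit 4 */
        printf("VFTA[3] = 0x%08X\n", shadow_vfta[3]);    /* 0x00000010 */
        vfta_set(100, 0);
        printf("VFTA[3] = 0x%08X\n", shadow_vfta[3]);    /* 0x00000000 */
        return 0;
}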
+ */ + vfta_delta = 0; + +vlvf_update: + /* record pool change and enable VLAN ID if not already enabled */ + wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE); + +vfta_update: + /* bit was set/cleared before we started */ + if (vfta_delta) + hw->mac.ops.write_vfta(hw, regidx, vfta); + + return 0; +} + +/** + * igb_check_alt_mac_addr - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is saved in the hw struct and + * programmed into RAR0 and the function returns success, otherwise the + * function returns an error. + **/ +s32 igb_check_alt_mac_addr(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val = 0; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ALEN]; + + /* Alternate MAC address is handled by the option ROM for 82580 + * and newer. SW support not required. + */ + if (hw->mac.type >= e1000_82580) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) + /* There is no Alternate MAC Address */ + goto out; + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; + if (hw->bus.func == E1000_FUNC_2) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2; + + if (hw->bus.func == E1000_FUNC_3) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; + for (i = 0; i < ETH_ALEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (is_multicast_ether_addr(alt_mac_addr)) { + hw_dbg("Ignoring Alternate Mac Address with MC bit set\n"); + goto out; + } + + /* We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0. + */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + +out: + return ret_val; +} + +/** + * igb_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. 
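The RAL/RAH packing described above is easier to follow with concrete bytes. A standalone sketch of the same packing (the example MAC address is arbitrary):

#include <stdio.h>

#define RAH_AV 0x80000000u                  /* mirrors E1000_RAH_AV (address valid) */

/* Pack the six MAC bytes little-endian into RAL (bytes 0-3) and RAH (bytes 4-5),
 * setting the valid bit only for a non-zero address. */
static void pack_rar(const unsigned char addr[6],
                     unsigned int *ral, unsigned int *rah)
{
        *ral = (unsigned int)addr[0] | ((unsigned int)addr[1] << 8) |
               ((unsigned int)addr[2] << 16) | ((unsigned int)addr[3] << 24);
        *rah = (unsigned int)addr[4] | ((unsigned int)addr[5] << 8);

        if (*ral || *rah)
                *rah |= RAH_AV;
}

int main(void)
{
        const unsigned char mac[6] = { 0x00, 0x1B, 0x21, 0x12, 0x34, 0x56 };
        unsigned int ral, rah;

        pack_rar(mac, &ral, &rah);
        printf("RAL = 0x%08X  RAH = 0x%08X\n", ral, rah);
        /* -> RAL = 0x12211B00  RAH = 0x80005634 */
        return 0;
}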
+ */ + wr32(E1000_RAL(index), rar_low); + wrfl(); + wr32(E1000_RAH(index), rar_high); + wrfl(); +} + +/** + * igb_mta_set - Set multicast filter table address + * @hw: pointer to the HW structure + * @hash_value: determines the MTA register and bit to set + * + * The multicast table address is a register array of 32-bit registers. + * The hash_value is used to determine what register the bit is in, the + * current value is read, the new bit is OR'd in and the new value is + * written back into the register. + **/ +void igb_mta_set(struct e1000_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg, mta; + + /* The MTA is a register array of 32-bit registers. It is + * treated like an array of (32*mta_reg_count) bits. We want to + * set bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The (hw->mac.mta_reg_count - 1) serves as a + * mask to bits 31:5 of the hash value which gives us the + * register we're modifying. The hash bit within that register + * is determined by the lower 5 bits of the hash value. + */ + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + mta = array_rd32(E1000_MTA, hash_reg); + + mta |= BIT(hash_bit); + + array_wr32(E1000_MTA, hash_reg, mta); + wrfl(); +} + +/** + * igb_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * igb_mta_set() + **/ +static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... 
+ * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * igb_i21x_hw_doublecheck - double checks potential HW issue in i21X + * @hw: pointer to the HW structure + * + * Checks if multicast array is wrote correctly + * If not then rewrites again to register + **/ +static void igb_i21x_hw_doublecheck(struct e1000_hw *hw) +{ + bool is_failed; + int i; + + do { + is_failed = false; + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) { + if (array_rd32(E1000_MTA, i) != hw->mac.mta_shadow[i]) { + is_failed = true; + array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); + break; + } + } + } while (is_failed); +} + +/** + * igb_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void igb_update_mc_addr_list(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { + hash_value = igb_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); + mc_addr_list += (ETH_ALEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) + igb_i21x_hw_doublecheck(hw); +} + +/** + * igb_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. + **/ +void igb_clear_hw_cntrs_base(struct e1000_hw *hw) +{ + rd32(E1000_CRCERRS); + rd32(E1000_SYMERRS); + rd32(E1000_MPC); + rd32(E1000_SCC); + rd32(E1000_ECOL); + rd32(E1000_MCC); + rd32(E1000_LATECOL); + rd32(E1000_COLC); + rd32(E1000_DC); + rd32(E1000_SEC); + rd32(E1000_RLEC); + rd32(E1000_XONRXC); + rd32(E1000_XONTXC); + rd32(E1000_XOFFRXC); + rd32(E1000_XOFFTXC); + rd32(E1000_FCRUC); + rd32(E1000_GPRC); + rd32(E1000_BPRC); + rd32(E1000_MPRC); + rd32(E1000_GPTC); + rd32(E1000_GORCL); + rd32(E1000_GORCH); + rd32(E1000_GOTCL); + rd32(E1000_GOTCH); + rd32(E1000_RNBC); + rd32(E1000_RUC); + rd32(E1000_RFC); + rd32(E1000_ROC); + rd32(E1000_RJC); + rd32(E1000_TORL); + rd32(E1000_TORH); + rd32(E1000_TOTL); + rd32(E1000_TOTH); + rd32(E1000_TPR); + rd32(E1000_TPT); + rd32(E1000_MPTC); + rd32(E1000_BPTC); +} + +/** + * igb_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. 
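The worked example in the igb_hash_mc_addr() comment above can be reproduced with a few lines. A standalone sketch of the same computation for a 128-register MTA (illustrative only, not part of the driver):

#include <stdio.h>

/* 128 MTA registers -> a 4096-bit vector, hash mask 0xFFF, base bit_shift 4. */
static unsigned int hash_mc_addr(const unsigned char *mc,
                                 unsigned int mta_reg_count,
                                 unsigned int filter_type)
{
        static const unsigned int extra[4] = { 0, 1, 2, 4 };
        unsigned int hash_mask = mta_reg_count * 32 - 1;
        unsigned int bit_shift = 0;

        while ((hash_mask >> bit_shift) != 0xFF)
                bit_shift++;
        bit_shift += extra[filter_type & 3];

        return hash_mask & ((mc[4] >> (8 - bit_shift)) |
                            ((unsigned int)mc[5] << bit_shift));
}

int main(void)
{
        const unsigned char mc[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
        unsigned int hash = hash_mc_addr(mc, 128, 0);

        /* 0x563 -> MTA register 0x563 >> 5 = 43, bit 0x563 & 0x1F = 3 */
        printf("hash = 0x%03X\n", hash);
        return 0;
}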
If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 igb_check_for_copper_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igb_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igb_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igb_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + return ret_val; +} + +/** + * igb_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 igb_setup_link(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (igb_check_reset_block(hw)) + goto out; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = igb_set_default_fc(hw); + if (ret_val) + goto out; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. 
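The three writes just below (E1000_FCT, E1000_FCAH, E1000_FCAL) program the frame template the MAC uses to match 802.3x PAUSE frames. A small sketch that unpacks them with the same little-endian layout as RAL/RAH, assuming the usual values of the FLOW_CONTROL_* constants from the e1000 defines:

#include <stdio.h>

/* Assumed values of the driver's flow-control constants. */
#define FLOW_CONTROL_ADDRESS_LOW  0x00C28001u
#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100u
#define FLOW_CONTROL_TYPE         0x8808u

int main(void)
{
        unsigned int lo = FLOW_CONTROL_ADDRESS_LOW;
        unsigned int hi = FLOW_CONTROL_ADDRESS_HIGH;

        /* Unpack with the same byte order as RAL/RAH above: */
        printf("PAUSE DA %02X:%02X:%02X:%02X:%02X:%02X, type 0x%04X\n",
               lo & 0xFF, (lo >> 8) & 0xFF, (lo >> 16) & 0xFF, (lo >> 24) & 0xFF,
               hi & 0xFF, (hi >> 8) & 0xFF, FLOW_CONTROL_TYPE);
        /* -> 01:80:C2:00:00:01 (reserved MAC-control multicast), type 0x8808 */
        return 0;
}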
+ */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(E1000_FCT, FLOW_CONTROL_TYPE); + wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(E1000_FCTTV, hw->fc.pause_time); + + igb_set_fc_watermarks(hw); + +out: + + return ret_val; +} + +/** + * igb_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void igb_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl; + + tctl = rd32(E1000_TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + wr32(E1000_TCTL, tctl); + wrfl(); +} + +/** + * igb_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * tansmission as well. + **/ +static void igb_set_fc_watermarks(struct e1000_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & e1000_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= E1000_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(E1000_FCRTL, fcrtl); + wr32(E1000_FCRTH, fcrth); +} + +/** + * igb_set_default_fc - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 igb_set_default_fc(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 lan_offset; + u16 nvm_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->mac.type == e1000_i350) + lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); + else + lan_offset = 0; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset, + 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + +out: + return ret_val; +} + +/** + * igb_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. 
TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +s32 igb_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val = 0; + + ctrl = rd32(E1000_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * frames but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case e1000_fc_none: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case e1000_fc_rx_pause: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case e1000_fc_tx_pause: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case e1000_fc_full: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + wr32(E1000_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * igb_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 igb_config_fc_after_link_up(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = igb_force_mac_fc(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = igb_force_mac_fc(hw); + } + + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. 
+ */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); + goto out; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == e1000_fc_none) || + (hw->fc.requested_mode == e1000_fc_tx_pause) || + (hw->fc.strict_ieee)) { + hw->fc.current_mode = e1000_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igb_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) + && mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. + */ + pcs_status_reg = rd32(E1000_PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { + hw_dbg("PCS Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (PCS_ANADV) and the Auto_Negotiation Base + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. 
+ */ + pcs_adv_reg = rd32(E1000_PCS_ANADV); + pcs_lp_ability_reg = rd32(E1000_PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base + * Page Ability Register (PCS_LPAB) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + hw_dbg("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. 
+ */ + pcs_ctrl_reg = rd32(E1000_PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; + wr32(E1000_PCS_LCTL, pcs_ctrl_reg); + + ret_val = igb_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + +out: + return ret_val; +} + +/** + * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igb_get_hw_semaphore - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 igb_get_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + s32 ret_val = 0; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igb_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void igb_put_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = rd32(E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + + wr32(E1000_SWSM, swsm); +} + +/** + * igb_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +s32 igb_get_auto_rd_done(struct e1000_hw *hw) +{ + s32 i = 0; + s32 ret_val = 0; + + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -E1000_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_valid_led_default - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_82575_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT; + break; + } + } +out: + return ret_val; +} + +/** + * igb_id_led_init - + * @hw: pointer to the HW structure + * + **/ +s32 igb_id_led_init(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + /* i210 and i211 devices have different LED mechanism */ + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) + ret_val = igb_valid_led_default_i210(hw, &data); + else + ret_val = igb_valid_led_default(hw, &data); + + if (ret_val) + goto out; + + mac->ledctl_default = rd32(E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + +out: + return ret_val; +} + +/** + * igb_cleanup_led - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 igb_cleanup_led(struct e1000_hw *hw) +{ + wr32(E1000_LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * igb_blink_led - Blink LED + * @hw: pointer to the HW structure + * + * Blink the led's which are set to be on. + **/ +s32 igb_blink_led(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. 
+ */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } + } + + wr32(E1000_LEDCTL, ledctl_blink); + + return 0; +} + +/** + * igb_led_off - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 igb_led_off(struct e1000_hw *hw) +{ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + wr32(E1000_LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return 0; +} + +/** + * igb_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (0) if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 igb_disable_pcie_master(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = 0; + + if (hw->bus.type != e1000_bus_type_pci_express) + goto out; + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + wr32(E1000_CTRL, ctrl); + + while (timeout) { + if (!(rd32(E1000_STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE)) + break; + udelay(100); + timeout--; + } + + if (!timeout) { + hw_dbg("Master requests are pending.\n"); + ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_validate_mdi_setting - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Verify that when not using auto-negotitation that MDI/MDIx is correctly + * set, which is forced to MDI mode only. + **/ +s32 igb_validate_mdi_setting(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* All MDI settings are supported on 82580 and newer. */ + if (hw->mac.type >= e1000_82580) + goto out; + + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { + hw_dbg("Invalid MDI setting detected\n"); + hw->phy.mdix = 1; + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_write_8bit_ctrl_reg - Write a 8bit CTRL register + * @hw: pointer to the HW structure + * @reg: 32bit register offset such as E1000_SCTL + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes an address/data control type register. There are several of these + * and they all have the format address << 8 | data and bit 31 is polled for + * completion. 
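The address/data layout described above packs into a single 32-bit word. A tiny stand-alone sketch of the packing and the ready test, using illustrative constants (CTRL_ADDRESS_SHIFT, CTRL_READY) that only assume the "address << 8 | data" layout and bit-31 ready flag stated in the comment:

#include <stdint.h>
#include <stdio.h>

#define CTRL_ADDRESS_SHIFT  8            /* address occupies bits 15:8 */
#define CTRL_READY          (1u << 31)   /* hardware sets this when done */

int main(void)
{
        uint8_t  data   = 0x5A;
        uint32_t offset = 0x03;
        uint32_t regvalue = (uint32_t)data | (offset << CTRL_ADDRESS_SHIFT);

        printf("write value: 0x%08x\n", regvalue);              /* 0x0000035a */
        printf("ready yet:   %u\n", !!(regvalue & CTRL_READY)); /* 0 until HW sets bit 31 */
        return 0;
}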
+ **/ +s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data) +{ + u32 i, regvalue = 0; + s32 ret_val = 0; + + /* Set up the address and data */ + regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); + wr32(reg, regvalue); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { + udelay(5); + regvalue = rd32(reg); + if (regvalue & E1000_GEN_CTL_READY) + break; + } + if (!(regvalue & E1000_GEN_CTL_READY)) { + hw_dbg("Reg %08x did not indicate ready\n", reg); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + **/ +bool igb_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + bool ret_val = false; + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = rd32(E1000_MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = rd32(E1000_FWSM); + factps = rd32(E1000_FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else { + if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + } + +out: + return ret_val; +} diff --git a/devices/igb/e1000_mac-5.14-orig.h b/devices/igb/e1000_mac-5.14-orig.h new file mode 100644 index 00000000..6e110f28 --- /dev/null +++ b/devices/igb/e1000_mac-5.14-orig.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_MAC_H_ +#define _E1000_MAC_H_ + +#include "e1000_hw.h" + +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_defines.h" +#include "e1000_i210.h" + +/* Functions that should not be called directly from drivers but can be used + * by other files in this 'shared code' + */ +s32 igb_blink_led(struct e1000_hw *hw); +s32 igb_check_for_copper_link(struct e1000_hw *hw); +s32 igb_cleanup_led(struct e1000_hw *hw); +s32 igb_config_fc_after_link_up(struct e1000_hw *hw); +s32 igb_disable_pcie_master(struct e1000_hw *hw); +s32 igb_force_mac_fc(struct e1000_hw *hw); +s32 igb_get_auto_rd_done(struct e1000_hw *hw); +s32 igb_get_bus_info_pcie(struct e1000_hw *hw); +s32 igb_get_hw_semaphore(struct e1000_hw *hw); +s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 igb_id_led_init(struct e1000_hw *hw); +s32 igb_led_off(struct e1000_hw *hw); +void igb_update_mc_addr_list(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 igb_setup_link(struct e1000_hw *hw); +s32 igb_validate_mdi_setting(struct e1000_hw *hw); +s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data); + +void igb_clear_hw_cntrs_base(struct e1000_hw *hw); +void igb_clear_vfta(struct e1000_hw *hw); +void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); +s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, u32 vind, + bool vlan_on, bool vlvf_bypass); +void igb_config_collision_dist(struct e1000_hw *hw); +void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +void igb_mta_set(struct e1000_hw *hw, u32 hash_value); +void igb_put_hw_semaphore(struct e1000_hw *hw); +void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +s32 igb_check_alt_mac_addr(struct e1000_hw *hw); + +bool igb_enable_mng_pass_thru(struct e1000_hw *hw); + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +void e1000_init_function_pointers_82575(struct e1000_hw *hw); + +#endif diff --git a/devices/igb/e1000_mac-6.1-ethercat.c b/devices/igb/e1000_mac-6.1-ethercat.c new file mode 100644 index 00000000..c48b4161 --- /dev/null +++ b/devices/igb/e1000_mac-6.1-ethercat.c @@ -0,0 +1,1685 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#include +#include +#include +#include +#include + +#include "e1000_mac-6.1-ethercat.h" + +#include "igb-6.1-ethercat.h" + +static s32 igb_set_default_fc(struct e1000_hw *hw); +static void igb_set_fc_watermarks(struct e1000_hw *hw); + +/** + * igb_get_bus_info_pcie - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. 
+ **/ +s32 igb_get_bus_info_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + u32 reg; + u16 pcie_link_status; + + bus->type = e1000_bus_type_pci_express; + + ret_val = igb_read_pcie_cap_reg(hw, + PCI_EXP_LNKSTA, + &pcie_link_status); + if (ret_val) { + bus->width = e1000_bus_width_unknown; + bus->speed = e1000_bus_speed_unknown; + } else { + switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) { + case PCI_EXP_LNKSTA_CLS_2_5GB: + bus->speed = e1000_bus_speed_2500; + break; + case PCI_EXP_LNKSTA_CLS_5_0GB: + bus->speed = e1000_bus_speed_5000; + break; + default: + bus->speed = e1000_bus_speed_unknown; + break; + } + + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT); + } + + reg = rd32(E1000_STATUS); + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; + + return 0; +} + +/** + * igb_clear_vfta - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void igb_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;) + hw->mac.ops.write_vfta(hw, offset, 0); +} + +/** + * igb_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + struct igb_adapter *adapter = hw->back; + + array_wr32(E1000_VFTA, offset, value); + wrfl(); + + adapter->shadow_vfta[offset] = value; +} + +/** + * igb_init_rx_addrs - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setups the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + **/ +void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ALEN] = {0}; + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igb_find_vlvf_slot - find the VLAN id or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vlvf_bypass: skip VLVF if no match is found + * + * return the VLVF index where this VLAN id should be placed + * + **/ +static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass) +{ + s32 regindex, first_empty_slot; + u32 bits; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* if vlvf_bypass is set we don't want to use an empty slot, we + * will simply bypass the VLVF if there are no entries present in the + * VLVF that contain our VLAN + */ + first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0; + + /* Search for the VLAN id in the VLVF entries. Save off the first empty + * slot found along the way. + * + * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 
1 + */ + for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) { + bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK; + if (bits == vlan) + return regindex; + if (!first_empty_slot && !bits) + first_empty_slot = regindex; + } + + return first_empty_slot ? : -E1000_ERR_NO_SPACE; +} + +/** + * igb_vfta_set - enable or disable vlan in VLAN filter table + * @hw: pointer to the HW structure + * @vlan: VLAN id to add or remove + * @vind: VMDq output index that maps queue to VLAN id + * @vlan_on: if true add filter, if false remove + * @vlvf_bypass: skip VLVF if no match is found + * + * Sets or clears a bit in the VLAN filter table array based on VLAN id + * and if we are adding or removing the filter + **/ +s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) +{ + struct igb_adapter *adapter = hw->back; + u32 regidx, vfta_delta, vfta, bits; + s32 vlvf_index; + + if ((vlan > 4095) || (vind > 7)) + return -E1000_ERR_PARAM; + + /* this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. + */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regidx = vlan / 32; + vfta_delta = BIT(vlan % 32); + vfta = adapter->shadow_vfta[regidx]; + + /* vfta_delta represents the difference between the current value + * of vfta and the value we want in the register. Since the diff + * is an XOR mask we can just update vfta using an XOR. + */ + vfta_delta &= vlan_on ? ~vfta : vfta; + vfta ^= vfta_delta; + + /* Part 2 + * If VT Mode is set + * Either vlan_on + * make sure the VLAN is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + if (!adapter->vfs_allocated_count) + goto vfta_update; + + vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass); + if (vlvf_index < 0) { + if (vlvf_bypass) + goto vfta_update; + return vlvf_index; + } + + bits = rd32(E1000_VLVF(vlvf_index)); + + /* set the pool bit */ + bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind); + if (vlan_on) + goto vlvf_update; + + /* clear the pool bit */ + bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind); + + if (!(bits & E1000_VLVF_POOLSEL_MASK)) { + /* Clear VFTA first, then disable VLVF. Otherwise + * we run the risk of stray packets leaking into + * the PF via the default pool + */ + if (vfta_delta) + hw->mac.ops.write_vfta(hw, regidx, vfta); + + /* disable VLVF and clear remaining bit from pool */ + wr32(E1000_VLVF(vlvf_index), 0); + + return 0; + } + + /* If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. 
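The VFTA indexing described in Part 1 above (bits 11:5 pick one of the 128 registers, bits 4:0 pick the bit within it) can be checked in isolation; a small sketch, independent of the driver:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t vlan   = 100;               /* example VLAN id */
        uint32_t regidx = vlan / 32;         /* which 32-bit VFTA register */
        uint32_t bit    = vlan % 32;         /* which bit inside it */
        uint32_t mask   = 1u << bit;

        printf("VLAN %u -> VFTA[%u], bit %u (mask 0x%08x)\n",
               vlan, regidx, bit, mask);     /* VLAN 100 -> VFTA[3], bit 4 */
        return 0;
}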
+ */ + vfta_delta = 0; + +vlvf_update: + /* record pool change and enable VLAN ID if not already enabled */ + wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE); + +vfta_update: + /* bit was set/cleared before we started */ + if (vfta_delta) + hw->mac.ops.write_vfta(hw, regidx, vfta); + + return 0; +} + +/** + * igb_check_alt_mac_addr - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is saved in the hw struct and + * programmed into RAR0 and the function returns success, otherwise the + * function returns an error. + **/ +s32 igb_check_alt_mac_addr(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val = 0; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ALEN]; + + /* Alternate MAC address is handled by the option ROM for 82580 + * and newer. SW support not required. + */ + if (hw->mac.type >= e1000_82580) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) + /* There is no Alternate MAC Address */ + goto out; + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; + if (hw->bus.func == E1000_FUNC_2) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2; + + if (hw->bus.func == E1000_FUNC_3) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; + for (i = 0; i < ETH_ALEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (is_multicast_ether_addr(alt_mac_addr)) { + hw_dbg("Ignoring Alternate Mac Address with MC bit set\n"); + goto out; + } + + /* We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0. + */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + +out: + return ret_val; +} + +/** + * igb_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. 
+ */ + wr32(E1000_RAL(index), rar_low); + wrfl(); + wr32(E1000_RAH(index), rar_high); + wrfl(); +} + +/** + * igb_mta_set - Set multicast filter table address + * @hw: pointer to the HW structure + * @hash_value: determines the MTA register and bit to set + * + * The multicast table address is a register array of 32-bit registers. + * The hash_value is used to determine what register the bit is in, the + * current value is read, the new bit is OR'd in and the new value is + * written back into the register. + **/ +void igb_mta_set(struct e1000_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg, mta; + + /* The MTA is a register array of 32-bit registers. It is + * treated like an array of (32*mta_reg_count) bits. We want to + * set bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The (hw->mac.mta_reg_count - 1) serves as a + * mask to bits 31:5 of the hash value which gives us the + * register we're modifying. The hash bit within that register + * is determined by the lower 5 bits of the hash value. + */ + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + mta = array_rd32(E1000_MTA, hash_reg); + + mta |= BIT(hash_bit); + + array_wr32(E1000_MTA, hash_reg, mta); + wrfl(); +} + +/** + * igb_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * igb_mta_set() + **/ +static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 1; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF && bit_shift < 4) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... 
+ * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * igb_i21x_hw_doublecheck - double checks potential HW issue in i21X + * @hw: pointer to the HW structure + * + * Checks if multicast array is wrote correctly + * If not then rewrites again to register + **/ +static void igb_i21x_hw_doublecheck(struct e1000_hw *hw) +{ + int failed_cnt = 3; + bool is_failed; + int i; + + do { + is_failed = false; + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) { + if (array_rd32(E1000_MTA, i) != hw->mac.mta_shadow[i]) { + is_failed = true; + array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); + } + } + if (is_failed && --failed_cnt <= 0) { + hw_dbg("Failed to update MTA_REGISTER, too many retries"); + break; + } + } while (is_failed); +} + +/** + * igb_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void igb_update_mc_addr_list(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { + hash_value = igb_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); + mc_addr_list += (ETH_ALEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) + igb_i21x_hw_doublecheck(hw); +} + +/** + * igb_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. 
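The worked example in igb_hash_mc_addr() above (destination MAC 01:AA:00:12:34:56, 128 MTA registers, filter type 0) can be reproduced on its own. A stand-alone sketch that should print 0x563, matching case 0 of the comment:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t  mc_addr[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
        uint32_t mta_reg_count = 128;
        uint32_t hash_mask = mta_reg_count * 32 - 1;   /* 0xFFF */
        uint32_t bit_shift = 1;
        uint32_t hash;

        /* Filter type 0: shift until 0xFF fits under the mask (capped at 4). */
        while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
                bit_shift++;

        hash = hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
                            ((uint16_t)mc_addr[5] << bit_shift));

        printf("hash = 0x%03X\n", hash);               /* prints hash = 0x563 */
        return 0;
}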
+ **/ +void igb_clear_hw_cntrs_base(struct e1000_hw *hw) +{ + rd32(E1000_CRCERRS); + rd32(E1000_SYMERRS); + rd32(E1000_MPC); + rd32(E1000_SCC); + rd32(E1000_ECOL); + rd32(E1000_MCC); + rd32(E1000_LATECOL); + rd32(E1000_COLC); + rd32(E1000_DC); + rd32(E1000_SEC); + rd32(E1000_RLEC); + rd32(E1000_XONRXC); + rd32(E1000_XONTXC); + rd32(E1000_XOFFRXC); + rd32(E1000_XOFFTXC); + rd32(E1000_FCRUC); + rd32(E1000_GPRC); + rd32(E1000_BPRC); + rd32(E1000_MPRC); + rd32(E1000_GPTC); + rd32(E1000_GORCL); + rd32(E1000_GORCH); + rd32(E1000_GOTCL); + rd32(E1000_GOTCH); + rd32(E1000_RNBC); + rd32(E1000_RUC); + rd32(E1000_RFC); + rd32(E1000_ROC); + rd32(E1000_RJC); + rd32(E1000_TORL); + rd32(E1000_TORH); + rd32(E1000_TOTL); + rd32(E1000_TOTH); + rd32(E1000_TPR); + rd32(E1000_TPT); + rd32(E1000_MPTC); + rd32(E1000_BPTC); +} + +/** + * igb_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 igb_check_for_copper_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igb_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igb_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igb_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + return ret_val; +} + +/** + * igb_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 igb_setup_link(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (igb_check_reset_block(hw)) + goto out; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. 
+ */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = igb_set_default_fc(hw); + if (ret_val) + goto out; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(E1000_FCT, FLOW_CONTROL_TYPE); + wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(E1000_FCTTV, hw->fc.pause_time); + + igb_set_fc_watermarks(hw); + +out: + + return ret_val; +} + +/** + * igb_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void igb_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl; + + tctl = rd32(E1000_TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + wr32(E1000_TCTL, tctl); + wrfl(); +} + +/** + * igb_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * tansmission as well. + **/ +static void igb_set_fc_watermarks(struct e1000_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & e1000_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= E1000_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(E1000_FCRTL, fcrtl); + wr32(E1000_FCRTH, fcrth); +} + +/** + * igb_set_default_fc - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 igb_set_default_fc(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 lan_offset; + u16 nvm_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. 
+ */ + if (hw->mac.type == e1000_i350) + lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); + else + lan_offset = 0; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset, + 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + +out: + return ret_val; +} + +/** + * igb_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +s32 igb_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val = 0; + + ctrl = rd32(E1000_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case e1000_fc_none: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case e1000_fc_rx_pause: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case e1000_fc_tx_pause: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case e1000_fc_full: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + wr32(E1000_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * igb_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 igb_config_fc_after_link_up(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. 
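The fc-parameter list in igb_force_mac_fc() above maps each mode onto exactly two enable bits in the device control register (one for receiving PAUSE frames, one for transmitting them). A minimal sketch of that mapping, using illustrative bit names (CTRL_RFCE, CTRL_TFCE) in place of the real register constants:

#include <stdint.h>
#include <stdio.h>

#define CTRL_RFCE (1u << 0)   /* receive pause frames  (illustrative bit) */
#define CTRL_TFCE (1u << 1)   /* transmit pause frames (illustrative bit) */

/* fc mode: 0 = none, 1 = rx_pause, 2 = tx_pause, 3 = full, as listed above */
static uint32_t apply_fc_mode(uint32_t ctrl, unsigned int fc)
{
        ctrl &= ~(CTRL_RFCE | CTRL_TFCE);      /* start from "disabled" */
        if (fc == 1 || fc == 3)
                ctrl |= CTRL_RFCE;             /* may receive PAUSE frames */
        if (fc == 2 || fc == 3)
                ctrl |= CTRL_TFCE;             /* may send PAUSE frames */
        return ctrl;
}

int main(void)
{
        unsigned int fc;

        for (fc = 0; fc < 4; fc++)
                printf("fc=%u -> ctrl=0x%x\n", fc, apply_fc_mode(0, fc));
        return 0;
}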
+ */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = igb_force_mac_fc(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = igb_force_mac_fc(hw); + } + + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); + goto out; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == e1000_fc_none) || + (hw->fc.requested_mode == e1000_fc_tx_pause) || + (hw->fc.strict_ieee)) { + hw->fc.current_mode = e1000_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igb_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) + && mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. 
+ */ + pcs_status_reg = rd32(E1000_PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { + hw_dbg("PCS Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (PCS_ANADV) and the Auto_Negotiation Base + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. + */ + pcs_adv_reg = rd32(E1000_PCS_ANADV); + pcs_lp_ability_reg = rd32(E1000_PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base + * Page Ability Register (PCS_LPAB) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + hw_dbg("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. 
+ */ + hw->fc.current_mode = e1000_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + pcs_ctrl_reg = rd32(E1000_PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; + wr32(E1000_PCS_LCTL, pcs_ctrl_reg); + + ret_val = igb_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + +out: + return ret_val; +} + +/** + * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igb_get_hw_semaphore - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 igb_get_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + s32 ret_val = 0; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igb_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void igb_put_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = rd32(E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + + wr32(E1000_SWSM, swsm); +} + +/** + * igb_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +s32 igb_get_auto_rd_done(struct e1000_hw *hw) +{ + s32 i = 0; + s32 ret_val = 0; + + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -E1000_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_valid_led_default - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. 
If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_82575_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT; + break; + } + } +out: + return ret_val; +} + +/** + * igb_id_led_init - + * @hw: pointer to the HW structure + * + **/ +s32 igb_id_led_init(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + /* i210 and i211 devices have different LED mechanism */ + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) + ret_val = igb_valid_led_default_i210(hw, &data); + else + ret_val = igb_valid_led_default(hw, &data); + + if (ret_val) + goto out; + + mac->ledctl_default = rd32(E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + +out: + return ret_val; +} + +/** + * igb_cleanup_led - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 igb_cleanup_led(struct e1000_hw *hw) +{ + wr32(E1000_LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * igb_blink_led - Blink LED + * @hw: pointer to the HW structure + * + * Blink the led's which are set to be on. + **/ +s32 igb_blink_led(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. 
+ */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } + } + + wr32(E1000_LEDCTL, ledctl_blink); + + return 0; +} + +/** + * igb_led_off - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 igb_led_off(struct e1000_hw *hw) +{ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + wr32(E1000_LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return 0; +} + +/** + * igb_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (0) if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 igb_disable_pcie_master(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = 0; + + if (hw->bus.type != e1000_bus_type_pci_express) + goto out; + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + wr32(E1000_CTRL, ctrl); + + while (timeout) { + if (!(rd32(E1000_STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE)) + break; + udelay(100); + timeout--; + } + + if (!timeout) { + hw_dbg("Master requests are pending.\n"); + ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_validate_mdi_setting - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Verify that when not using auto-negotitation that MDI/MDIx is correctly + * set, which is forced to MDI mode only. + **/ +s32 igb_validate_mdi_setting(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* All MDI settings are supported on 82580 and newer. */ + if (hw->mac.type >= e1000_82580) + goto out; + + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { + hw_dbg("Invalid MDI setting detected\n"); + hw->phy.mdix = 1; + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_write_8bit_ctrl_reg - Write a 8bit CTRL register + * @hw: pointer to the HW structure + * @reg: 32bit register offset such as E1000_SCTL + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes an address/data control type register. There are several of these + * and they all have the format address << 8 | data and bit 31 is polled for + * completion. 
+ **/ +s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data) +{ + u32 i, regvalue = 0; + s32 ret_val = 0; + + /* Set up the address and data */ + regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); + wr32(reg, regvalue); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { + udelay(5); + regvalue = rd32(reg); + if (regvalue & E1000_GEN_CTL_READY) + break; + } + if (!(regvalue & E1000_GEN_CTL_READY)) { + hw_dbg("Reg %08x did not indicate ready\n", reg); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + **/ +bool igb_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + bool ret_val = false; + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = rd32(E1000_MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = rd32(E1000_FWSM); + factps = rd32(E1000_FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else { + if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + } + +out: + return ret_val; +} diff --git a/devices/igb/e1000_mac-6.1-ethercat.h b/devices/igb/e1000_mac-6.1-ethercat.h new file mode 100644 index 00000000..d5af6c91 --- /dev/null +++ b/devices/igb/e1000_mac-6.1-ethercat.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/
+
+#ifndef _E1000_MAC_H_
+#define _E1000_MAC_H_
+
+#include "e1000_hw-6.1-ethercat.h"
+
+#include "e1000_phy-6.1-ethercat.h"
+#include "e1000_nvm-6.1-ethercat.h"
+#include "e1000_defines-6.1-ethercat.h"
+#include "e1000_i210-6.1-ethercat.h"
+
+/* Functions that should not be called directly from drivers but can be used
+ * by other files in this 'shared code'
+ */
+s32 igb_blink_led(struct e1000_hw *hw);
+s32 igb_check_for_copper_link(struct e1000_hw *hw);
+s32 igb_cleanup_led(struct e1000_hw *hw);
+s32 igb_config_fc_after_link_up(struct e1000_hw *hw);
+s32 igb_disable_pcie_master(struct e1000_hw *hw);
+s32 igb_force_mac_fc(struct e1000_hw *hw);
+s32 igb_get_auto_rd_done(struct e1000_hw *hw);
+s32 igb_get_bus_info_pcie(struct e1000_hw *hw);
+s32 igb_get_hw_semaphore(struct e1000_hw *hw);
+s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+s32 igb_id_led_init(struct e1000_hw *hw);
+s32 igb_led_off(struct e1000_hw *hw);
+void igb_update_mc_addr_list(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count);
+s32 igb_setup_link(struct e1000_hw *hw);
+s32 igb_validate_mdi_setting(struct e1000_hw *hw);
+s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+ u32 offset, u8 data);
+
+void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
+void igb_clear_vfta(struct e1000_hw *hw);
+void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, u32 vind,
+ bool vlan_on, bool vlvf_bypass);
+void igb_config_collision_dist(struct e1000_hw *hw);
+void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
+void igb_put_hw_semaphore(struct e1000_hw *hw);
+void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
+
+bool igb_enable_mng_pass_thru(struct e1000_hw *hw);
+
+enum e1000_mng_mode {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG 0x20000000
+
+#define E1000_FWSM_MODE_MASK 0xE
+#define E1000_FWSM_MODE_SHIFT 1
+
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
+
+void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+
+#endif
diff --git a/devices/igb/e1000_mac-6.1-orig.c b/devices/igb/e1000_mac-6.1-orig.c
new file mode 100644
index 00000000..caf91c6f
--- /dev/null
+++ b/devices/igb/e1000_mac-6.1-orig.c
@@ -0,0 +1,1685 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "e1000_mac.h"
+
+#include "igb.h"
+
+static s32 igb_set_default_fc(struct e1000_hw *hw);
+static void igb_set_fc_watermarks(struct e1000_hw *hw);
+
+/**
+ * igb_get_bus_info_pcie - Get PCIe bus information
+ * @hw: pointer to the HW structure
+ *
+ * Determines and stores the system bus information for a particular
+ * network interface. The following bus information is determined and stored:
+ * bus speed, bus width, type (PCIe), and PCIe function.
+ **/ +s32 igb_get_bus_info_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + u32 reg; + u16 pcie_link_status; + + bus->type = e1000_bus_type_pci_express; + + ret_val = igb_read_pcie_cap_reg(hw, + PCI_EXP_LNKSTA, + &pcie_link_status); + if (ret_val) { + bus->width = e1000_bus_width_unknown; + bus->speed = e1000_bus_speed_unknown; + } else { + switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) { + case PCI_EXP_LNKSTA_CLS_2_5GB: + bus->speed = e1000_bus_speed_2500; + break; + case PCI_EXP_LNKSTA_CLS_5_0GB: + bus->speed = e1000_bus_speed_5000; + break; + default: + bus->speed = e1000_bus_speed_unknown; + break; + } + + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT); + } + + reg = rd32(E1000_STATUS); + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; + + return 0; +} + +/** + * igb_clear_vfta - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void igb_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;) + hw->mac.ops.write_vfta(hw, offset, 0); +} + +/** + * igb_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + struct igb_adapter *adapter = hw->back; + + array_wr32(E1000_VFTA, offset, value); + wrfl(); + + adapter->shadow_vfta[offset] = value; +} + +/** + * igb_init_rx_addrs - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setups the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + **/ +void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ALEN] = {0}; + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igb_find_vlvf_slot - find the VLAN id or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vlvf_bypass: skip VLVF if no match is found + * + * return the VLVF index where this VLAN id should be placed + * + **/ +static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass) +{ + s32 regindex, first_empty_slot; + u32 bits; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* if vlvf_bypass is set we don't want to use an empty slot, we + * will simply bypass the VLVF if there are no entries present in the + * VLVF that contain our VLAN + */ + first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0; + + /* Search for the VLAN id in the VLVF entries. Save off the first empty + * slot found along the way. + * + * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 
1 + */ + for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) { + bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK; + if (bits == vlan) + return regindex; + if (!first_empty_slot && !bits) + first_empty_slot = regindex; + } + + return first_empty_slot ? : -E1000_ERR_NO_SPACE; +} + +/** + * igb_vfta_set - enable or disable vlan in VLAN filter table + * @hw: pointer to the HW structure + * @vlan: VLAN id to add or remove + * @vind: VMDq output index that maps queue to VLAN id + * @vlan_on: if true add filter, if false remove + * @vlvf_bypass: skip VLVF if no match is found + * + * Sets or clears a bit in the VLAN filter table array based on VLAN id + * and if we are adding or removing the filter + **/ +s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) +{ + struct igb_adapter *adapter = hw->back; + u32 regidx, vfta_delta, vfta, bits; + s32 vlvf_index; + + if ((vlan > 4095) || (vind > 7)) + return -E1000_ERR_PARAM; + + /* this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. + */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regidx = vlan / 32; + vfta_delta = BIT(vlan % 32); + vfta = adapter->shadow_vfta[regidx]; + + /* vfta_delta represents the difference between the current value + * of vfta and the value we want in the register. Since the diff + * is an XOR mask we can just update vfta using an XOR. + */ + vfta_delta &= vlan_on ? ~vfta : vfta; + vfta ^= vfta_delta; + + /* Part 2 + * If VT Mode is set + * Either vlan_on + * make sure the VLAN is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + if (!adapter->vfs_allocated_count) + goto vfta_update; + + vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass); + if (vlvf_index < 0) { + if (vlvf_bypass) + goto vfta_update; + return vlvf_index; + } + + bits = rd32(E1000_VLVF(vlvf_index)); + + /* set the pool bit */ + bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind); + if (vlan_on) + goto vlvf_update; + + /* clear the pool bit */ + bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind); + + if (!(bits & E1000_VLVF_POOLSEL_MASK)) { + /* Clear VFTA first, then disable VLVF. Otherwise + * we run the risk of stray packets leaking into + * the PF via the default pool + */ + if (vfta_delta) + hw->mac.ops.write_vfta(hw, regidx, vfta); + + /* disable VLVF and clear remaining bit from pool */ + wr32(E1000_VLVF(vlvf_index), 0); + + return 0; + } + + /* If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. 
+ */ + vfta_delta = 0; + +vlvf_update: + /* record pool change and enable VLAN ID if not already enabled */ + wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE); + +vfta_update: + /* bit was set/cleared before we started */ + if (vfta_delta) + hw->mac.ops.write_vfta(hw, regidx, vfta); + + return 0; +} + +/** + * igb_check_alt_mac_addr - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is saved in the hw struct and + * programmed into RAR0 and the function returns success, otherwise the + * function returns an error. + **/ +s32 igb_check_alt_mac_addr(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val = 0; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ALEN]; + + /* Alternate MAC address is handled by the option ROM for 82580 + * and newer. SW support not required. + */ + if (hw->mac.type >= e1000_82580) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) + /* There is no Alternate MAC Address */ + goto out; + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; + if (hw->bus.func == E1000_FUNC_2) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2; + + if (hw->bus.func == E1000_FUNC_3) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; + for (i = 0; i < ETH_ALEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (is_multicast_ether_addr(alt_mac_addr)) { + hw_dbg("Ignoring Alternate Mac Address with MC bit set\n"); + goto out; + } + + /* We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0. + */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + +out: + return ret_val; +} + +/** + * igb_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. 
+ */ + wr32(E1000_RAL(index), rar_low); + wrfl(); + wr32(E1000_RAH(index), rar_high); + wrfl(); +} + +/** + * igb_mta_set - Set multicast filter table address + * @hw: pointer to the HW structure + * @hash_value: determines the MTA register and bit to set + * + * The multicast table address is a register array of 32-bit registers. + * The hash_value is used to determine what register the bit is in, the + * current value is read, the new bit is OR'd in and the new value is + * written back into the register. + **/ +void igb_mta_set(struct e1000_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg, mta; + + /* The MTA is a register array of 32-bit registers. It is + * treated like an array of (32*mta_reg_count) bits. We want to + * set bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The (hw->mac.mta_reg_count - 1) serves as a + * mask to bits 31:5 of the hash value which gives us the + * register we're modifying. The hash bit within that register + * is determined by the lower 5 bits of the hash value. + */ + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + mta = array_rd32(E1000_MTA, hash_reg); + + mta |= BIT(hash_bit); + + array_wr32(E1000_MTA, hash_reg, mta); + wrfl(); +} + +/** + * igb_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * igb_mta_set() + **/ +static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 1; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF && bit_shift < 4) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... 
+ * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * igb_i21x_hw_doublecheck - double checks potential HW issue in i21X + * @hw: pointer to the HW structure + * + * Checks if multicast array is wrote correctly + * If not then rewrites again to register + **/ +static void igb_i21x_hw_doublecheck(struct e1000_hw *hw) +{ + int failed_cnt = 3; + bool is_failed; + int i; + + do { + is_failed = false; + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) { + if (array_rd32(E1000_MTA, i) != hw->mac.mta_shadow[i]) { + is_failed = true; + array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); + } + } + if (is_failed && --failed_cnt <= 0) { + hw_dbg("Failed to update MTA_REGISTER, too many retries"); + break; + } + } while (is_failed); +} + +/** + * igb_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void igb_update_mc_addr_list(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { + hash_value = igb_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); + mc_addr_list += (ETH_ALEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) + igb_i21x_hw_doublecheck(hw); +} + +/** + * igb_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. 
+ **/ +void igb_clear_hw_cntrs_base(struct e1000_hw *hw) +{ + rd32(E1000_CRCERRS); + rd32(E1000_SYMERRS); + rd32(E1000_MPC); + rd32(E1000_SCC); + rd32(E1000_ECOL); + rd32(E1000_MCC); + rd32(E1000_LATECOL); + rd32(E1000_COLC); + rd32(E1000_DC); + rd32(E1000_SEC); + rd32(E1000_RLEC); + rd32(E1000_XONRXC); + rd32(E1000_XONTXC); + rd32(E1000_XOFFRXC); + rd32(E1000_XOFFTXC); + rd32(E1000_FCRUC); + rd32(E1000_GPRC); + rd32(E1000_BPRC); + rd32(E1000_MPRC); + rd32(E1000_GPTC); + rd32(E1000_GORCL); + rd32(E1000_GORCH); + rd32(E1000_GOTCL); + rd32(E1000_GOTCH); + rd32(E1000_RNBC); + rd32(E1000_RUC); + rd32(E1000_RFC); + rd32(E1000_ROC); + rd32(E1000_RJC); + rd32(E1000_TORL); + rd32(E1000_TORH); + rd32(E1000_TOTL); + rd32(E1000_TOTH); + rd32(E1000_TPR); + rd32(E1000_TPT); + rd32(E1000_MPTC); + rd32(E1000_BPTC); +} + +/** + * igb_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 igb_check_for_copper_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igb_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igb_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igb_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + return ret_val; +} + +/** + * igb_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 igb_setup_link(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (igb_check_reset_block(hw)) + goto out; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. 
+ */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = igb_set_default_fc(hw); + if (ret_val) + goto out; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(E1000_FCT, FLOW_CONTROL_TYPE); + wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(E1000_FCTTV, hw->fc.pause_time); + + igb_set_fc_watermarks(hw); + +out: + + return ret_val; +} + +/** + * igb_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void igb_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl; + + tctl = rd32(E1000_TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + wr32(E1000_TCTL, tctl); + wrfl(); +} + +/** + * igb_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * tansmission as well. + **/ +static void igb_set_fc_watermarks(struct e1000_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & e1000_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= E1000_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(E1000_FCRTL, fcrtl); + wr32(E1000_FCRTH, fcrth); +} + +/** + * igb_set_default_fc - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 igb_set_default_fc(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 lan_offset; + u16 nvm_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. 
+ */ + if (hw->mac.type == e1000_i350) + lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); + else + lan_offset = 0; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset, + 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + +out: + return ret_val; +} + +/** + * igb_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +s32 igb_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val = 0; + + ctrl = rd32(E1000_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case e1000_fc_none: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case e1000_fc_rx_pause: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case e1000_fc_tx_pause: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case e1000_fc_full: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + wr32(E1000_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * igb_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 igb_config_fc_after_link_up(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. 
+ */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = igb_force_mac_fc(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = igb_force_mac_fc(hw); + } + + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); + goto out; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == e1000_fc_none) || + (hw->fc.requested_mode == e1000_fc_tx_pause) || + (hw->fc.strict_ieee)) { + hw->fc.current_mode = e1000_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igb_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) + && mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. 
+ */ + pcs_status_reg = rd32(E1000_PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { + hw_dbg("PCS Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (PCS_ANADV) and the Auto_Negotiation Base + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. + */ + pcs_adv_reg = rd32(E1000_PCS_ANADV); + pcs_lp_ability_reg = rd32(E1000_PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base + * Page Ability Register (PCS_LPAB) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + hw_dbg("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. 
+ */ + hw->fc.current_mode = e1000_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + pcs_ctrl_reg = rd32(E1000_PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; + wr32(E1000_PCS_LCTL, pcs_ctrl_reg); + + ret_val = igb_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + +out: + return ret_val; +} + +/** + * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igb_get_hw_semaphore - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 igb_get_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + s32 ret_val = 0; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igb_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void igb_put_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = rd32(E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + + wr32(E1000_SWSM, swsm); +} + +/** + * igb_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +s32 igb_get_auto_rd_done(struct e1000_hw *hw) +{ + s32 i = 0; + s32 ret_val = 0; + + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -E1000_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_valid_led_default - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. 
If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_82575_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT; + break; + } + } +out: + return ret_val; +} + +/** + * igb_id_led_init - + * @hw: pointer to the HW structure + * + **/ +s32 igb_id_led_init(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + /* i210 and i211 devices have different LED mechanism */ + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) + ret_val = igb_valid_led_default_i210(hw, &data); + else + ret_val = igb_valid_led_default(hw, &data); + + if (ret_val) + goto out; + + mac->ledctl_default = rd32(E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + +out: + return ret_val; +} + +/** + * igb_cleanup_led - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 igb_cleanup_led(struct e1000_hw *hw) +{ + wr32(E1000_LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * igb_blink_led - Blink LED + * @hw: pointer to the HW structure + * + * Blink the led's which are set to be on. + **/ +s32 igb_blink_led(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. 
+ */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } + } + + wr32(E1000_LEDCTL, ledctl_blink); + + return 0; +} + +/** + * igb_led_off - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 igb_led_off(struct e1000_hw *hw) +{ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + wr32(E1000_LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return 0; +} + +/** + * igb_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (0) if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 igb_disable_pcie_master(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = 0; + + if (hw->bus.type != e1000_bus_type_pci_express) + goto out; + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + wr32(E1000_CTRL, ctrl); + + while (timeout) { + if (!(rd32(E1000_STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE)) + break; + udelay(100); + timeout--; + } + + if (!timeout) { + hw_dbg("Master requests are pending.\n"); + ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_validate_mdi_setting - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Verify that when not using auto-negotitation that MDI/MDIx is correctly + * set, which is forced to MDI mode only. + **/ +s32 igb_validate_mdi_setting(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* All MDI settings are supported on 82580 and newer. */ + if (hw->mac.type >= e1000_82580) + goto out; + + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { + hw_dbg("Invalid MDI setting detected\n"); + hw->phy.mdix = 1; + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_write_8bit_ctrl_reg - Write a 8bit CTRL register + * @hw: pointer to the HW structure + * @reg: 32bit register offset such as E1000_SCTL + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes an address/data control type register. There are several of these + * and they all have the format address << 8 | data and bit 31 is polled for + * completion. 
+ **/ +s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data) +{ + u32 i, regvalue = 0; + s32 ret_val = 0; + + /* Set up the address and data */ + regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); + wr32(reg, regvalue); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { + udelay(5); + regvalue = rd32(reg); + if (regvalue & E1000_GEN_CTL_READY) + break; + } + if (!(regvalue & E1000_GEN_CTL_READY)) { + hw_dbg("Reg %08x did not indicate ready\n", reg); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + **/ +bool igb_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + bool ret_val = false; + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = rd32(E1000_MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = rd32(E1000_FWSM); + factps = rd32(E1000_FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else { + if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + } + +out: + return ret_val; +} diff --git a/devices/igb/e1000_mac-6.1-orig.h b/devices/igb/e1000_mac-6.1-orig.h new file mode 100644 index 00000000..6e110f28 --- /dev/null +++ b/devices/igb/e1000_mac-6.1-orig.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_MAC_H_ +#define _E1000_MAC_H_ + +#include "e1000_hw.h" + +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_defines.h" +#include "e1000_i210.h" + +/* Functions that should not be called directly from drivers but can be used + * by other files in this 'shared code' + */ +s32 igb_blink_led(struct e1000_hw *hw); +s32 igb_check_for_copper_link(struct e1000_hw *hw); +s32 igb_cleanup_led(struct e1000_hw *hw); +s32 igb_config_fc_after_link_up(struct e1000_hw *hw); +s32 igb_disable_pcie_master(struct e1000_hw *hw); +s32 igb_force_mac_fc(struct e1000_hw *hw); +s32 igb_get_auto_rd_done(struct e1000_hw *hw); +s32 igb_get_bus_info_pcie(struct e1000_hw *hw); +s32 igb_get_hw_semaphore(struct e1000_hw *hw); +s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 igb_id_led_init(struct e1000_hw *hw); +s32 igb_led_off(struct e1000_hw *hw); +void igb_update_mc_addr_list(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 igb_setup_link(struct e1000_hw *hw); +s32 igb_validate_mdi_setting(struct e1000_hw *hw); +s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data); + +void igb_clear_hw_cntrs_base(struct e1000_hw *hw); +void igb_clear_vfta(struct e1000_hw *hw); +void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); +s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, u32 vind, + bool vlan_on, bool vlvf_bypass); +void igb_config_collision_dist(struct e1000_hw *hw); +void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +void igb_mta_set(struct e1000_hw *hw, u32 hash_value); +void igb_put_hw_semaphore(struct e1000_hw *hw); +void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +s32 igb_check_alt_mac_addr(struct e1000_hw *hw); + +bool igb_enable_mng_pass_thru(struct e1000_hw *hw); + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +void e1000_init_function_pointers_82575(struct e1000_hw *hw); + +#endif diff --git a/devices/igb/e1000_mbx-5.14-ethercat.c b/devices/igb/e1000_mbx-5.14-ethercat.c new file mode 100644 index 00000000..71d7cc00 --- /dev/null +++ b/devices/igb/e1000_mbx-5.14-ethercat.c @@ -0,0 +1,474 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#include "e1000_mbx-5.14-ethercat.h" + +/** + * igb_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * @unlock: skip locking or not + * + * returns SUCCESS if it successfully read message from buffer + **/ +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id, unlock); + + return ret_val; +} + +/** + * igb_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = 0; + + if (size > mbx->size) + ret_val = -E1000_ERR_MBX; + + else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + + return ret_val; +} + +/** + * igb_unlock_mbx - unlock the mailbox + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the mailbox was unlocked or else ERR_MBX + **/ +s32 igb_unlock_mbx(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.unlock) + ret_val = mbx->ops.unlock(hw, mbx_id); + + return ret_val; +} + +/** + * igb_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); 
+ } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? 0 : -E1000_ERR_MBX; +} + +/** + * igb_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? 0 : -E1000_ERR_MBX; +} + +/** + * igb_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = igb_poll_for_msg(hw, mbx_id); + + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id, true); +out: + return ret_val; +} + +/** + * igb_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = igb_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) +{ + u32 mbvficr = rd32(E1000_MBVFICR); + s32 ret_val = -E1000_ERR_MBX; + + if (mbvficr & mask) { + ret_val = 0; + wr32(E1000_MBVFICR, mask); + } + + return ret_val; +} + +/** + * igb_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * igb_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { + ret_val = 0; + hw->mbx.stats.acks++; + } + + return ret_val; 
+} + +/** + * igb_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 vflre = rd32(E1000_VFLRE); + s32 ret_val = -E1000_ERR_MBX; + + if (vflre & BIT(vf_number)) { + ret_val = 0; + wr32(E1000_VFLRE, BIT(vf_number)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * igb_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + u32 p2v_mailbox; + int count = 10; + + do { + /* Take ownership of the buffer */ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) { + ret_val = 0; + break; + } + udelay(1000); + } while (count-- > 0); + + return ret_val; +} + +/** + * igb_release_mbx_lock_pf - release mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we released the mailbox lock + **/ +static s32 igb_release_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 p2v_mailbox; + + /* drop PF lock of mailbox, if set */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) + wr32(E1000_P2VMAILBOX(vf_number), + p2v_mailbox & ~E1000_P2VMAILBOX_PFU); + + return 0; +} + +/** + * igb_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + igb_check_for_msg_pf(hw, vf_number); + igb_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + array_wr32(E1000_VMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return ret_val; + +} + +/** + * igb_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * @unlock: unlock the mailbox when done? + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
+ **/ +static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number, bool unlock) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); + + /* Acknowledge the message and release mailbox lock (or not) */ + if (unlock) + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + else + wr32(E1000_P2VMAILBOX(vf_number), + E1000_P2VMAILBOX_ACK | E1000_P2VMAILBOX_PFU); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * igb_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +s32 igb_init_mbx_params_pf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = igb_read_mbx_pf; + mbx->ops.write = igb_write_mbx_pf; + mbx->ops.read_posted = igb_read_posted_mbx; + mbx->ops.write_posted = igb_write_posted_mbx; + mbx->ops.check_for_msg = igb_check_for_msg_pf; + mbx->ops.check_for_ack = igb_check_for_ack_pf; + mbx->ops.check_for_rst = igb_check_for_rst_pf; + mbx->ops.unlock = igb_release_mbx_lock_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + return 0; +} diff --git a/devices/igb/e1000_mbx-5.14-ethercat.h b/devices/igb/e1000_mbx-5.14-ethercat.h new file mode 100644 index 00000000..a1c10aae --- /dev/null +++ b/devices/igb/e1000_mbx-5.14-ethercat.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#ifndef _E1000_MBX_H_ +#define _E1000_MBX_H_ + +#include "e1000_hw-5.14-ethercat.h" + +#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ +#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ +#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is E1000_PF_*. 
+ * Message ACK's are the value or'd with 0xF0000000 + */ +/* Messages below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for exra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +/* VF requests to clear all unicast MAC filters */ +#define E1000_VF_MAC_FILTER_CLR (0x01 << E1000_VT_MSGINFO_SHIFT) +/* VF requests to add unicast MAC filter */ +#define E1000_VF_MAC_FILTER_ADD (0x02 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock); +s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); +s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id); +s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id); +s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id); +s32 igb_unlock_mbx(struct e1000_hw *hw, u16 mbx_id); +s32 igb_init_mbx_params_pf(struct e1000_hw *hw); + +#endif /* _E1000_MBX_H_ */ diff --git a/devices/igb/e1000_mbx-5.14-orig.c b/devices/igb/e1000_mbx-5.14-orig.c new file mode 100644 index 00000000..29383112 --- /dev/null +++ b/devices/igb/e1000_mbx-5.14-orig.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#include "e1000_mbx.h" + +/** + * igb_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * @unlock: skip locking or not + * + * returns SUCCESS if it successfully read message from buffer + **/ +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id, unlock); + + return ret_val; +} + +/** + * igb_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = 0; + + if (size > mbx->size) + ret_val = -E1000_ERR_MBX; + + else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + + return ret_val; +} + +/** + * igb_unlock_mbx - unlock the mailbox + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the mailbox was unlocked or else ERR_MBX + **/ +s32 igb_unlock_mbx(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.unlock) + ret_val = mbx->ops.unlock(hw, mbx_id); + + return ret_val; +} + +/** + * igb_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + } + + /* if 
we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? 0 : -E1000_ERR_MBX; +} + +/** + * igb_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? 0 : -E1000_ERR_MBX; +} + +/** + * igb_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = igb_poll_for_msg(hw, mbx_id); + + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id, true); +out: + return ret_val; +} + +/** + * igb_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = igb_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) +{ + u32 mbvficr = rd32(E1000_MBVFICR); + s32 ret_val = -E1000_ERR_MBX; + + if (mbvficr & mask) { + ret_val = 0; + wr32(E1000_MBVFICR, mask); + } + + return ret_val; +} + +/** + * igb_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * igb_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { + ret_val = 0; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * 
igb_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 vflre = rd32(E1000_VFLRE); + s32 ret_val = -E1000_ERR_MBX; + + if (vflre & BIT(vf_number)) { + ret_val = 0; + wr32(E1000_VFLRE, BIT(vf_number)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * igb_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + u32 p2v_mailbox; + int count = 10; + + do { + /* Take ownership of the buffer */ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) { + ret_val = 0; + break; + } + udelay(1000); + } while (count-- > 0); + + return ret_val; +} + +/** + * igb_release_mbx_lock_pf - release mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we released the mailbox lock + **/ +static s32 igb_release_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 p2v_mailbox; + + /* drop PF lock of mailbox, if set */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) + wr32(E1000_P2VMAILBOX(vf_number), + p2v_mailbox & ~E1000_P2VMAILBOX_PFU); + + return 0; +} + +/** + * igb_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + igb_check_for_msg_pf(hw, vf_number); + igb_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + array_wr32(E1000_VMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return ret_val; + +} + +/** + * igb_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * @unlock: unlock the mailbox when done? + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
+ **/ +static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number, bool unlock) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); + + /* Acknowledge the message and release mailbox lock (or not) */ + if (unlock) + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + else + wr32(E1000_P2VMAILBOX(vf_number), + E1000_P2VMAILBOX_ACK | E1000_P2VMAILBOX_PFU); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * igb_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +s32 igb_init_mbx_params_pf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = igb_read_mbx_pf; + mbx->ops.write = igb_write_mbx_pf; + mbx->ops.read_posted = igb_read_posted_mbx; + mbx->ops.write_posted = igb_write_posted_mbx; + mbx->ops.check_for_msg = igb_check_for_msg_pf; + mbx->ops.check_for_ack = igb_check_for_ack_pf; + mbx->ops.check_for_rst = igb_check_for_rst_pf; + mbx->ops.unlock = igb_release_mbx_lock_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + return 0; +} + diff --git a/devices/igb/e1000_mbx-5.14-orig.h b/devices/igb/e1000_mbx-5.14-orig.h new file mode 100644 index 00000000..178e60ec --- /dev/null +++ b/devices/igb/e1000_mbx-5.14-orig.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#ifndef _E1000_MBX_H_ +#define _E1000_MBX_H_ + +#include "e1000_hw.h" + +#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ +#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ +#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is E1000_PF_*. 
+ * Message ACK's are the value or'd with 0xF0000000 + */ +/* Messages below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for exra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +/* VF requests to clear all unicast MAC filters */ +#define E1000_VF_MAC_FILTER_CLR (0x01 << E1000_VT_MSGINFO_SHIFT) +/* VF requests to add unicast MAC filter */ +#define E1000_VF_MAC_FILTER_ADD (0x02 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock); +s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); +s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id); +s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id); +s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id); +s32 igb_unlock_mbx(struct e1000_hw *hw, u16 mbx_id); +s32 igb_init_mbx_params_pf(struct e1000_hw *hw); + +#endif /* _E1000_MBX_H_ */ diff --git a/devices/igb/e1000_mbx-6.1-ethercat.c b/devices/igb/e1000_mbx-6.1-ethercat.c new file mode 100644 index 00000000..c579beea --- /dev/null +++ b/devices/igb/e1000_mbx-6.1-ethercat.c @@ -0,0 +1,474 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#include "e1000_mbx-6.1-ethercat.h" + +/** + * igb_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * @unlock: skip locking or not + * + * returns SUCCESS if it successfully read message from buffer + **/ +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id, unlock); + + return ret_val; +} + +/** + * igb_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = 0; + + if (size > mbx->size) + ret_val = -E1000_ERR_MBX; + + else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + + return ret_val; +} + +/** + * igb_unlock_mbx - unlock the mailbox + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the mailbox was unlocked or else ERR_MBX + **/ +s32 igb_unlock_mbx(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.unlock) + ret_val = mbx->ops.unlock(hw, mbx_id); + + return ret_val; +} + +/** + * igb_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + 
} + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? 0 : -E1000_ERR_MBX; +} + +/** + * igb_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? 0 : -E1000_ERR_MBX; +} + +/** + * igb_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = igb_poll_for_msg(hw, mbx_id); + + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id, true); +out: + return ret_val; +} + +/** + * igb_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = igb_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) +{ + u32 mbvficr = rd32(E1000_MBVFICR); + s32 ret_val = -E1000_ERR_MBX; + + if (mbvficr & mask) { + ret_val = 0; + wr32(E1000_MBVFICR, mask); + } + + return ret_val; +} + +/** + * igb_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * igb_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { + ret_val = 0; + hw->mbx.stats.acks++; + } + + return ret_val; 
+} + +/** + * igb_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 vflre = rd32(E1000_VFLRE); + s32 ret_val = -E1000_ERR_MBX; + + if (vflre & BIT(vf_number)) { + ret_val = 0; + wr32(E1000_VFLRE, BIT(vf_number)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * igb_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + u32 p2v_mailbox; + int count = 10; + + do { + /* Take ownership of the buffer */ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) { + ret_val = 0; + break; + } + udelay(1000); + } while (count-- > 0); + + return ret_val; +} + +/** + * igb_release_mbx_lock_pf - release mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we released the mailbox lock + **/ +static s32 igb_release_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 p2v_mailbox; + + /* drop PF lock of mailbox, if set */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) + wr32(E1000_P2VMAILBOX(vf_number), + p2v_mailbox & ~E1000_P2VMAILBOX_PFU); + + return 0; +} + +/** + * igb_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + igb_check_for_msg_pf(hw, vf_number); + igb_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + array_wr32(E1000_VMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return ret_val; + +} + +/** + * igb_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * @unlock: unlock the mailbox when done? + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
+ **/ +static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number, bool unlock) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); + + /* Acknowledge the message and release mailbox lock (or not) */ + if (unlock) + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + else + wr32(E1000_P2VMAILBOX(vf_number), + E1000_P2VMAILBOX_ACK | E1000_P2VMAILBOX_PFU); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * igb_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +s32 igb_init_mbx_params_pf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = igb_read_mbx_pf; + mbx->ops.write = igb_write_mbx_pf; + mbx->ops.read_posted = igb_read_posted_mbx; + mbx->ops.write_posted = igb_write_posted_mbx; + mbx->ops.check_for_msg = igb_check_for_msg_pf; + mbx->ops.check_for_ack = igb_check_for_ack_pf; + mbx->ops.check_for_rst = igb_check_for_rst_pf; + mbx->ops.unlock = igb_release_mbx_lock_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + return 0; +} diff --git a/devices/igb/e1000_mbx-6.1-ethercat.h b/devices/igb/e1000_mbx-6.1-ethercat.h new file mode 100644 index 00000000..65f09fb1 --- /dev/null +++ b/devices/igb/e1000_mbx-6.1-ethercat.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#ifndef _E1000_MBX_H_ +#define _E1000_MBX_H_ + +#include "e1000_hw-6.1-ethercat.h" + +#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ +#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ +#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is E1000_PF_*. 
+ * Message ACK's are the value or'd with 0xF0000000 + */ +/* Messages below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for exra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +/* VF requests to clear all unicast MAC filters */ +#define E1000_VF_MAC_FILTER_CLR (0x01 << E1000_VT_MSGINFO_SHIFT) +/* VF requests to add unicast MAC filter */ +#define E1000_VF_MAC_FILTER_ADD (0x02 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock); +s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); +s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id); +s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id); +s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id); +s32 igb_unlock_mbx(struct e1000_hw *hw, u16 mbx_id); +s32 igb_init_mbx_params_pf(struct e1000_hw *hw); + +#endif /* _E1000_MBX_H_ */ diff --git a/devices/igb/e1000_mbx-6.1-orig.c b/devices/igb/e1000_mbx-6.1-orig.c new file mode 100644 index 00000000..29383112 --- /dev/null +++ b/devices/igb/e1000_mbx-6.1-orig.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#include "e1000_mbx.h" + +/** + * igb_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * @unlock: skip locking or not + * + * returns SUCCESS if it successfully read message from buffer + **/ +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id, unlock); + + return ret_val; +} + +/** + * igb_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = 0; + + if (size > mbx->size) + ret_val = -E1000_ERR_MBX; + + else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + + return ret_val; +} + +/** + * igb_unlock_mbx - unlock the mailbox + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the mailbox was unlocked or else ERR_MBX + **/ +s32 igb_unlock_mbx(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.unlock) + ret_val = mbx->ops.unlock(hw, mbx_id); + + return ret_val; +} + +/** + * igb_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + } + + /* if 
we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? 0 : -E1000_ERR_MBX; +} + +/** + * igb_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? 0 : -E1000_ERR_MBX; +} + +/** + * igb_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = igb_poll_for_msg(hw, mbx_id); + + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id, true); +out: + return ret_val; +} + +/** + * igb_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = igb_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) +{ + u32 mbvficr = rd32(E1000_MBVFICR); + s32 ret_val = -E1000_ERR_MBX; + + if (mbvficr & mask) { + ret_val = 0; + wr32(E1000_MBVFICR, mask); + } + + return ret_val; +} + +/** + * igb_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * igb_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { + ret_val = 0; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * 
igb_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 vflre = rd32(E1000_VFLRE); + s32 ret_val = -E1000_ERR_MBX; + + if (vflre & BIT(vf_number)) { + ret_val = 0; + wr32(E1000_VFLRE, BIT(vf_number)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * igb_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + u32 p2v_mailbox; + int count = 10; + + do { + /* Take ownership of the buffer */ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) { + ret_val = 0; + break; + } + udelay(1000); + } while (count-- > 0); + + return ret_val; +} + +/** + * igb_release_mbx_lock_pf - release mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we released the mailbox lock + **/ +static s32 igb_release_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 p2v_mailbox; + + /* drop PF lock of mailbox, if set */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) + wr32(E1000_P2VMAILBOX(vf_number), + p2v_mailbox & ~E1000_P2VMAILBOX_PFU); + + return 0; +} + +/** + * igb_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + igb_check_for_msg_pf(hw, vf_number); + igb_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + array_wr32(E1000_VMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return ret_val; + +} + +/** + * igb_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * @unlock: unlock the mailbox when done? + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
+ **/ +static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number, bool unlock) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); + + /* Acknowledge the message and release mailbox lock (or not) */ + if (unlock) + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + else + wr32(E1000_P2VMAILBOX(vf_number), + E1000_P2VMAILBOX_ACK | E1000_P2VMAILBOX_PFU); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * igb_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +s32 igb_init_mbx_params_pf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = igb_read_mbx_pf; + mbx->ops.write = igb_write_mbx_pf; + mbx->ops.read_posted = igb_read_posted_mbx; + mbx->ops.write_posted = igb_write_posted_mbx; + mbx->ops.check_for_msg = igb_check_for_msg_pf; + mbx->ops.check_for_ack = igb_check_for_ack_pf; + mbx->ops.check_for_rst = igb_check_for_rst_pf; + mbx->ops.unlock = igb_release_mbx_lock_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + return 0; +} + diff --git a/devices/igb/e1000_mbx-6.1-orig.h b/devices/igb/e1000_mbx-6.1-orig.h new file mode 100644 index 00000000..178e60ec --- /dev/null +++ b/devices/igb/e1000_mbx-6.1-orig.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#ifndef _E1000_MBX_H_ +#define _E1000_MBX_H_ + +#include "e1000_hw.h" + +#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ +#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ +#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is E1000_PF_*. 
+ * Message ACK's are the value or'd with 0xF0000000 + */ +/* Messages below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for exra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +/* VF requests to clear all unicast MAC filters */ +#define E1000_VF_MAC_FILTER_CLR (0x01 << E1000_VT_MSGINFO_SHIFT) +/* VF requests to add unicast MAC filter */ +#define E1000_VF_MAC_FILTER_ADD (0x02 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock); +s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); +s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id); +s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id); +s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id); +s32 igb_unlock_mbx(struct e1000_hw *hw, u16 mbx_id); +s32 igb_init_mbx_params_pf(struct e1000_hw *hw); + +#endif /* _E1000_MBX_H_ */ diff --git a/devices/igb/e1000_nvm-5.14-ethercat.c b/devices/igb/e1000_nvm-5.14-ethercat.c new file mode 100644 index 00000000..00c70fb0 --- /dev/null +++ b/devices/igb/e1000_nvm-5.14-ethercat.c @@ -0,0 +1,782 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#include <linux/if_ether.h> +#include <linux/delay.h> + +#include "e1000_mac-5.14-ethercat.h" +#include "e1000_nvm-5.14-ethercat.h" + +/** + * igb_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd | E1000_EECD_SK; + wr32(E1000_EECD, *eecd); + wrfl(); + udelay(hw->nvm.delay_usec); +} + +/** + * igb_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~E1000_EECD_SK; + wr32(E1000_EECD, *eecd); + wrfl(); + udelay(hw->nvm.delay_usec); +} + +/** + * igb_shift_out_eec_bits - Shift data bits our to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. 
+ **/ +static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + u32 mask; + + mask = 1u << (count - 1); + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + wr32(E1000_EECD, eecd); + wrfl(); + + udelay(nvm->delay_usec); + + igb_raise_eec_clk(hw, &eecd); + igb_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + wr32(E1000_EECD, eecd); +} + +/** + * igb_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. + **/ +static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + eecd = rd32(E1000_EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + igb_raise_eec_clk(hw, &eecd); + + eecd = rd32(E1000_EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + igb_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + s32 ret_val = -E1000_ERR_NVM; + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = rd32(E1000_EERD); + else + reg = rd32(E1000_EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) { + ret_val = 0; + break; + } + + udelay(5); + } + + return ret_val; +} + +/** + * igb_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 igb_acquire_nvm(struct e1000_hw *hw) +{ + u32 eecd = rd32(E1000_EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + s32 ret_val = 0; + + + wr32(E1000_EECD, eecd | E1000_EECD_REQ); + eecd = rd32(E1000_EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + udelay(5); + eecd = rd32(E1000_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + wr32(E1000_EECD, eecd); + hw_dbg("Could not acquire NVM grant\n"); + ret_val = -E1000_ERR_NVM; + } + + return ret_val; +} + +/** + * igb_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. 
+ **/ +static void igb_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + wr32(E1000_EECD, eecd); + wrfl(); + udelay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + wr32(E1000_EECD, eecd); + wrfl(); + udelay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = rd32(E1000_EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + igb_lower_eec_clk(hw, &eecd); + } +} + +/** + * igb_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void igb_release_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + e1000_stop_nvm(hw); + + eecd = rd32(E1000_EECD); + eecd &= ~E1000_EECD_REQ; + wr32(E1000_EECD, eecd); +} + +/** + * igb_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. + **/ +static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + s32 ret_val = 0; + u16 timeout = 0; + u8 spi_stat_reg; + + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + wr32(E1000_EECD, eecd); + wrfl(); + udelay(1); + timeout = NVM_MAX_RETRY_SPI; + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. + */ + while (timeout) { + igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + udelay(5); + igb_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + hw_dbg("SPI NVM Status error\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + } + +out: + return ret_val; +} + +/** + * igb_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + igb_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* Read the data. 
SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { + word_in = igb_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + nvm->ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + wr32(E1000_EERD, eerd); + ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (rd32(E1000_EERD) >> + E1000_NVM_RW_REG_DATA); + } + +out: + return ret_val; +} + +/** + * igb_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000_update_nvm_checksum is not called after this function , the + * EEPROM will most likley contain an invalid checksum. + **/ +s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = -E1000_ERR_NVM; + u16 widx = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + return ret_val; + } + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igb_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + + igb_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + igb_standby_nvm(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + + word_out = (word_out >> 8) | (word_out << 8); + igb_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + igb_standby_nvm(hw); + break; + } + } + usleep_range(1000, 2000); + nvm->ops.release(hw); + } + + return ret_val; +} + +/** + * igb_read_part_string - Read device part number + * @hw: pointer to the HW structure + * @part_num: pointer to device part number + * @part_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in part_num. + **/ +s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pointer; + u16 offset; + u16 length; + + if (part_num == NULL) { + hw_dbg("PBA string buffer was null\n"); + ret_val = E1000_ERR_INVALID_ARGUMENT; + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + /* if nvm_data is not ptr guard the PBA must be in legacy format which + * means pointer is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + hw_dbg("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (part_num_size < 11) { + hw_dbg("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pointer */ + part_num[0] = (nvm_data >> 12) & 0xF; + part_num[1] = (nvm_data >> 8) & 0xF; + part_num[2] = (nvm_data >> 4) & 0xF; + part_num[3] = nvm_data & 0xF; + part_num[4] = (pointer >> 12) & 0xF; + part_num[5] = (pointer >> 8) & 0xF; + part_num[6] = '-'; + part_num[7] = 0; + part_num[8] = (pointer >> 4) & 0xF; + part_num[9] = pointer & 0xF; + + /* put a null character on the end of our string */ + part_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (part_num[offset] < 0xA) + part_num[offset] += '0'; + else if (part_num[offset] < 0x10) + part_num[offset] += 'A' - 0xA; + } + + goto out; + } + + ret_val = hw->nvm.ops.read(hw, pointer, 1, &length); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (length == 0xFFFF || length == 0) { + hw_dbg("NVM PBA number section invalid 
length\n"); + ret_val = E1000_ERR_NVM_PBA_SECTION; + goto out; + } + /* check if part_num buffer is big enough */ + if (part_num_size < (((u32)length * 2) - 1)) { + hw_dbg("PBA string buffer too small\n"); + ret_val = E1000_ERR_NO_SPACE; + goto out; + } + + /* trim pba length from start of string */ + pointer++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + part_num[offset * 2] = (u8)(nvm_data >> 8); + part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + part_num[offset * 2] = '\0'; + +out: + return ret_val; +} + +/** + * igb_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +s32 igb_read_mac_addr(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = rd32(E1000_RAH(0)); + rar_low = rd32(E1000_RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * igb_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 igb_validate_nvm_checksum(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 igb_update_nvm_checksum(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * igb_get_fw_version - Get firmware version information + * @hw: pointer to the HW structure + * @fw_vers: pointer to output structure + * + * unsupported MAC types will return all 0 version structure + **/ +void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) +{ + u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; + u8 q, hval, rem, result; + u16 comb_verh, comb_verl, comb_offset; + + memset(fw_vers, 0, sizeof(struct e1000_fw_version)); + + /* basic eeprom version numbers and bits used vary by part and by tool + * used to create the nvm images. Check which data format we have. 
+ */ + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + switch (hw->mac.type) { + case e1000_i211: + igb_read_invm_version(hw, fw_vers); + return; + case e1000_82575: + case e1000_82576: + case e1000_82580: + /* Use this format, unless EETRACK ID exists, + * then use alternate format + */ + if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK); + goto etrack_id; + } + break; + case e1000_i210: + if (!(igb_get_flash_presence_i210(hw))) { + igb_read_invm_version(hw, fw_vers); + return; + } + fallthrough; + case e1000_i350: + /* find combo image version */ + hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); + if ((comb_offset != 0x0) && + (comb_offset != NVM_VER_INVALID)) { + + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset + + 1), 1, &comb_verh); + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset), + 1, &comb_verl); + + /* get Option Rom version if it exists and is valid */ + if ((comb_verh && comb_verl) && + ((comb_verh != NVM_VER_INVALID) && + (comb_verl != NVM_VER_INVALID))) { + + fw_vers->or_valid = true; + fw_vers->or_major = + comb_verl >> NVM_COMB_VER_SHFT; + fw_vers->or_build = + (comb_verl << NVM_COMB_VER_SHFT) + | (comb_verh >> NVM_COMB_VER_SHFT); + fw_vers->or_patch = + comb_verh & NVM_COMB_VER_MASK; + } + } + break; + default: + return; + } + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + + /* check for old style version format in newer images*/ + if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) { + eeprom_verl = (fw_version & NVM_COMB_VER_MASK); + } else { + eeprom_verl = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + } + /* Convert minor value to hex before assigning to output struct + * Val to be converted will not be higher than 99, per tool output + */ + q = eeprom_verl / NVM_HEX_CONV; + hval = q * NVM_HEX_TENS; + rem = eeprom_verl % NVM_HEX_CONV; + result = hval + rem; + fw_vers->eep_minor = result; + +etrack_id: + if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) + | eeprom_verl; + } +} diff --git a/devices/igb/e1000_nvm-5.14-ethercat.h b/devices/igb/e1000_nvm-5.14-ethercat.h new file mode 100644 index 00000000..091cddf4 --- /dev/null +++ b/devices/igb/e1000_nvm-5.14-ethercat.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/
+
+#ifndef _E1000_NVM_H_
+#define _E1000_NVM_H_
+
+s32 igb_acquire_nvm(struct e1000_hw *hw);
+void igb_release_nvm(struct e1000_hw *hw);
+s32 igb_read_mac_addr(struct e1000_hw *hw);
+s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
+s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
+			 u32 part_num_size);
+s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
+s32 igb_update_nvm_checksum(struct e1000_hw *hw);
+
+struct e1000_fw_version {
+	u32 etrack_id;
+	u16 eep_major;
+	u16 eep_minor;
+	u16 eep_build;
+
+	u8 invm_major;
+	u8 invm_minor;
+	u8 invm_img_type;
+
+	bool or_valid;
+	u16 or_major;
+	u16 or_build;
+	u16 or_patch;
+};
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers);
+
+#endif
diff --git a/devices/igb/e1000_nvm-5.14-orig.c b/devices/igb/e1000_nvm-5.14-orig.c
new file mode 100644
index 00000000..fa136e6e
--- /dev/null
+++ b/devices/igb/e1000_nvm-5.14-orig.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+
+#include "e1000_mac.h"
+#include "e1000_nvm.h"
+
+/**
+ * igb_raise_eec_clk - Raise EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Enable/Raise the EEPROM clock bit.
+ **/
+static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd | E1000_EECD_SK;
+	wr32(E1000_EECD, *eecd);
+	wrfl();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * igb_lower_eec_clk - Lower EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Clear/Lower the EEPROM clock bit.
+ **/
+static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd & ~E1000_EECD_SK;
+	wr32(E1000_EECD, *eecd);
+	wrfl();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * igb_shift_out_eec_bits - Shift data bits our to the EEPROM
+ * @hw: pointer to the HW structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the EEPROM. So, the value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = rd32(E1000_EECD);
+	u32 mask;
+
+	mask = 1u << (count - 1);
+	if (nvm->type == e1000_nvm_eeprom_spi)
+		eecd |= E1000_EECD_DO;
+
+	do {
+		eecd &= ~E1000_EECD_DI;
+
+		if (data & mask)
+			eecd |= E1000_EECD_DI;
+
+		wr32(E1000_EECD, eecd);
+		wrfl();
+
+		udelay(nvm->delay_usec);
+
+		igb_raise_eec_clk(hw, &eecd);
+		igb_lower_eec_clk(hw, &eecd);
+
+		mask >>= 1;
+	} while (mask);
+
+	eecd &= ~E1000_EECD_DI;
+	wr32(E1000_EECD, eecd);
+}
+
+/**
+ * igb_shift_in_eec_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to the HW structure
+ * @count: number of bits to shift in
+ *
+ * In order to read a register from the EEPROM, we need to shift 'count' bits
+ * in from the EEPROM. Bits are "shifted in" by raising the clock input to
+ * the EEPROM (setting the SK bit), and then reading the value of the data out
+ * "DO" bit. During this "shifting in" process the data in "DI" bit should
+ * always be clear.
+ **/ +static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + eecd = rd32(E1000_EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + igb_raise_eec_clk(hw, &eecd); + + eecd = rd32(E1000_EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + igb_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + s32 ret_val = -E1000_ERR_NVM; + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = rd32(E1000_EERD); + else + reg = rd32(E1000_EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) { + ret_val = 0; + break; + } + + udelay(5); + } + + return ret_val; +} + +/** + * igb_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 igb_acquire_nvm(struct e1000_hw *hw) +{ + u32 eecd = rd32(E1000_EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + s32 ret_val = 0; + + + wr32(E1000_EECD, eecd | E1000_EECD_REQ); + eecd = rd32(E1000_EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + udelay(5); + eecd = rd32(E1000_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + wr32(E1000_EECD, eecd); + hw_dbg("Could not acquire NVM grant\n"); + ret_val = -E1000_ERR_NVM; + } + + return ret_val; +} + +/** + * igb_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void igb_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + wr32(E1000_EECD, eecd); + wrfl(); + udelay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + wr32(E1000_EECD, eecd); + wrfl(); + udelay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = rd32(E1000_EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + igb_lower_eec_clk(hw, &eecd); + } +} + +/** + * igb_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void igb_release_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + e1000_stop_nvm(hw); + + eecd = rd32(E1000_EECD); + eecd &= ~E1000_EECD_REQ; + wr32(E1000_EECD, eecd); +} + +/** + * igb_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. 
+ **/ +static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + s32 ret_val = 0; + u16 timeout = 0; + u8 spi_stat_reg; + + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + wr32(E1000_EECD, eecd); + wrfl(); + udelay(1); + timeout = NVM_MAX_RETRY_SPI; + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. + */ + while (timeout) { + igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + udelay(5); + igb_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + hw_dbg("SPI NVM Status error\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + } + +out: + return ret_val; +} + +/** + * igb_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + igb_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { + word_in = igb_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + nvm->ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + wr32(E1000_EERD, eerd); + ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (rd32(E1000_EERD) >> + E1000_NVM_RW_REG_DATA); + } + +out: + return ret_val; +} + +/** + * igb_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000_update_nvm_checksum is not called after this function , the + * EEPROM will most likley contain an invalid checksum. + **/ +s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = -E1000_ERR_NVM; + u16 widx = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + return ret_val; + } + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igb_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + + igb_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + igb_standby_nvm(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + + word_out = (word_out >> 8) | (word_out << 8); + igb_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + igb_standby_nvm(hw); + break; + } + } + usleep_range(1000, 2000); + nvm->ops.release(hw); + } + + return ret_val; +} + +/** + * igb_read_part_string - Read device part number + * @hw: pointer to the HW structure + * @part_num: pointer to device part number + * @part_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in part_num. 
+ **/ +s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pointer; + u16 offset; + u16 length; + + if (part_num == NULL) { + hw_dbg("PBA string buffer was null\n"); + ret_val = E1000_ERR_INVALID_ARGUMENT; + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + /* if nvm_data is not ptr guard the PBA must be in legacy format which + * means pointer is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + hw_dbg("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (part_num_size < 11) { + hw_dbg("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pointer */ + part_num[0] = (nvm_data >> 12) & 0xF; + part_num[1] = (nvm_data >> 8) & 0xF; + part_num[2] = (nvm_data >> 4) & 0xF; + part_num[3] = nvm_data & 0xF; + part_num[4] = (pointer >> 12) & 0xF; + part_num[5] = (pointer >> 8) & 0xF; + part_num[6] = '-'; + part_num[7] = 0; + part_num[8] = (pointer >> 4) & 0xF; + part_num[9] = pointer & 0xF; + + /* put a null character on the end of our string */ + part_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (part_num[offset] < 0xA) + part_num[offset] += '0'; + else if (part_num[offset] < 0x10) + part_num[offset] += 'A' - 0xA; + } + + goto out; + } + + ret_val = hw->nvm.ops.read(hw, pointer, 1, &length); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (length == 0xFFFF || length == 0) { + hw_dbg("NVM PBA number section invalid length\n"); + ret_val = E1000_ERR_NVM_PBA_SECTION; + goto out; + } + /* check if part_num buffer is big enough */ + if (part_num_size < (((u32)length * 2) - 1)) { + hw_dbg("PBA string buffer too small\n"); + ret_val = E1000_ERR_NO_SPACE; + goto out; + } + + /* trim pba length from start of string */ + pointer++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + part_num[offset * 2] = (u8)(nvm_data >> 8); + part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + part_num[offset * 2] = '\0'; + +out: + return ret_val; +} + +/** + * igb_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. 
+ **/ +s32 igb_read_mac_addr(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = rd32(E1000_RAH(0)); + rar_low = rd32(E1000_RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * igb_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 igb_validate_nvm_checksum(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 igb_update_nvm_checksum(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * igb_get_fw_version - Get firmware version information + * @hw: pointer to the HW structure + * @fw_vers: pointer to output structure + * + * unsupported MAC types will return all 0 version structure + **/ +void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) +{ + u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; + u8 q, hval, rem, result; + u16 comb_verh, comb_verl, comb_offset; + + memset(fw_vers, 0, sizeof(struct e1000_fw_version)); + + /* basic eeprom version numbers and bits used vary by part and by tool + * used to create the nvm images. Check which data format we have. 
+ */ + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + switch (hw->mac.type) { + case e1000_i211: + igb_read_invm_version(hw, fw_vers); + return; + case e1000_82575: + case e1000_82576: + case e1000_82580: + /* Use this format, unless EETRACK ID exists, + * then use alternate format + */ + if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK); + goto etrack_id; + } + break; + case e1000_i210: + if (!(igb_get_flash_presence_i210(hw))) { + igb_read_invm_version(hw, fw_vers); + return; + } + fallthrough; + case e1000_i350: + /* find combo image version */ + hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); + if ((comb_offset != 0x0) && + (comb_offset != NVM_VER_INVALID)) { + + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset + + 1), 1, &comb_verh); + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset), + 1, &comb_verl); + + /* get Option Rom version if it exists and is valid */ + if ((comb_verh && comb_verl) && + ((comb_verh != NVM_VER_INVALID) && + (comb_verl != NVM_VER_INVALID))) { + + fw_vers->or_valid = true; + fw_vers->or_major = + comb_verl >> NVM_COMB_VER_SHFT; + fw_vers->or_build = + (comb_verl << NVM_COMB_VER_SHFT) + | (comb_verh >> NVM_COMB_VER_SHFT); + fw_vers->or_patch = + comb_verh & NVM_COMB_VER_MASK; + } + } + break; + default: + return; + } + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + + /* check for old style version format in newer images*/ + if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) { + eeprom_verl = (fw_version & NVM_COMB_VER_MASK); + } else { + eeprom_verl = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + } + /* Convert minor value to hex before assigning to output struct + * Val to be converted will not be higher than 99, per tool output + */ + q = eeprom_verl / NVM_HEX_CONV; + hval = q * NVM_HEX_TENS; + rem = eeprom_verl % NVM_HEX_CONV; + result = hval + rem; + fw_vers->eep_minor = result; + +etrack_id: + if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) + | eeprom_verl; + } +} diff --git a/devices/igb/e1000_nvm-5.14-orig.h b/devices/igb/e1000_nvm-5.14-orig.h new file mode 100644 index 00000000..091cddf4 --- /dev/null +++ b/devices/igb/e1000_nvm-5.14-orig.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/
+
+#ifndef _E1000_NVM_H_
+#define _E1000_NVM_H_
+
+s32 igb_acquire_nvm(struct e1000_hw *hw);
+void igb_release_nvm(struct e1000_hw *hw);
+s32 igb_read_mac_addr(struct e1000_hw *hw);
+s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
+s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
+			 u32 part_num_size);
+s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
+s32 igb_update_nvm_checksum(struct e1000_hw *hw);
+
+struct e1000_fw_version {
+	u32 etrack_id;
+	u16 eep_major;
+	u16 eep_minor;
+	u16 eep_build;
+
+	u8 invm_major;
+	u8 invm_minor;
+	u8 invm_img_type;
+
+	bool or_valid;
+	u16 or_major;
+	u16 or_build;
+	u16 or_patch;
+};
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers);
+
+#endif
diff --git a/devices/igb/e1000_nvm-6.1-ethercat.c b/devices/igb/e1000_nvm-6.1-ethercat.c
new file mode 100644
index 00000000..54f5e155
--- /dev/null
+++ b/devices/igb/e1000_nvm-6.1-ethercat.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+
+#include "e1000_mac-6.1-ethercat.h"
+#include "e1000_nvm-6.1-ethercat.h"
+
+/**
+ * igb_raise_eec_clk - Raise EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Enable/Raise the EEPROM clock bit.
+ **/
+static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd | E1000_EECD_SK;
+	wr32(E1000_EECD, *eecd);
+	wrfl();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * igb_lower_eec_clk - Lower EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Clear/Lower the EEPROM clock bit.
+ **/
+static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd & ~E1000_EECD_SK;
+	wr32(E1000_EECD, *eecd);
+	wrfl();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * igb_shift_out_eec_bits - Shift data bits our to the EEPROM
+ * @hw: pointer to the HW structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the EEPROM. So, the value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = rd32(E1000_EECD);
+	u32 mask;
+
+	mask = 1u << (count - 1);
+	if (nvm->type == e1000_nvm_eeprom_spi)
+		eecd |= E1000_EECD_DO;
+
+	do {
+		eecd &= ~E1000_EECD_DI;
+
+		if (data & mask)
+			eecd |= E1000_EECD_DI;
+
+		wr32(E1000_EECD, eecd);
+		wrfl();
+
+		udelay(nvm->delay_usec);
+
+		igb_raise_eec_clk(hw, &eecd);
+		igb_lower_eec_clk(hw, &eecd);
+
+		mask >>= 1;
+	} while (mask);
+
+	eecd &= ~E1000_EECD_DI;
+	wr32(E1000_EECD, eecd);
+}
+
+/**
+ * igb_shift_in_eec_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to the HW structure
+ * @count: number of bits to shift in
+ *
+ * In order to read a register from the EEPROM, we need to shift 'count' bits
+ * in from the EEPROM. Bits are "shifted in" by raising the clock input to
+ * the EEPROM (setting the SK bit), and then reading the value of the data out
+ * "DO" bit. During this "shifting in" process the data in "DI" bit should
+ * always be clear.
+ **/ +static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + eecd = rd32(E1000_EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + igb_raise_eec_clk(hw, &eecd); + + eecd = rd32(E1000_EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + igb_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + s32 ret_val = -E1000_ERR_NVM; + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = rd32(E1000_EERD); + else + reg = rd32(E1000_EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) { + ret_val = 0; + break; + } + + udelay(5); + } + + return ret_val; +} + +/** + * igb_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 igb_acquire_nvm(struct e1000_hw *hw) +{ + u32 eecd = rd32(E1000_EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + s32 ret_val = 0; + + + wr32(E1000_EECD, eecd | E1000_EECD_REQ); + eecd = rd32(E1000_EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + udelay(5); + eecd = rd32(E1000_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + wr32(E1000_EECD, eecd); + hw_dbg("Could not acquire NVM grant\n"); + ret_val = -E1000_ERR_NVM; + } + + return ret_val; +} + +/** + * igb_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void igb_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + wr32(E1000_EECD, eecd); + wrfl(); + udelay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + wr32(E1000_EECD, eecd); + wrfl(); + udelay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = rd32(E1000_EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + igb_lower_eec_clk(hw, &eecd); + } +} + +/** + * igb_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void igb_release_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + e1000_stop_nvm(hw); + + eecd = rd32(E1000_EECD); + eecd &= ~E1000_EECD_REQ; + wr32(E1000_EECD, eecd); +} + +/** + * igb_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. 
+ **/ +static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + s32 ret_val = 0; + u16 timeout = 0; + u8 spi_stat_reg; + + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + wr32(E1000_EECD, eecd); + wrfl(); + udelay(1); + timeout = NVM_MAX_RETRY_SPI; + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. + */ + while (timeout) { + igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + udelay(5); + igb_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + hw_dbg("SPI NVM Status error\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + } + +out: + return ret_val; +} + +/** + * igb_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + igb_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { + word_in = igb_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + nvm->ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + wr32(E1000_EERD, eerd); + ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (rd32(E1000_EERD) >> + E1000_NVM_RW_REG_DATA); + } + +out: + return ret_val; +} + +/** + * igb_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000_update_nvm_checksum is not called after this function , the + * EEPROM will most likley contain an invalid checksum. + **/ +s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = -E1000_ERR_NVM; + u16 widx = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + return ret_val; + } + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igb_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + + igb_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + igb_standby_nvm(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + + word_out = (word_out >> 8) | (word_out << 8); + igb_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + igb_standby_nvm(hw); + break; + } + } + usleep_range(1000, 2000); + nvm->ops.release(hw); + } + + return ret_val; +} + +/** + * igb_read_part_string - Read device part number + * @hw: pointer to the HW structure + * @part_num: pointer to device part number + * @part_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in part_num. 
+ **/ +s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pointer; + u16 offset; + u16 length; + + if (part_num == NULL) { + hw_dbg("PBA string buffer was null\n"); + ret_val = E1000_ERR_INVALID_ARGUMENT; + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + /* if nvm_data is not ptr guard the PBA must be in legacy format which + * means pointer is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + hw_dbg("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (part_num_size < 11) { + hw_dbg("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pointer */ + part_num[0] = (nvm_data >> 12) & 0xF; + part_num[1] = (nvm_data >> 8) & 0xF; + part_num[2] = (nvm_data >> 4) & 0xF; + part_num[3] = nvm_data & 0xF; + part_num[4] = (pointer >> 12) & 0xF; + part_num[5] = (pointer >> 8) & 0xF; + part_num[6] = '-'; + part_num[7] = 0; + part_num[8] = (pointer >> 4) & 0xF; + part_num[9] = pointer & 0xF; + + /* put a null character on the end of our string */ + part_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (part_num[offset] < 0xA) + part_num[offset] += '0'; + else if (part_num[offset] < 0x10) + part_num[offset] += 'A' - 0xA; + } + + goto out; + } + + ret_val = hw->nvm.ops.read(hw, pointer, 1, &length); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (length == 0xFFFF || length == 0) { + hw_dbg("NVM PBA number section invalid length\n"); + ret_val = E1000_ERR_NVM_PBA_SECTION; + goto out; + } + /* check if part_num buffer is big enough */ + if (part_num_size < (((u32)length * 2) - 1)) { + hw_dbg("PBA string buffer too small\n"); + ret_val = E1000_ERR_NO_SPACE; + goto out; + } + + /* trim pba length from start of string */ + pointer++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + part_num[offset * 2] = (u8)(nvm_data >> 8); + part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + part_num[offset * 2] = '\0'; + +out: + return ret_val; +} + +/** + * igb_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. 
+ **/ +s32 igb_read_mac_addr(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = rd32(E1000_RAH(0)); + rar_low = rd32(E1000_RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * igb_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 igb_validate_nvm_checksum(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 igb_update_nvm_checksum(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * igb_get_fw_version - Get firmware version information + * @hw: pointer to the HW structure + * @fw_vers: pointer to output structure + * + * unsupported MAC types will return all 0 version structure + **/ +void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) +{ + u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; + u8 q, hval, rem, result; + u16 comb_verh, comb_verl, comb_offset; + + memset(fw_vers, 0, sizeof(struct e1000_fw_version)); + + /* basic eeprom version numbers and bits used vary by part and by tool + * used to create the nvm images. Check which data format we have. 
+ */ + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + switch (hw->mac.type) { + case e1000_i211: + igb_read_invm_version(hw, fw_vers); + return; + case e1000_82575: + case e1000_82576: + case e1000_82580: + /* Use this format, unless EETRACK ID exists, + * then use alternate format + */ + if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK); + goto etrack_id; + } + break; + case e1000_i210: + if (!(igb_get_flash_presence_i210(hw))) { + igb_read_invm_version(hw, fw_vers); + return; + } + fallthrough; + case e1000_i350: + /* find combo image version */ + hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); + if ((comb_offset != 0x0) && + (comb_offset != NVM_VER_INVALID)) { + + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset + + 1), 1, &comb_verh); + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset), + 1, &comb_verl); + + /* get Option Rom version if it exists and is valid */ + if ((comb_verh && comb_verl) && + ((comb_verh != NVM_VER_INVALID) && + (comb_verl != NVM_VER_INVALID))) { + + fw_vers->or_valid = true; + fw_vers->or_major = + comb_verl >> NVM_COMB_VER_SHFT; + fw_vers->or_build = + (comb_verl << NVM_COMB_VER_SHFT) + | (comb_verh >> NVM_COMB_VER_SHFT); + fw_vers->or_patch = + comb_verh & NVM_COMB_VER_MASK; + } + } + break; + default: + return; + } + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + + /* check for old style version format in newer images*/ + if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) { + eeprom_verl = (fw_version & NVM_COMB_VER_MASK); + } else { + eeprom_verl = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + } + /* Convert minor value to hex before assigning to output struct + * Val to be converted will not be higher than 99, per tool output + */ + q = eeprom_verl / NVM_HEX_CONV; + hval = q * NVM_HEX_TENS; + rem = eeprom_verl % NVM_HEX_CONV; + result = hval + rem; + fw_vers->eep_minor = result; + +etrack_id: + if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) + | eeprom_verl; + } +} diff --git a/devices/igb/e1000_nvm-6.1-ethercat.h b/devices/igb/e1000_nvm-6.1-ethercat.h new file mode 100644 index 00000000..091cddf4 --- /dev/null +++ b/devices/igb/e1000_nvm-6.1-ethercat.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
 */
+
+#ifndef _E1000_NVM_H_
+#define _E1000_NVM_H_
+
+s32 igb_acquire_nvm(struct e1000_hw *hw);
+void igb_release_nvm(struct e1000_hw *hw);
+s32 igb_read_mac_addr(struct e1000_hw *hw);
+s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
+s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
+			 u32 part_num_size);
+s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
+s32 igb_update_nvm_checksum(struct e1000_hw *hw);
+
+struct e1000_fw_version {
+	u32 etrack_id;
+	u16 eep_major;
+	u16 eep_minor;
+	u16 eep_build;
+
+	u8 invm_major;
+	u8 invm_minor;
+	u8 invm_img_type;
+
+	bool or_valid;
+	u16 or_major;
+	u16 or_build;
+	u16 or_patch;
+};
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers);
+
+#endif
diff --git a/devices/igb/e1000_nvm-6.1-orig.c b/devices/igb/e1000_nvm-6.1-orig.c
new file mode 100644
index 00000000..fa136e6e
--- /dev/null
+++ b/devices/igb/e1000_nvm-6.1-orig.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+
+#include "e1000_mac.h"
+#include "e1000_nvm.h"
+
+/**
+ * igb_raise_eec_clk - Raise EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Enable/Raise the EEPROM clock bit.
+ **/
+static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd | E1000_EECD_SK;
+	wr32(E1000_EECD, *eecd);
+	wrfl();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * igb_lower_eec_clk - Lower EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Clear/Lower the EEPROM clock bit.
+ **/
+static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd & ~E1000_EECD_SK;
+	wr32(E1000_EECD, *eecd);
+	wrfl();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * igb_shift_out_eec_bits - Shift data bits our to the EEPROM
+ * @hw: pointer to the HW structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the EEPROM. So, the value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = rd32(E1000_EECD);
+	u32 mask;
+
+	mask = 1u << (count - 1);
+	if (nvm->type == e1000_nvm_eeprom_spi)
+		eecd |= E1000_EECD_DO;
+
+	do {
+		eecd &= ~E1000_EECD_DI;
+
+		if (data & mask)
+			eecd |= E1000_EECD_DI;
+
+		wr32(E1000_EECD, eecd);
+		wrfl();
+
+		udelay(nvm->delay_usec);
+
+		igb_raise_eec_clk(hw, &eecd);
+		igb_lower_eec_clk(hw, &eecd);
+
+		mask >>= 1;
+	} while (mask);
+
+	eecd &= ~E1000_EECD_DI;
+	wr32(E1000_EECD, eecd);
+}
+
+/**
+ * igb_shift_in_eec_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to the HW structure
+ * @count: number of bits to shift in
+ *
+ * In order to read a register from the EEPROM, we need to shift 'count' bits
+ * in from the EEPROM. Bits are "shifted in" by raising the clock input to
+ * the EEPROM (setting the SK bit), and then reading the value of the data out
+ * "DO" bit. During this "shifting in" process the data in "DI" bit should
+ * always be clear.
+ **/ +static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + eecd = rd32(E1000_EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + igb_raise_eec_clk(hw, &eecd); + + eecd = rd32(E1000_EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + igb_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + s32 ret_val = -E1000_ERR_NVM; + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = rd32(E1000_EERD); + else + reg = rd32(E1000_EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) { + ret_val = 0; + break; + } + + udelay(5); + } + + return ret_val; +} + +/** + * igb_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 igb_acquire_nvm(struct e1000_hw *hw) +{ + u32 eecd = rd32(E1000_EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + s32 ret_val = 0; + + + wr32(E1000_EECD, eecd | E1000_EECD_REQ); + eecd = rd32(E1000_EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + udelay(5); + eecd = rd32(E1000_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + wr32(E1000_EECD, eecd); + hw_dbg("Could not acquire NVM grant\n"); + ret_val = -E1000_ERR_NVM; + } + + return ret_val; +} + +/** + * igb_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void igb_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + wr32(E1000_EECD, eecd); + wrfl(); + udelay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + wr32(E1000_EECD, eecd); + wrfl(); + udelay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = rd32(E1000_EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + igb_lower_eec_clk(hw, &eecd); + } +} + +/** + * igb_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void igb_release_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + e1000_stop_nvm(hw); + + eecd = rd32(E1000_EECD); + eecd &= ~E1000_EECD_REQ; + wr32(E1000_EECD, eecd); +} + +/** + * igb_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. 
+ **/ +static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + s32 ret_val = 0; + u16 timeout = 0; + u8 spi_stat_reg; + + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + wr32(E1000_EECD, eecd); + wrfl(); + udelay(1); + timeout = NVM_MAX_RETRY_SPI; + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. + */ + while (timeout) { + igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + udelay(5); + igb_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + hw_dbg("SPI NVM Status error\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + } + +out: + return ret_val; +} + +/** + * igb_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + igb_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { + word_in = igb_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + nvm->ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + wr32(E1000_EERD, eerd); + ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (rd32(E1000_EERD) >> + E1000_NVM_RW_REG_DATA); + } + +out: + return ret_val; +} + +/** + * igb_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000_update_nvm_checksum is not called after this function , the + * EEPROM will most likley contain an invalid checksum. + **/ +s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = -E1000_ERR_NVM; + u16 widx = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + return ret_val; + } + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igb_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + + igb_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + igb_standby_nvm(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + + word_out = (word_out >> 8) | (word_out << 8); + igb_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + igb_standby_nvm(hw); + break; + } + } + usleep_range(1000, 2000); + nvm->ops.release(hw); + } + + return ret_val; +} + +/** + * igb_read_part_string - Read device part number + * @hw: pointer to the HW structure + * @part_num: pointer to device part number + * @part_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in part_num. 
+ **/ +s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pointer; + u16 offset; + u16 length; + + if (part_num == NULL) { + hw_dbg("PBA string buffer was null\n"); + ret_val = E1000_ERR_INVALID_ARGUMENT; + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + /* if nvm_data is not ptr guard the PBA must be in legacy format which + * means pointer is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + hw_dbg("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (part_num_size < 11) { + hw_dbg("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pointer */ + part_num[0] = (nvm_data >> 12) & 0xF; + part_num[1] = (nvm_data >> 8) & 0xF; + part_num[2] = (nvm_data >> 4) & 0xF; + part_num[3] = nvm_data & 0xF; + part_num[4] = (pointer >> 12) & 0xF; + part_num[5] = (pointer >> 8) & 0xF; + part_num[6] = '-'; + part_num[7] = 0; + part_num[8] = (pointer >> 4) & 0xF; + part_num[9] = pointer & 0xF; + + /* put a null character on the end of our string */ + part_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (part_num[offset] < 0xA) + part_num[offset] += '0'; + else if (part_num[offset] < 0x10) + part_num[offset] += 'A' - 0xA; + } + + goto out; + } + + ret_val = hw->nvm.ops.read(hw, pointer, 1, &length); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (length == 0xFFFF || length == 0) { + hw_dbg("NVM PBA number section invalid length\n"); + ret_val = E1000_ERR_NVM_PBA_SECTION; + goto out; + } + /* check if part_num buffer is big enough */ + if (part_num_size < (((u32)length * 2) - 1)) { + hw_dbg("PBA string buffer too small\n"); + ret_val = E1000_ERR_NO_SPACE; + goto out; + } + + /* trim pba length from start of string */ + pointer++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + part_num[offset * 2] = (u8)(nvm_data >> 8); + part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + part_num[offset * 2] = '\0'; + +out: + return ret_val; +} + +/** + * igb_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. 
+ **/ +s32 igb_read_mac_addr(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = rd32(E1000_RAH(0)); + rar_low = rd32(E1000_RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * igb_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 igb_validate_nvm_checksum(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 igb_update_nvm_checksum(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * igb_get_fw_version - Get firmware version information + * @hw: pointer to the HW structure + * @fw_vers: pointer to output structure + * + * unsupported MAC types will return all 0 version structure + **/ +void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) +{ + u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; + u8 q, hval, rem, result; + u16 comb_verh, comb_verl, comb_offset; + + memset(fw_vers, 0, sizeof(struct e1000_fw_version)); + + /* basic eeprom version numbers and bits used vary by part and by tool + * used to create the nvm images. Check which data format we have. 
+ */ + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + switch (hw->mac.type) { + case e1000_i211: + igb_read_invm_version(hw, fw_vers); + return; + case e1000_82575: + case e1000_82576: + case e1000_82580: + /* Use this format, unless EETRACK ID exists, + * then use alternate format + */ + if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK); + goto etrack_id; + } + break; + case e1000_i210: + if (!(igb_get_flash_presence_i210(hw))) { + igb_read_invm_version(hw, fw_vers); + return; + } + fallthrough; + case e1000_i350: + /* find combo image version */ + hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); + if ((comb_offset != 0x0) && + (comb_offset != NVM_VER_INVALID)) { + + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset + + 1), 1, &comb_verh); + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset), + 1, &comb_verl); + + /* get Option Rom version if it exists and is valid */ + if ((comb_verh && comb_verl) && + ((comb_verh != NVM_VER_INVALID) && + (comb_verl != NVM_VER_INVALID))) { + + fw_vers->or_valid = true; + fw_vers->or_major = + comb_verl >> NVM_COMB_VER_SHFT; + fw_vers->or_build = + (comb_verl << NVM_COMB_VER_SHFT) + | (comb_verh >> NVM_COMB_VER_SHFT); + fw_vers->or_patch = + comb_verh & NVM_COMB_VER_MASK; + } + } + break; + default: + return; + } + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + + /* check for old style version format in newer images*/ + if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) { + eeprom_verl = (fw_version & NVM_COMB_VER_MASK); + } else { + eeprom_verl = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + } + /* Convert minor value to hex before assigning to output struct + * Val to be converted will not be higher than 99, per tool output + */ + q = eeprom_verl / NVM_HEX_CONV; + hval = q * NVM_HEX_TENS; + rem = eeprom_verl % NVM_HEX_CONV; + result = hval + rem; + fw_vers->eep_minor = result; + +etrack_id: + if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) + | eeprom_verl; + } +} diff --git a/devices/igb/e1000_nvm-6.1-orig.h b/devices/igb/e1000_nvm-6.1-orig.h new file mode 100644 index 00000000..091cddf4 --- /dev/null +++ b/devices/igb/e1000_nvm-6.1-orig.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
 */
+
+#ifndef _E1000_NVM_H_
+#define _E1000_NVM_H_
+
+s32 igb_acquire_nvm(struct e1000_hw *hw);
+void igb_release_nvm(struct e1000_hw *hw);
+s32 igb_read_mac_addr(struct e1000_hw *hw);
+s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
+s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
+			 u32 part_num_size);
+s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
+s32 igb_update_nvm_checksum(struct e1000_hw *hw);
+
+struct e1000_fw_version {
+	u32 etrack_id;
+	u16 eep_major;
+	u16 eep_minor;
+	u16 eep_build;
+
+	u8 invm_major;
+	u8 invm_minor;
+	u8 invm_img_type;
+
+	bool or_valid;
+	u16 or_major;
+	u16 or_build;
+	u16 or_patch;
+};
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers);
+
+#endif
diff --git a/devices/igb/e1000_phy-5.14-ethercat.c b/devices/igb/e1000_phy-5.14-ethercat.c
new file mode 100644
index 00000000..2a72f68c
--- /dev/null
+++ b/devices/igb/e1000_phy-5.14-ethercat.c
@@ -0,0 +1,2631 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+
+#include "e1000_mac-5.14-ethercat.h"
+#include "e1000_phy-5.14-ethercat.h"
+
+static s32 igb_phy_setup_autoneg(struct e1000_hw *hw);
+static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
+					     u16 *phy_ctrl);
+static s32 igb_wait_autoneg(struct e1000_hw *hw);
+static s32 igb_set_master_slave_mode(struct e1000_hw *hw);
+
+/* Cable length tables */
+static const u16 e1000_m88_cable_length_table[] = {
+	0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+
+static const u16 e1000_igp_2_cable_length_table[] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+	0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+	6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+	21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+	40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+	60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+	83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+	104, 109, 114, 118, 121, 124};
+
+/**
+ * igb_check_reset_block - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Read the PHY management control register and check whether a PHY reset
+ * is blocked. If a reset is not blocked return 0, otherwise
+ * return E1000_BLK_PHY_RESET (12).
+ **/
+s32 igb_check_reset_block(struct e1000_hw *hw)
+{
+	u32 manc;
+
+	manc = rd32(E1000_MANC);
+
+	return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
+}
+
+/**
+ * igb_get_phy_id - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ **/ +s32 igb_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + + /* ensure PHY page selection to fix misconfigured i210 */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) + phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0); + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + udelay(20); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return ret_val; +} + +/** + * igb_phy_reset_dsp - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +static s32 igb_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.write_reg)) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); + +out: + return ret_val; +} + +/** + * igb_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + wr32(E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = rd32(E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg("MDI Read did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + *data = (u16) mdic; + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
+ */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + wr32(E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = rd32(E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg("MDI Write did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_read_phy_reg_i2c - Read PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the i2c interface and stores the + * retrieved information in data. + **/ +s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + /* Need to byte-swap the 16-bit value. */ + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + + return 0; +} + +/** + * igb_write_phy_reg_i2c - Write PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the i2c interface. + **/ +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + u16 phy_data_swapped; + + /* Prevent overwriting SFP I2C EEPROM which is at A0 address.*/ + if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { + hw_dbg("PHY I2C Address %d is out of range.\n", + hw->phy.addr); + return -E1000_ERR_CONFIG; + } + + /* Swap the data bytes for the I2C interface */ + phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * igb_read_sfp_data_byte - Reads SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer + * + * Reads one byte from SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + hw_dbg("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing with the + * EEPROM to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + data_local = rd32(E1000_I2CCMD); + if (data_local & E1000_I2CCMD_READY) + break; + } + if (!(data_local & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (data_local & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + *data = (u8) data_local & 0xFF; + + return 0; +} + +/** + * igb_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = igb_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release(hw); + goto out; + } + } + + ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
+ **/ +s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = igb_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release(hw); + goto out; + } + } + + ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 igb_copper_link_setup_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + if (phy->type == e1000_phy_82580) { + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + } + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data); + if (ret_val) + goto out; + + phy_data |= I82580_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; + + ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); + if (ret_val) + goto out; + + /* Set MDI/MDIX mode */ + ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); + if (ret_val) + goto out; + phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 igb_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + /* Enable CRS on TX. This must be set for half-duplex operation. 
*/ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + if (phy->revision < E1000_REVISION_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == E1000_REVISION_2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. */ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + goto out; + } + + /* Commit the changes. */ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + goto out; + } + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. + * Also enables and sets the downshift parameters. + **/ +s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) + return 0; + + /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + /* M88E1112 does not support this mode) */ + if (phy->id != M88E1112_E_PHY_ID) { + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + } + fallthrough; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift and setting it to X6 */ + if (phy->id == M88E1543_E_PHY_ID) { + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE; + ret_val = + phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; + phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; + phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Commit the changes. */ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + ret_val = igb_set_master_slave_mode(hw); + if (ret_val) + return ret_val; + + return 0; +} + +/** + * igb_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 igb_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msleep(100); + + /* The NVM settings will configure LPLU in D3 for + * non-IGP1 PHYs. 
+ */ + if (phy->type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + if (phy->ops.set_d3_lplu_state) + ret_val = phy->ops.set_d3_lplu_state(hw, false); + if (ret_val) { + hw_dbg("Error Disabling LPLU D3\n"); + goto out; + } + } + + /* disable lplu d0 during driver init */ + ret_val = phy->ops.set_d0_lplu_state(hw, false); + if (ret_val) { + hw_dbg("Error Disabling LPLU D0\n"); + goto out; + } + /* Configure mdi-mdix settings */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + goto out; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. + */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + + /* Set auto Master/Slave resolution process */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + /* load defaults for future use */ + phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? + ((data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : + e1000_ms_auto; + + switch (phy->ms_type) { + case e1000_ms_force_master: + data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + data |= CR_1000T_MS_ENABLE; + data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + data &= ~CR_1000T_MS_ENABLE; + break; + default: + break; + } + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +static s32 igb_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. 
+ */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igb_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = igb_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** + * igb_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + goto out; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? 
*/ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* Flow control (both RX and TX) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + goto out; + + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + ret_val = phy->ops.write_reg(hw, + PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 igb_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. 
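
The flow-control switch above reduces to two bits of the MII auto-negotiation advertisement word: PAUSE (bit 10) and ASM_DIR (bit 11). A compact sketch of that mapping, with the two standard bit values as local defines; note that rx-only pause still advertises both bits, and the transmit side is trimmed later in e1000_config_fc_after_link_up():

    #include <stdint.h>
    #include <stdio.h>

    #define NWAY_AR_PAUSE   0x0400 /* symmetric pause supported */
    #define NWAY_AR_ASM_DIR 0x0800 /* asymmetric pause direction */

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    /* Clear both pause bits in the advertisement word, then set them per mode. */
    static uint16_t apply_fc_mode(uint16_t adv, enum fc_mode fc)
    {
        adv &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);

        switch (fc) {
        case FC_NONE:
            break;                                  /* advertise no pause at all */
        case FC_RX_PAUSE:
            adv |= NWAY_AR_ASM_DIR | NWAY_AR_PAUSE; /* rx-only still advertises both */
            break;
        case FC_TX_PAUSE:
            adv |= NWAY_AR_ASM_DIR;                 /* asymmetric, tx direction only */
            break;
        case FC_FULL:
            adv |= NWAY_AR_ASM_DIR | NWAY_AR_PAUSE;
            break;
        }
        return adv;
    }

    int main(void)
    {
        printf("rx_pause -> 0x%04X, tx_pause -> 0x%04X\n",
               apply_fc_mode(0, FC_RX_PAUSE), apply_fc_mode(0, FC_TX_PAUSE));
        return 0;
    }
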
+ */ + ret_val = igb_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igb_config_collision_dist(hw); + ret_val = igb_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + goto out; + + hw_dbg("IGP PSCR: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); + if (ret_val) + goto out; + + if (!link) + hw_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on TX must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + /* I210 and I211 devices support Auto-Crossover in forced operation. */ + if (phy->type != e1000_phy_i210) { + /* Clear Auto-Crossover to force MDI manually. M88E1000 + * requires MDI forced whenever speed and duplex are forced. 
+ */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + goto out; + + hw_dbg("M88E1000 PSCR: %X\n", phy_data); + } + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Reset the phy to commit changes. */ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) + goto out; + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + if (ret_val) + goto out; + + if (!link) { + bool reset_dsp = true; + + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + reset_dsp = false; + break; + default: + if (hw->phy.type != e1000_phy_m88) + reset_dsp = false; + break; + } + if (!reset_dsp) { + hw_dbg("Link taking longer than expected.\n"); + } else { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = phy->ops.write_reg(hw, + M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + goto out; + ret_val = igb_phy_reset_dsp(hw); + if (ret_val) + goto out; + } + } + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + goto out; + } + + if (hw->phy.type != e1000_phy_m88 || + hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID || + hw->phy.id == M88E1543_E_PHY_ID || + hw->phy.id == M88E1512_E_PHY_ID || + hw->phy.id == I210_I_PHY_ID) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the PHY_CONTROL register for these settings to + * take affect. 
+ **/ +static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, + u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = rd32(E1000_CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + hw_dbg("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + hw_dbg("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl |= MII_CR_SPEED_10; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + hw_dbg("Forcing 10mb\n"); + } + + igb_config_collision_dist(hw); + + wr32(E1000_CTRL, ctrl); +} + +/** + * igb_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 data; + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + } + +out: + return ret_val; +} + +/** + * igb_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. + **/ +s32 igb_check_downshift(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + switch (phy->type) { + case e1000_phy_i210: + case e1000_phy_m88: + case e1000_phy_gg82563: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: + case e1000_phy_igp: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + ret_val = 0; + goto out; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = (phy_data & mask) ? true : false; + +out: + return ret_val; +} + +/** + * igb_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 igb_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * igb_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +static s32 igb_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + /* Polarity is determined based on the speed of + * our connection. 
+ */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). + */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = phy->ops.read_reg(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = (data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + +out: + return ret_val; +} + +/** + * igb_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + **/ +static s32 igb_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 i, phy_status; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * igb_phy_has_link - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = 0; + u16 i, phy_status; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val && usec_interval > 0) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); + } + + *success = (i < iterations) ? true : false; + + return ret_val; +} + +/** + * igb_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. 
The m88 PHY has four + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 igb_get_cable_length_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, phy_data2, index, default_page, is_cm; + int len_tot = 0; + u16 len_min; + u16 len_max; + + switch (hw->phy.id) { + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: + case I210_I_PHY_ID: + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) + goto out; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + if (ret_val) + goto out; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Get cable length from Pair 0 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL0, &phy_data); + if (ret_val) + goto out; + + phy->pair_length[0] = phy_data / (is_cm ? 100 : 1); + len_tot = phy->pair_length[0]; + len_min = phy->pair_length[0]; + len_max = phy->pair_length[0]; + + /* Get cable length from Pair 1 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL1, &phy_data); + if (ret_val) + goto out; + + phy->pair_length[1] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[1]; + len_min = min(len_min, phy->pair_length[1]); + len_max = max(len_max, phy->pair_length[1]); + + /* Get cable length from Pair 2 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL2, &phy_data); + if (ret_val) + goto out; + + phy->pair_length[2] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[2]; + len_min = min(len_min, phy->pair_length[2]); + len_max = max(len_max, phy->pair_length[2]); + + /* Get cable length from Pair 3 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL3, &phy_data); + if (ret_val) + goto out; + + phy->pair_length[3] = phy_data / (is_cm ? 
100 : 1); + len_tot += phy->pair_length[3]; + len_min = min(len_min, phy->pair_length[3]); + len_max = max(len_max, phy->pair_length[3]); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = len_min; + phy->max_cable_length = len_max; + phy->cable_length = len_tot / 4; + + /* Reset the page selec to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + goto out; + break; + case M88E1112_E_PHY_ID: + /* Remember the original page select and set it to 5 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, + &phy_data); + if (ret_val) + goto out; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + + phy->max_cable_length) / 2; + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + goto out; + + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + goto out; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK; + + /* Array index bound check. */ + if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) || + (cur_agc_index == 0)) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + /* Remove min & max AGC values from calculation. 
*/ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * igb_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. + **/ +s32 igb_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + if (phy->media_type != e1000_media_type_copper) { + hw_dbg("Phy info is only valid for copper media\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) + ? true : false; + + ret_val = igb_check_polarity_m88(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false; + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. 
+ **/ +s32 igb_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = igb_check_polarity_igp(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_phy_sw_reset - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/write the control register reset bit to the PHY. + **/ +s32 igb_phy_sw_reset(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 phy_ctrl; + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= MII_CR_RESET; + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + udelay(1); + +out: + return ret_val; +} + +/** + * igb_phy_hw_reset - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +s32 igb_phy_hw_reset(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + ret_val = igb_check_reset_block(hw); + if (ret_val) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; + + ctrl = rd32(E1000_CTRL); + wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); + wrfl(); + + udelay(phy->reset_delay_us); + + wr32(E1000_CTRL, ctrl); + wrfl(); + + udelay(150); + + phy->ops.release(hw); + + ret_val = phy->ops.get_cfg_done(hw); + +out: + return ret_val; +} + +/** + * igb_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. 
+ **/ +s32 igb_phy_init_script_igp3(struct e1000_hw *hw) +{ + hw_dbg("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + hw->phy.ops.write_reg(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0); + /* Add 4% to TX amplitude in Giga mode */ + hw->phy.ops.write_reg(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + hw->phy.ops.write_reg(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + hw->phy.ops.write_reg(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4); + /* Force 1000 ( required for enabling PHY regs configuration) */ + hw->phy.ops.write_reg(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + hw->phy.ops.write_reg(hw, 0x1F30, 0x1606); + /* Disable NPDFE */ + hw->phy.ops.write_reg(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + hw->phy.ops.write_reg(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + hw->phy.ops.write_reg(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + hw->phy.ops.write_reg(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + hw->phy.ops.write_reg(hw, 0x1796, 0x0008); + /* Change cg_icount + enable integbp for channels BCD */ + hw->phy.ops.write_reg(hw, 0x1798, 0xD008); + /* Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + hw->phy.ops.write_reg(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + hw->phy.ops.write_reg(hw, 0x187A, 0x0800); + /* Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + hw->phy.ops.write_reg(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + hw->phy.ops.write_reg(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes */ + hw->phy.ops.write_reg(hw, 0x0014, 0x0045); + /* Restart AN, Speed selection is 1000 */ + hw->phy.ops.write_reg(hw, 0x0000, 0x1340); + + return 0; +} + +/** + * igb_initialize_M88E1512_phy - Initialize M88E1512 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvel 1512 to work correctly with Avoton. + **/ +s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + /* Switch to PHY page 0xFF. 
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + + /* msec_delay(1000); */ + usleep_range(1000, 2000); +out: + return ret_val; +} + +/** + * igb_initialize_M88E1543_phy - Initialize M88E1512 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvell 1543 to work correctly with Avoton. + **/ +s32 igb_initialize_M88E1543_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + /* Switch to PHY page 0xFF. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x0C0D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Switch to PHY page 1. 
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1); + if (ret_val) + goto out; + + /* Change mode to 1000BASE-X/SGMII and autoneg enable */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + + /* msec_delay(1000); */ + usleep_range(1000, 2000); +out: + return ret_val; +} + +/** + * igb_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, restore the link to previous settings. + **/ +void igb_power_up_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * igb_power_down_phy_copper - Power down copper PHY + * @hw: pointer to the HW structure + * + * Power down PHY to save power when interface is down and wake on lan + * is not enabled. + **/ +void igb_power_down_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); + usleep_range(1000, 2000); +} + +/** + * igb_check_polarity_82580 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +static s32 igb_check_polarity_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + + ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); + + if (!ret_val) + phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Clear Auto-Crossover to force MDI manually. 82580 requires MDI + * forced whenever speed and duplex are forced. 
+ */ + ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; + + ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); + if (ret_val) + goto out; + + hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + if (ret_val) + goto out; + + if (!link) + hw_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_get_phy_info_82580 - Retrieve I82580 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 igb_get_phy_info_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = igb_check_polarity_82580(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false; + + if ((data & I82580_PHY_STATUS2_SPEED_MASK) == + I82580_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_get_cable_length_82580 - Determine cable length for 82580 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. 
+ **/ +s32 igb_get_cable_length_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + + ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); + if (ret_val) + goto out; + + length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> + I82580_DSTATUS_CABLE_LENGTH_SHIFT; + + if (length == E1000_CABLE_LENGTH_UNDEFINED) + ret_val = -E1000_ERR_PHY; + + phy->cable_length = length; + +out: + return ret_val; +} + +/** + * igb_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 igb_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + fallthrough; + default: + break; + } + + return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); +} diff --git a/devices/igb/e1000_phy-5.14-ethercat.h b/devices/igb/e1000_phy-5.14-ethercat.h new file mode 100644 index 00000000..5894e4b1 --- /dev/null +++ b/devices/igb/e1000_phy-5.14-ethercat.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#ifndef _E1000_PHY_H_ +#define _E1000_PHY_H_ + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +s32 igb_check_downshift(struct e1000_hw *hw); +s32 igb_check_reset_block(struct e1000_hw *hw); +s32 igb_copper_link_setup_igp(struct e1000_hw *hw); +s32 igb_copper_link_setup_m88(struct e1000_hw *hw); +s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 igb_get_cable_length_m88(struct e1000_hw *hw); +s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw); +s32 igb_get_cable_length_igp_2(struct e1000_hw *hw); +s32 igb_get_phy_id(struct e1000_hw *hw); +s32 igb_get_phy_info_igp(struct e1000_hw *hw); +s32 igb_get_phy_info_m88(struct e1000_hw *hw); +s32 igb_phy_sw_reset(struct e1000_hw *hw); +s32 igb_phy_hw_reset(struct e1000_hw *hw); +s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 igb_setup_copper_link(struct e1000_hw *hw); +s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +void igb_power_up_phy_copper(struct e1000_hw *hw); +void igb_power_down_phy_copper(struct e1000_hw *hw); +s32 igb_phy_init_script_igp3(struct e1000_hw *hw); +s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw); +s32 igb_initialize_M88E1543_phy(struct e1000_hw *hw); +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 
data); +s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +s32 igb_copper_link_setup_82580(struct e1000_hw *hw); +s32 igb_get_phy_info_82580(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); +s32 igb_get_cable_length_82580(struct e1000_hw *hw); +s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_check_polarity_m88(struct e1000_hw *hw); + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define I82580_ADDR_REG 16 +#define I82580_CFG_REG 22 +#define I82580_CFG_ASSERT_CRS_ON_TX BIT(15) +#define I82580_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift 100/10 */ +#define I82580_CTRL_REG 23 +#define I82580_CTRL_DOWNSHIFT_MASK (7u << 10) + +/* 82580 specific PHY registers */ +#define I82580_PHY_CTRL_2 18 +#define I82580_PHY_LBK_CTRL 19 +#define I82580_PHY_STATUS_2 26 +#define I82580_PHY_DIAG_STATUS 31 + +/* I82580 PHY Status 2 */ +#define I82580_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82580_PHY_STATUS2_MDIX 0x0800 +#define I82580_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200 +#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 + +/* I82580 PHY Control 2 */ +#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600 + +/* I82580 PHY Diagnostics Status */ +#define I82580_DSTATUS_CABLE_LENGTH 0x03FC +#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* 82580 PHY Power Management */ +#define E1000_82580_PHY_POWER_MGMT 0xE14 +#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ +#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ +#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ + +/* Enable flexible speed on link-up */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +/* SFP modules ID memory locations */ +#define E1000_SFF_IDENTIFIER_OFFSET 0x00 +#define E1000_SFF_IDENTIFIER_SFF 0x02 +#define E1000_SFF_IDENTIFIER_SFP 0x03 + +#define E1000_SFF_ETH_FLAGS_OFFSET 0x06 +/* Flags 
for SFP modules compatible with ETH up to 1Gb */ +struct e1000_sfp_flags { + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; + u8 e1000_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e10_base_bx10:1; + u8 e10_base_px:1; +}; + +#endif diff --git a/devices/igb/e1000_phy-5.14-orig.c b/devices/igb/e1000_phy-5.14-orig.c new file mode 100644 index 00000000..a018000f --- /dev/null +++ b/devices/igb/e1000_phy-5.14-orig.c @@ -0,0 +1,2631 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#include <linux/if_ether.h> +#include <linux/delay.h> + +#include "e1000_mac.h" +#include "e1000_phy.h" + +static s32 igb_phy_setup_autoneg(struct e1000_hw *hw); +static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, + u16 *phy_ctrl); +static s32 igb_wait_autoneg(struct e1000_hw *hw); +static s32 igb_set_master_slave_mode(struct e1000_hw *hw); + +/* Cable length tables */ +static const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; + +static const u16 e1000_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, + 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, + 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, + 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, + 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, + 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, + 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, + 104, 109, 114, 118, 121, 124}; + +/** + * igb_check_reset_block - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +s32 igb_check_reset_block(struct e1000_hw *hw) +{ + u32 manc; + + manc = rd32(E1000_MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0; +} + +/** + * igb_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +s32 igb_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + + /* ensure PHY page selection to fix misconfigured i210 */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) + phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0); + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + udelay(20); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return ret_val; +} + +/** + * igb_phy_reset_dsp - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor.
+ **/ +static s32 igb_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.write_reg)) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); + +out: + return ret_val; +} + +/** + * igb_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + wr32(E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = rd32(E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg("MDI Read did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + *data = (u16) mdic; + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + wr32(E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = rd32(E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg("MDI Write did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_read_phy_reg_i2c - Read PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the i2c interface and stores the + * retrieved information in data. 
+ **/ +s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + /* Need to byte-swap the 16-bit value. */ + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + + return 0; +} + +/** + * igb_write_phy_reg_i2c - Write PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the i2c interface. + **/ +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + u16 phy_data_swapped; + + /* Prevent overwriting SFP I2C EEPROM which is at A0 address.*/ + if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { + hw_dbg("PHY I2C Address %d is out of range.\n", + hw->phy.addr); + return -E1000_ERR_CONFIG; + } + + /* Swap the data bytes for the I2C interface */ + phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * igb_read_sfp_data_byte - Reads SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer + * + * Reads one byte from SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + hw_dbg("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing with the + * EEPROM to retrieve the desired data. 
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + data_local = rd32(E1000_I2CCMD); + if (data_local & E1000_I2CCMD_READY) + break; + } + if (!(data_local & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (data_local & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + *data = (u8) data_local & 0xFF; + + return 0; +} + +/** + * igb_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = igb_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release(hw); + goto out; + } + } + + ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = igb_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release(hw); + goto out; + } + } + + ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 igb_copper_link_setup_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + if (phy->type == e1000_phy_82580) { + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + } + + /* Enable CRS on TX. This must be set for half-duplex operation. 
*/ + ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data); + if (ret_val) + goto out; + + phy_data |= I82580_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; + + ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); + if (ret_val) + goto out; + + /* Set MDI/MDIX mode */ + ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); + if (ret_val) + goto out; + phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 igb_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + if (phy->revision < E1000_REVISION_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == E1000_REVISION_2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. */ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + goto out; + } + + /* Commit the changes. 
*/ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + goto out; + } + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. + * Also enables and sets the downshift parameters. + **/ +s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) + return 0; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + /* M88E1112 does not support this mode) */ + if (phy->id != M88E1112_E_PHY_ID) { + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + } + fallthrough; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift and setting it to X6 */ + if (phy->id == M88E1543_E_PHY_ID) { + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE; + ret_val = + phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; + phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; + phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Commit the changes. */ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + ret_val = igb_set_master_slave_mode(hw); + if (ret_val) + return ret_val; + + return 0; +} + +/** + * igb_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 igb_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msleep(100); + + /* The NVM settings will configure LPLU in D3 for + * non-IGP1 PHYs. 
+ */ + if (phy->type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + if (phy->ops.set_d3_lplu_state) + ret_val = phy->ops.set_d3_lplu_state(hw, false); + if (ret_val) { + hw_dbg("Error Disabling LPLU D3\n"); + goto out; + } + } + + /* disable lplu d0 during driver init */ + ret_val = phy->ops.set_d0_lplu_state(hw, false); + if (ret_val) { + hw_dbg("Error Disabling LPLU D0\n"); + goto out; + } + /* Configure mdi-mdix settings */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + goto out; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. + */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + + /* Set auto Master/Slave resolution process */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + /* load defaults for future use */ + phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? + ((data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : + e1000_ms_auto; + + switch (phy->ms_type) { + case e1000_ms_force_master: + data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + data |= CR_1000T_MS_ENABLE; + data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + data &= ~CR_1000T_MS_ENABLE; + break; + default: + break; + } + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +static s32 igb_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. 
+ */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igb_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = igb_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** + * igb_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + goto out; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? 
*/ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* Flow control (both RX and TX) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + goto out; + + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + ret_val = phy->ops.write_reg(hw, + PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 igb_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. 
+ */ + ret_val = igb_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igb_config_collision_dist(hw); + ret_val = igb_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + goto out; + + hw_dbg("IGP PSCR: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); + if (ret_val) + goto out; + + if (!link) + hw_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on TX must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + /* I210 and I211 devices support Auto-Crossover in forced operation. */ + if (phy->type != e1000_phy_i210) { + /* Clear Auto-Crossover to force MDI manually. M88E1000 + * requires MDI forced whenever speed and duplex are forced. 
+ */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + goto out; + + hw_dbg("M88E1000 PSCR: %X\n", phy_data); + } + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Reset the phy to commit changes. */ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) + goto out; + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + if (ret_val) + goto out; + + if (!link) { + bool reset_dsp = true; + + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + reset_dsp = false; + break; + default: + if (hw->phy.type != e1000_phy_m88) + reset_dsp = false; + break; + } + if (!reset_dsp) { + hw_dbg("Link taking longer than expected.\n"); + } else { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = phy->ops.write_reg(hw, + M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + goto out; + ret_val = igb_phy_reset_dsp(hw); + if (ret_val) + goto out; + } + } + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + goto out; + } + + if (hw->phy.type != e1000_phy_m88 || + hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID || + hw->phy.id == M88E1543_E_PHY_ID || + hw->phy.id == M88E1512_E_PHY_ID || + hw->phy.id == I210_I_PHY_ID) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the PHY_CONTROL register for these settings to + * take affect. 
+ **/ +static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, + u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = rd32(E1000_CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + hw_dbg("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + hw_dbg("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl |= MII_CR_SPEED_10; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + hw_dbg("Forcing 10mb\n"); + } + + igb_config_collision_dist(hw); + + wr32(E1000_CTRL, ctrl); +} + +/** + * igb_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 data; + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + } + +out: + return ret_val; +} + +/** + * igb_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. + **/ +s32 igb_check_downshift(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + switch (phy->type) { + case e1000_phy_i210: + case e1000_phy_m88: + case e1000_phy_gg82563: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: + case e1000_phy_igp: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + ret_val = 0; + goto out; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = (phy_data & mask) ? true : false; + +out: + return ret_val; +} + +/** + * igb_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 igb_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * igb_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +static s32 igb_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + /* Polarity is determined based on the speed of + * our connection. 
+ */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). + */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = phy->ops.read_reg(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = (data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + +out: + return ret_val; +} + +/** + * igb_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + **/ +static s32 igb_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 i, phy_status; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * igb_phy_has_link - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = 0; + u16 i, phy_status; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val && usec_interval > 0) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); + } + + *success = (i < iterations) ? true : false; + + return ret_val; +} + +/** + * igb_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. 
The m88 PHY has four + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 igb_get_cable_length_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, phy_data2, index, default_page, is_cm; + int len_tot = 0; + u16 len_min; + u16 len_max; + + switch (hw->phy.id) { + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: + case I210_I_PHY_ID: + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) + goto out; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + if (ret_val) + goto out; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Get cable length from Pair 0 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL0, &phy_data); + if (ret_val) + goto out; + + phy->pair_length[0] = phy_data / (is_cm ? 100 : 1); + len_tot = phy->pair_length[0]; + len_min = phy->pair_length[0]; + len_max = phy->pair_length[0]; + + /* Get cable length from Pair 1 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL1, &phy_data); + if (ret_val) + goto out; + + phy->pair_length[1] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[1]; + len_min = min(len_min, phy->pair_length[1]); + len_max = max(len_max, phy->pair_length[1]); + + /* Get cable length from Pair 2 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL2, &phy_data); + if (ret_val) + goto out; + + phy->pair_length[2] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[2]; + len_min = min(len_min, phy->pair_length[2]); + len_max = max(len_max, phy->pair_length[2]); + + /* Get cable length from Pair 3 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL3, &phy_data); + if (ret_val) + goto out; + + phy->pair_length[3] = phy_data / (is_cm ? 
100 : 1); + len_tot += phy->pair_length[3]; + len_min = min(len_min, phy->pair_length[3]); + len_max = max(len_max, phy->pair_length[3]); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = len_min; + phy->max_cable_length = len_max; + phy->cable_length = len_tot / 4; + + /* Reset the page selec to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + goto out; + break; + case M88E1112_E_PHY_ID: + /* Remember the original page select and set it to 5 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, + &phy_data); + if (ret_val) + goto out; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + + phy->max_cable_length) / 2; + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + goto out; + + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + goto out; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK; + + /* Array index bound check. */ + if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) || + (cur_agc_index == 0)) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + /* Remove min & max AGC values from calculation. 
*/ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * igb_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. + **/ +s32 igb_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + if (phy->media_type != e1000_media_type_copper) { + hw_dbg("Phy info is only valid for copper media\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) + ? true : false; + + ret_val = igb_check_polarity_m88(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false; + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. 
+ **/ +s32 igb_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = igb_check_polarity_igp(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_phy_sw_reset - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/write the control register reset bit to the PHY. + **/ +s32 igb_phy_sw_reset(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 phy_ctrl; + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= MII_CR_RESET; + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + udelay(1); + +out: + return ret_val; +} + +/** + * igb_phy_hw_reset - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +s32 igb_phy_hw_reset(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + ret_val = igb_check_reset_block(hw); + if (ret_val) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; + + ctrl = rd32(E1000_CTRL); + wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); + wrfl(); + + udelay(phy->reset_delay_us); + + wr32(E1000_CTRL, ctrl); + wrfl(); + + udelay(150); + + phy->ops.release(hw); + + ret_val = phy->ops.get_cfg_done(hw); + +out: + return ret_val; +} + +/** + * igb_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. 
+ **/ +s32 igb_phy_init_script_igp3(struct e1000_hw *hw) +{ + hw_dbg("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + hw->phy.ops.write_reg(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0); + /* Add 4% to TX amplitude in Giga mode */ + hw->phy.ops.write_reg(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + hw->phy.ops.write_reg(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + hw->phy.ops.write_reg(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4); + /* Force 1000 ( required for enabling PHY regs configuration) */ + hw->phy.ops.write_reg(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + hw->phy.ops.write_reg(hw, 0x1F30, 0x1606); + /* Disable NPDFE */ + hw->phy.ops.write_reg(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + hw->phy.ops.write_reg(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + hw->phy.ops.write_reg(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + hw->phy.ops.write_reg(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + hw->phy.ops.write_reg(hw, 0x1796, 0x0008); + /* Change cg_icount + enable integbp for channels BCD */ + hw->phy.ops.write_reg(hw, 0x1798, 0xD008); + /* Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + hw->phy.ops.write_reg(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + hw->phy.ops.write_reg(hw, 0x187A, 0x0800); + /* Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + hw->phy.ops.write_reg(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + hw->phy.ops.write_reg(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes */ + hw->phy.ops.write_reg(hw, 0x0014, 0x0045); + /* Restart AN, Speed selection is 1000 */ + hw->phy.ops.write_reg(hw, 0x0000, 0x1340); + + return 0; +} + +/** + * igb_initialize_M88E1512_phy - Initialize M88E1512 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvel 1512 to work correctly with Avoton. + **/ +s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + /* Switch to PHY page 0xFF. 
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + + /* msec_delay(1000); */ + usleep_range(1000, 2000); +out: + return ret_val; +} + +/** + * igb_initialize_M88E1543_phy - Initialize M88E1512 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvell 1543 to work correctly with Avoton. + **/ +s32 igb_initialize_M88E1543_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + /* Switch to PHY page 0xFF. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x0C0D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Switch to PHY page 1. 
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1); + if (ret_val) + goto out; + + /* Change mode to 1000BASE-X/SGMII and autoneg enable */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + + /* msec_delay(1000); */ + usleep_range(1000, 2000); +out: + return ret_val; +} + +/** + * igb_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, restore the link to previous settings. + **/ +void igb_power_up_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * igb_power_down_phy_copper - Power down copper PHY + * @hw: pointer to the HW structure + * + * Power down PHY to save power when interface is down and wake on lan + * is not enabled. + **/ +void igb_power_down_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); + usleep_range(1000, 2000); +} + +/** + * igb_check_polarity_82580 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +static s32 igb_check_polarity_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + + ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); + + if (!ret_val) + phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Clear Auto-Crossover to force MDI manually. 82580 requires MDI + * forced whenever speed and duplex are forced. 
+ */ + ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; + + ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); + if (ret_val) + goto out; + + hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + if (ret_val) + goto out; + + if (!link) + hw_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_get_phy_info_82580 - Retrieve I82580 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 igb_get_phy_info_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = igb_check_polarity_82580(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false; + + if ((data & I82580_PHY_STATUS2_SPEED_MASK) == + I82580_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_get_cable_length_82580 - Determine cable length for 82580 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. 
+ **/ +s32 igb_get_cable_length_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + + ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); + if (ret_val) + goto out; + + length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> + I82580_DSTATUS_CABLE_LENGTH_SHIFT; + + if (length == E1000_CABLE_LENGTH_UNDEFINED) + ret_val = -E1000_ERR_PHY; + + phy->cable_length = length; + +out: + return ret_val; +} + +/** + * igb_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 igb_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + fallthrough; + default: + break; + } + + return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); +} diff --git a/devices/igb/e1000_phy-5.14-orig.h b/devices/igb/e1000_phy-5.14-orig.h new file mode 100644 index 00000000..5894e4b1 --- /dev/null +++ b/devices/igb/e1000_phy-5.14-orig.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#ifndef _E1000_PHY_H_ +#define _E1000_PHY_H_ + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +s32 igb_check_downshift(struct e1000_hw *hw); +s32 igb_check_reset_block(struct e1000_hw *hw); +s32 igb_copper_link_setup_igp(struct e1000_hw *hw); +s32 igb_copper_link_setup_m88(struct e1000_hw *hw); +s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 igb_get_cable_length_m88(struct e1000_hw *hw); +s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw); +s32 igb_get_cable_length_igp_2(struct e1000_hw *hw); +s32 igb_get_phy_id(struct e1000_hw *hw); +s32 igb_get_phy_info_igp(struct e1000_hw *hw); +s32 igb_get_phy_info_m88(struct e1000_hw *hw); +s32 igb_phy_sw_reset(struct e1000_hw *hw); +s32 igb_phy_hw_reset(struct e1000_hw *hw); +s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 igb_setup_copper_link(struct e1000_hw *hw); +s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +void igb_power_up_phy_copper(struct e1000_hw *hw); +void igb_power_down_phy_copper(struct e1000_hw *hw); +s32 igb_phy_init_script_igp3(struct e1000_hw *hw); +s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw); +s32 igb_initialize_M88E1543_phy(struct e1000_hw *hw); +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 
igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +s32 igb_copper_link_setup_82580(struct e1000_hw *hw); +s32 igb_get_phy_info_82580(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); +s32 igb_get_cable_length_82580(struct e1000_hw *hw); +s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_check_polarity_m88(struct e1000_hw *hw); + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define I82580_ADDR_REG 16 +#define I82580_CFG_REG 22 +#define I82580_CFG_ASSERT_CRS_ON_TX BIT(15) +#define I82580_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift 100/10 */ +#define I82580_CTRL_REG 23 +#define I82580_CTRL_DOWNSHIFT_MASK (7u << 10) + +/* 82580 specific PHY registers */ +#define I82580_PHY_CTRL_2 18 +#define I82580_PHY_LBK_CTRL 19 +#define I82580_PHY_STATUS_2 26 +#define I82580_PHY_DIAG_STATUS 31 + +/* I82580 PHY Status 2 */ +#define I82580_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82580_PHY_STATUS2_MDIX 0x0800 +#define I82580_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200 +#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 + +/* I82580 PHY Control 2 */ +#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600 + +/* I82580 PHY Diagnostics Status */ +#define I82580_DSTATUS_CABLE_LENGTH 0x03FC +#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* 82580 PHY Power Management */ +#define E1000_82580_PHY_POWER_MGMT 0xE14 +#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ +#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ +#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ + +/* Enable flexible speed on link-up */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +/* SFP modules ID memory locations */ +#define E1000_SFF_IDENTIFIER_OFFSET 0x00 +#define E1000_SFF_IDENTIFIER_SFF 0x02 +#define E1000_SFF_IDENTIFIER_SFP 0x03 + +#define E1000_SFF_ETH_FLAGS_OFFSET 0x06 +/* Flags for SFP 
modules compatible with ETH up to 1Gb */
+struct e1000_sfp_flags {
+	u8 e1000_base_sx:1;
+	u8 e1000_base_lx:1;
+	u8 e1000_base_cx:1;
+	u8 e1000_base_t:1;
+	u8 e100_base_lx:1;
+	u8 e100_base_fx:1;
+	u8 e10_base_bx10:1;
+	u8 e10_base_px:1;
+};
+
+#endif
diff --git a/devices/igb/e1000_phy-6.1-ethercat.c b/devices/igb/e1000_phy-6.1-ethercat.c
new file mode 100644
index 00000000..561e892a
--- /dev/null
+++ b/devices/igb/e1000_phy-6.1-ethercat.c
@@ -0,0 +1,2631 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2007 - 2018 Intel Corporation. */
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+
+#include "e1000_mac-6.1-ethercat.h"
+#include "e1000_phy-6.1-ethercat.h"
+
+static s32 igb_phy_setup_autoneg(struct e1000_hw *hw);
+static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
+					     u16 *phy_ctrl);
+static s32 igb_wait_autoneg(struct e1000_hw *hw);
+static s32 igb_set_master_slave_mode(struct e1000_hw *hw);
+
+/* Cable length tables */
+static const u16 e1000_m88_cable_length_table[] = {
+	0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+
+static const u16 e1000_igp_2_cable_length_table[] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+	0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+	6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+	21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+	40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+	60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+	83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+	104, 109, 114, 118, 121, 124};
+
+/**
+ * igb_check_reset_block - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Read the PHY management control register and check whether a PHY reset
+ * is blocked. If a reset is not blocked return 0, otherwise
+ * return E1000_BLK_PHY_RESET (12).
+ **/
+s32 igb_check_reset_block(struct e1000_hw *hw)
+{
+	u32 manc;
+
+	manc = rd32(E1000_MANC);
+
+	return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
+}
+
+/**
+ * igb_get_phy_id - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ **/
+s32 igb_get_phy_id(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 phy_id;
+
+	/* ensure PHY page selection to fix misconfigured i210 */
+	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+		phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0);
+
+	ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+	if (ret_val)
+		goto out;
+
+	phy->id = (u32)(phy_id << 16);
+	udelay(20);
+	ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+	if (ret_val)
+		goto out;
+
+	phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+	phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_phy_reset_dsp - Reset PHY DSP
+ * @hw: pointer to the HW structure
+ *
+ * Reset the digital signal processor.
+ **/ +static s32 igb_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.write_reg)) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); + +out: + return ret_val; +} + +/** + * igb_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + wr32(E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = rd32(E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg("MDI Read did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + *data = (u16) mdic; + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + wr32(E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = rd32(E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg("MDI Write did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_read_phy_reg_i2c - Read PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the i2c interface and stores the + * retrieved information in data. 
+ **/ +s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + /* Need to byte-swap the 16-bit value. */ + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + + return 0; +} + +/** + * igb_write_phy_reg_i2c - Write PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the i2c interface. + **/ +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + u16 phy_data_swapped; + + /* Prevent overwriting SFP I2C EEPROM which is at A0 address.*/ + if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { + hw_dbg("PHY I2C Address %d is out of range.\n", + hw->phy.addr); + return -E1000_ERR_CONFIG; + } + + /* Swap the data bytes for the I2C interface */ + phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * igb_read_sfp_data_byte - Reads SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer + * + * Reads one byte from SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + hw_dbg("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing with the + * EEPROM to retrieve the desired data. 
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + data_local = rd32(E1000_I2CCMD); + if (data_local & E1000_I2CCMD_READY) + break; + } + if (!(data_local & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (data_local & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + *data = (u8) data_local & 0xFF; + + return 0; +} + +/** + * igb_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = igb_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release(hw); + goto out; + } + } + + ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = igb_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release(hw); + goto out; + } + } + + ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 igb_copper_link_setup_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + if (phy->type == e1000_phy_82580) { + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + } + + /* Enable CRS on TX. This must be set for half-duplex operation. 
*/ + ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data); + if (ret_val) + goto out; + + phy_data |= I82580_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; + + ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); + if (ret_val) + goto out; + + /* Set MDI/MDIX mode */ + ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); + if (ret_val) + goto out; + phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 igb_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + if (phy->revision < E1000_REVISION_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == E1000_REVISION_2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. */ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + goto out; + } + + /* Commit the changes. 
*/ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + goto out; + } + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. + * Also enables and sets the downshift parameters. + **/ +s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) + return 0; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + /* M88E1112 does not support this mode) */ + if (phy->id != M88E1112_E_PHY_ID) { + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + } + fallthrough; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift and setting it to X6 */ + if (phy->id == M88E1543_E_PHY_ID) { + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE; + ret_val = + phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; + phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; + phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Commit the changes. */ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + ret_val = igb_set_master_slave_mode(hw); + if (ret_val) + return ret_val; + + return 0; +} + +/** + * igb_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 igb_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msleep(100); + + /* The NVM settings will configure LPLU in D3 for + * non-IGP1 PHYs. 
+ */ + if (phy->type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + if (phy->ops.set_d3_lplu_state) + ret_val = phy->ops.set_d3_lplu_state(hw, false); + if (ret_val) { + hw_dbg("Error Disabling LPLU D3\n"); + goto out; + } + } + + /* disable lplu d0 during driver init */ + ret_val = phy->ops.set_d0_lplu_state(hw, false); + if (ret_val) { + hw_dbg("Error Disabling LPLU D0\n"); + goto out; + } + /* Configure mdi-mdix settings */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + goto out; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. + */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + + /* Set auto Master/Slave resolution process */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + /* load defaults for future use */ + phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? + ((data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : + e1000_ms_auto; + + switch (phy->ms_type) { + case e1000_ms_force_master: + data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + data |= CR_1000T_MS_ENABLE; + data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + data &= ~CR_1000T_MS_ENABLE; + break; + default: + break; + } + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +static s32 igb_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. 
+ */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igb_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = igb_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** + * igb_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + goto out; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? 
*/ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* Flow control (both RX and TX) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + goto out; + + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + ret_val = phy->ops.write_reg(hw, + PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 igb_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. 
+ */ + ret_val = igb_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igb_config_collision_dist(hw); + ret_val = igb_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + goto out; + + hw_dbg("IGP PSCR: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); + if (ret_val) + goto out; + + if (!link) + hw_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on TX must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + /* I210 and I211 devices support Auto-Crossover in forced operation. */ + if (phy->type != e1000_phy_i210) { + /* Clear Auto-Crossover to force MDI manually. M88E1000 + * requires MDI forced whenever speed and duplex are forced. 
+ */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + goto out; + + hw_dbg("M88E1000 PSCR: %X\n", phy_data); + } + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Reset the phy to commit changes. */ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) + goto out; + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + if (ret_val) + goto out; + + if (!link) { + bool reset_dsp = true; + + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + reset_dsp = false; + break; + default: + if (hw->phy.type != e1000_phy_m88) + reset_dsp = false; + break; + } + if (!reset_dsp) { + hw_dbg("Link taking longer than expected.\n"); + } else { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = phy->ops.write_reg(hw, + M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + goto out; + ret_val = igb_phy_reset_dsp(hw); + if (ret_val) + goto out; + } + } + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + goto out; + } + + if (hw->phy.type != e1000_phy_m88 || + hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID || + hw->phy.id == M88E1543_E_PHY_ID || + hw->phy.id == M88E1512_E_PHY_ID || + hw->phy.id == I210_I_PHY_ID) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the PHY_CONTROL register for these settings to + * take affect. 
+ **/ +static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, + u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = rd32(E1000_CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + hw_dbg("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + hw_dbg("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl |= MII_CR_SPEED_10; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + hw_dbg("Forcing 10mb\n"); + } + + igb_config_collision_dist(hw); + + wr32(E1000_CTRL, ctrl); +} + +/** + * igb_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 data; + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + } + +out: + return ret_val; +} + +/** + * igb_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. + **/ +s32 igb_check_downshift(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + switch (phy->type) { + case e1000_phy_i210: + case e1000_phy_m88: + case e1000_phy_gg82563: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: + case e1000_phy_igp: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + ret_val = 0; + goto out; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = (phy_data & mask) ? true : false; + +out: + return ret_val; +} + +/** + * igb_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 igb_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * igb_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +static s32 igb_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + /* Polarity is determined based on the speed of + * our connection. 
+ */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). + */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = phy->ops.read_reg(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = (data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + +out: + return ret_val; +} + +/** + * igb_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + **/ +static s32 igb_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 i, phy_status; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * igb_phy_has_link - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = 0; + u16 i, phy_status; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val && usec_interval > 0) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); + } + + *success = (i < iterations) ? true : false; + + return ret_val; +} + +/** + * igb_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. 
The m88 PHY has four + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 igb_get_cable_length_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, phy_data2, index, default_page, is_cm; + int len_tot = 0; + u16 len_min; + u16 len_max; + + switch (hw->phy.id) { + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: + case I210_I_PHY_ID: + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) + goto out; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + if (ret_val) + goto out; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Get cable length from Pair 0 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL0, &phy_data); + if (ret_val) + goto out; + + phy->pair_length[0] = phy_data / (is_cm ? 100 : 1); + len_tot = phy->pair_length[0]; + len_min = phy->pair_length[0]; + len_max = phy->pair_length[0]; + + /* Get cable length from Pair 1 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL1, &phy_data); + if (ret_val) + goto out; + + phy->pair_length[1] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[1]; + len_min = min(len_min, phy->pair_length[1]); + len_max = max(len_max, phy->pair_length[1]); + + /* Get cable length from Pair 2 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL2, &phy_data); + if (ret_val) + goto out; + + phy->pair_length[2] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[2]; + len_min = min(len_min, phy->pair_length[2]); + len_max = max(len_max, phy->pair_length[2]); + + /* Get cable length from Pair 3 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL3, &phy_data); + if (ret_val) + goto out; + + phy->pair_length[3] = phy_data / (is_cm ? 
100 : 1); + len_tot += phy->pair_length[3]; + len_min = min(len_min, phy->pair_length[3]); + len_max = max(len_max, phy->pair_length[3]); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = len_min; + phy->max_cable_length = len_max; + phy->cable_length = len_tot / 4; + + /* Reset the page selec to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + goto out; + break; + case M88E1112_E_PHY_ID: + /* Remember the original page select and set it to 5 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, + &phy_data); + if (ret_val) + goto out; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + + phy->max_cable_length) / 2; + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + goto out; + + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + goto out; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK; + + /* Array index bound check. */ + if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) || + (cur_agc_index == 0)) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + /* Remove min & max AGC values from calculation. 
*/ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * igb_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. + **/ +s32 igb_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + if (phy->media_type != e1000_media_type_copper) { + hw_dbg("Phy info is only valid for copper media\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) + ? true : false; + + ret_val = igb_check_polarity_m88(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false; + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. 
+ **/ +s32 igb_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = igb_check_polarity_igp(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_phy_sw_reset - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/write the control register reset bit to the PHY. + **/ +s32 igb_phy_sw_reset(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 phy_ctrl; + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= MII_CR_RESET; + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + udelay(1); + +out: + return ret_val; +} + +/** + * igb_phy_hw_reset - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +s32 igb_phy_hw_reset(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + ret_val = igb_check_reset_block(hw); + if (ret_val) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; + + ctrl = rd32(E1000_CTRL); + wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); + wrfl(); + + udelay(phy->reset_delay_us); + + wr32(E1000_CTRL, ctrl); + wrfl(); + + udelay(150); + + phy->ops.release(hw); + + ret_val = phy->ops.get_cfg_done(hw); + +out: + return ret_val; +} + +/** + * igb_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. 
+ **/ +s32 igb_phy_init_script_igp3(struct e1000_hw *hw) +{ + hw_dbg("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + hw->phy.ops.write_reg(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0); + /* Add 4% to TX amplitude in Giga mode */ + hw->phy.ops.write_reg(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + hw->phy.ops.write_reg(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + hw->phy.ops.write_reg(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4); + /* Force 1000 ( required for enabling PHY regs configuration) */ + hw->phy.ops.write_reg(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + hw->phy.ops.write_reg(hw, 0x1F30, 0x1606); + /* Disable NPDFE */ + hw->phy.ops.write_reg(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + hw->phy.ops.write_reg(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + hw->phy.ops.write_reg(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + hw->phy.ops.write_reg(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + hw->phy.ops.write_reg(hw, 0x1796, 0x0008); + /* Change cg_icount + enable integbp for channels BCD */ + hw->phy.ops.write_reg(hw, 0x1798, 0xD008); + /* Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + hw->phy.ops.write_reg(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + hw->phy.ops.write_reg(hw, 0x187A, 0x0800); + /* Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + hw->phy.ops.write_reg(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + hw->phy.ops.write_reg(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes */ + hw->phy.ops.write_reg(hw, 0x0014, 0x0045); + /* Restart AN, Speed selection is 1000 */ + hw->phy.ops.write_reg(hw, 0x0000, 0x1340); + + return 0; +} + +/** + * igb_initialize_M88E1512_phy - Initialize M88E1512 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvel 1512 to work correctly with Avoton. + **/ +s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + /* Switch to PHY page 0xFF. 
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + + /* msec_delay(1000); */ + usleep_range(1000, 2000); +out: + return ret_val; +} + +/** + * igb_initialize_M88E1543_phy - Initialize M88E1512 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvell 1543 to work correctly with Avoton. + **/ +s32 igb_initialize_M88E1543_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + /* Switch to PHY page 0xFF. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x0C0D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Switch to PHY page 1. 
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1); + if (ret_val) + goto out; + + /* Change mode to 1000BASE-X/SGMII and autoneg enable */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + + /* msec_delay(1000); */ + usleep_range(1000, 2000); +out: + return ret_val; +} + +/** + * igb_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, restore the link to previous settings. + **/ +void igb_power_up_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * igb_power_down_phy_copper - Power down copper PHY + * @hw: pointer to the HW structure + * + * Power down PHY to save power when interface is down and wake on lan + * is not enabled. + **/ +void igb_power_down_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); + usleep_range(1000, 2000); +} + +/** + * igb_check_polarity_82580 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +static s32 igb_check_polarity_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + + ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); + + if (!ret_val) + phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Clear Auto-Crossover to force MDI manually. 82580 requires MDI + * forced whenever speed and duplex are forced. 
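+ * Writing zero to the I82580_PHY_CTRL2_MDIX_CFG_MASK field selects manual
+ * MDI (as opposed to manual MDI-X or automatic crossover).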
+ */ + ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; + + ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); + if (ret_val) + goto out; + + hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + if (ret_val) + goto out; + + if (!link) + hw_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_get_phy_info_82580 - Retrieve I82580 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 igb_get_phy_info_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = igb_check_polarity_82580(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false; + + if ((data & I82580_PHY_STATUS2_SPEED_MASK) == + I82580_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_get_cable_length_82580 - Determine cable length for 82580 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. 
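+ * The length in meters is carried in bits 9:2 of the diagnostic status
+ * register; for example, a register value of 0x0168 yields
+ * (0x0168 & I82580_DSTATUS_CABLE_LENGTH) >> I82580_DSTATUS_CABLE_LENGTH_SHIFT
+ * = 90 meters. A raw value of 0xFF (E1000_CABLE_LENGTH_UNDEFINED) is
+ * rejected with -E1000_ERR_PHY.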
+ **/ +s32 igb_get_cable_length_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + + ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); + if (ret_val) + goto out; + + length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> + I82580_DSTATUS_CABLE_LENGTH_SHIFT; + + if (length == E1000_CABLE_LENGTH_UNDEFINED) + ret_val = -E1000_ERR_PHY; + + phy->cable_length = length; + +out: + return ret_val; +} + +/** + * igb_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 igb_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + fallthrough; + default: + break; + } + + return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); +} diff --git a/devices/igb/e1000_phy-6.1-ethercat.h b/devices/igb/e1000_phy-6.1-ethercat.h new file mode 100644 index 00000000..5894e4b1 --- /dev/null +++ b/devices/igb/e1000_phy-6.1-ethercat.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#ifndef _E1000_PHY_H_ +#define _E1000_PHY_H_ + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +s32 igb_check_downshift(struct e1000_hw *hw); +s32 igb_check_reset_block(struct e1000_hw *hw); +s32 igb_copper_link_setup_igp(struct e1000_hw *hw); +s32 igb_copper_link_setup_m88(struct e1000_hw *hw); +s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 igb_get_cable_length_m88(struct e1000_hw *hw); +s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw); +s32 igb_get_cable_length_igp_2(struct e1000_hw *hw); +s32 igb_get_phy_id(struct e1000_hw *hw); +s32 igb_get_phy_info_igp(struct e1000_hw *hw); +s32 igb_get_phy_info_m88(struct e1000_hw *hw); +s32 igb_phy_sw_reset(struct e1000_hw *hw); +s32 igb_phy_hw_reset(struct e1000_hw *hw); +s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 igb_setup_copper_link(struct e1000_hw *hw); +s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +void igb_power_up_phy_copper(struct e1000_hw *hw); +void igb_power_down_phy_copper(struct e1000_hw *hw); +s32 igb_phy_init_script_igp3(struct e1000_hw *hw); +s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw); +s32 igb_initialize_M88E1543_phy(struct e1000_hw *hw); +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 
data); +s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +s32 igb_copper_link_setup_82580(struct e1000_hw *hw); +s32 igb_get_phy_info_82580(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); +s32 igb_get_cable_length_82580(struct e1000_hw *hw); +s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_check_polarity_m88(struct e1000_hw *hw); + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define I82580_ADDR_REG 16 +#define I82580_CFG_REG 22 +#define I82580_CFG_ASSERT_CRS_ON_TX BIT(15) +#define I82580_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift 100/10 */ +#define I82580_CTRL_REG 23 +#define I82580_CTRL_DOWNSHIFT_MASK (7u << 10) + +/* 82580 specific PHY registers */ +#define I82580_PHY_CTRL_2 18 +#define I82580_PHY_LBK_CTRL 19 +#define I82580_PHY_STATUS_2 26 +#define I82580_PHY_DIAG_STATUS 31 + +/* I82580 PHY Status 2 */ +#define I82580_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82580_PHY_STATUS2_MDIX 0x0800 +#define I82580_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200 +#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 + +/* I82580 PHY Control 2 */ +#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600 + +/* I82580 PHY Diagnostics Status */ +#define I82580_DSTATUS_CABLE_LENGTH 0x03FC +#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* 82580 PHY Power Management */ +#define E1000_82580_PHY_POWER_MGMT 0xE14 +#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ +#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ +#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ + +/* Enable flexible speed on link-up */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +/* SFP modules ID memory locations */ +#define E1000_SFF_IDENTIFIER_OFFSET 0x00 +#define E1000_SFF_IDENTIFIER_SFF 0x02 +#define E1000_SFF_IDENTIFIER_SFP 0x03 + +#define E1000_SFF_ETH_FLAGS_OFFSET 0x06 +/* Flags 
for SFP modules compatible with ETH up to 1Gb */ +struct e1000_sfp_flags { + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; + u8 e1000_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e10_base_bx10:1; + u8 e10_base_px:1; +}; + +#endif diff --git a/devices/igb/e1000_phy-6.1-orig.c b/devices/igb/e1000_phy-6.1-orig.c new file mode 100644 index 00000000..a018000f --- /dev/null +++ b/devices/igb/e1000_phy-6.1-orig.c @@ -0,0 +1,2631 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#include +#include + +#include "e1000_mac.h" +#include "e1000_phy.h" + +static s32 igb_phy_setup_autoneg(struct e1000_hw *hw); +static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, + u16 *phy_ctrl); +static s32 igb_wait_autoneg(struct e1000_hw *hw); +static s32 igb_set_master_slave_mode(struct e1000_hw *hw); + +/* Cable length tables */ +static const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; + +static const u16 e1000_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, + 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, + 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, + 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, + 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, + 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, + 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, + 104, 109, 114, 118, 121, 124}; + +/** + * igb_check_reset_block - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +s32 igb_check_reset_block(struct e1000_hw *hw) +{ + u32 manc; + + manc = rd32(E1000_MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0; +} + +/** + * igb_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +s32 igb_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + + /* ensure PHY page selection to fix misconfigured i210 */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) + phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0); + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + udelay(20); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return ret_val; +} + +/** + * igb_phy_reset_dsp - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. 
+ **/ +static s32 igb_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.write_reg)) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); + +out: + return ret_val; +} + +/** + * igb_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + wr32(E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = rd32(E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg("MDI Read did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + *data = (u16) mdic; + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + wr32(E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = rd32(E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg("MDI Write did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_read_phy_reg_i2c - Read PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the i2c interface and stores the + * retrieved information in data. 
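+ * The 16-bit value delivered in the I2CCMD data field is byte-swapped with
+ * respect to the PHY register layout, so it is swapped back before being
+ * stored in data.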
+ **/ +s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + /* Need to byte-swap the 16-bit value. */ + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + + return 0; +} + +/** + * igb_write_phy_reg_i2c - Write PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the i2c interface. + **/ +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + u16 phy_data_swapped; + + /* Prevent overwriting SFP I2C EEPROM which is at A0 address.*/ + if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { + hw_dbg("PHY I2C Address %d is out of range.\n", + hw->phy.addr); + return -E1000_ERR_CONFIG; + } + + /* Swap the data bytes for the I2C interface */ + phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * igb_read_sfp_data_byte - Reads SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer + * + * Reads one byte from SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + hw_dbg("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing with the + * EEPROM to retrieve the desired data. 
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + data_local = rd32(E1000_I2CCMD); + if (data_local & E1000_I2CCMD_READY) + break; + } + if (!(data_local & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (data_local & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + *data = (u8) data_local & 0xFF; + + return 0; +} + +/** + * igb_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = igb_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release(hw); + goto out; + } + } + + ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = igb_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release(hw); + goto out; + } + } + + ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 igb_copper_link_setup_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + if (phy->type == e1000_phy_82580) { + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + } + + /* Enable CRS on TX. This must be set for half-duplex operation. 
*/ + ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data); + if (ret_val) + goto out; + + phy_data |= I82580_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; + + ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); + if (ret_val) + goto out; + + /* Set MDI/MDIX mode */ + ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); + if (ret_val) + goto out; + phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 igb_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + if (phy->revision < E1000_REVISION_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == E1000_REVISION_2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. */ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + goto out; + } + + /* Commit the changes. 
*/ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + goto out; + } + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. + * Also enables and sets the downshift parameters. + **/ +s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) + return 0; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + /* M88E1112 does not support this mode) */ + if (phy->id != M88E1112_E_PHY_ID) { + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + } + fallthrough; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift and setting it to X6 */ + if (phy->id == M88E1543_E_PHY_ID) { + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE; + ret_val = + phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; + phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; + phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Commit the changes. */ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + ret_val = igb_set_master_slave_mode(hw); + if (ret_val) + return ret_val; + + return 0; +} + +/** + * igb_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 igb_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msleep(100); + + /* The NVM settings will configure LPLU in D3 for + * non-IGP1 PHYs. 
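+ * For IGP1 (e1000_phy_igp) the driver disables D3 LPLU explicitly below.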
+ */ + if (phy->type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + if (phy->ops.set_d3_lplu_state) + ret_val = phy->ops.set_d3_lplu_state(hw, false); + if (ret_val) { + hw_dbg("Error Disabling LPLU D3\n"); + goto out; + } + } + + /* disable lplu d0 during driver init */ + ret_val = phy->ops.set_d0_lplu_state(hw, false); + if (ret_val) { + hw_dbg("Error Disabling LPLU D0\n"); + goto out; + } + /* Configure mdi-mdix settings */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + goto out; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. + */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + + /* Set auto Master/Slave resolution process */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + /* load defaults for future use */ + phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? + ((data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : + e1000_ms_auto; + + switch (phy->ms_type) { + case e1000_ms_force_master: + data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + data |= CR_1000T_MS_ENABLE; + data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + data &= ~CR_1000T_MS_ENABLE; + break; + default: + break; + } + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +static s32 igb_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. 
+ */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igb_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = igb_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** + * igb_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + goto out; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? 
*/ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* Flow control (both RX and TX) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + goto out; + + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + ret_val = phy->ops.write_reg(hw, + PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 igb_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. 
+ */ + ret_val = igb_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igb_config_collision_dist(hw); + ret_val = igb_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + goto out; + + hw_dbg("IGP PSCR: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); + if (ret_val) + goto out; + + if (!link) + hw_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on TX must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + /* I210 and I211 devices support Auto-Crossover in forced operation. */ + if (phy->type != e1000_phy_i210) { + /* Clear Auto-Crossover to force MDI manually. M88E1000 + * requires MDI forced whenever speed and duplex are forced. 
+ */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + goto out; + + hw_dbg("M88E1000 PSCR: %X\n", phy_data); + } + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Reset the phy to commit changes. */ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) + goto out; + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + if (ret_val) + goto out; + + if (!link) { + bool reset_dsp = true; + + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + reset_dsp = false; + break; + default: + if (hw->phy.type != e1000_phy_m88) + reset_dsp = false; + break; + } + if (!reset_dsp) { + hw_dbg("Link taking longer than expected.\n"); + } else { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = phy->ops.write_reg(hw, + M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + goto out; + ret_val = igb_phy_reset_dsp(hw); + if (ret_val) + goto out; + } + } + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + goto out; + } + + if (hw->phy.type != e1000_phy_m88 || + hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID || + hw->phy.id == M88E1543_E_PHY_ID || + hw->phy.id == M88E1512_E_PHY_ID || + hw->phy.id == I210_I_PHY_ID) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the PHY_CONTROL register for these settings to + * take affect. 
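+ * For example, forcing 100 Mb/s full duplex sets E1000_CTRL_FRCSPD,
+ * E1000_CTRL_FRCDPX, E1000_CTRL_SPD_100 and E1000_CTRL_FD in CTRL, and
+ * leaves MII_CR_SPEED_100 | MII_CR_FULL_DUPLEX (with MII_CR_AUTO_NEG_EN
+ * cleared) in the value returned through phy_ctrl.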
+ **/ +static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, + u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = rd32(E1000_CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + hw_dbg("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + hw_dbg("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl |= MII_CR_SPEED_10; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + hw_dbg("Forcing 10mb\n"); + } + + igb_config_collision_dist(hw); + + wr32(E1000_CTRL, ctrl); +} + +/** + * igb_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 data; + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + } + +out: + return ret_val; +} + +/** + * igb_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. + **/ +s32 igb_check_downshift(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + switch (phy->type) { + case e1000_phy_i210: + case e1000_phy_m88: + case e1000_phy_gg82563: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: + case e1000_phy_igp: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + ret_val = 0; + goto out; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = (phy_data & mask) ? true : false; + +out: + return ret_val; +} + +/** + * igb_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 igb_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * igb_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +static s32 igb_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + /* Polarity is determined based on the speed of + * our connection. 
+ */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). + */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = phy->ops.read_reg(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = (data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + +out: + return ret_val; +} + +/** + * igb_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + **/ +static s32 igb_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 i, phy_status; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * igb_phy_has_link - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = 0; + u16 i, phy_status; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val && usec_interval > 0) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); + } + + *success = (i < iterations) ? true : false; + + return ret_val; +} + +/** + * igb_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. 
The m88 PHY has four + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 igb_get_cable_length_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, phy_data2, index, default_page, is_cm; + int len_tot = 0; + u16 len_min; + u16 len_max; + + switch (hw->phy.id) { + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I347AT4_E_PHY_ID: + case I210_I_PHY_ID: + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) + goto out; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + if (ret_val) + goto out; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Get cable length from Pair 0 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL0, &phy_data); + if (ret_val) + goto out; + + phy->pair_length[0] = phy_data / (is_cm ? 100 : 1); + len_tot = phy->pair_length[0]; + len_min = phy->pair_length[0]; + len_max = phy->pair_length[0]; + + /* Get cable length from Pair 1 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL1, &phy_data); + if (ret_val) + goto out; + + phy->pair_length[1] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[1]; + len_min = min(len_min, phy->pair_length[1]); + len_max = max(len_max, phy->pair_length[1]); + + /* Get cable length from Pair 2 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL2, &phy_data); + if (ret_val) + goto out; + + phy->pair_length[2] = phy_data / (is_cm ? 100 : 1); + len_tot += phy->pair_length[2]; + len_min = min(len_min, phy->pair_length[2]); + len_max = max(len_max, phy->pair_length[2]); + + /* Get cable length from Pair 3 length Regs */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDL3, &phy_data); + if (ret_val) + goto out; + + phy->pair_length[3] = phy_data / (is_cm ? 
100 : 1); + len_tot += phy->pair_length[3]; + len_min = min(len_min, phy->pair_length[3]); + len_max = max(len_max, phy->pair_length[3]); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = len_min; + phy->max_cable_length = len_max; + phy->cable_length = len_tot / 4; + + /* Reset the page selec to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + goto out; + break; + case M88E1112_E_PHY_ID: + /* Remember the original page select and set it to 5 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, + &phy_data); + if (ret_val) + goto out; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + + phy->max_cable_length) / 2; + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + goto out; + + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + goto out; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK; + + /* Array index bound check. */ + if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) || + (cur_agc_index == 0)) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + /* Remove min & max AGC values from calculation. 
*/ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * igb_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. + **/ +s32 igb_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + if (phy->media_type != e1000_media_type_copper) { + hw_dbg("Phy info is only valid for copper media\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) + ? true : false; + + ret_val = igb_check_polarity_m88(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false; + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. 
+ **/ +s32 igb_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = igb_check_polarity_igp(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_phy_sw_reset - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/write the control register reset bit to the PHY. + **/ +s32 igb_phy_sw_reset(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 phy_ctrl; + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= MII_CR_RESET; + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + udelay(1); + +out: + return ret_val; +} + +/** + * igb_phy_hw_reset - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +s32 igb_phy_hw_reset(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + ret_val = igb_check_reset_block(hw); + if (ret_val) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; + + ctrl = rd32(E1000_CTRL); + wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); + wrfl(); + + udelay(phy->reset_delay_us); + + wr32(E1000_CTRL, ctrl); + wrfl(); + + udelay(150); + + phy->ops.release(hw); + + ret_val = phy->ops.get_cfg_done(hw); + +out: + return ret_val; +} + +/** + * igb_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. 
+ **/ +s32 igb_phy_init_script_igp3(struct e1000_hw *hw) +{ + hw_dbg("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + hw->phy.ops.write_reg(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0); + /* Add 4% to TX amplitude in Giga mode */ + hw->phy.ops.write_reg(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + hw->phy.ops.write_reg(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + hw->phy.ops.write_reg(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4); + /* Force 1000 ( required for enabling PHY regs configuration) */ + hw->phy.ops.write_reg(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + hw->phy.ops.write_reg(hw, 0x1F30, 0x1606); + /* Disable NPDFE */ + hw->phy.ops.write_reg(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + hw->phy.ops.write_reg(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + hw->phy.ops.write_reg(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + hw->phy.ops.write_reg(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + hw->phy.ops.write_reg(hw, 0x1796, 0x0008); + /* Change cg_icount + enable integbp for channels BCD */ + hw->phy.ops.write_reg(hw, 0x1798, 0xD008); + /* Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + hw->phy.ops.write_reg(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + hw->phy.ops.write_reg(hw, 0x187A, 0x0800); + /* Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + hw->phy.ops.write_reg(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + hw->phy.ops.write_reg(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes */ + hw->phy.ops.write_reg(hw, 0x0014, 0x0045); + /* Restart AN, Speed selection is 1000 */ + hw->phy.ops.write_reg(hw, 0x0000, 0x1340); + + return 0; +} + +/** + * igb_initialize_M88E1512_phy - Initialize M88E1512 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvel 1512 to work correctly with Avoton. + **/ +s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + /* Switch to PHY page 0xFF. 
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + + /* msec_delay(1000); */ + usleep_range(1000, 2000); +out: + return ret_val; +} + +/** + * igb_initialize_M88E1543_phy - Initialize M88E1512 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvell 1543 to work correctly with Avoton. + **/ +s32 igb_initialize_M88E1543_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + /* Switch to PHY page 0xFF. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x0C0D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Switch to PHY page 1. 
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1); + if (ret_val) + goto out; + + /* Change mode to 1000BASE-X/SGMII and autoneg enable */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + + /* msec_delay(1000); */ + usleep_range(1000, 2000); +out: + return ret_val; +} + +/** + * igb_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, restore the link to previous settings. + **/ +void igb_power_up_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * igb_power_down_phy_copper - Power down copper PHY + * @hw: pointer to the HW structure + * + * Power down PHY to save power when interface is down and wake on lan + * is not enabled. + **/ +void igb_power_down_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); + usleep_range(1000, 2000); +} + +/** + * igb_check_polarity_82580 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +static s32 igb_check_polarity_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + + ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); + + if (!ret_val) + phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Clear Auto-Crossover to force MDI manually. 82580 requires MDI + * forced whenever speed and duplex are forced. 
+ */ + ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; + + ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); + if (ret_val) + goto out; + + hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + if (ret_val) + goto out; + + if (!link) + hw_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_get_phy_info_82580 - Retrieve I82580 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 igb_get_phy_info_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = igb_check_polarity_82580(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false; + + if ((data & I82580_PHY_STATUS2_SPEED_MASK) == + I82580_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_get_cable_length_82580 - Determine cable length for 82580 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. 
+ **/ +s32 igb_get_cable_length_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + + ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); + if (ret_val) + goto out; + + length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> + I82580_DSTATUS_CABLE_LENGTH_SHIFT; + + if (length == E1000_CABLE_LENGTH_UNDEFINED) + ret_val = -E1000_ERR_PHY; + + phy->cable_length = length; + +out: + return ret_val; +} + +/** + * igb_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 igb_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + fallthrough; + default: + break; + } + + return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); +} diff --git a/devices/igb/e1000_phy-6.1-orig.h b/devices/igb/e1000_phy-6.1-orig.h new file mode 100644 index 00000000..5894e4b1 --- /dev/null +++ b/devices/igb/e1000_phy-6.1-orig.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#ifndef _E1000_PHY_H_ +#define _E1000_PHY_H_ + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +s32 igb_check_downshift(struct e1000_hw *hw); +s32 igb_check_reset_block(struct e1000_hw *hw); +s32 igb_copper_link_setup_igp(struct e1000_hw *hw); +s32 igb_copper_link_setup_m88(struct e1000_hw *hw); +s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 igb_get_cable_length_m88(struct e1000_hw *hw); +s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw); +s32 igb_get_cable_length_igp_2(struct e1000_hw *hw); +s32 igb_get_phy_id(struct e1000_hw *hw); +s32 igb_get_phy_info_igp(struct e1000_hw *hw); +s32 igb_get_phy_info_m88(struct e1000_hw *hw); +s32 igb_phy_sw_reset(struct e1000_hw *hw); +s32 igb_phy_hw_reset(struct e1000_hw *hw); +s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 igb_setup_copper_link(struct e1000_hw *hw); +s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +void igb_power_up_phy_copper(struct e1000_hw *hw); +void igb_power_down_phy_copper(struct e1000_hw *hw); +s32 igb_phy_init_script_igp3(struct e1000_hw *hw); +s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw); +s32 igb_initialize_M88E1543_phy(struct e1000_hw *hw); +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 
igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +s32 igb_copper_link_setup_82580(struct e1000_hw *hw); +s32 igb_get_phy_info_82580(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); +s32 igb_get_cable_length_82580(struct e1000_hw *hw); +s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_check_polarity_m88(struct e1000_hw *hw); + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define I82580_ADDR_REG 16 +#define I82580_CFG_REG 22 +#define I82580_CFG_ASSERT_CRS_ON_TX BIT(15) +#define I82580_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift 100/10 */ +#define I82580_CTRL_REG 23 +#define I82580_CTRL_DOWNSHIFT_MASK (7u << 10) + +/* 82580 specific PHY registers */ +#define I82580_PHY_CTRL_2 18 +#define I82580_PHY_LBK_CTRL 19 +#define I82580_PHY_STATUS_2 26 +#define I82580_PHY_DIAG_STATUS 31 + +/* I82580 PHY Status 2 */ +#define I82580_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82580_PHY_STATUS2_MDIX 0x0800 +#define I82580_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200 +#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 + +/* I82580 PHY Control 2 */ +#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600 + +/* I82580 PHY Diagnostics Status */ +#define I82580_DSTATUS_CABLE_LENGTH 0x03FC +#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* 82580 PHY Power Management */ +#define E1000_82580_PHY_POWER_MGMT 0xE14 +#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ +#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ +#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ + +/* Enable flexible speed on link-up */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +/* SFP modules ID memory locations */ +#define E1000_SFF_IDENTIFIER_OFFSET 0x00 +#define E1000_SFF_IDENTIFIER_SFF 0x02 +#define E1000_SFF_IDENTIFIER_SFP 0x03 + +#define E1000_SFF_ETH_FLAGS_OFFSET 0x06 +/* Flags for SFP 
modules compatible with ETH up to 1Gb */ +struct e1000_sfp_flags { + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; + u8 e1000_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e10_base_bx10:1; + u8 e10_base_px:1; +}; + +#endif diff --git a/devices/igb/e1000_regs-5.14-ethercat.h b/devices/igb/e1000_regs-5.14-ethercat.h new file mode 100644 index 00000000..9cb49980 --- /dev/null +++ b/devices/igb/e1000_regs-5.14-ethercat.h @@ -0,0 +1,419 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#ifndef _E1000_REGS_H_ +#define _E1000_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_LEDMUX 0x08130 /* LED MUX Control */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEMNGCTL_I210 0x12030 /* MNG EEprom Control */ +#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ +#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ +#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ +#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ +#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ +#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ +#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ +#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ +#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ +#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ + +/* IEEE 1588 TIMESYNCH */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define E1000_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define E1000_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 
Register High - RO */ +#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ +#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ +#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ + +/* Filtering Registers */ +#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) +#define E1000_DAQF(_n) (0x59A0 + 4 * (_n)) +#define E1000_SPQF(_n) (0x59C0 + 4 * (_n)) +#define E1000_FTQF(_n) (0x59E0 + 4 * (_n)) +#define E1000_SAQF0 E1000_SAQF(0) +#define E1000_DAQF0 E1000_DAQF(0) +#define E1000_SPQF0 E1000_SPQF(0) +#define E1000_FTQF0 E1000_FTQF(0) +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) + +/* DMA Coalescing registers */ +#define E1000_DMACR 0x02508 /* Control Register */ +#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* TX Rate Limit Registers */ +#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ +#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */ +#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ + +/* Split and Replication RX Control - RW */ +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ + +/* Thermal sensor configuration and status registers */ +#define E1000_THMJT 0x08100 /* Junction Temperature */ +#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ +#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ +#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \ + : (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \ + : (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \ + : (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \ + : (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \ + : (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \ + : (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \ + : (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \ + : (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \ + : (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \ + : (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \ + : (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \ + : (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ + : (0x0E028 + ((_n) * 0x40))) +#define E1000_RXCTL(_n) ((_n) < 4 ? 
(0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) +#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ + : (0x0E038 + ((_n) * 0x40))) +#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ + : (0x0E03C + ((_n) * 0x40))) + +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ + +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define 
E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +/* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXPTC 0x04104 +/* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICRXATC 0x04108 +/* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C +/* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXATC 0x04110 +/* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQEC 0x04118 +/* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICTXQMTC 0x0411C +/* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */ +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */ +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */ +#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */ +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define E1000_LENERRS 0x04138 /* Length Errors Count */ +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 
4)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) +#define E1000_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */ +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - R/W1C */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ + +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ + +/* RSS registers */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/ +#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */ +/* MSI-X Allocation Register (_i) - RW */ +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) +/* Redirection Table - RW Array */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ + +/* VT Registers */ +#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ +#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ +#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ +#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ +/* These act per VF so an array friendly macro is used */ +#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n))) +#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */ +#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) + +struct e1000_hw; + +u32 igb_rd32(struct e1000_hw *hw, u32 reg); + +/* write operations, indexed using DWORDS */ +#define wr32(reg, val) \ +do { \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ + if (!E1000_REMOVED(hw_addr)) \ + 
writel((val), &hw_addr[(reg)]); \ +} while (0) + +#define rd32(reg) (igb_rd32(hw, reg)) + +#define wrfl() ((void)rd32(E1000_STATUS)) + +#define array_wr32(reg, offset, value) \ + wr32((reg) + ((offset) << 2), (value)) + +#define array_rd32(reg, offset) (igb_rd32(hw, reg + ((offset) << 2))) + +/* DMA Coalescing registers */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* Energy Efficient Ethernet "EEE" register */ +#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ +#define E1000_EEE_SU 0X0E34 /* EEE Setup */ +#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ +#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ +#define E1000_MMDAC 13 /* MMD Access Control */ +#define E1000_MMDAAD 14 /* MMD Access Address/Data */ + +/* Thermal Sensor Register */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* OS2BMC Registers */ +#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ +#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ +#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ +#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ + +#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ +#define E1000_I210_FLMNGCTL 0x12038 +#define E1000_I210_FLMNGDATA 0x1203C +#define E1000_I210_FLMNGCNT 0x12040 + +#define E1000_I210_FLSWCTL 0x12048 +#define E1000_I210_FLSWDATA 0x1204C +#define E1000_I210_FLSWCNT 0x12050 + +#define E1000_I210_FLA 0x1201C + +#define E1000_I210_DTXMXPKTSZ 0x355C + +#define E1000_I210_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) + +#define E1000_I210_TQAVCTRL 0x3570 +#define E1000_I210_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) +#define E1000_I210_TQAVHC(_n) (0x300C + ((_n) * 0x40)) + +#define E1000_I210_RR2DCDELAY 0x5BF4 + +#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) +#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ + +#define E1000_REMOVED(h) unlikely(!(h)) + +#endif diff --git a/devices/igb/e1000_regs-5.14-orig.h b/devices/igb/e1000_regs-5.14-orig.h new file mode 100644 index 00000000..9cb49980 --- /dev/null +++ b/devices/igb/e1000_regs-5.14-orig.h @@ -0,0 +1,419 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_REGS_H_ +#define _E1000_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_LEDMUX 0x08130 /* LED MUX Control */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEMNGCTL_I210 0x12030 /* MNG EEprom Control */ +#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ +#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ +#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ +#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ +#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ +#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ +#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ +#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ +#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ +#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ + +/* IEEE 1588 TIMESYNCH */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define E1000_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define E1000_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 
Register High - RO */ +#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ +#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ +#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ + +/* Filtering Registers */ +#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) +#define E1000_DAQF(_n) (0x59A0 + 4 * (_n)) +#define E1000_SPQF(_n) (0x59C0 + 4 * (_n)) +#define E1000_FTQF(_n) (0x59E0 + 4 * (_n)) +#define E1000_SAQF0 E1000_SAQF(0) +#define E1000_DAQF0 E1000_DAQF(0) +#define E1000_SPQF0 E1000_SPQF(0) +#define E1000_FTQF0 E1000_FTQF(0) +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) + +/* DMA Coalescing registers */ +#define E1000_DMACR 0x02508 /* Control Register */ +#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* TX Rate Limit Registers */ +#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ +#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */ +#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ + +/* Split and Replication RX Control - RW */ +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ + +/* Thermal sensor configuration and status registers */ +#define E1000_THMJT 0x08100 /* Junction Temperature */ +#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ +#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ +#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \ + : (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \ + : (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \ + : (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \ + : (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \ + : (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \ + : (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \ + : (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \ + : (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \ + : (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \ + : (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \ + : (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \ + : (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ + : (0x0E028 + ((_n) * 0x40))) +#define E1000_RXCTL(_n) ((_n) < 4 ? 
(0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) +#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ + : (0x0E038 + ((_n) * 0x40))) +#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ + : (0x0E03C + ((_n) * 0x40))) + +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ + +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define 
E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +/* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXPTC 0x04104 +/* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICRXATC 0x04108 +/* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C +/* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXATC 0x04110 +/* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQEC 0x04118 +/* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICTXQMTC 0x0411C +/* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */ +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */ +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */ +#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */ +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define E1000_LENERRS 0x04138 /* Length Errors Count */ +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 
4)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) +#define E1000_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */ +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - R/W1C */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ + +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ + +/* RSS registers */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/ +#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */ +/* MSI-X Allocation Register (_i) - RW */ +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) +/* Redirection Table - RW Array */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ + +/* VT Registers */ +#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ +#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ +#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ +#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ +/* These act per VF so an array friendly macro is used */ +#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n))) +#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */ +#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) + +struct e1000_hw; + +u32 igb_rd32(struct e1000_hw *hw, u32 reg); + +/* write operations, indexed using DWORDS */ +#define wr32(reg, val) \ +do { \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ + if (!E1000_REMOVED(hw_addr)) \ + 
writel((val), &hw_addr[(reg)]); \ +} while (0) + +#define rd32(reg) (igb_rd32(hw, reg)) + +#define wrfl() ((void)rd32(E1000_STATUS)) + +#define array_wr32(reg, offset, value) \ + wr32((reg) + ((offset) << 2), (value)) + +#define array_rd32(reg, offset) (igb_rd32(hw, reg + ((offset) << 2))) + +/* DMA Coalescing registers */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* Energy Efficient Ethernet "EEE" register */ +#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ +#define E1000_EEE_SU 0X0E34 /* EEE Setup */ +#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ +#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ +#define E1000_MMDAC 13 /* MMD Access Control */ +#define E1000_MMDAAD 14 /* MMD Access Address/Data */ + +/* Thermal Sensor Register */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* OS2BMC Registers */ +#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ +#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ +#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ +#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ + +#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ +#define E1000_I210_FLMNGCTL 0x12038 +#define E1000_I210_FLMNGDATA 0x1203C +#define E1000_I210_FLMNGCNT 0x12040 + +#define E1000_I210_FLSWCTL 0x12048 +#define E1000_I210_FLSWDATA 0x1204C +#define E1000_I210_FLSWCNT 0x12050 + +#define E1000_I210_FLA 0x1201C + +#define E1000_I210_DTXMXPKTSZ 0x355C + +#define E1000_I210_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) + +#define E1000_I210_TQAVCTRL 0x3570 +#define E1000_I210_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) +#define E1000_I210_TQAVHC(_n) (0x300C + ((_n) * 0x40)) + +#define E1000_I210_RR2DCDELAY 0x5BF4 + +#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) +#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ + +#define E1000_REMOVED(h) unlikely(!(h)) + +#endif diff --git a/devices/igb/e1000_regs-6.1-ethercat.h b/devices/igb/e1000_regs-6.1-ethercat.h new file mode 100644 index 00000000..eb9f6da9 --- /dev/null +++ b/devices/igb/e1000_regs-6.1-ethercat.h @@ -0,0 +1,418 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_REGS_H_ +#define _E1000_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_LEDMUX 0x08130 /* LED MUX Control */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEMNGCTL_I210 0x12030 /* MNG EEprom Control */ +#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ +#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ +#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ +#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ +#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ +#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ +#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ +#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ +#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ +#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ + +/* IEEE 1588 TIMESYNCH */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define E1000_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define E1000_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 
Register High - RO */ +#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ +#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ +#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ + +/* Filtering Registers */ +#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) +#define E1000_DAQF(_n) (0x59A0 + 4 * (_n)) +#define E1000_SPQF(_n) (0x59C0 + 4 * (_n)) +#define E1000_FTQF(_n) (0x59E0 + 4 * (_n)) +#define E1000_SAQF0 E1000_SAQF(0) +#define E1000_DAQF0 E1000_DAQF(0) +#define E1000_SPQF0 E1000_SPQF(0) +#define E1000_FTQF0 E1000_FTQF(0) +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) + +/* DMA Coalescing registers */ +#define E1000_DMACR 0x02508 /* Control Register */ +#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ + +/* TX Rate Limit Registers */ +#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ +#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */ +#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ + +/* Split and Replication RX Control - RW */ +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ + +/* Thermal sensor configuration and status registers */ +#define E1000_THMJT 0x08100 /* Junction Temperature */ +#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ +#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ +#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \ + : (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \ + : (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \ + : (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \ + : (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \ + : (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \ + : (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \ + : (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \ + : (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \ + : (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \ + : (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \ + : (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \ + : (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ + : (0x0E028 + ((_n) * 0x40))) +#define E1000_RXCTL(_n) ((_n) < 4 ? 
(0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) +#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ + : (0x0E038 + ((_n) * 0x40))) +#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ + : (0x0E03C + ((_n) * 0x40))) + +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ + +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define 
E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +/* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXPTC 0x04104 +/* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICRXATC 0x04108 +/* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C +/* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXATC 0x04110 +/* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQEC 0x04118 +/* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICTXQMTC 0x0411C +/* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */ +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */ +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */ +#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */ +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define E1000_LENERRS 0x04138 /* Length Errors Count */ +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 
4)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) +#define E1000_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */ +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - R/W1C */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ + +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ + +/* RSS registers */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/ +#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */ +/* MSI-X Allocation Register (_i) - RW */ +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) +/* Redirection Table - RW Array */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ + +/* VT Registers */ +#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ +#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ +#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ +#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ +/* These act per VF so an array friendly macro is used */ +#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n))) +#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */ +#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) + +struct e1000_hw; + +u32 igb_rd32(struct e1000_hw *hw, u32 reg); + +/* write operations, indexed using DWORDS */ +#define wr32(reg, val) \ +do { \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ + if (!E1000_REMOVED(hw_addr)) \ + 
writel((val), &hw_addr[(reg)]); \ +} while (0) + +#define rd32(reg) (igb_rd32(hw, reg)) + +#define wrfl() ((void)rd32(E1000_STATUS)) + +#define array_wr32(reg, offset, value) \ + wr32((reg) + ((offset) << 2), (value)) + +#define array_rd32(reg, offset) (igb_rd32(hw, reg + ((offset) << 2))) + +/* DMA Coalescing registers */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* Energy Efficient Ethernet "EEE" register */ +#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ +#define E1000_EEE_SU 0X0E34 /* EEE Setup */ +#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ +#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ +#define E1000_MMDAC 13 /* MMD Access Control */ +#define E1000_MMDAAD 14 /* MMD Access Address/Data */ + +/* Thermal Sensor Register */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* OS2BMC Registers */ +#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ +#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ +#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ +#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ + +#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ +#define E1000_I210_FLMNGCTL 0x12038 +#define E1000_I210_FLMNGDATA 0x1203C +#define E1000_I210_FLMNGCNT 0x12040 + +#define E1000_I210_FLSWCTL 0x12048 +#define E1000_I210_FLSWDATA 0x1204C +#define E1000_I210_FLSWCNT 0x12050 + +#define E1000_I210_FLA 0x1201C + +#define E1000_I210_DTXMXPKTSZ 0x355C + +#define E1000_I210_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) + +#define E1000_I210_TQAVCTRL 0x3570 +#define E1000_I210_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) +#define E1000_I210_TQAVHC(_n) (0x300C + ((_n) * 0x40)) + +#define E1000_I210_RR2DCDELAY 0x5BF4 + +#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) +#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ + +#define E1000_REMOVED(h) unlikely(!(h)) + +#endif diff --git a/devices/igb/e1000_regs-6.1-orig.h b/devices/igb/e1000_regs-6.1-orig.h new file mode 100644 index 00000000..eb9f6da9 --- /dev/null +++ b/devices/igb/e1000_regs-6.1-orig.h @@ -0,0 +1,418 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#ifndef _E1000_REGS_H_ +#define _E1000_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_LEDMUX 0x08130 /* LED MUX Control */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEMNGCTL_I210 0x12030 /* MNG EEprom Control */ +#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ +#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ +#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ +#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ +#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ +#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ +#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ +#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ +#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ +#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ + +/* IEEE 1588 TIMESYNCH */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define E1000_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define E1000_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 
Register High - RO */ +#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ +#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ +#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ + +/* Filtering Registers */ +#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) +#define E1000_DAQF(_n) (0x59A0 + 4 * (_n)) +#define E1000_SPQF(_n) (0x59C0 + 4 * (_n)) +#define E1000_FTQF(_n) (0x59E0 + 4 * (_n)) +#define E1000_SAQF0 E1000_SAQF(0) +#define E1000_DAQF0 E1000_DAQF(0) +#define E1000_SPQF0 E1000_SPQF(0) +#define E1000_FTQF0 E1000_FTQF(0) +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) + +/* DMA Coalescing registers */ +#define E1000_DMACR 0x02508 /* Control Register */ +#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ + +/* TX Rate Limit Registers */ +#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ +#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */ +#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ + +/* Split and Replication RX Control - RW */ +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ + +/* Thermal sensor configuration and status registers */ +#define E1000_THMJT 0x08100 /* Junction Temperature */ +#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ +#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ +#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \ + : (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \ + : (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \ + : (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \ + : (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \ + : (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \ + : (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \ + : (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \ + : (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \ + : (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \ + : (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \ + : (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \ + : (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ + : (0x0E028 + ((_n) * 0x40))) +#define E1000_RXCTL(_n) ((_n) < 4 ? 
(0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) +#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ + : (0x0E038 + ((_n) * 0x40))) +#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ + : (0x0E03C + ((_n) * 0x40))) + +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ + +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define 
E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +/* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXPTC 0x04104 +/* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICRXATC 0x04108 +/* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C +/* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXATC 0x04110 +/* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQEC 0x04118 +/* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICTXQMTC 0x0411C +/* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */ +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */ +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */ +#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */ +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define E1000_LENERRS 0x04138 /* Length Errors Count */ +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 
4)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) +#define E1000_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */ +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - R/W1C */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ + +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ + +/* RSS registers */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/ +#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */ +/* MSI-X Allocation Register (_i) - RW */ +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) +/* Redirection Table - RW Array */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ + +/* VT Registers */ +#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ +#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ +#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ +#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ +/* These act per VF so an array friendly macro is used */ +#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n))) +#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */ +#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) + +struct e1000_hw; + +u32 igb_rd32(struct e1000_hw *hw, u32 reg); + +/* write operations, indexed using DWORDS */ +#define wr32(reg, val) \ +do { \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ + if (!E1000_REMOVED(hw_addr)) \ + 
writel((val), &hw_addr[(reg)]); \ +} while (0) + +#define rd32(reg) (igb_rd32(hw, reg)) + +#define wrfl() ((void)rd32(E1000_STATUS)) + +#define array_wr32(reg, offset, value) \ + wr32((reg) + ((offset) << 2), (value)) + +#define array_rd32(reg, offset) (igb_rd32(hw, reg + ((offset) << 2))) + +/* DMA Coalescing registers */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* Energy Efficient Ethernet "EEE" register */ +#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ +#define E1000_EEE_SU 0X0E34 /* EEE Setup */ +#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ +#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ +#define E1000_MMDAC 13 /* MMD Access Control */ +#define E1000_MMDAAD 14 /* MMD Access Address/Data */ + +/* Thermal Sensor Register */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* OS2BMC Registers */ +#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ +#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ +#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ +#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ + +#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ +#define E1000_I210_FLMNGCTL 0x12038 +#define E1000_I210_FLMNGDATA 0x1203C +#define E1000_I210_FLMNGCNT 0x12040 + +#define E1000_I210_FLSWCTL 0x12048 +#define E1000_I210_FLSWDATA 0x1204C +#define E1000_I210_FLSWCNT 0x12050 + +#define E1000_I210_FLA 0x1201C + +#define E1000_I210_DTXMXPKTSZ 0x355C + +#define E1000_I210_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) + +#define E1000_I210_TQAVCTRL 0x3570 +#define E1000_I210_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) +#define E1000_I210_TQAVHC(_n) (0x300C + ((_n) * 0x40)) + +#define E1000_I210_RR2DCDELAY 0x5BF4 + +#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) +#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ + +#define E1000_REMOVED(h) unlikely(!(h)) + +#endif diff --git a/devices/igb/igb-5.14-ethercat.h b/devices/igb/igb-5.14-ethercat.h new file mode 100644 index 00000000..8c634215 --- /dev/null +++ b/devices/igb/igb-5.14-ethercat.h @@ -0,0 +1,815 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _IGB_H_ +#define _IGB_H_ + +#include "e1000_mac-5.14-ethercat.h" +#include "e1000_82575-5.14-ethercat.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* EtherCAT header file */ +#include "../ecdev.h" + +struct igb_adapter; + +#define E1000_PCS_CFG_IGN_SD 1 + +/* Interrupt defines */ +#define IGB_START_ITR 648 /* ~6000 ints/sec */ +#define IGB_4K_ITR 980 +#define IGB_20K_ITR 196 +#define IGB_70K_ITR 56 + +/* TX/RX descriptor defines */ +#define IGB_DEFAULT_TXD 256 +#define IGB_DEFAULT_TX_WORK 128 +#define IGB_MIN_TXD 80 +#define IGB_MAX_TXD 4096 + +#define IGB_DEFAULT_RXD 256 +#define IGB_MIN_RXD 80 +#define IGB_MAX_RXD 4096 + +#define IGB_DEFAULT_ITR 3 /* dynamic */ +#define IGB_MAX_ITR_USECS 10000 +#define IGB_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_Q_VECTORS 8 +#define MAX_MSIX_ENTRIES 10 + +/* Transmit and receive queues */ +#define IGB_MAX_RX_QUEUES 8 +#define IGB_MAX_RX_QUEUES_82575 4 +#define IGB_MAX_RX_QUEUES_I211 2 +#define IGB_MAX_TX_QUEUES 8 +#define IGB_MAX_VF_MC_ENTRIES 30 +#define IGB_MAX_VF_FUNCTIONS 8 +#define IGB_MAX_VFTA_ENTRIES 128 +#define IGB_82576_VF_DEV_ID 0x10CA +#define IGB_I350_VF_DEV_ID 0x1520 + +/* NVM version defines */ +#define IGB_MAJOR_MASK 0xF000 +#define IGB_MINOR_MASK 0x0FF0 +#define IGB_BUILD_MASK 0x000F +#define IGB_COMB_VER_MASK 0x00FF +#define IGB_MAJOR_SHIFT 12 +#define IGB_MINOR_SHIFT 4 +#define IGB_COMB_VER_SHFT 8 +#define IGB_NVM_VER_INVALID 0xFFFF +#define IGB_ETRACK_SHIFT 16 +#define NVM_ETRACK_WORD 0x0042 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* Transmit and receive latency (for PTP timestamps) */ +#define IGB_I210_TX_LATENCY_10 9542 +#define IGB_I210_TX_LATENCY_100 1024 +#define IGB_I210_TX_LATENCY_1000 178 +#define IGB_I210_RX_LATENCY_10 20662 +#define IGB_I210_RX_LATENCY_100 2213 +#define IGB_I210_RX_LATENCY_1000 448 + +/* XDP */ +#define IGB_XDP_PASS 0 +#define IGB_XDP_CONSUMED BIT(0) +#define IGB_XDP_TX BIT(1) +#define IGB_XDP_REDIR BIT(2) + +struct vf_data_storage { + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u32 flags; + unsigned long last_nack; + u16 pf_vlan; /* When set, guest VLAN config not allowed. */ + u16 pf_qos; + u16 tx_rate; + bool spoofchk_enabled; + bool trusted; +}; + +/* Number of unicast MAC filters reserved for the PF in the RAR registers */ +#define IGB_PF_MAC_FILTERS_RESERVED 3 + +struct vf_mac_filter { + struct list_head l; + int vf; + bool free; + u8 vf_mac[ETH_ALEN]; +}; + +#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ +#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */ +#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */ +#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */ + +/* RX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 
12 : 8) +#define IGB_RX_HTHRESH 8 +#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8) +#define IGB_TX_HTHRESH 1 +#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ + (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4) +#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ + (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16) + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +#define IGB_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) + +/* Supported Rx Buffer Sizes */ +#define IGB_RXBUFFER_256 256 +#define IGB_RXBUFFER_1536 1536 +#define IGB_RXBUFFER_2048 2048 +#define IGB_RXBUFFER_3072 3072 +#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 +#define IGB_TS_HDR_LEN 16 + +/* Attempt to maximize the headroom available for incoming frames. We + * use a 2K buffer for receives and need 1536/1534 to store the data for + * the frame. This leaves us with 512 bytes of room. From that we need + * to deduct the space needed for the shared info and the padding needed + * to IP align the frame. + * + * Note: For cache line sizes 256 or larger this value is going to end + * up negative. In these cases we should fall back to the 3K + * buffers. + */ +#if (PAGE_SIZE < 8192) +#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_1536 - NET_IP_ALIGN) +#define IGB_2K_TOO_SMALL_WITH_PADDING \ +((NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048)) + +static inline int igb_compute_pad(int rx_buf_len) +{ + int page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline int igb_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. + * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. + */ + if (IGB_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = IGB_RXBUFFER_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + + return igb_compute_pad(rx_buf_len); +} + +#define IGB_SKB_PAD igb_skb_pad() +#else +#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define IGB_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define AUTO_ALL_MODES 0 +#define IGB_EEPROM_APME 0x0400 + +#ifndef IGB_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define IGB_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define IGB_MNG_VLAN_NONE -1 + +enum igb_tx_flags { + /* cmd_type flags */ + IGB_TX_FLAGS_VLAN = 0x01, + IGB_TX_FLAGS_TSO = 0x02, + IGB_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGB_TX_FLAGS_IPV4 = 0x10, + IGB_TX_FLAGS_CSUM = 0x20, +}; + +/* VLAN info */ +#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGB_TX_FLAGS_VLAN_SHIFT 16 + +/* The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. 
+ */ +#define IGB_MAX_TXD_PWR 15 +#define IGB_MAX_DATA_PER_TXD (1u << IGB_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* EEPROM byte offsets */ +#define IGB_SFF_8472_SWAP 0x5C +#define IGB_SFF_8472_COMP 0x5E + +/* Bitmasks */ +#define IGB_SFF_ADDRESSING_MODE 0x4 +#define IGB_SFF_8472_UNSUP 0x00 + +/* TX resources are shared between XDP and netstack + * and we need to tag the buffer type to distinguish them + */ +enum igb_tx_buf_type { + IGB_TYPE_SKB = 0, + IGB_TYPE_XDP, +}; + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igb_tx_buffer { + union e1000_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + enum igb_tx_buf_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct igb_rx_buffer { + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; +}; + +struct igb_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igb_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igb_ring_container { + struct igb_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct igb_ring { + struct igb_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct bpf_prog *xdp_prog; + struct device *dev; /* device pointer for dma mapping */ + union { /* array of buffer info structs */ + struct igb_tx_buffer *tx_buffer_info; + struct igb_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. 
in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ + bool cbs_enable; /* indicates if CBS is enabled */ + s32 idleslope; /* idleSlope in kbps */ + s32 sendslope; /* sendSlope in kbps */ + s32 hicredit; /* hiCredit in bytes */ + s32 locredit; /* loCredit in bytes */ + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igb_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + /* RX */ + struct { + struct sk_buff *skb; + struct igb_rx_queue_stats rx_stats; + struct u64_stats_sync rx_syncp; + }; + }; + struct xdp_rxq_info xdp_rxq; +} ____cacheline_internodealigned_in_smp; + +struct igb_q_vector { + struct igb_adapter *adapter; /* backlink */ + int cpu; /* CPU for DCA */ + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + void __iomem *itr_register; + + struct igb_ring_container rx, tx; + + struct napi_struct napi; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + + /* for dynamic allocation of rings associated with this q_vector */ + struct igb_ring ring[] ____cacheline_internodealigned_in_smp; +}; + +enum e1000_ring_flags_t { + IGB_RING_FLAG_RX_3K_BUFFER, + IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, + IGB_RING_FLAG_RX_SCTP_CSUM, + IGB_RING_FLAG_RX_LB_VLAN_BSWAP, + IGB_RING_FLAG_TX_CTX_IDX, + IGB_RING_FLAG_TX_DETECT_HANG +}; + +#define ring_uses_large_buffer(ring) \ + test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define set_ring_uses_large_buffer(ring) \ + set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define clear_ring_uses_large_buffer(ring) \ + clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) + +#define ring_uses_build_skb(ring) \ + test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) +#define set_ring_build_skb_enabled(ring) \ + set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) +#define clear_ring_build_skb_enabled(ring) \ + clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) + +static inline unsigned int igb_rx_bufsz(struct igb_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return IGB_RXBUFFER_3072; + + if (ring_uses_build_skb(ring)) + return IGB_MAX_FRAME_BUILD_SKB; +#endif + return IGB_RXBUFFER_2048; +} + +static inline unsigned int igb_rx_pg_order(struct igb_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return 1; +#endif + return 0; +} + +#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring)) + +#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) + +#define IGB_RX_DESC(R, i) \ + (&(((union e1000_adv_rx_desc *)((R)->desc))[i])) +#define IGB_TX_DESC(R, i) \ + (&(((union e1000_adv_tx_desc *)((R)->desc))[i])) +#define IGB_TX_CTXTDESC(R, i) \ + (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i])) + +/* igb_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +/* igb_desc_unused - calculate if we have unused descriptors */ +static inline int igb_desc_unused(struct igb_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + 
ring->next_to_clean - ring->next_to_use - 1; +} + +#ifdef CONFIG_IGB_HWMON + +#define IGB_HWMON_TYPE_LOC 0 +#define IGB_HWMON_TYPE_TEMP 1 +#define IGB_HWMON_TYPE_CAUTION 2 +#define IGB_HWMON_TYPE_MAX 3 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct e1000_hw *hw; + struct e1000_thermal_diode_data *sensor; + char name[12]; + }; + +struct hwmon_buff { + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1]; + struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4]; + unsigned int n_hwmon; + }; +#endif + +/* The number of L2 ether-type filter registers, Index 3 is reserved + * for PTP 1588 timestamp + */ +#define MAX_ETYPE_FILTER (4 - 1) +/* ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters here!! + * + * Current filters: Filter 3 + */ +#define IGB_ETQF_FILTER_1588 3 + +#define IGB_N_EXTTS 2 +#define IGB_N_PEROUT 2 +#define IGB_N_SDP 4 +#define IGB_RETA_SIZE 128 + +enum igb_filter_match_flags { + IGB_FILTER_FLAG_ETHER_TYPE = 0x1, + IGB_FILTER_FLAG_VLAN_TCI = 0x2, + IGB_FILTER_FLAG_SRC_MAC_ADDR = 0x4, + IGB_FILTER_FLAG_DST_MAC_ADDR = 0x8, +}; + +#define IGB_MAX_RXNFC_FILTERS 16 + +/* RX network flow classification data structure */ +struct igb_nfc_input { + /* Byte layout in order, all values with MSB first: + * match_flags - 1 byte + * etype - 2 bytes + * vlan_tci - 2 bytes + */ + u8 match_flags; + __be16 etype; + __be16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; +}; + +struct igb_nfc_filter { + struct hlist_node nfc_node; + struct igb_nfc_input filter; + unsigned long cookie; + u16 etype_reg_index; + u16 sw_idx; + u16 action; +}; + +struct igb_mac_addr { + u8 addr[ETH_ALEN]; + u8 queue; + u8 state; /* bitmask */ +}; + +#define IGB_MAC_STATE_DEFAULT 0x1 +#define IGB_MAC_STATE_IN_USE 0x2 +#define IGB_MAC_STATE_SRC_ADDR 0x4 +#define IGB_MAC_STATE_QUEUE_STEERING 0x8 + +/* board specific private data structure */ +struct igb_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + struct net_device *netdev; + struct bpf_prog *xdp_prog; + + unsigned long state; + unsigned int flags; + + unsigned int num_q_vectors; + struct msix_entry msix_entries[MAX_MSIX_ENTRIES]; + + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + u16 tx_itr; + u16 rx_itr; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igb_ring *tx_ring[16]; + + /* RX */ + int num_rx_queues; + struct igb_ring *rx_ring[16]; + + u32 max_frame_size; + u32 min_frame_size; + + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + + u16 mng_vlan_id; + u32 bd_number; + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + + u8 __iomem *io_addr; /* Mainly for iounmap use */ + + struct work_struct reset_task; + struct work_struct watchdog_task; + bool fc_autoneg; + u8 tx_timeout_factor; + struct timer_list blink_timer; + unsigned long led_status; + + /* OS defined structs */ + struct pci_dev *pdev; + + spinlock_t stats64_lock; + struct rtnl_link_stats64 stats64; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + + u32 test_icr; + struct igb_ring test_tx_ring; + struct igb_ring test_rx_ring; + + int msg_enable; + + struct igb_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + /* to not mess up cache alignment, always add to the bottom */ + u16 tx_ring_count; + u16 
rx_ring_count; + unsigned int vfs_allocated_count; + struct vf_data_storage *vf_data; + int vf_rate_link_speed; + u32 rss_queues; + u32 wvbr; + u32 *shadow_vfta; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct delayed_work ptp_overflow_work; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_rx_ptp_check; + unsigned long last_rx_timestamp; + unsigned int ptp_flags; + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + bool pps_sys_wrap_on; + + struct ptp_pin_desc sdp_config[IGB_N_SDP]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[IGB_N_PEROUT]; + + char fw_version[32]; +#ifdef CONFIG_IGB_HWMON + struct hwmon_buff *igb_hwmon_buff; + bool ets; +#endif + struct i2c_algo_bit_data i2c_algo; + struct i2c_adapter i2c_adap; + struct i2c_client *i2c_client; + u32 rss_indir_tbl_init; + u8 rss_indir_tbl[IGB_RETA_SIZE]; + + unsigned long link_check_timeout; + int copper_tries; + struct e1000_info ei; + u16 eee_advert; + + /* RX network flow classification support */ + struct hlist_head nfc_filter_list; + struct hlist_head cls_flower_list; + unsigned int nfc_filter_count; + /* lock for RX network flow classification filter */ + spinlock_t nfc_lock; + bool etype_bitmap[MAX_ETYPE_FILTER]; + + struct igb_mac_addr *mac_table; + struct vf_mac_filter vf_macs; + struct vf_mac_filter *vf_mac_list; + + /* EtherCAT device variables */ + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +/* flags controlling PTP/1588 function */ +#define IGB_PTP_ENABLED BIT(0) +#define IGB_PTP_OVERFLOW_CHECK BIT(1) + +#define IGB_FLAG_HAS_MSI BIT(0) +#define IGB_FLAG_DCA_ENABLED BIT(1) +#define IGB_FLAG_QUAD_PORT_A BIT(2) +#define IGB_FLAG_QUEUE_PAIRS BIT(3) +#define IGB_FLAG_DMAC BIT(4) +#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6) +#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7) +#define IGB_FLAG_WOL_SUPPORTED BIT(8) +#define IGB_FLAG_NEED_LINK_UPDATE BIT(9) +#define IGB_FLAG_MEDIA_RESET BIT(10) +#define IGB_FLAG_MAS_CAPABLE BIT(11) +#define IGB_FLAG_MAS_ENABLE BIT(12) +#define IGB_FLAG_HAS_MSIX BIT(13) +#define IGB_FLAG_EEE BIT(14) +#define IGB_FLAG_VLAN_PROMISC BIT(15) +#define IGB_FLAG_RX_LEGACY BIT(16) +#define IGB_FLAG_FQTSS BIT(17) + +/* Media Auto Sense */ +#define IGB_MAS_ENABLE_0 0X0001 +#define IGB_MAS_ENABLE_1 0X0002 +#define IGB_MAS_ENABLE_2 0X0004 +#define IGB_MAS_ENABLE_3 0X0008 + +/* DMA Coalescing defines */ +#define IGB_MIN_TXPBSIZE 20408 +#define IGB_TX_BUF_4096 4096 +#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ + +#define IGB_82576_TSYNC_SHIFT 19 +enum e1000_state_t { + __IGB_TESTING, + __IGB_RESETTING, + __IGB_DOWN, + __IGB_PTP_TX_IN_PROGRESS, +}; + +enum igb_boards { + board_82575, +}; + +extern char igb_driver_name[]; + +int igb_xmit_xdp_ring(struct igb_adapter *adapter, + struct igb_ring *ring, + struct xdp_frame *xdpf); +int igb_open(struct net_device *netdev); +int igb_close(struct net_device *netdev); +int igb_up(struct igb_adapter *); +void igb_down(struct igb_adapter *); +void igb_reinit_locked(struct igb_adapter *); +void igb_reset(struct igb_adapter *); +int igb_reinit_queues(struct igb_adapter *); +void igb_write_rss_indir_tbl(struct igb_adapter *); +int igb_set_spd_dplx(struct igb_adapter *, u32, u8); +int igb_setup_tx_resources(struct igb_ring *); +int igb_setup_rx_resources(struct igb_ring *); 
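Aside from the version-suffixed e1000_mac/e1000_82575 includes, the visible differences between this igb-5.14-ethercat.h copy and the igb-5.14-orig.h reference copy below are the extra #include of ../ecdev.h and the two EtherCAT members appended to struct igb_adapter (ecdev and ec_watchdog_jiffies). A minimal sketch of how patched driver code is expected to use those members, assuming the ecdev_receive() helper declared in ecdev.h (the pattern used by the other ec_* network drivers); this is an illustration only, not part of the patch:

/* Sketch: decide between the EtherCAT path and the normal netdev path. */
static void igb_ec_receive_frame(struct igb_adapter *adapter,
                                 const u8 *data, size_t len)
{
	if (adapter->ecdev) {
		/* NIC is claimed by the EtherCAT master: hand the frame to it
		 * directly and record activity for the EtherCAT watchdog. */
		ecdev_receive(adapter->ecdev, data, len);
		adapter->ec_watchdog_jiffies = jiffies;
		return;
	}
	/* Otherwise the frame takes the usual NAPI/netif receive path. */
}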
+void igb_free_tx_resources(struct igb_ring *); +void igb_free_rx_resources(struct igb_ring *); +void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); +void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); +void igb_setup_tctl(struct igb_adapter *); +void igb_setup_rctl(struct igb_adapter *); +void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *); +netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); +void igb_alloc_rx_buffers(struct igb_ring *, u16); +void igb_update_stats(struct igb_adapter *); +bool igb_has_link(struct igb_adapter *adapter); +void igb_set_ethtool_ops(struct net_device *); +void igb_power_up_link(struct igb_adapter *); +void igb_set_fw_version(struct igb_adapter *); +void igb_ptp_init(struct igb_adapter *adapter); +void igb_ptp_stop(struct igb_adapter *adapter); +void igb_ptp_reset(struct igb_adapter *adapter); +void igb_ptp_suspend(struct igb_adapter *adapter); +void igb_ptp_rx_hang(struct igb_adapter *adapter); +void igb_ptp_tx_hang(struct igb_adapter *adapter); +void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); +int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, + ktime_t *timestamp); +int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); +int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +void igb_set_flag_queue_pairs(struct igb_adapter *, const u32); +unsigned int igb_get_max_rss_queues(struct igb_adapter *); +#ifdef CONFIG_IGB_HWMON +void igb_sysfs_exit(struct igb_adapter *adapter); +int igb_sysfs_init(struct igb_adapter *adapter); +#endif +static inline s32 igb_reset_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return 0; +} + +static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return 0; +} + +static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + if (hw->phy.ops.write_reg) + return hw->phy.ops.write_reg(hw, offset, data); + + return 0; +} + +static inline s32 igb_get_phy_info(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_phy_info) + return hw->phy.ops.get_phy_info(hw); + + return 0; +} + +static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} + +int igb_add_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input); +int igb_erase_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input); + +int igb_add_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); +int igb_del_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); + +#endif /* _IGB_H_ */ diff --git a/devices/igb/igb-5.14-orig.h b/devices/igb/igb-5.14-orig.h new file mode 100644 index 00000000..2d3daf02 --- /dev/null +++ b/devices/igb/igb-5.14-orig.h @@ -0,0 +1,808 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _IGB_H_ +#define _IGB_H_ + +#include "e1000_mac.h" +#include "e1000_82575.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +struct igb_adapter; + +#define E1000_PCS_CFG_IGN_SD 1 + +/* Interrupt defines */ +#define IGB_START_ITR 648 /* ~6000 ints/sec */ +#define IGB_4K_ITR 980 +#define IGB_20K_ITR 196 +#define IGB_70K_ITR 56 + +/* TX/RX descriptor defines */ +#define IGB_DEFAULT_TXD 256 +#define IGB_DEFAULT_TX_WORK 128 +#define IGB_MIN_TXD 80 +#define IGB_MAX_TXD 4096 + +#define IGB_DEFAULT_RXD 256 +#define IGB_MIN_RXD 80 +#define IGB_MAX_RXD 4096 + +#define IGB_DEFAULT_ITR 3 /* dynamic */ +#define IGB_MAX_ITR_USECS 10000 +#define IGB_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_Q_VECTORS 8 +#define MAX_MSIX_ENTRIES 10 + +/* Transmit and receive queues */ +#define IGB_MAX_RX_QUEUES 8 +#define IGB_MAX_RX_QUEUES_82575 4 +#define IGB_MAX_RX_QUEUES_I211 2 +#define IGB_MAX_TX_QUEUES 8 +#define IGB_MAX_VF_MC_ENTRIES 30 +#define IGB_MAX_VF_FUNCTIONS 8 +#define IGB_MAX_VFTA_ENTRIES 128 +#define IGB_82576_VF_DEV_ID 0x10CA +#define IGB_I350_VF_DEV_ID 0x1520 + +/* NVM version defines */ +#define IGB_MAJOR_MASK 0xF000 +#define IGB_MINOR_MASK 0x0FF0 +#define IGB_BUILD_MASK 0x000F +#define IGB_COMB_VER_MASK 0x00FF +#define IGB_MAJOR_SHIFT 12 +#define IGB_MINOR_SHIFT 4 +#define IGB_COMB_VER_SHFT 8 +#define IGB_NVM_VER_INVALID 0xFFFF +#define IGB_ETRACK_SHIFT 16 +#define NVM_ETRACK_WORD 0x0042 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* Transmit and receive latency (for PTP timestamps) */ +#define IGB_I210_TX_LATENCY_10 9542 +#define IGB_I210_TX_LATENCY_100 1024 +#define IGB_I210_TX_LATENCY_1000 178 +#define IGB_I210_RX_LATENCY_10 20662 +#define IGB_I210_RX_LATENCY_100 2213 +#define IGB_I210_RX_LATENCY_1000 448 + +/* XDP */ +#define IGB_XDP_PASS 0 +#define IGB_XDP_CONSUMED BIT(0) +#define IGB_XDP_TX BIT(1) +#define IGB_XDP_REDIR BIT(2) + +struct vf_data_storage { + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u32 flags; + unsigned long last_nack; + u16 pf_vlan; /* When set, guest VLAN config not allowed. */ + u16 pf_qos; + u16 tx_rate; + bool spoofchk_enabled; + bool trusted; +}; + +/* Number of unicast MAC filters reserved for the PF in the RAR registers */ +#define IGB_PF_MAC_FILTERS_RESERVED 3 + +struct vf_mac_filter { + struct list_head l; + int vf; + bool free; + u8 vf_mac[ETH_ALEN]; +}; + +#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ +#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */ +#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */ +#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */ + +/* RX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8) +#define IGB_RX_HTHRESH 8 +#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 
20 : 8) +#define IGB_TX_HTHRESH 1 +#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ + (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4) +#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ + (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16) + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +#define IGB_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) + +/* Supported Rx Buffer Sizes */ +#define IGB_RXBUFFER_256 256 +#define IGB_RXBUFFER_1536 1536 +#define IGB_RXBUFFER_2048 2048 +#define IGB_RXBUFFER_3072 3072 +#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 +#define IGB_TS_HDR_LEN 16 + +/* Attempt to maximize the headroom available for incoming frames. We + * use a 2K buffer for receives and need 1536/1534 to store the data for + * the frame. This leaves us with 512 bytes of room. From that we need + * to deduct the space needed for the shared info and the padding needed + * to IP align the frame. + * + * Note: For cache line sizes 256 or larger this value is going to end + * up negative. In these cases we should fall back to the 3K + * buffers. + */ +#if (PAGE_SIZE < 8192) +#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_1536 - NET_IP_ALIGN) +#define IGB_2K_TOO_SMALL_WITH_PADDING \ +((NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048)) + +static inline int igb_compute_pad(int rx_buf_len) +{ + int page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline int igb_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. + * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. + */ + if (IGB_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = IGB_RXBUFFER_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + + return igb_compute_pad(rx_buf_len); +} + +#define IGB_SKB_PAD igb_skb_pad() +#else +#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define IGB_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define AUTO_ALL_MODES 0 +#define IGB_EEPROM_APME 0x0400 + +#ifndef IGB_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define IGB_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define IGB_MNG_VLAN_NONE -1 + +enum igb_tx_flags { + /* cmd_type flags */ + IGB_TX_FLAGS_VLAN = 0x01, + IGB_TX_FLAGS_TSO = 0x02, + IGB_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGB_TX_FLAGS_IPV4 = 0x10, + IGB_TX_FLAGS_CSUM = 0x20, +}; + +/* VLAN info */ +#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGB_TX_FLAGS_VLAN_SHIFT 16 + +/* The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. 
+ */ +#define IGB_MAX_TXD_PWR 15 +#define IGB_MAX_DATA_PER_TXD (1u << IGB_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* EEPROM byte offsets */ +#define IGB_SFF_8472_SWAP 0x5C +#define IGB_SFF_8472_COMP 0x5E + +/* Bitmasks */ +#define IGB_SFF_ADDRESSING_MODE 0x4 +#define IGB_SFF_8472_UNSUP 0x00 + +/* TX resources are shared between XDP and netstack + * and we need to tag the buffer type to distinguish them + */ +enum igb_tx_buf_type { + IGB_TYPE_SKB = 0, + IGB_TYPE_XDP, +}; + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igb_tx_buffer { + union e1000_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + enum igb_tx_buf_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct igb_rx_buffer { + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; +}; + +struct igb_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igb_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igb_ring_container { + struct igb_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct igb_ring { + struct igb_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct bpf_prog *xdp_prog; + struct device *dev; /* device pointer for dma mapping */ + union { /* array of buffer info structs */ + struct igb_tx_buffer *tx_buffer_info; + struct igb_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. 
in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ + bool cbs_enable; /* indicates if CBS is enabled */ + s32 idleslope; /* idleSlope in kbps */ + s32 sendslope; /* sendSlope in kbps */ + s32 hicredit; /* hiCredit in bytes */ + s32 locredit; /* loCredit in bytes */ + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igb_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + /* RX */ + struct { + struct sk_buff *skb; + struct igb_rx_queue_stats rx_stats; + struct u64_stats_sync rx_syncp; + }; + }; + struct xdp_rxq_info xdp_rxq; +} ____cacheline_internodealigned_in_smp; + +struct igb_q_vector { + struct igb_adapter *adapter; /* backlink */ + int cpu; /* CPU for DCA */ + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + void __iomem *itr_register; + + struct igb_ring_container rx, tx; + + struct napi_struct napi; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + + /* for dynamic allocation of rings associated with this q_vector */ + struct igb_ring ring[] ____cacheline_internodealigned_in_smp; +}; + +enum e1000_ring_flags_t { + IGB_RING_FLAG_RX_3K_BUFFER, + IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, + IGB_RING_FLAG_RX_SCTP_CSUM, + IGB_RING_FLAG_RX_LB_VLAN_BSWAP, + IGB_RING_FLAG_TX_CTX_IDX, + IGB_RING_FLAG_TX_DETECT_HANG +}; + +#define ring_uses_large_buffer(ring) \ + test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define set_ring_uses_large_buffer(ring) \ + set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define clear_ring_uses_large_buffer(ring) \ + clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) + +#define ring_uses_build_skb(ring) \ + test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) +#define set_ring_build_skb_enabled(ring) \ + set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) +#define clear_ring_build_skb_enabled(ring) \ + clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) + +static inline unsigned int igb_rx_bufsz(struct igb_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return IGB_RXBUFFER_3072; + + if (ring_uses_build_skb(ring)) + return IGB_MAX_FRAME_BUILD_SKB; +#endif + return IGB_RXBUFFER_2048; +} + +static inline unsigned int igb_rx_pg_order(struct igb_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return 1; +#endif + return 0; +} + +#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring)) + +#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) + +#define IGB_RX_DESC(R, i) \ + (&(((union e1000_adv_rx_desc *)((R)->desc))[i])) +#define IGB_TX_DESC(R, i) \ + (&(((union e1000_adv_tx_desc *)((R)->desc))[i])) +#define IGB_TX_CTXTDESC(R, i) \ + (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i])) + +/* igb_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +/* igb_desc_unused - calculate if we have unused descriptors */ +static inline int igb_desc_unused(struct igb_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + 
ring->next_to_clean - ring->next_to_use - 1; +} + +#ifdef CONFIG_IGB_HWMON + +#define IGB_HWMON_TYPE_LOC 0 +#define IGB_HWMON_TYPE_TEMP 1 +#define IGB_HWMON_TYPE_CAUTION 2 +#define IGB_HWMON_TYPE_MAX 3 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct e1000_hw *hw; + struct e1000_thermal_diode_data *sensor; + char name[12]; + }; + +struct hwmon_buff { + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1]; + struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4]; + unsigned int n_hwmon; + }; +#endif + +/* The number of L2 ether-type filter registers, Index 3 is reserved + * for PTP 1588 timestamp + */ +#define MAX_ETYPE_FILTER (4 - 1) +/* ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters here!! + * + * Current filters: Filter 3 + */ +#define IGB_ETQF_FILTER_1588 3 + +#define IGB_N_EXTTS 2 +#define IGB_N_PEROUT 2 +#define IGB_N_SDP 4 +#define IGB_RETA_SIZE 128 + +enum igb_filter_match_flags { + IGB_FILTER_FLAG_ETHER_TYPE = 0x1, + IGB_FILTER_FLAG_VLAN_TCI = 0x2, + IGB_FILTER_FLAG_SRC_MAC_ADDR = 0x4, + IGB_FILTER_FLAG_DST_MAC_ADDR = 0x8, +}; + +#define IGB_MAX_RXNFC_FILTERS 16 + +/* RX network flow classification data structure */ +struct igb_nfc_input { + /* Byte layout in order, all values with MSB first: + * match_flags - 1 byte + * etype - 2 bytes + * vlan_tci - 2 bytes + */ + u8 match_flags; + __be16 etype; + __be16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; +}; + +struct igb_nfc_filter { + struct hlist_node nfc_node; + struct igb_nfc_input filter; + unsigned long cookie; + u16 etype_reg_index; + u16 sw_idx; + u16 action; +}; + +struct igb_mac_addr { + u8 addr[ETH_ALEN]; + u8 queue; + u8 state; /* bitmask */ +}; + +#define IGB_MAC_STATE_DEFAULT 0x1 +#define IGB_MAC_STATE_IN_USE 0x2 +#define IGB_MAC_STATE_SRC_ADDR 0x4 +#define IGB_MAC_STATE_QUEUE_STEERING 0x8 + +/* board specific private data structure */ +struct igb_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + struct net_device *netdev; + struct bpf_prog *xdp_prog; + + unsigned long state; + unsigned int flags; + + unsigned int num_q_vectors; + struct msix_entry msix_entries[MAX_MSIX_ENTRIES]; + + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + u16 tx_itr; + u16 rx_itr; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igb_ring *tx_ring[16]; + + /* RX */ + int num_rx_queues; + struct igb_ring *rx_ring[16]; + + u32 max_frame_size; + u32 min_frame_size; + + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + + u16 mng_vlan_id; + u32 bd_number; + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + + u8 __iomem *io_addr; /* Mainly for iounmap use */ + + struct work_struct reset_task; + struct work_struct watchdog_task; + bool fc_autoneg; + u8 tx_timeout_factor; + struct timer_list blink_timer; + unsigned long led_status; + + /* OS defined structs */ + struct pci_dev *pdev; + + spinlock_t stats64_lock; + struct rtnl_link_stats64 stats64; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + + u32 test_icr; + struct igb_ring test_tx_ring; + struct igb_ring test_rx_ring; + + int msg_enable; + + struct igb_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + /* to not mess up cache alignment, always add to the bottom */ + u16 tx_ring_count; + u16 
rx_ring_count; + unsigned int vfs_allocated_count; + struct vf_data_storage *vf_data; + int vf_rate_link_speed; + u32 rss_queues; + u32 wvbr; + u32 *shadow_vfta; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct delayed_work ptp_overflow_work; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_rx_ptp_check; + unsigned long last_rx_timestamp; + unsigned int ptp_flags; + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + bool pps_sys_wrap_on; + + struct ptp_pin_desc sdp_config[IGB_N_SDP]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[IGB_N_PEROUT]; + + char fw_version[32]; +#ifdef CONFIG_IGB_HWMON + struct hwmon_buff *igb_hwmon_buff; + bool ets; +#endif + struct i2c_algo_bit_data i2c_algo; + struct i2c_adapter i2c_adap; + struct i2c_client *i2c_client; + u32 rss_indir_tbl_init; + u8 rss_indir_tbl[IGB_RETA_SIZE]; + + unsigned long link_check_timeout; + int copper_tries; + struct e1000_info ei; + u16 eee_advert; + + /* RX network flow classification support */ + struct hlist_head nfc_filter_list; + struct hlist_head cls_flower_list; + unsigned int nfc_filter_count; + /* lock for RX network flow classification filter */ + spinlock_t nfc_lock; + bool etype_bitmap[MAX_ETYPE_FILTER]; + + struct igb_mac_addr *mac_table; + struct vf_mac_filter vf_macs; + struct vf_mac_filter *vf_mac_list; +}; + +/* flags controlling PTP/1588 function */ +#define IGB_PTP_ENABLED BIT(0) +#define IGB_PTP_OVERFLOW_CHECK BIT(1) + +#define IGB_FLAG_HAS_MSI BIT(0) +#define IGB_FLAG_DCA_ENABLED BIT(1) +#define IGB_FLAG_QUAD_PORT_A BIT(2) +#define IGB_FLAG_QUEUE_PAIRS BIT(3) +#define IGB_FLAG_DMAC BIT(4) +#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6) +#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7) +#define IGB_FLAG_WOL_SUPPORTED BIT(8) +#define IGB_FLAG_NEED_LINK_UPDATE BIT(9) +#define IGB_FLAG_MEDIA_RESET BIT(10) +#define IGB_FLAG_MAS_CAPABLE BIT(11) +#define IGB_FLAG_MAS_ENABLE BIT(12) +#define IGB_FLAG_HAS_MSIX BIT(13) +#define IGB_FLAG_EEE BIT(14) +#define IGB_FLAG_VLAN_PROMISC BIT(15) +#define IGB_FLAG_RX_LEGACY BIT(16) +#define IGB_FLAG_FQTSS BIT(17) + +/* Media Auto Sense */ +#define IGB_MAS_ENABLE_0 0X0001 +#define IGB_MAS_ENABLE_1 0X0002 +#define IGB_MAS_ENABLE_2 0X0004 +#define IGB_MAS_ENABLE_3 0X0008 + +/* DMA Coalescing defines */ +#define IGB_MIN_TXPBSIZE 20408 +#define IGB_TX_BUF_4096 4096 +#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ + +#define IGB_82576_TSYNC_SHIFT 19 +enum e1000_state_t { + __IGB_TESTING, + __IGB_RESETTING, + __IGB_DOWN, + __IGB_PTP_TX_IN_PROGRESS, +}; + +enum igb_boards { + board_82575, +}; + +extern char igb_driver_name[]; + +int igb_xmit_xdp_ring(struct igb_adapter *adapter, + struct igb_ring *ring, + struct xdp_frame *xdpf); +int igb_open(struct net_device *netdev); +int igb_close(struct net_device *netdev); +int igb_up(struct igb_adapter *); +void igb_down(struct igb_adapter *); +void igb_reinit_locked(struct igb_adapter *); +void igb_reset(struct igb_adapter *); +int igb_reinit_queues(struct igb_adapter *); +void igb_write_rss_indir_tbl(struct igb_adapter *); +int igb_set_spd_dplx(struct igb_adapter *, u32, u8); +int igb_setup_tx_resources(struct igb_ring *); +int igb_setup_rx_resources(struct igb_ring *); +void igb_free_tx_resources(struct igb_ring *); +void igb_free_rx_resources(struct igb_ring *); 
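The igb_desc_unused() helper defined identically in both header copies above treats the descriptor array as a ring buffer; the trailing -1 keeps one slot permanently unused so that a completely full ring can be told apart from an empty one. A short worked example with hypothetical index values:

/* ring->count = 256
 *
 * next_to_clean = 200, next_to_use = 10   (clean > use, first branch):
 *     unused = 200 - 10 - 1        = 189 descriptors free
 *
 * next_to_clean = 10,  next_to_use = 200  (clean <= use, second branch):
 *     unused = 256 + 10 - 200 - 1  =  65 descriptors free
 */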
+void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); +void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); +void igb_setup_tctl(struct igb_adapter *); +void igb_setup_rctl(struct igb_adapter *); +void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *); +netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); +void igb_alloc_rx_buffers(struct igb_ring *, u16); +void igb_update_stats(struct igb_adapter *); +bool igb_has_link(struct igb_adapter *adapter); +void igb_set_ethtool_ops(struct net_device *); +void igb_power_up_link(struct igb_adapter *); +void igb_set_fw_version(struct igb_adapter *); +void igb_ptp_init(struct igb_adapter *adapter); +void igb_ptp_stop(struct igb_adapter *adapter); +void igb_ptp_reset(struct igb_adapter *adapter); +void igb_ptp_suspend(struct igb_adapter *adapter); +void igb_ptp_rx_hang(struct igb_adapter *adapter); +void igb_ptp_tx_hang(struct igb_adapter *adapter); +void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); +int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, + ktime_t *timestamp); +int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); +int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +void igb_set_flag_queue_pairs(struct igb_adapter *, const u32); +unsigned int igb_get_max_rss_queues(struct igb_adapter *); +#ifdef CONFIG_IGB_HWMON +void igb_sysfs_exit(struct igb_adapter *adapter); +int igb_sysfs_init(struct igb_adapter *adapter); +#endif +static inline s32 igb_reset_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return 0; +} + +static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return 0; +} + +static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + if (hw->phy.ops.write_reg) + return hw->phy.ops.write_reg(hw, offset, data); + + return 0; +} + +static inline s32 igb_get_phy_info(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_phy_info) + return hw->phy.ops.get_phy_info(hw); + + return 0; +} + +static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} + +int igb_add_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input); +int igb_erase_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input); + +int igb_add_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); +int igb_del_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); + +#endif /* _IGB_H_ */ diff --git a/devices/igb/igb-6.1-ethercat.h b/devices/igb/igb-6.1-ethercat.h new file mode 100644 index 00000000..78b9125a --- /dev/null +++ b/devices/igb/igb-6.1-ethercat.h @@ -0,0 +1,817 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _IGB_H_ +#define _IGB_H_ + +#include "e1000_mac-6.1-ethercat.h" +#include "e1000_82575-6.1-ethercat.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* EtherCAT header file */ +#include "../ecdev.h" + +struct igb_adapter; + +#define E1000_PCS_CFG_IGN_SD 1 + +/* Interrupt defines */ +#define IGB_START_ITR 648 /* ~6000 ints/sec */ +#define IGB_4K_ITR 980 +#define IGB_20K_ITR 196 +#define IGB_70K_ITR 56 + +/* TX/RX descriptor defines */ +#define IGB_DEFAULT_TXD 256 +#define IGB_DEFAULT_TX_WORK 128 +#define IGB_MIN_TXD 80 +#define IGB_MAX_TXD 4096 + +#define IGB_DEFAULT_RXD 256 +#define IGB_MIN_RXD 80 +#define IGB_MAX_RXD 4096 + +#define IGB_DEFAULT_ITR 3 /* dynamic */ +#define IGB_MAX_ITR_USECS 10000 +#define IGB_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_Q_VECTORS 8 +#define MAX_MSIX_ENTRIES 10 + +/* Transmit and receive queues */ +#define IGB_MAX_RX_QUEUES 8 +#define IGB_MAX_RX_QUEUES_82575 4 +#define IGB_MAX_RX_QUEUES_I211 2 +#define IGB_MAX_TX_QUEUES 8 +#define IGB_MAX_VF_MC_ENTRIES 30 +#define IGB_MAX_VF_FUNCTIONS 8 +#define IGB_MAX_VFTA_ENTRIES 128 +#define IGB_82576_VF_DEV_ID 0x10CA +#define IGB_I350_VF_DEV_ID 0x1520 + +/* NVM version defines */ +#define IGB_MAJOR_MASK 0xF000 +#define IGB_MINOR_MASK 0x0FF0 +#define IGB_BUILD_MASK 0x000F +#define IGB_COMB_VER_MASK 0x00FF +#define IGB_MAJOR_SHIFT 12 +#define IGB_MINOR_SHIFT 4 +#define IGB_COMB_VER_SHFT 8 +#define IGB_NVM_VER_INVALID 0xFFFF +#define IGB_ETRACK_SHIFT 16 +#define NVM_ETRACK_WORD 0x0042 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* Transmit and receive latency (for PTP timestamps) */ +#define IGB_I210_TX_LATENCY_10 9542 +#define IGB_I210_TX_LATENCY_100 1024 +#define IGB_I210_TX_LATENCY_1000 178 +#define IGB_I210_RX_LATENCY_10 20662 +#define IGB_I210_RX_LATENCY_100 2213 +#define IGB_I210_RX_LATENCY_1000 448 + +/* XDP */ +#define IGB_XDP_PASS 0 +#define IGB_XDP_CONSUMED BIT(0) +#define IGB_XDP_TX BIT(1) +#define IGB_XDP_REDIR BIT(2) + +struct vf_data_storage { + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u32 flags; + unsigned long last_nack; + u16 pf_vlan; /* When set, guest VLAN config not allowed. */ + u16 pf_qos; + u16 tx_rate; + bool spoofchk_enabled; + bool trusted; +}; + +/* Number of unicast MAC filters reserved for the PF in the RAR registers */ +#define IGB_PF_MAC_FILTERS_RESERVED 3 + +struct vf_mac_filter { + struct list_head l; + int vf; + bool free; + u8 vf_mac[ETH_ALEN]; +}; + +#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ +#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */ +#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */ +#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */ + +/* RX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 
12 : 8) +#define IGB_RX_HTHRESH 8 +#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8) +#define IGB_TX_HTHRESH 1 +#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ + (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4) +#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ + (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16) + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +#define IGB_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) + +/* Supported Rx Buffer Sizes */ +#define IGB_RXBUFFER_256 256 +#define IGB_RXBUFFER_1536 1536 +#define IGB_RXBUFFER_2048 2048 +#define IGB_RXBUFFER_3072 3072 +#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 +#define IGB_TS_HDR_LEN 16 + +/* Attempt to maximize the headroom available for incoming frames. We + * use a 2K buffer for receives and need 1536/1534 to store the data for + * the frame. This leaves us with 512 bytes of room. From that we need + * to deduct the space needed for the shared info and the padding needed + * to IP align the frame. + * + * Note: For cache line sizes 256 or larger this value is going to end + * up negative. In these cases we should fall back to the 3K + * buffers. + */ +#if (PAGE_SIZE < 8192) +#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_1536 - NET_IP_ALIGN) +#define IGB_2K_TOO_SMALL_WITH_PADDING \ +((NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048)) + +static inline int igb_compute_pad(int rx_buf_len) +{ + int page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline int igb_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. + * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. + */ + if (IGB_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = IGB_RXBUFFER_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + + return igb_compute_pad(rx_buf_len); +} + +#define IGB_SKB_PAD igb_skb_pad() +#else +#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define IGB_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define AUTO_ALL_MODES 0 +#define IGB_EEPROM_APME 0x0400 + +#ifndef IGB_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define IGB_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define IGB_MNG_VLAN_NONE -1 + +enum igb_tx_flags { + /* cmd_type flags */ + IGB_TX_FLAGS_VLAN = 0x01, + IGB_TX_FLAGS_TSO = 0x02, + IGB_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGB_TX_FLAGS_IPV4 = 0x10, + IGB_TX_FLAGS_CSUM = 0x20, +}; + +/* VLAN info */ +#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGB_TX_FLAGS_VLAN_SHIFT 16 + +/* The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. 
+ */ +#define IGB_MAX_TXD_PWR 15 +#define IGB_MAX_DATA_PER_TXD (1u << IGB_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* EEPROM byte offsets */ +#define IGB_SFF_8472_SWAP 0x5C +#define IGB_SFF_8472_COMP 0x5E + +/* Bitmasks */ +#define IGB_SFF_ADDRESSING_MODE 0x4 +#define IGB_SFF_8472_UNSUP 0x00 + +/* TX resources are shared between XDP and netstack + * and we need to tag the buffer type to distinguish them + */ +enum igb_tx_buf_type { + IGB_TYPE_SKB = 0, + IGB_TYPE_XDP, +}; + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igb_tx_buffer { + union e1000_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + enum igb_tx_buf_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct igb_rx_buffer { + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; +}; + +struct igb_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igb_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igb_ring_container { + struct igb_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct igb_ring { + struct igb_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct bpf_prog *xdp_prog; + struct device *dev; /* device pointer for dma mapping */ + union { /* array of buffer info structs */ + struct igb_tx_buffer *tx_buffer_info; + struct igb_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. 
in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ + bool cbs_enable; /* indicates if CBS is enabled */ + s32 idleslope; /* idleSlope in kbps */ + s32 sendslope; /* sendSlope in kbps */ + s32 hicredit; /* hiCredit in bytes */ + s32 locredit; /* loCredit in bytes */ + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igb_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + /* RX */ + struct { + struct sk_buff *skb; + struct igb_rx_queue_stats rx_stats; + struct u64_stats_sync rx_syncp; + }; + }; + struct xdp_rxq_info xdp_rxq; +} ____cacheline_internodealigned_in_smp; + +struct igb_q_vector { + struct igb_adapter *adapter; /* backlink */ + int cpu; /* CPU for DCA */ + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + void __iomem *itr_register; + + struct igb_ring_container rx, tx; + + struct napi_struct napi; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + + /* for dynamic allocation of rings associated with this q_vector */ + struct igb_ring ring[] ____cacheline_internodealigned_in_smp; +}; + +enum e1000_ring_flags_t { + IGB_RING_FLAG_RX_3K_BUFFER, + IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, + IGB_RING_FLAG_RX_SCTP_CSUM, + IGB_RING_FLAG_RX_LB_VLAN_BSWAP, + IGB_RING_FLAG_TX_CTX_IDX, + IGB_RING_FLAG_TX_DETECT_HANG +}; + +#define ring_uses_large_buffer(ring) \ + test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define set_ring_uses_large_buffer(ring) \ + set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define clear_ring_uses_large_buffer(ring) \ + clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) + +#define ring_uses_build_skb(ring) \ + test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) +#define set_ring_build_skb_enabled(ring) \ + set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) +#define clear_ring_build_skb_enabled(ring) \ + clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) + +static inline unsigned int igb_rx_bufsz(struct igb_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return IGB_RXBUFFER_3072; + + if (ring_uses_build_skb(ring)) + return IGB_MAX_FRAME_BUILD_SKB; +#endif + return IGB_RXBUFFER_2048; +} + +static inline unsigned int igb_rx_pg_order(struct igb_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return 1; +#endif + return 0; +} + +#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring)) + +#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) + +#define IGB_RX_DESC(R, i) \ + (&(((union e1000_adv_rx_desc *)((R)->desc))[i])) +#define IGB_TX_DESC(R, i) \ + (&(((union e1000_adv_tx_desc *)((R)->desc))[i])) +#define IGB_TX_CTXTDESC(R, i) \ + (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i])) + +/* igb_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +/* igb_desc_unused - calculate if we have unused descriptors */ +static inline int igb_desc_unused(struct igb_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + 
ring->next_to_clean - ring->next_to_use - 1; +} + +#ifdef CONFIG_IGB_HWMON + +#define IGB_HWMON_TYPE_LOC 0 +#define IGB_HWMON_TYPE_TEMP 1 +#define IGB_HWMON_TYPE_CAUTION 2 +#define IGB_HWMON_TYPE_MAX 3 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct e1000_hw *hw; + struct e1000_thermal_diode_data *sensor; + char name[12]; + }; + +struct hwmon_buff { + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1]; + struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4]; + unsigned int n_hwmon; + }; +#endif + +/* The number of L2 ether-type filter registers, Index 3 is reserved + * for PTP 1588 timestamp + */ +#define MAX_ETYPE_FILTER (4 - 1) +/* ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters here!! + * + * Current filters: Filter 3 + */ +#define IGB_ETQF_FILTER_1588 3 + +#define IGB_N_EXTTS 2 +#define IGB_N_PEROUT 2 +#define IGB_N_SDP 4 +#define IGB_RETA_SIZE 128 + +enum igb_filter_match_flags { + IGB_FILTER_FLAG_ETHER_TYPE = 0x1, + IGB_FILTER_FLAG_VLAN_TCI = 0x2, + IGB_FILTER_FLAG_SRC_MAC_ADDR = 0x4, + IGB_FILTER_FLAG_DST_MAC_ADDR = 0x8, +}; + +#define IGB_MAX_RXNFC_FILTERS 16 + +/* RX network flow classification data structure */ +struct igb_nfc_input { + /* Byte layout in order, all values with MSB first: + * match_flags - 1 byte + * etype - 2 bytes + * vlan_tci - 2 bytes + */ + u8 match_flags; + __be16 etype; + __be16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; +}; + +struct igb_nfc_filter { + struct hlist_node nfc_node; + struct igb_nfc_input filter; + unsigned long cookie; + u16 etype_reg_index; + u16 sw_idx; + u16 action; +}; + +struct igb_mac_addr { + u8 addr[ETH_ALEN]; + u8 queue; + u8 state; /* bitmask */ +}; + +#define IGB_MAC_STATE_DEFAULT 0x1 +#define IGB_MAC_STATE_IN_USE 0x2 +#define IGB_MAC_STATE_SRC_ADDR 0x4 +#define IGB_MAC_STATE_QUEUE_STEERING 0x8 + +/* board specific private data structure */ +struct igb_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + struct net_device *netdev; + struct bpf_prog *xdp_prog; + + unsigned long state; + unsigned int flags; + + unsigned int num_q_vectors; + struct msix_entry msix_entries[MAX_MSIX_ENTRIES]; + + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + u16 tx_itr; + u16 rx_itr; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igb_ring *tx_ring[16]; + + /* RX */ + int num_rx_queues; + struct igb_ring *rx_ring[16]; + + u32 max_frame_size; + u32 min_frame_size; + + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + + u16 mng_vlan_id; + u32 bd_number; + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + + u8 __iomem *io_addr; /* Mainly for iounmap use */ + + struct work_struct reset_task; + struct work_struct watchdog_task; + bool fc_autoneg; + u8 tx_timeout_factor; + struct timer_list blink_timer; + unsigned long led_status; + + /* OS defined structs */ + struct pci_dev *pdev; + + spinlock_t stats64_lock; + struct rtnl_link_stats64 stats64; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + + u32 test_icr; + struct igb_ring test_tx_ring; + struct igb_ring test_rx_ring; + + int msg_enable; + + struct igb_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + /* to not mess up cache alignment, always add to the bottom */ + u16 tx_ring_count; + u16 
rx_ring_count; + unsigned int vfs_allocated_count; + struct vf_data_storage *vf_data; + int vf_rate_link_speed; + u32 rss_queues; + u32 wvbr; + u32 *shadow_vfta; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct delayed_work ptp_overflow_work; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_rx_ptp_check; + unsigned long last_rx_timestamp; + unsigned int ptp_flags; + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + bool pps_sys_wrap_on; + + struct ptp_pin_desc sdp_config[IGB_N_SDP]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[IGB_N_PEROUT]; + + char fw_version[32]; +#ifdef CONFIG_IGB_HWMON + struct hwmon_buff *igb_hwmon_buff; + bool ets; +#endif + struct i2c_algo_bit_data i2c_algo; + struct i2c_adapter i2c_adap; + struct i2c_client *i2c_client; + u32 rss_indir_tbl_init; + u8 rss_indir_tbl[IGB_RETA_SIZE]; + + unsigned long link_check_timeout; + int copper_tries; + struct e1000_info ei; + u16 eee_advert; + + /* RX network flow classification support */ + struct hlist_head nfc_filter_list; + struct hlist_head cls_flower_list; + unsigned int nfc_filter_count; + /* lock for RX network flow classification filter */ + spinlock_t nfc_lock; + bool etype_bitmap[MAX_ETYPE_FILTER]; + + struct igb_mac_addr *mac_table; + struct vf_mac_filter vf_macs; + struct vf_mac_filter *vf_mac_list; + /* lock for VF resources */ + spinlock_t vfs_lock; + + /* EtherCAT device variables */ + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +/* flags controlling PTP/1588 function */ +#define IGB_PTP_ENABLED BIT(0) +#define IGB_PTP_OVERFLOW_CHECK BIT(1) + +#define IGB_FLAG_HAS_MSI BIT(0) +#define IGB_FLAG_DCA_ENABLED BIT(1) +#define IGB_FLAG_QUAD_PORT_A BIT(2) +#define IGB_FLAG_QUEUE_PAIRS BIT(3) +#define IGB_FLAG_DMAC BIT(4) +#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6) +#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7) +#define IGB_FLAG_WOL_SUPPORTED BIT(8) +#define IGB_FLAG_NEED_LINK_UPDATE BIT(9) +#define IGB_FLAG_MEDIA_RESET BIT(10) +#define IGB_FLAG_MAS_CAPABLE BIT(11) +#define IGB_FLAG_MAS_ENABLE BIT(12) +#define IGB_FLAG_HAS_MSIX BIT(13) +#define IGB_FLAG_EEE BIT(14) +#define IGB_FLAG_VLAN_PROMISC BIT(15) +#define IGB_FLAG_RX_LEGACY BIT(16) +#define IGB_FLAG_FQTSS BIT(17) + +/* Media Auto Sense */ +#define IGB_MAS_ENABLE_0 0X0001 +#define IGB_MAS_ENABLE_1 0X0002 +#define IGB_MAS_ENABLE_2 0X0004 +#define IGB_MAS_ENABLE_3 0X0008 + +/* DMA Coalescing defines */ +#define IGB_MIN_TXPBSIZE 20408 +#define IGB_TX_BUF_4096 4096 +#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ + +#define IGB_82576_TSYNC_SHIFT 19 +enum e1000_state_t { + __IGB_TESTING, + __IGB_RESETTING, + __IGB_DOWN, + __IGB_PTP_TX_IN_PROGRESS, +}; + +enum igb_boards { + board_82575, +}; + +extern char igb_driver_name[]; + +int igb_xmit_xdp_ring(struct igb_adapter *adapter, + struct igb_ring *ring, + struct xdp_frame *xdpf); +int igb_open(struct net_device *netdev); +int igb_close(struct net_device *netdev); +int igb_up(struct igb_adapter *); +void igb_down(struct igb_adapter *); +void igb_reinit_locked(struct igb_adapter *); +void igb_reset(struct igb_adapter *); +int igb_reinit_queues(struct igb_adapter *); +void igb_write_rss_indir_tbl(struct igb_adapter *); +int igb_set_spd_dplx(struct igb_adapter *, u32, u8); +int igb_setup_tx_resources(struct igb_ring *); 
+int igb_setup_rx_resources(struct igb_ring *); +void igb_free_tx_resources(struct igb_ring *); +void igb_free_rx_resources(struct igb_ring *); +void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); +void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); +void igb_setup_tctl(struct igb_adapter *); +void igb_setup_rctl(struct igb_adapter *); +void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *); +netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); +void igb_alloc_rx_buffers(struct igb_ring *, u16); +void igb_update_stats(struct igb_adapter *); +bool igb_has_link(struct igb_adapter *adapter); +void igb_set_ethtool_ops(struct net_device *); +void igb_power_up_link(struct igb_adapter *); +void igb_set_fw_version(struct igb_adapter *); +void igb_ptp_init(struct igb_adapter *adapter); +void igb_ptp_stop(struct igb_adapter *adapter); +void igb_ptp_reset(struct igb_adapter *adapter); +void igb_ptp_suspend(struct igb_adapter *adapter); +void igb_ptp_rx_hang(struct igb_adapter *adapter); +void igb_ptp_tx_hang(struct igb_adapter *adapter); +void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); +int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, + ktime_t *timestamp); +int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); +int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +void igb_set_flag_queue_pairs(struct igb_adapter *, const u32); +unsigned int igb_get_max_rss_queues(struct igb_adapter *); +#ifdef CONFIG_IGB_HWMON +void igb_sysfs_exit(struct igb_adapter *adapter); +int igb_sysfs_init(struct igb_adapter *adapter); +#endif +static inline s32 igb_reset_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return 0; +} + +static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return 0; +} + +static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + if (hw->phy.ops.write_reg) + return hw->phy.ops.write_reg(hw, offset, data); + + return 0; +} + +static inline s32 igb_get_phy_info(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_phy_info) + return hw->phy.ops.get_phy_info(hw); + + return 0; +} + +static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} + +int igb_add_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input); +int igb_erase_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input); + +int igb_add_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); +int igb_del_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); + +#endif /* _IGB_H_ */ diff --git a/devices/igb/igb-6.1-orig.h b/devices/igb/igb-6.1-orig.h new file mode 100644 index 00000000..015b7814 --- /dev/null +++ b/devices/igb/igb-6.1-orig.h @@ -0,0 +1,810 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _IGB_H_ +#define _IGB_H_ + +#include "e1000_mac.h" +#include "e1000_82575.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +struct igb_adapter; + +#define E1000_PCS_CFG_IGN_SD 1 + +/* Interrupt defines */ +#define IGB_START_ITR 648 /* ~6000 ints/sec */ +#define IGB_4K_ITR 980 +#define IGB_20K_ITR 196 +#define IGB_70K_ITR 56 + +/* TX/RX descriptor defines */ +#define IGB_DEFAULT_TXD 256 +#define IGB_DEFAULT_TX_WORK 128 +#define IGB_MIN_TXD 80 +#define IGB_MAX_TXD 4096 + +#define IGB_DEFAULT_RXD 256 +#define IGB_MIN_RXD 80 +#define IGB_MAX_RXD 4096 + +#define IGB_DEFAULT_ITR 3 /* dynamic */ +#define IGB_MAX_ITR_USECS 10000 +#define IGB_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_Q_VECTORS 8 +#define MAX_MSIX_ENTRIES 10 + +/* Transmit and receive queues */ +#define IGB_MAX_RX_QUEUES 8 +#define IGB_MAX_RX_QUEUES_82575 4 +#define IGB_MAX_RX_QUEUES_I211 2 +#define IGB_MAX_TX_QUEUES 8 +#define IGB_MAX_VF_MC_ENTRIES 30 +#define IGB_MAX_VF_FUNCTIONS 8 +#define IGB_MAX_VFTA_ENTRIES 128 +#define IGB_82576_VF_DEV_ID 0x10CA +#define IGB_I350_VF_DEV_ID 0x1520 + +/* NVM version defines */ +#define IGB_MAJOR_MASK 0xF000 +#define IGB_MINOR_MASK 0x0FF0 +#define IGB_BUILD_MASK 0x000F +#define IGB_COMB_VER_MASK 0x00FF +#define IGB_MAJOR_SHIFT 12 +#define IGB_MINOR_SHIFT 4 +#define IGB_COMB_VER_SHFT 8 +#define IGB_NVM_VER_INVALID 0xFFFF +#define IGB_ETRACK_SHIFT 16 +#define NVM_ETRACK_WORD 0x0042 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* Transmit and receive latency (for PTP timestamps) */ +#define IGB_I210_TX_LATENCY_10 9542 +#define IGB_I210_TX_LATENCY_100 1024 +#define IGB_I210_TX_LATENCY_1000 178 +#define IGB_I210_RX_LATENCY_10 20662 +#define IGB_I210_RX_LATENCY_100 2213 +#define IGB_I210_RX_LATENCY_1000 448 + +/* XDP */ +#define IGB_XDP_PASS 0 +#define IGB_XDP_CONSUMED BIT(0) +#define IGB_XDP_TX BIT(1) +#define IGB_XDP_REDIR BIT(2) + +struct vf_data_storage { + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u32 flags; + unsigned long last_nack; + u16 pf_vlan; /* When set, guest VLAN config not allowed. */ + u16 pf_qos; + u16 tx_rate; + bool spoofchk_enabled; + bool trusted; +}; + +/* Number of unicast MAC filters reserved for the PF in the RAR registers */ +#define IGB_PF_MAC_FILTERS_RESERVED 3 + +struct vf_mac_filter { + struct list_head l; + int vf; + bool free; + u8 vf_mac[ETH_ALEN]; +}; + +#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ +#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */ +#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */ +#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */ + +/* RX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8) +#define IGB_RX_HTHRESH 8 +#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 
20 : 8) +#define IGB_TX_HTHRESH 1 +#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ + (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4) +#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ + (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16) + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +#define IGB_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) + +/* Supported Rx Buffer Sizes */ +#define IGB_RXBUFFER_256 256 +#define IGB_RXBUFFER_1536 1536 +#define IGB_RXBUFFER_2048 2048 +#define IGB_RXBUFFER_3072 3072 +#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 +#define IGB_TS_HDR_LEN 16 + +/* Attempt to maximize the headroom available for incoming frames. We + * use a 2K buffer for receives and need 1536/1534 to store the data for + * the frame. This leaves us with 512 bytes of room. From that we need + * to deduct the space needed for the shared info and the padding needed + * to IP align the frame. + * + * Note: For cache line sizes 256 or larger this value is going to end + * up negative. In these cases we should fall back to the 3K + * buffers. + */ +#if (PAGE_SIZE < 8192) +#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_1536 - NET_IP_ALIGN) +#define IGB_2K_TOO_SMALL_WITH_PADDING \ +((NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048)) + +static inline int igb_compute_pad(int rx_buf_len) +{ + int page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline int igb_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. + * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. + */ + if (IGB_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = IGB_RXBUFFER_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + + return igb_compute_pad(rx_buf_len); +} + +#define IGB_SKB_PAD igb_skb_pad() +#else +#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define IGB_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define AUTO_ALL_MODES 0 +#define IGB_EEPROM_APME 0x0400 + +#ifndef IGB_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define IGB_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define IGB_MNG_VLAN_NONE -1 + +enum igb_tx_flags { + /* cmd_type flags */ + IGB_TX_FLAGS_VLAN = 0x01, + IGB_TX_FLAGS_TSO = 0x02, + IGB_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGB_TX_FLAGS_IPV4 = 0x10, + IGB_TX_FLAGS_CSUM = 0x20, +}; + +/* VLAN info */ +#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGB_TX_FLAGS_VLAN_SHIFT 16 + +/* The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. 
+ */ +#define IGB_MAX_TXD_PWR 15 +#define IGB_MAX_DATA_PER_TXD (1u << IGB_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* EEPROM byte offsets */ +#define IGB_SFF_8472_SWAP 0x5C +#define IGB_SFF_8472_COMP 0x5E + +/* Bitmasks */ +#define IGB_SFF_ADDRESSING_MODE 0x4 +#define IGB_SFF_8472_UNSUP 0x00 + +/* TX resources are shared between XDP and netstack + * and we need to tag the buffer type to distinguish them + */ +enum igb_tx_buf_type { + IGB_TYPE_SKB = 0, + IGB_TYPE_XDP, +}; + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igb_tx_buffer { + union e1000_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + enum igb_tx_buf_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct igb_rx_buffer { + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; +}; + +struct igb_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igb_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igb_ring_container { + struct igb_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct igb_ring { + struct igb_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct bpf_prog *xdp_prog; + struct device *dev; /* device pointer for dma mapping */ + union { /* array of buffer info structs */ + struct igb_tx_buffer *tx_buffer_info; + struct igb_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. 
in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ + bool cbs_enable; /* indicates if CBS is enabled */ + s32 idleslope; /* idleSlope in kbps */ + s32 sendslope; /* sendSlope in kbps */ + s32 hicredit; /* hiCredit in bytes */ + s32 locredit; /* loCredit in bytes */ + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igb_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + /* RX */ + struct { + struct sk_buff *skb; + struct igb_rx_queue_stats rx_stats; + struct u64_stats_sync rx_syncp; + }; + }; + struct xdp_rxq_info xdp_rxq; +} ____cacheline_internodealigned_in_smp; + +struct igb_q_vector { + struct igb_adapter *adapter; /* backlink */ + int cpu; /* CPU for DCA */ + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + void __iomem *itr_register; + + struct igb_ring_container rx, tx; + + struct napi_struct napi; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + + /* for dynamic allocation of rings associated with this q_vector */ + struct igb_ring ring[] ____cacheline_internodealigned_in_smp; +}; + +enum e1000_ring_flags_t { + IGB_RING_FLAG_RX_3K_BUFFER, + IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, + IGB_RING_FLAG_RX_SCTP_CSUM, + IGB_RING_FLAG_RX_LB_VLAN_BSWAP, + IGB_RING_FLAG_TX_CTX_IDX, + IGB_RING_FLAG_TX_DETECT_HANG +}; + +#define ring_uses_large_buffer(ring) \ + test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define set_ring_uses_large_buffer(ring) \ + set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define clear_ring_uses_large_buffer(ring) \ + clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) + +#define ring_uses_build_skb(ring) \ + test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) +#define set_ring_build_skb_enabled(ring) \ + set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) +#define clear_ring_build_skb_enabled(ring) \ + clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) + +static inline unsigned int igb_rx_bufsz(struct igb_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return IGB_RXBUFFER_3072; + + if (ring_uses_build_skb(ring)) + return IGB_MAX_FRAME_BUILD_SKB; +#endif + return IGB_RXBUFFER_2048; +} + +static inline unsigned int igb_rx_pg_order(struct igb_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return 1; +#endif + return 0; +} + +#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring)) + +#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) + +#define IGB_RX_DESC(R, i) \ + (&(((union e1000_adv_rx_desc *)((R)->desc))[i])) +#define IGB_TX_DESC(R, i) \ + (&(((union e1000_adv_tx_desc *)((R)->desc))[i])) +#define IGB_TX_CTXTDESC(R, i) \ + (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i])) + +/* igb_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +/* igb_desc_unused - calculate if we have unused descriptors */ +static inline int igb_desc_unused(struct igb_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + 
ring->next_to_clean - ring->next_to_use - 1; +} + +#ifdef CONFIG_IGB_HWMON + +#define IGB_HWMON_TYPE_LOC 0 +#define IGB_HWMON_TYPE_TEMP 1 +#define IGB_HWMON_TYPE_CAUTION 2 +#define IGB_HWMON_TYPE_MAX 3 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct e1000_hw *hw; + struct e1000_thermal_diode_data *sensor; + char name[12]; + }; + +struct hwmon_buff { + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1]; + struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4]; + unsigned int n_hwmon; + }; +#endif + +/* The number of L2 ether-type filter registers, Index 3 is reserved + * for PTP 1588 timestamp + */ +#define MAX_ETYPE_FILTER (4 - 1) +/* ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters here!! + * + * Current filters: Filter 3 + */ +#define IGB_ETQF_FILTER_1588 3 + +#define IGB_N_EXTTS 2 +#define IGB_N_PEROUT 2 +#define IGB_N_SDP 4 +#define IGB_RETA_SIZE 128 + +enum igb_filter_match_flags { + IGB_FILTER_FLAG_ETHER_TYPE = 0x1, + IGB_FILTER_FLAG_VLAN_TCI = 0x2, + IGB_FILTER_FLAG_SRC_MAC_ADDR = 0x4, + IGB_FILTER_FLAG_DST_MAC_ADDR = 0x8, +}; + +#define IGB_MAX_RXNFC_FILTERS 16 + +/* RX network flow classification data structure */ +struct igb_nfc_input { + /* Byte layout in order, all values with MSB first: + * match_flags - 1 byte + * etype - 2 bytes + * vlan_tci - 2 bytes + */ + u8 match_flags; + __be16 etype; + __be16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; +}; + +struct igb_nfc_filter { + struct hlist_node nfc_node; + struct igb_nfc_input filter; + unsigned long cookie; + u16 etype_reg_index; + u16 sw_idx; + u16 action; +}; + +struct igb_mac_addr { + u8 addr[ETH_ALEN]; + u8 queue; + u8 state; /* bitmask */ +}; + +#define IGB_MAC_STATE_DEFAULT 0x1 +#define IGB_MAC_STATE_IN_USE 0x2 +#define IGB_MAC_STATE_SRC_ADDR 0x4 +#define IGB_MAC_STATE_QUEUE_STEERING 0x8 + +/* board specific private data structure */ +struct igb_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + struct net_device *netdev; + struct bpf_prog *xdp_prog; + + unsigned long state; + unsigned int flags; + + unsigned int num_q_vectors; + struct msix_entry msix_entries[MAX_MSIX_ENTRIES]; + + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + u16 tx_itr; + u16 rx_itr; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igb_ring *tx_ring[16]; + + /* RX */ + int num_rx_queues; + struct igb_ring *rx_ring[16]; + + u32 max_frame_size; + u32 min_frame_size; + + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + + u16 mng_vlan_id; + u32 bd_number; + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + + u8 __iomem *io_addr; /* Mainly for iounmap use */ + + struct work_struct reset_task; + struct work_struct watchdog_task; + bool fc_autoneg; + u8 tx_timeout_factor; + struct timer_list blink_timer; + unsigned long led_status; + + /* OS defined structs */ + struct pci_dev *pdev; + + spinlock_t stats64_lock; + struct rtnl_link_stats64 stats64; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + + u32 test_icr; + struct igb_ring test_tx_ring; + struct igb_ring test_rx_ring; + + int msg_enable; + + struct igb_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + /* to not mess up cache alignment, always add to the bottom */ + u16 tx_ring_count; + u16 
rx_ring_count; + unsigned int vfs_allocated_count; + struct vf_data_storage *vf_data; + int vf_rate_link_speed; + u32 rss_queues; + u32 wvbr; + u32 *shadow_vfta; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct delayed_work ptp_overflow_work; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_rx_ptp_check; + unsigned long last_rx_timestamp; + unsigned int ptp_flags; + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + bool pps_sys_wrap_on; + + struct ptp_pin_desc sdp_config[IGB_N_SDP]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[IGB_N_PEROUT]; + + char fw_version[32]; +#ifdef CONFIG_IGB_HWMON + struct hwmon_buff *igb_hwmon_buff; + bool ets; +#endif + struct i2c_algo_bit_data i2c_algo; + struct i2c_adapter i2c_adap; + struct i2c_client *i2c_client; + u32 rss_indir_tbl_init; + u8 rss_indir_tbl[IGB_RETA_SIZE]; + + unsigned long link_check_timeout; + int copper_tries; + struct e1000_info ei; + u16 eee_advert; + + /* RX network flow classification support */ + struct hlist_head nfc_filter_list; + struct hlist_head cls_flower_list; + unsigned int nfc_filter_count; + /* lock for RX network flow classification filter */ + spinlock_t nfc_lock; + bool etype_bitmap[MAX_ETYPE_FILTER]; + + struct igb_mac_addr *mac_table; + struct vf_mac_filter vf_macs; + struct vf_mac_filter *vf_mac_list; + /* lock for VF resources */ + spinlock_t vfs_lock; +}; + +/* flags controlling PTP/1588 function */ +#define IGB_PTP_ENABLED BIT(0) +#define IGB_PTP_OVERFLOW_CHECK BIT(1) + +#define IGB_FLAG_HAS_MSI BIT(0) +#define IGB_FLAG_DCA_ENABLED BIT(1) +#define IGB_FLAG_QUAD_PORT_A BIT(2) +#define IGB_FLAG_QUEUE_PAIRS BIT(3) +#define IGB_FLAG_DMAC BIT(4) +#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6) +#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7) +#define IGB_FLAG_WOL_SUPPORTED BIT(8) +#define IGB_FLAG_NEED_LINK_UPDATE BIT(9) +#define IGB_FLAG_MEDIA_RESET BIT(10) +#define IGB_FLAG_MAS_CAPABLE BIT(11) +#define IGB_FLAG_MAS_ENABLE BIT(12) +#define IGB_FLAG_HAS_MSIX BIT(13) +#define IGB_FLAG_EEE BIT(14) +#define IGB_FLAG_VLAN_PROMISC BIT(15) +#define IGB_FLAG_RX_LEGACY BIT(16) +#define IGB_FLAG_FQTSS BIT(17) + +/* Media Auto Sense */ +#define IGB_MAS_ENABLE_0 0X0001 +#define IGB_MAS_ENABLE_1 0X0002 +#define IGB_MAS_ENABLE_2 0X0004 +#define IGB_MAS_ENABLE_3 0X0008 + +/* DMA Coalescing defines */ +#define IGB_MIN_TXPBSIZE 20408 +#define IGB_TX_BUF_4096 4096 +#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ + +#define IGB_82576_TSYNC_SHIFT 19 +enum e1000_state_t { + __IGB_TESTING, + __IGB_RESETTING, + __IGB_DOWN, + __IGB_PTP_TX_IN_PROGRESS, +}; + +enum igb_boards { + board_82575, +}; + +extern char igb_driver_name[]; + +int igb_xmit_xdp_ring(struct igb_adapter *adapter, + struct igb_ring *ring, + struct xdp_frame *xdpf); +int igb_open(struct net_device *netdev); +int igb_close(struct net_device *netdev); +int igb_up(struct igb_adapter *); +void igb_down(struct igb_adapter *); +void igb_reinit_locked(struct igb_adapter *); +void igb_reset(struct igb_adapter *); +int igb_reinit_queues(struct igb_adapter *); +void igb_write_rss_indir_tbl(struct igb_adapter *); +int igb_set_spd_dplx(struct igb_adapter *, u32, u8); +int igb_setup_tx_resources(struct igb_ring *); +int igb_setup_rx_resources(struct igb_ring *); +void igb_free_tx_resources(struct igb_ring 
*); +void igb_free_rx_resources(struct igb_ring *); +void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); +void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); +void igb_setup_tctl(struct igb_adapter *); +void igb_setup_rctl(struct igb_adapter *); +void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *); +netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); +void igb_alloc_rx_buffers(struct igb_ring *, u16); +void igb_update_stats(struct igb_adapter *); +bool igb_has_link(struct igb_adapter *adapter); +void igb_set_ethtool_ops(struct net_device *); +void igb_power_up_link(struct igb_adapter *); +void igb_set_fw_version(struct igb_adapter *); +void igb_ptp_init(struct igb_adapter *adapter); +void igb_ptp_stop(struct igb_adapter *adapter); +void igb_ptp_reset(struct igb_adapter *adapter); +void igb_ptp_suspend(struct igb_adapter *adapter); +void igb_ptp_rx_hang(struct igb_adapter *adapter); +void igb_ptp_tx_hang(struct igb_adapter *adapter); +void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); +int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, + ktime_t *timestamp); +int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); +int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +void igb_set_flag_queue_pairs(struct igb_adapter *, const u32); +unsigned int igb_get_max_rss_queues(struct igb_adapter *); +#ifdef CONFIG_IGB_HWMON +void igb_sysfs_exit(struct igb_adapter *adapter); +int igb_sysfs_init(struct igb_adapter *adapter); +#endif +static inline s32 igb_reset_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return 0; +} + +static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return 0; +} + +static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + if (hw->phy.ops.write_reg) + return hw->phy.ops.write_reg(hw, offset, data); + + return 0; +} + +static inline s32 igb_get_phy_info(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_phy_info) + return hw->phy.ops.get_phy_info(hw); + + return 0; +} + +static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} + +int igb_add_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input); +int igb_erase_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input); + +int igb_add_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); +int igb_del_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); + +#endif /* _IGB_H_ */ diff --git a/devices/igb/igb_ethtool-5.14-ethercat.c b/devices/igb/igb_ethtool-5.14-ethercat.c new file mode 100644 index 00000000..6e2b82b5 --- /dev/null +++ b/devices/igb/igb_ethtool-5.14-ethercat.c @@ -0,0 +1,3535 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +/* ethtool support for igb */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "igb-5.14-ethercat.h" + + +#ifdef CONFIG_SUSE_KERNEL +#include +#else +# ifndef SUSE_VERSION +# define SUSE_VERSION 0 +# endif +# ifndef SUSE_PATCHLEVEL +# define SUSE_PATCHLEVEL 0 +# endif +#endif + +struct igb_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IGB_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct igb_adapter, _stat), \ + .stat_offset = offsetof(struct igb_adapter, _stat) \ +} +static const struct igb_stats igb_gstrings_stats[] = { + IGB_STAT("rx_packets", stats.gprc), + IGB_STAT("tx_packets", stats.gptc), + IGB_STAT("rx_bytes", stats.gorc), + IGB_STAT("tx_bytes", stats.gotc), + IGB_STAT("rx_broadcast", stats.bprc), + IGB_STAT("tx_broadcast", stats.bptc), + IGB_STAT("rx_multicast", stats.mprc), + IGB_STAT("tx_multicast", stats.mptc), + IGB_STAT("multicast", stats.mprc), + IGB_STAT("collisions", stats.colc), + IGB_STAT("rx_crc_errors", stats.crcerrs), + IGB_STAT("rx_no_buffer_count", stats.rnbc), + IGB_STAT("rx_missed_errors", stats.mpc), + IGB_STAT("tx_aborted_errors", stats.ecol), + IGB_STAT("tx_carrier_errors", stats.tncrs), + IGB_STAT("tx_window_errors", stats.latecol), + IGB_STAT("tx_abort_late_coll", stats.latecol), + IGB_STAT("tx_deferred_ok", stats.dc), + IGB_STAT("tx_single_coll_ok", stats.scc), + IGB_STAT("tx_multi_coll_ok", stats.mcc), + IGB_STAT("tx_timeout_count", tx_timeout_count), + IGB_STAT("rx_long_length_errors", stats.roc), + IGB_STAT("rx_short_length_errors", stats.ruc), + IGB_STAT("rx_align_errors", stats.algnerrc), + IGB_STAT("tx_tcp_seg_good", stats.tsctc), + IGB_STAT("tx_tcp_seg_failed", stats.tsctfc), + IGB_STAT("rx_flow_control_xon", stats.xonrxc), + IGB_STAT("rx_flow_control_xoff", stats.xoffrxc), + IGB_STAT("tx_flow_control_xon", stats.xontxc), + IGB_STAT("tx_flow_control_xoff", stats.xofftxc), + IGB_STAT("rx_long_byte_count", stats.gorc), + IGB_STAT("tx_dma_out_of_sync", stats.doosync), + IGB_STAT("tx_smbus", stats.mgptc), + IGB_STAT("rx_smbus", stats.mgprc), + IGB_STAT("dropped_smbus", stats.mgpdc), + IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGB_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc), + IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IGB_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +}; + +#define IGB_NETDEV_STAT(_net_stat) { \ + .stat_string = __stringify(_net_stat), \ + .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ +} +static const struct igb_stats igb_gstrings_net_stats[] = { + IGB_NETDEV_STAT(rx_errors), + IGB_NETDEV_STAT(tx_errors), + IGB_NETDEV_STAT(tx_dropped), + IGB_NETDEV_STAT(rx_length_errors), + IGB_NETDEV_STAT(rx_over_errors), + IGB_NETDEV_STAT(rx_frame_errors), + IGB_NETDEV_STAT(rx_fifo_errors), + IGB_NETDEV_STAT(tx_fifo_errors), + IGB_NETDEV_STAT(tx_heartbeat_errors) +}; + +#define IGB_GLOBAL_STATS_LEN \ + (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) +#define IGB_NETDEV_STATS_LEN \ + (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) +#define IGB_RX_QUEUE_STATS_LEN \ + (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) + +#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ + +#define 
IGB_QUEUE_STATS_LEN \ + ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGB_RX_QUEUE_STATS_LEN) + \ + (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \ + IGB_TX_QUEUE_STATS_LEN)) +#define IGB_STATS_LEN \ + (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) + +enum igb_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + +static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" +}; +#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) + +static const char igb_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IGB_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IGB_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igb_priv_flags_strings) + +static int igb_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; + u32 status; + u32 speed; + u32 supported, advertising; + + status = pm_runtime_suspended(&adapter->pdev->dev) ? + 0 : rd32(E1000_STATUS); + if (hw->phy.media_type == e1000_media_type_copper) { + + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP | + SUPPORTED_Pause); + advertising = ADVERTISED_TP; + + if (hw->mac.autoneg == 1) { + advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + advertising |= hw->phy.autoneg_advertised; + } + + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy.addr; + } else { + supported = (SUPPORTED_FIBRE | + SUPPORTED_1000baseKX_Full | + SUPPORTED_Autoneg | + SUPPORTED_Pause); + advertising = (ADVERTISED_FIBRE | + ADVERTISED_1000baseKX_Full); + if (hw->mac.type == e1000_i354) { + if ((hw->device_id == + E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + supported |= SUPPORTED_2500baseX_Full; + supported &= ~SUPPORTED_1000baseKX_Full; + advertising |= ADVERTISED_2500baseX_Full; + advertising &= ~ADVERTISED_1000baseKX_Full; + } + } + if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { + supported |= SUPPORTED_100baseT_Full; + advertising |= ADVERTISED_100baseT_Full; + } + if (hw->mac.autoneg == 1) + advertising |= ADVERTISED_Autoneg; + + cmd->base.port = PORT_FIBRE; + } + if (hw->mac.autoneg != 1) + advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + + switch (hw->fc.requested_mode) { + case e1000_fc_full: + advertising |= ADVERTISED_Pause; + break; + case e1000_fc_rx_pause: + advertising |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + break; + case e1000_fc_tx_pause: + advertising |= ADVERTISED_Asym_Pause; + break; + default: + advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + } + if (status & E1000_STATUS_LU) { + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + speed = SPEED_2500; + } else if (status & E1000_STATUS_SPEED_1000) { + speed = SPEED_1000; + } else if (status & E1000_STATUS_SPEED_100) { + speed = SPEED_100; + } else { + speed = SPEED_10; + } + if ((status & E1000_STATUS_FD) || + hw->phy.media_type != 
e1000_media_type_copper) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.speed = speed; + if ((hw->phy.media_type == e1000_media_type_fiber) || + hw->mac.autoneg) + cmd->base.autoneg = AUTONEG_ENABLE; + else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == e1000_media_type_copper) + cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + + return 0; +} + +static int igb_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 advertising; + + /* When SoL/IDER sessions are active, autoneg/speed/duplex + * cannot be changed + */ + if (igb_check_reset_block(hw)) { + dev_err(&adapter->pdev->dev, + "Cannot change link characteristics when SoL/IDER is active.\n"); + return -EINVAL; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (hw->phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (cmd->base.autoneg != AUTONEG_ENABLE)) { + dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + if (hw->phy.media_type == e1000_media_type_fiber) { + hw->phy.autoneg_advertised = advertising | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + switch (adapter->link_speed) { + case SPEED_2500: + hw->phy.autoneg_advertised = + ADVERTISED_2500baseX_Full; + break; + case SPEED_1000: + hw->phy.autoneg_advertised = + ADVERTISED_1000baseT_Full; + break; + case SPEED_100: + hw->phy.autoneg_advertised = + ADVERTISED_100baseT_Full; + break; + default: + break; + } + } else { + hw->phy.autoneg_advertised = advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + } + advertising = hw->phy.autoneg_advertised; + if (adapter->fc_autoneg) + hw->fc.requested_mode = e1000_fc_default; + } else { + u32 speed = cmd->base.speed; + /* calling this overrides forced MDI setting */ + if (igb_set_spd_dplx(adapter, speed, cmd->base.duplex)) { + clear_bit(__IGB_RESETTING, &adapter->state); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + igb_down(adapter); + igb_up(adapter); + } else + igb_reset(adapter); + + clear_bit(__IGB_RESETTING, &adapter->state); + return 0; +} + 
+static u32 igb_get_link(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_mac_info *mac = &adapter->hw.mac; + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + mac->get_link_status = 1; + + return igb_has_link(adapter); +} + +static void igb_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == e1000_fc_rx_pause) + pause->rx_pause = 1; + else if (hw->fc.current_mode == e1000_fc_tx_pause) + pause->tx_pause = 1; + else if (hw->fc.current_mode == e1000_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int igb_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + int i; + + /* 100basefx does not support setting link flow control */ + if (hw->dev_spec._82575.eth_flags.e100_base_fx) + return -EINVAL; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = e1000_fc_default; + if (netif_running(adapter->netdev)) { + igb_down(adapter); + igb_up(adapter); + } else { + igb_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + retval = ((hw->phy.media_type == e1000_media_type_copper) ? 
+ igb_force_mac_fc(hw) : igb_setup_link(hw)); + + /* Make sure SRRCTL considers new fc settings for each ring */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + + igb_setup_srrctl(adapter, ring); + } + } + + clear_bit(__IGB_RESETTING, &adapter->state); + return retval; +} + +static u32 igb_get_msglevel(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void igb_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int igb_get_regs_len(struct net_device *netdev) +{ +#define IGB_REGS_LEN 740 + return IGB_REGS_LEN * sizeof(u32); +} + +static void igb_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IGB_REGS_LEN * sizeof(u32)); + + regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ + regs_buff[0] = rd32(E1000_CTRL); + regs_buff[1] = rd32(E1000_STATUS); + regs_buff[2] = rd32(E1000_CTRL_EXT); + regs_buff[3] = rd32(E1000_MDIC); + regs_buff[4] = rd32(E1000_SCTL); + regs_buff[5] = rd32(E1000_CONNSW); + regs_buff[6] = rd32(E1000_VET); + regs_buff[7] = rd32(E1000_LEDCTL); + regs_buff[8] = rd32(E1000_PBA); + regs_buff[9] = rd32(E1000_PBS); + regs_buff[10] = rd32(E1000_FRTIMER); + regs_buff[11] = rd32(E1000_TCPTIMER); + + /* NVM Register */ + regs_buff[12] = rd32(E1000_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read the + * same but EICS does not clear on read + */ + regs_buff[13] = rd32(E1000_EICS); + regs_buff[14] = rd32(E1000_EICS); + regs_buff[15] = rd32(E1000_EIMS); + regs_buff[16] = rd32(E1000_EIMC); + regs_buff[17] = rd32(E1000_EIAC); + regs_buff[18] = rd32(E1000_EIAM); + /* Reading ICS for ICR because they read the + * same but ICS does not clear on read + */ + regs_buff[19] = rd32(E1000_ICS); + regs_buff[20] = rd32(E1000_ICS); + regs_buff[21] = rd32(E1000_IMS); + regs_buff[22] = rd32(E1000_IMC); + regs_buff[23] = rd32(E1000_IAC); + regs_buff[24] = rd32(E1000_IAM); + regs_buff[25] = rd32(E1000_IMIRVP); + + /* Flow Control */ + regs_buff[26] = rd32(E1000_FCAL); + regs_buff[27] = rd32(E1000_FCAH); + regs_buff[28] = rd32(E1000_FCTTV); + regs_buff[29] = rd32(E1000_FCRTL); + regs_buff[30] = rd32(E1000_FCRTH); + regs_buff[31] = rd32(E1000_FCRTV); + + /* Receive */ + regs_buff[32] = rd32(E1000_RCTL); + regs_buff[33] = rd32(E1000_RXCSUM); + regs_buff[34] = rd32(E1000_RLPML); + regs_buff[35] = rd32(E1000_RFCTL); + regs_buff[36] = rd32(E1000_MRQC); + regs_buff[37] = rd32(E1000_VT_CTL); + + /* Transmit */ + regs_buff[38] = rd32(E1000_TCTL); + regs_buff[39] = rd32(E1000_TCTL_EXT); + regs_buff[40] = rd32(E1000_TIPG); + regs_buff[41] = rd32(E1000_DTXCTL); + + /* Wake Up */ + regs_buff[42] = rd32(E1000_WUC); + regs_buff[43] = rd32(E1000_WUFC); + regs_buff[44] = rd32(E1000_WUS); + regs_buff[45] = rd32(E1000_IPAV); + regs_buff[46] = rd32(E1000_WUPL); + + /* MAC */ + regs_buff[47] = rd32(E1000_PCS_CFG0); + regs_buff[48] = rd32(E1000_PCS_LCTL); + regs_buff[49] = rd32(E1000_PCS_LSTAT); + regs_buff[50] = rd32(E1000_PCS_ANADV); + regs_buff[51] = rd32(E1000_PCS_LPAB); + regs_buff[52] = rd32(E1000_PCS_NPTX); + regs_buff[53] = rd32(E1000_PCS_LPABNP); + + /* Statistics */ + regs_buff[54] = adapter->stats.crcerrs; + regs_buff[55] = adapter->stats.algnerrc; + regs_buff[56] 
= adapter->stats.symerrs; + regs_buff[57] = adapter->stats.rxerrc; + regs_buff[58] = adapter->stats.mpc; + regs_buff[59] = adapter->stats.scc; + regs_buff[60] = adapter->stats.ecol; + regs_buff[61] = adapter->stats.mcc; + regs_buff[62] = adapter->stats.latecol; + regs_buff[63] = adapter->stats.colc; + regs_buff[64] = adapter->stats.dc; + regs_buff[65] = adapter->stats.tncrs; + regs_buff[66] = adapter->stats.sec; + regs_buff[67] = adapter->stats.htdpmc; + regs_buff[68] = adapter->stats.rlec; + regs_buff[69] = adapter->stats.xonrxc; + regs_buff[70] = adapter->stats.xontxc; + regs_buff[71] = adapter->stats.xoffrxc; + regs_buff[72] = adapter->stats.xofftxc; + regs_buff[73] = adapter->stats.fcruc; + regs_buff[74] = adapter->stats.prc64; + regs_buff[75] = adapter->stats.prc127; + regs_buff[76] = adapter->stats.prc255; + regs_buff[77] = adapter->stats.prc511; + regs_buff[78] = adapter->stats.prc1023; + regs_buff[79] = adapter->stats.prc1522; + regs_buff[80] = adapter->stats.gprc; + regs_buff[81] = adapter->stats.bprc; + regs_buff[82] = adapter->stats.mprc; + regs_buff[83] = adapter->stats.gptc; + regs_buff[84] = adapter->stats.gorc; + regs_buff[86] = adapter->stats.gotc; + regs_buff[88] = adapter->stats.rnbc; + regs_buff[89] = adapter->stats.ruc; + regs_buff[90] = adapter->stats.rfc; + regs_buff[91] = adapter->stats.roc; + regs_buff[92] = adapter->stats.rjc; + regs_buff[93] = adapter->stats.mgprc; + regs_buff[94] = adapter->stats.mgpdc; + regs_buff[95] = adapter->stats.mgptc; + regs_buff[96] = adapter->stats.tor; + regs_buff[98] = adapter->stats.tot; + regs_buff[100] = adapter->stats.tpr; + regs_buff[101] = adapter->stats.tpt; + regs_buff[102] = adapter->stats.ptc64; + regs_buff[103] = adapter->stats.ptc127; + regs_buff[104] = adapter->stats.ptc255; + regs_buff[105] = adapter->stats.ptc511; + regs_buff[106] = adapter->stats.ptc1023; + regs_buff[107] = adapter->stats.ptc1522; + regs_buff[108] = adapter->stats.mptc; + regs_buff[109] = adapter->stats.bptc; + regs_buff[110] = adapter->stats.tsctc; + regs_buff[111] = adapter->stats.iac; + regs_buff[112] = adapter->stats.rpthc; + regs_buff[113] = adapter->stats.hgptc; + regs_buff[114] = adapter->stats.hgorc; + regs_buff[116] = adapter->stats.hgotc; + regs_buff[118] = adapter->stats.lenerrs; + regs_buff[119] = adapter->stats.scvpc; + regs_buff[120] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) + regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[125 + i] = rd32(E1000_PSRTYPE(i)); + for (i = 0; i < 4; i++) + regs_buff[129 + i] = rd32(E1000_RDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[133 + i] = rd32(E1000_RDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[137 + i] = rd32(E1000_RDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[141 + i] = rd32(E1000_RDH(i)); + for (i = 0; i < 4; i++) + regs_buff[145 + i] = rd32(E1000_RDT(i)); + for (i = 0; i < 4; i++) + regs_buff[149 + i] = rd32(E1000_RXDCTL(i)); + + for (i = 0; i < 10; i++) + regs_buff[153 + i] = rd32(E1000_EITR(i)); + for (i = 0; i < 8; i++) + regs_buff[163 + i] = rd32(E1000_IMIR(i)); + for (i = 0; i < 8; i++) + regs_buff[171 + i] = rd32(E1000_IMIREXT(i)); + for (i = 0; i < 16; i++) + regs_buff[179 + i] = rd32(E1000_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[195 + i] = rd32(E1000_RAH(i)); + + for (i = 0; i < 4; i++) + regs_buff[211 + i] = rd32(E1000_TDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[215 + i] = rd32(E1000_TDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[219 + i] = rd32(E1000_TDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[223 + i] = 
rd32(E1000_TDH(i)); + for (i = 0; i < 4; i++) + regs_buff[227 + i] = rd32(E1000_TDT(i)); + for (i = 0; i < 4; i++) + regs_buff[231 + i] = rd32(E1000_TXDCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[235 + i] = rd32(E1000_TDWBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[239 + i] = rd32(E1000_TDWBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i)); + + for (i = 0; i < 4; i++) + regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i)); + for (i = 0; i < 4; i++) + regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i)); + for (i = 0; i < 32; i++) + regs_buff[255 + i] = rd32(E1000_WUPM_REG(i)); + for (i = 0; i < 128; i++) + regs_buff[287 + i] = rd32(E1000_FFMT_REG(i)); + for (i = 0; i < 128; i++) + regs_buff[415 + i] = rd32(E1000_FFVT_REG(i)); + for (i = 0; i < 4; i++) + regs_buff[543 + i] = rd32(E1000_FFLT_REG(i)); + + regs_buff[547] = rd32(E1000_TDFH); + regs_buff[548] = rd32(E1000_TDFT); + regs_buff[549] = rd32(E1000_TDFHS); + regs_buff[550] = rd32(E1000_TDFPC); + + if (hw->mac.type > e1000_82580) { + regs_buff[551] = adapter->stats.o2bgptc; + regs_buff[552] = adapter->stats.b2ospc; + regs_buff[553] = adapter->stats.o2bspc; + regs_buff[554] = adapter->stats.b2ogprc; + } + + if (hw->mac.type == e1000_82576) { + for (i = 0; i < 12; i++) + regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4)); + for (i = 0; i < 4; i++) + regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[607 + i] = rd32(E1000_RDH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[619 + i] = rd32(E1000_RDT(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4)); + + for (i = 0; i < 12; i++) + regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[679 + i] = rd32(E1000_TDH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[691 + i] = rd32(E1000_TDT(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4)); + } + + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) + regs_buff[739] = rd32(E1000_I210_RR2DCDELAY); +} + +static int igb_get_eeprom_len(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + return adapter->hw.nvm.word_size * 2; +} + +static int igb_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == e1000_nvm_eeprom_spi) + ret_val = hw->nvm.ops.read(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = 
hw->nvm.ops.read(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int igb_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if ((hw->mac.type >= e1000_i210) && + !igb_get_flash_presence_i210(hw)) { + return -EOPNOTSUPP; + } + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->nvm.ops.write(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) + hw->nvm.ops.update(hw); + + igb_set_fw_version(adapter); + kfree(eeprom_buff); + return ret_val; +} + +static void igb_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); + + /* EEPROM image version # is reported as firmware version # for + * 82575 controllers + */ + strlcpy(drvinfo->fw_version, adapter->fw_version, + sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN; +} + +static void igb_get_ringparam(struct net_device *netdev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL == 5 + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IGB_MAX_RXD; + ring->tx_max_pending = IGB_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int igb_set_ringparam(struct net_device *netdev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL == 5 + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ack) +#else + struct ethtool_ringparam *ring) +#endif +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct igb_ring *temp_ring; + 
int i, err = 0; + u16 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(array_size(sizeof(struct igb_ring), + adapter->num_tx_queues)); + else + temp_ring = vmalloc(array_size(sizeof(struct igb_ring), + adapter->num_rx_queues)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igb_down(adapter); + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. + */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct igb_ring)); + + temp_ring[i].count = new_tx_count; + err = igb_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igb_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + igb_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct igb_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct igb_ring)); + + /* Clear copied XDP RX-queue info */ + memset(&temp_ring[i].xdp_rxq, 0, + sizeof(temp_ring[i].xdp_rxq)); + + temp_ring[i].count = new_rx_count; + err = igb_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igb_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + igb_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct igb_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } +err_setup: + igb_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGB_RESETTING, &adapter->state); + return err; +} + +/* ethtool register test data */ +struct igb_reg_test { + u16 reg; + u16 reg_offset; + u16 array_len; + u16 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x100 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* i210 reg test */ +static struct igb_reg_test reg_test_i210[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* RDH is read-only for i210, only test RDT. */ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x900FFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0, 0 } +}; + +/* i350 reg test */ +static struct igb_reg_test reg_test_i350[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* RDH is read-only for i350, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0xC3FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 16, TABLE64_TEST_HI, + 0xC3FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82580 reg test */ +static struct igb_reg_test reg_test_82580[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + /* RDH is read-only for 82580, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82576 reg test */ +static struct igb_reg_test reg_test_82576[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + /* Enable all RX queues before testing. */ + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, + E1000_RXDCTL_QUEUE_ENABLE }, + { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, + E1000_RXDCTL_QUEUE_ENABLE }, + /* RDH is read-only for 82576, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, + { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82575 register test */ +static struct igb_reg_test reg_test_82575[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* Enable all four RX queues before testing. */ + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, + E1000_RXDCTL_QUEUE_ENABLE }, + /* RDH is read-only for 82575, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pat, val; + static const u32 _test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { + wr32(reg, (_test[pat] & write)); + val = rd32(reg) & mask; + if (val != (_test[pat] & write & mask)) { + dev_err(&adapter->pdev->dev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, (_test[pat] & write & mask)); + *data = reg; + return true; + } + } + + return false; +} + +static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u32 val; + + wr32(reg, write & mask); + val = rd32(reg); + if ((write & mask) != (val & mask)) { + dev_err(&adapter->pdev->dev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + return true; + } + + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +static int igb_reg_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct igb_reg_test *test; + u32 value, before, after; + u32 i, toggle; + + switch (adapter->hw.mac.type) { + case e1000_i350: + case e1000_i354: + test = reg_test_i350; + toggle = 0x7FEFF3FF; + break; + case e1000_i210: + case e1000_i211: + test = reg_test_i210; + toggle = 0x7FEFF3FF; + break; + case e1000_82580: + test = reg_test_82580; + toggle = 0x7FEFF3FF; + break; + case e1000_82576: + test = reg_test_82576; + toggle = 0x7FFFF3FF; + break; + default: + test = reg_test_82575; + toggle = 0x7FFFF3FF; + break; + } + + /* Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writable on newer MACs. 
+ */ + before = rd32(E1000_STATUS); + value = (rd32(E1000_STATUS) & toggle); + wr32(E1000_STATUS, toggle); + after = rd32(E1000_STATUS) & toggle; + if (value != after) { + dev_err(&adapter->pdev->dev, + "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); + *data = 1; + return 1; + } + /* restore previous status */ + wr32(E1000_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + REG_PATTERN_TEST(test->reg + + (i * test->reg_offset), + test->mask, + test->write); + break; + case SET_READ_TEST: + REG_SET_AND_CHECK(test->reg + + (i * test->reg_offset), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + writel(test->write, + (adapter->hw.hw_addr + test->reg) + + (i * test->reg_offset)); + break; + case TABLE32_TEST: + REG_PATTERN_TEST(test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + REG_PATTERN_TEST(test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + REG_PATTERN_TEST((test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + } + test++; + } + + *data = 0; + return 0; +} + +static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* Validate eeprom on all parts but flashless */ + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (igb_get_flash_presence_i210(hw)) { + if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) + *data = 2; + } + break; + default: + if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) + *data = 2; + break; + } + + return *data; +} + +static irqreturn_t igb_test_intr(int irq, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *) data; + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= rd32(E1000_ICR); + + return IRQ_HANDLED; +} + +static int igb_intr_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 mask, ics_mask, i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + if (request_irq(adapter->msix_entries[0].vector, + igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; + return -1; + } + } else if (adapter->flags & IGB_FLAG_HAS_MSI) { + shared_int = false; + if (request_irq(irq, + igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED, + netdev->name, adapter)) { + shared_int = false; + } else if (request_irq(irq, igb_test_intr, IRQF_SHARED, + netdev->name, adapter)) { + *data = 1; + return -1; + } + dev_info(&adapter->pdev->dev, "testing %s interrupt\n", + (shared_int ? 
"shared" : "unshared")); + + /* Disable all the interrupts */ + wr32(E1000_IMC, ~0); + wrfl(); + usleep_range(10000, 11000); + + /* Define all writable bits for ICS */ + switch (hw->mac.type) { + case e1000_82575: + ics_mask = 0x37F47EDD; + break; + case e1000_82576: + ics_mask = 0x77D4FBFD; + break; + case e1000_82580: + ics_mask = 0x77DCFED5; + break; + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + ics_mask = 0x77DCFED5; + break; + default: + ics_mask = 0x7FFFFFFF; + break; + } + + /* Test each interrupt */ + for (; i < 31; i++) { + /* Interrupt to test */ + mask = BIT(i); + + if (!(mask & ics_mask)) + continue; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMC, mask); + wr32(E1000_ICS, mask); + wrfl(); + usleep_range(10000, 11000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMS, mask); + wr32(E1000_ICS, mask); + wrfl(); + usleep_range(10000, 11000); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. 
+ */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMC, ~mask); + wr32(E1000_ICS, ~mask); + wrfl(); + usleep_range(10000, 11000); + + if (adapter->test_icr & mask) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + wr32(E1000_IMC, ~0); + wrfl(); + usleep_range(10000, 11000); + + /* Unhook test interrupt handler */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) + free_irq(adapter->msix_entries[0].vector, adapter); + else + free_irq(irq, adapter); + + return *data; +} + +static void igb_free_desc_rings(struct igb_adapter *adapter) +{ + igb_free_tx_resources(&adapter->test_tx_ring); + igb_free_rx_resources(&adapter->test_rx_ring); +} + +static int igb_setup_desc_rings(struct igb_adapter *adapter) +{ + struct igb_ring *tx_ring = &adapter->test_tx_ring; + struct igb_ring *rx_ring = &adapter->test_rx_ring; + struct e1000_hw *hw = &adapter->hw; + int ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = IGB_DEFAULT_TXD; + tx_ring->dev = &adapter->pdev->dev; + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->vfs_allocated_count; + + if (igb_setup_tx_resources(tx_ring)) { + ret_val = 1; + goto err_nomem; + } + + igb_setup_tctl(adapter); + igb_configure_tx_ring(adapter, tx_ring); + + /* Setup Rx descriptor ring and Rx buffers */ + rx_ring->count = IGB_DEFAULT_RXD; + rx_ring->dev = &adapter->pdev->dev; + rx_ring->netdev = adapter->netdev; + rx_ring->reg_idx = adapter->vfs_allocated_count; + + if (igb_setup_rx_resources(rx_ring)) { + ret_val = 3; + goto err_nomem; + } + + /* set the default queue to queue 0 of PF */ + wr32(E1000_MRQC, adapter->vfs_allocated_count << 3); + + /* enable receive ring */ + igb_setup_rctl(adapter); + igb_configure_rx_ring(adapter, rx_ring); + + igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring)); + + return 0; + +err_nomem: + igb_free_desc_rings(adapter); + return ret_val; +} + +static void igb_phy_disable_receiver(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + igb_write_phy_reg(hw, 29, 0x001F); + igb_write_phy_reg(hw, 30, 0x8FFC); + igb_write_phy_reg(hw, 29, 0x001A); + igb_write_phy_reg(hw, 30, 0x8FF0); +} + +static int igb_integrated_phy_loopback(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + + hw->mac.autoneg = false; + + if (hw->phy.type == e1000_phy_m88) { + if (hw->phy.id != I210_I_PHY_ID) { + /* Auto-MDI/MDIX Off */ + igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); + /* autoneg off */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); + } else { + /* force 1000, set loopback */ + igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); + igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); + } + } else if (hw->phy.type == e1000_phy_82580) { + /* enable MII loopback */ + igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); + } + + /* add small delay to avoid loopback test failure */ + msleep(50); + + /* force 1000, set loopback */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = rd32(E1000_CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD | /* Force Duplex to FULL */ + E1000_CTRL_SLU); /* Set link up enable bit */ + + if (hw->phy.type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + + wr32(E1000_CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy.type == e1000_phy_m88) + igb_phy_disable_receiver(adapter); + + msleep(500); + return 0; +} + +static int igb_set_phy_loopback(struct igb_adapter *adapter) +{ + return igb_integrated_phy_loopback(adapter); +} + +static int igb_setup_loopback_test(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg; + + reg = rd32(E1000_CTRL_EXT); + + /* use CTRL_EXT to identify link type as SGMII can appear as copper */ + if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) { + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || + (hw->device_id == E1000_DEV_ID_I354_SGMII) || + (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) { + /* Enable DH89xxCC MPHY for near end loopback */ + reg = rd32(E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | + E1000_MPHY_PCS_CLK_REG_OFFSET; + wr32(E1000_MPHY_ADDR_CTL, reg); + + reg = rd32(E1000_MPHY_DATA); + reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN; + wr32(E1000_MPHY_DATA, reg); + } + + reg = rd32(E1000_RCTL); + reg |= E1000_RCTL_LBM_TCVR; + wr32(E1000_RCTL, reg); + + wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); + + reg = rd32(E1000_CTRL); + reg &= ~(E1000_CTRL_RFCE | + E1000_CTRL_TFCE | + E1000_CTRL_LRST); + reg |= E1000_CTRL_SLU | + E1000_CTRL_FD; + wr32(E1000_CTRL, reg); + + /* Unset switch control to serdes energy detect */ + reg = rd32(E1000_CONNSW); + reg &= ~E1000_CONNSW_ENRGSRC; + wr32(E1000_CONNSW, reg); + + /* Unset sigdetect for SERDES loopback on + * 82580 and newer devices. 
+ */ + if (hw->mac.type >= e1000_82580) { + reg = rd32(E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_IGN_SD; + wr32(E1000_PCS_CFG0, reg); + } + + /* Set PCS register for forced speed */ + reg = rd32(E1000_PCS_LCTL); + reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ + reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ + E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ + E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ + E1000_PCS_LCTL_FSD | /* Force Speed */ + E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ + wr32(E1000_PCS_LCTL, reg); + + return 0; + } + + return igb_set_phy_loopback(adapter); +} + +static void igb_loopback_cleanup(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || + (hw->device_id == E1000_DEV_ID_I354_SGMII)) { + u32 reg; + + /* Disable near end loopback on DH89xxCC */ + reg = rd32(E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | + E1000_MPHY_PCS_CLK_REG_OFFSET; + wr32(E1000_MPHY_ADDR_CTL, reg); + + reg = rd32(E1000_MPHY_DATA); + reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN; + wr32(E1000_MPHY_DATA, reg); + } + + rctl = rd32(E1000_RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + wr32(E1000_RCTL, rctl); + + hw->mac.autoneg = true; + igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + igb_write_phy_reg(hw, PHY_CONTROL, phy_reg); + igb_phy_sw_reset(hw); + } +} + +static void igb_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size /= 2; + memset(&skb->data[frame_size], 0xAA, frame_size - 1); + skb->data[frame_size + 10] = 0xBE; + skb->data[frame_size + 12] = 0xAF; +} + +static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer, + unsigned int frame_size) +{ + unsigned char *data; + bool match = true; + + frame_size >>= 1; + + data = kmap(rx_buffer->page); + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + + kunmap(rx_buffer->page); + + return match; +} + +static int igb_clean_test_rings(struct igb_ring *rx_ring, + struct igb_ring *tx_ring, + unsigned int size) +{ + union e1000_adv_rx_desc *rx_desc; + struct igb_rx_buffer *rx_buffer_info; + struct igb_tx_buffer *tx_buffer_info; + u16 rx_ntc, tx_ntc, count = 0; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); + + while (rx_desc->wb.upper.length) { + /* check Rx buffer */ + rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; + + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer_info->dma, + size, + DMA_FROM_DEVICE); + + /* verify contents of skb */ + if (igb_check_lbtest_frame(rx_buffer_info, size)) + count++; + + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer_info->dma, + size, + DMA_FROM_DEVICE); + + /* unmap buffer on Tx side */ + tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer_info->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer_info, dma), + dma_unmap_len(tx_buffer_info, len), + DMA_TO_DEVICE); + 
dma_unmap_len_set(tx_buffer_info, len, 0); + + /* increment Rx/Tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); + } + + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* re-map buffers to ring, store next to clean values */ + igb_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +static int igb_run_loopback_test(struct igb_adapter *adapter) +{ + struct igb_ring *tx_ring = &adapter->test_tx_ring; + struct igb_ring *rx_ring = &adapter->test_rx_ring; + u16 i, j, lc, good_cnt; + int ret_val = 0; + unsigned int size = IGB_RX_HDR_LEN; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; + + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; + + /* place data into test skb */ + igb_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + for (j = 0; j <= lc; j++) { /* loop count loop */ + /* reset count of good packets */ + good_cnt = 0; + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < 64; i++) { + skb_get(skb); + tx_ret_val = igb_xmit_frame_ring(skb, tx_ring); + if (tx_ret_val == NETDEV_TX_OK) + good_cnt++; + } + + if (good_cnt != 64) { + ret_val = 12; + break; + } + + /* allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); + if (good_cnt != 64) { + ret_val = 13; + break; + } + } /* end loop count loop */ + + /* free the original skb */ + kfree_skb(skb); + + return ret_val; +} + +static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) +{ + /* PHY loopback cannot be performed if SoL/IDER + * sessions are active + */ + if (igb_check_reset_block(&adapter->hw)) { + dev_err(&adapter->pdev->dev, + "Cannot do PHY loopback test when SoL/IDER is active.\n"); + *data = 0; + goto out; + } + + if (adapter->hw.mac.type == e1000_i354) { + dev_info(&adapter->pdev->dev, + "Loopback test not supported on i354.\n"); + *data = 0; + goto out; + } + *data = igb_setup_desc_rings(adapter); + if (*data) + goto out; + *data = igb_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = igb_run_loopback_test(adapter); + igb_loopback_cleanup(adapter); + +err_loopback: + igb_free_desc_rings(adapter); +out: + return *data; +} + +static int igb_link_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->phy.media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->mac.serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + hw->mac.ops.check_for_link(&adapter->hw); + if (hw->mac.serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + hw->mac.ops.check_for_link(&adapter->hw); + if (hw->mac.autoneg) + msleep(5000); + + if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static void igb_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igb_adapter *adapter = 
netdev_priv(netdev); + u16 autoneg_advertised; + u8 forced_speed_duplex, autoneg; + bool if_running = netif_running(netdev); + + set_bit(__IGB_TESTING, &adapter->state); + + /* can't do offline tests on media switching devices */ + if (adapter->hw.dev_spec._82575.mas_capable) + eth_test->flags &= ~ETH_TEST_FL_OFFLINE; + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + autoneg_advertised = adapter->hw.phy.autoneg_advertised; + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; + autoneg = adapter->hw.mac.autoneg; + + dev_info(&adapter->pdev->dev, "offline testing starting\n"); + + /* power up link for link test */ + igb_power_up_link(adapter); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (igb_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + igb_close(netdev); + else + igb_reset(adapter); + + if (igb_reg_test(adapter, &data[TEST_REG])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + if (igb_eeprom_test(adapter, &data[TEST_EEP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + if (igb_intr_test(adapter, &data[TEST_IRQ])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + /* power up link for loopback test */ + igb_power_up_link(adapter); + if (igb_loopback_test(adapter, &data[TEST_LOOP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + adapter->hw.phy.autoneg_advertised = autoneg_advertised; + adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; + adapter->hw.mac.autoneg = autoneg; + + /* force this routine to wait until autoneg complete/timeout */ + adapter->hw.phy.autoneg_wait_to_complete = true; + igb_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = false; + + clear_bit(__IGB_TESTING, &adapter->state); + if (if_running) + igb_open(netdev); + } else { + dev_info(&adapter->pdev->dev, "online testing starting\n"); + + /* PHY is powered down when interface is down */ + if (if_running && igb_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + else + data[TEST_LINK] = 0; + + /* Online tests aren't run; pass by default */ + data[TEST_REG] = 0; + data[TEST_EEP] = 0; + data[TEST_IRQ] = 0; + data[TEST_LOOP] = 0; + + clear_bit(__IGB_TESTING, &adapter->state); + } + msleep_interruptible(4 * 1000); +} + +static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + wol->wolopts = 0; + + if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + + /* apply any specific unsupported masks here */ + switch (adapter->hw.device_id) { + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & E1000_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EOPNOTSUPP; + + if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) + return 
wol->wolopts ? -EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= E1000_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +/* bit defines for adapter->led_status */ +#define IGB_LED_ON 0 + +static int igb_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + igb_blink_led(hw); + return 2; + case ETHTOOL_ID_ON: + igb_blink_led(hw); + break; + case ETHTOOL_ID_OFF: + igb_led_off(hw); + break; + case ETHTOOL_ID_INACTIVE: + igb_led_off(hw); + clear_bit(IGB_LED_ON, &adapter->led_status); + igb_cleanup_led(hw); + break; + } + + return 0; +} + +static int igb_set_coalesce(struct net_device *netdev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 4 + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ack) +#else + struct ethtool_coalesce *ec) +#endif +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + + if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 3) && + (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->tx_coalesce_usecs > 3) && + (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) || + (ec->tx_coalesce_usecs == 2)) + return -EINVAL; + + if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGB_FLAG_DMAC) + adapter->flags &= ~IGB_FLAG_DMAC; + } + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + + /* convert to rate of irq's per second */ + if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + q_vector->tx.work_limit = adapter->tx_work_limit; + if (q_vector->rx.ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGB_START_ITR; + q_vector->set_itr = 1; + } + + return 0; +} + +static int igb_get_coalesce(struct net_device *netdev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 4 + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ack) +#else + struct ethtool_coalesce *ec) +#endif +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (adapter->rx_itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) { + if 
(adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + } + + return 0; +} + +static int igb_nway_reset(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + igb_reinit_locked(adapter); + return 0; +} + +static int igb_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IGB_STATS_LEN; + case ETH_SS_TEST: + return IGB_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return IGB_PRIV_FLAGS_STR_LEN; + default: + return -ENOTSUPP; + } +} + +static void igb_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igb_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igb_gstrings_stats[i].stat_offset; + data[i] = (igb_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset; + data[i] = (igb_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i+1] = ring->tx_stats.bytes; + data[i+2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); + data[i+2] += restart2; + + i += IGB_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i+1] = ring->rx_stats.bytes; + data[i+2] = ring->rx_stats.drops; + data[i+3] = ring->rx_stats.csum_err; + data[i+4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + i += IGB_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + +static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igb_gstrings_test, + IGB_TEST_LEN*ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, + igb_gstrings_stats[i].stat_string); + for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) + ethtool_sprintf(&p, + igb_gstrings_net_stats[i].stat_string); + for (i = 0; i < adapter->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "tx_queue_%u_restart", i); + } + for (i = 0; i < adapter->num_rx_queues; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_drops", i); + ethtool_sprintf(&p, "rx_queue_%u_csum_err", i); + ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i); + } + /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case 
ETH_SS_PRIV_FLAGS: + memcpy(data, igb_priv_flags_strings, + IGB_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int igb_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct igb_adapter *adapter = netdev_priv(dev); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + switch (adapter->hw.mac.type) { + case e1000_82575: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + return 0; + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + + /* 82576 does not support timestamping all packets. */ + if (adapter->hw.mac.type >= e1000_82580) + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); + else + info->rx_filters |= + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; + default: + return -EOPNOTSUPP; + } +} + +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igb_nfc_filter *rule = NULL; + + /* report total rule count */ + cmd->data = IGB_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + if (rule->filter.match_flags) { + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + if (rule->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = rule->filter.etype; + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = rule->filter.vlan_tci; + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + + return 0; + } + return -EINVAL; +} + +static int igb_get_ethtool_nfc_all(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igb_nfc_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = IGB_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static int igb_get_rss_hash_opts(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on igb */ + switch 
(cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igb_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = igb_get_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = igb_get_ethtool_nfc_all(adapter, cmd, rule_locs); + break; + case ETHTOOL_GRXFH: + ret = igb_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \ + IGB_FLAG_RSS_FIELD_IPV6_UDP) +static int igb_set_rss_hash_opt(struct igb_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct e1000_hw *hw = &adapter->hw; + u32 mrqc = rd32(E1000_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + dev_err(&adapter->pdev->dev, + "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags = flags; + + /* Perform hash on 
these packet types */ + mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP | + E1000_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + + wr32(E1000_MRQC, mrqc); + } + + return 0; +} + +static int igb_rxnfc_write_etype_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + u8 i; + u32 etqf; + u16 etype; + + /* find an empty etype filter register */ + for (i = 0; i < MAX_ETYPE_FILTER; ++i) { + if (!adapter->etype_bitmap[i]) + break; + } + if (i == MAX_ETYPE_FILTER) { + dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n"); + return -EINVAL; + } + + adapter->etype_bitmap[i] = true; + + etqf = rd32(E1000_ETQF(i)); + etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK); + + etqf |= E1000_ETQF_FILTER_ENABLE; + etqf &= ~E1000_ETQF_ETYPE_MASK; + etqf |= (etype & E1000_ETQF_ETYPE_MASK); + + etqf &= ~E1000_ETQF_QUEUE_MASK; + etqf |= ((input->action << E1000_ETQF_QUEUE_SHIFT) + & E1000_ETQF_QUEUE_MASK); + etqf |= E1000_ETQF_QUEUE_ENABLE; + + wr32(E1000_ETQF(i), etqf); + + input->etype_reg_index = i; + + return 0; +} + +static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + u8 vlan_priority; + u16 queue_index; + u32 vlapqf; + + vlapqf = rd32(E1000_VLAPQF); + vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK) + >> VLAN_PRIO_SHIFT; + queue_index = (vlapqf >> (vlan_priority * 4)) & E1000_VLAPQF_QUEUE_MASK; + + /* check whether this vlan prio is already set */ + if ((vlapqf & E1000_VLAPQF_P_VALID(vlan_priority)) && + (queue_index != input->action)) { + dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n"); + return -EEXIST; + } + + vlapqf |= E1000_VLAPQF_P_VALID(vlan_priority); + vlapqf |= E1000_VLAPQF_QUEUE_SEL(vlan_priority, input->action); + + wr32(E1000_VLAPQF, vlapqf); + + return 0; +} + +int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + int err = -EINVAL; + + if (hw->mac.type == e1000_i210 && + !(input->filter.match_flags & ~IGB_FILTER_FLAG_SRC_MAC_ADDR)) { + dev_err(&adapter->pdev->dev, + "i210 doesn't support flow classification rules specifying only source addresses.\n"); + return -EOPNOTSUPP; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { + err = igb_rxnfc_write_etype_filter(adapter, input); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) { + err = igb_add_mac_steering_filter(adapter, + input->filter.dst_addr, + input->action, 0); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) { + err = igb_add_mac_steering_filter(adapter, + input->filter.src_addr, + input->action, + IGB_MAC_STATE_SRC_ADDR); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) + err = igb_rxnfc_write_vlan_prio_filter(adapter, input); + + return err; +} + +static void igb_clear_etype_filter_regs(struct igb_adapter *adapter, + u16 reg_index) +{ + struct e1000_hw *hw = &adapter->hw; + u32 etqf = rd32(E1000_ETQF(reg_index)); + + etqf &= ~E1000_ETQF_QUEUE_ENABLE; + etqf &= 
~E1000_ETQF_QUEUE_MASK; + etqf &= ~E1000_ETQF_FILTER_ENABLE; + + wr32(E1000_ETQF(reg_index), etqf); + + adapter->etype_bitmap[reg_index] = false; +} + +static void igb_clear_vlan_prio_filter(struct igb_adapter *adapter, + u16 vlan_tci) +{ + struct e1000_hw *hw = &adapter->hw; + u8 vlan_priority; + u32 vlapqf; + + vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + + vlapqf = rd32(E1000_VLAPQF); + vlapqf &= ~E1000_VLAPQF_P_VALID(vlan_priority); + vlapqf &= ~E1000_VLAPQF_QUEUE_SEL(vlan_priority, + E1000_VLAPQF_QUEUE_MASK); + + wr32(E1000_VLAPQF, vlapqf); +} + +int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) +{ + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) + igb_clear_etype_filter_regs(adapter, + input->etype_reg_index); + + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) + igb_clear_vlan_prio_filter(adapter, + ntohs(input->filter.vlan_tci)); + + if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) + igb_del_mac_steering_filter(adapter, input->filter.src_addr, + input->action, + IGB_MAC_STATE_SRC_ADDR); + + if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) + igb_del_mac_steering_filter(adapter, input->filter.dst_addr, + input->action, 0); + + return 0; +} + +static int igb_update_ethtool_nfc_entry(struct igb_adapter *adapter, + struct igb_nfc_filter *input, + u16 sw_idx) +{ + struct igb_nfc_filter *rule, *parent; + int err = -EINVAL; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { + if (!input) + err = igb_erase_filter(adapter, rule); + + hlist_del(&rule->nfc_node); + kfree(rule); + adapter->nfc_filter_count--; + } + + /* If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; + + /* initialize node */ + INIT_HLIST_NODE(&input->nfc_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->nfc_node, &parent->nfc_node); + else + hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list); + + /* update counts */ + adapter->nfc_filter_count++; + + return 0; +} + +static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igb_nfc_filter *input, *rule; + int err = 0; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) + return -EOPNOTSUPP; + + /* Don't allow programming if the action is a queue greater than + * the number of online Rx queues. 
+ */ + if ((fsp->ring_cookie == RX_CLS_FLOW_DISC) || + (fsp->ring_cookie >= adapter->num_rx_queues)) { + dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n"); + return -EINVAL; + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= IGB_MAX_RXNFC_FILTERS) { + dev_err(&adapter->pdev->dev, "Location out of range\n"); + return -EINVAL; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) + return -EINVAL; + + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + input->filter.etype = fsp->h_u.ether_spec.h_proto; + input->filter.match_flags = IGB_FILTER_FLAG_ETHER_TYPE; + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + input->filter.match_flags |= IGB_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(input->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + input->filter.match_flags |= IGB_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(input->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { + err = -EINVAL; + goto err_out; + } + input->filter.vlan_tci = fsp->h_ext.vlan_tci; + input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; + } + + input->action = fsp->ring_cookie; + input->sw_idx = fsp->location; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (!memcmp(&input->filter, &rule->filter, + sizeof(input->filter))) { + err = -EEXIST; + dev_err(&adapter->pdev->dev, + "ethtool: this filter is already set\n"); + goto err_out_w_lock; + } + } + + err = igb_add_filter(adapter, input); + if (err) + goto err_out_w_lock; + + igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + + spin_unlock(&adapter->nfc_lock); + return 0; + +err_out_w_lock: + spin_unlock(&adapter->nfc_lock); +err_out: + kfree(input); + return err; +} + +static int igb_del_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->nfc_lock); + err = igb_update_ethtool_nfc_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->nfc_lock); + + return err; +} + +static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct igb_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = igb_set_rss_hash_opt(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + ret = igb_add_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = igb_del_ethtool_nfc_entry(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ret_val; + u16 phy_data; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + return -EOPNOTSUPP; + + edata->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full); + if (!hw->dev_spec._82575.eee_disable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + /* The IPCNFG and EEER registers are not 
supported on I354. */ + if (hw->mac.type == e1000_i354) { + igb_get_eee_status_i354(hw, (bool *)&edata->eee_active); + } else { + u32 eeer; + + eeer = rd32(E1000_EEER); + + /* EEE status on negotiated link */ + if (eeer & E1000_EEER_EEE_NEG) + edata->eee_active = true; + + if (eeer & E1000_EEER_TX_LPI_EN) + edata->tx_lpi_enabled = true; + } + + /* EEE Link Partner Advertised */ + switch (hw->mac.type) { + case e1000_i350: + ret_val = igb_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350, + &phy_data); + if (ret_val) + return -ENODATA; + + edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + break; + case e1000_i354: + case e1000_i210: + case e1000_i211: + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210, + E1000_EEE_LP_ADV_DEV_I210, + &phy_data); + if (ret_val) + return -ENODATA; + + edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + + break; + default: + break; + } + + edata->eee_enabled = !hw->dev_spec._82575.eee_disable; + + if ((hw->mac.type == e1000_i354) && + (edata->eee_enabled)) + edata->tx_lpi_enabled = true; + + /* Report correct negotiated EEE status for devices that + * wrongly report EEE at half-duplex + */ + if (adapter->link_duplex == HALF_DUPLEX) { + edata->eee_enabled = false; + edata->eee_active = false; + edata->tx_lpi_enabled = false; + edata->advertised &= ~edata->advertised; + } + + return 0; +} + +static int igb_set_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + bool adv1g_eee = true, adv100m_eee = true; + s32 ret_val; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + return -EOPNOTSUPP; + + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + + ret_val = igb_get_eee(netdev, &eee_curr); + if (ret_val) + return ret_val; + + if (eee_curr.eee_enabled) { + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + dev_err(&adapter->pdev->dev, + "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + /* Tx LPI timer is not implemented currently */ + if (edata->tx_lpi_timer) { + dev_err(&adapter->pdev->dev, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + + if (!edata->advertised || (edata->advertised & + ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) { + dev_err(&adapter->pdev->dev, + "EEE Advertisement supports only 100Tx and/or 100T full duplex\n"); + return -EINVAL; + } + adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL); + adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL); + + } else if (!edata->eee_enabled) { + dev_err(&adapter->pdev->dev, + "Setting EEE options are not supported with EEE disabled\n"); + return -EINVAL; + } + + adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { + hw->dev_spec._82575.eee_disable = !edata->eee_enabled; + adapter->flags |= IGB_FLAG_EEE; + + /* reset link */ + if (netif_running(netdev)) + igb_reinit_locked(adapter); + else + igb_reset(adapter); + } + + if (hw->mac.type == e1000_i354) + ret_val = igb_set_eee_i354(hw, adv1g_eee, adv100m_eee); + else + ret_val = igb_set_eee_i350(hw, adv1g_eee, adv100m_eee); + + if (ret_val) { + dev_err(&adapter->pdev->dev, + "Problem setting EEE advertisement options\n"); + return -EINVAL; + } + + return 0; +} + +static int igb_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct 
e1000_hw *hw = &adapter->hw; + u32 status = 0; + u16 sff8472_rev, addr_mode; + bool page_swap = false; + + if ((hw->phy.media_type == e1000_media_type_copper) || + (hw->phy.media_type == e1000_media_type_unknown)) + return -EOPNOTSUPP; + + /* Check whether we support SFF-8472 or not */ + status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); + if (status) + return -EIO; + + /* addressing mode is not supported */ + status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); + if (status) + return -EIO; + + /* addressing mode is not supported */ + if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) { + hw_dbg("Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); + page_swap = true; + } + + if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) { + /* We have an SFP, but it does not support SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have an SFP which supports a revision of SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int igb_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 status = 0; + u16 *dataword; + u16 first_word, last_word; + int i = 0; + + if (ee->len == 0) + return -EINVAL; + + first_word = ee->offset >> 1; + last_word = (ee->offset + ee->len - 1) >> 1; + + dataword = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!dataword) + return -ENOMEM; + + /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ + for (i = 0; i < last_word - first_word + 1; i++) { + status = igb_read_phy_reg_i2c(hw, (first_word + i) * 2, + &dataword[i]); + if (status) { + /* Error occurred while reading module */ + kfree(dataword); + return -EIO; + } + + be16_to_cpus(&dataword[i]); + } + + memcpy(data, (u8 *)dataword + (ee->offset & 1), ee->len); + kfree(dataword); + + return 0; +} + +static int igb_ethtool_begin(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; +} + +static void igb_ethtool_complete(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + pm_runtime_put(&adapter->pdev->dev); +} + +static u32 igb_get_rxfh_indir_size(struct net_device *netdev) +{ + return IGB_RETA_SIZE; +} + +static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < IGB_RETA_SIZE; i++) + indir[i] = adapter->rss_indir_tbl[i]; + + return 0; +} + +void igb_write_rss_indir_tbl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg = E1000_RETA(0); + u32 shift = 0; + int i = 0; + + switch (hw->mac.type) { + case e1000_82575: + shift = 6; + break; + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + shift = 3; + break; + default: + break; + } + + while (i < IGB_RETA_SIZE) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= adapter->rss_indir_tbl[i + j]; + } + + wr32(reg, val << shift); + reg += 4; + i += 4; + } +} + +static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + 
struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int i; + u32 num_queues; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + num_queues = adapter->rss_queues; + + switch (hw->mac.type) { + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + num_queues = 2; + break; + default: + break; + } + + /* Verify user input. */ + for (i = 0; i < IGB_RETA_SIZE; i++) + if (indir[i] >= num_queues) + return -EINVAL; + + + for (i = 0; i < IGB_RETA_SIZE; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + igb_write_rss_indir_tbl(adapter); + + return 0; +} + +static unsigned int igb_max_channels(struct igb_adapter *adapter) +{ + return igb_get_max_rss_queues(adapter); +} + +static void igb_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = igb_max_channels(adapter); + + /* Report info for other vector */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + ch->combined_count = adapter->rss_queues; +} + +static int igb_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; + unsigned int max_combined = 0; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* Verify other_count is valid and has not been changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ + max_combined = igb_max_channels(adapter); + if (count > max_combined) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; + igb_set_flag_queue_pairs(adapter, max_combined); + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. 
+ */ + return igb_reinit_queues(adapter); + } + + return 0; +} + +static u32 igb_get_priv_flags(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IGB_FLAG_RX_LEGACY) + priv_flags |= IGB_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int igb_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IGB_FLAG_RX_LEGACY; + if (priv_flags & IGB_PRIV_FLAGS_LEGACY_RX) + flags |= IGB_FLAG_RX_LEGACY; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + igb_reinit_locked(adapter); + } + + return 0; +} + +static const struct ethtool_ops igb_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_drvinfo = igb_get_drvinfo, + .get_regs_len = igb_get_regs_len, + .get_regs = igb_get_regs, + .get_wol = igb_get_wol, + .set_wol = igb_set_wol, + .get_msglevel = igb_get_msglevel, + .set_msglevel = igb_set_msglevel, + .nway_reset = igb_nway_reset, + .get_link = igb_get_link, + .get_eeprom_len = igb_get_eeprom_len, + .get_eeprom = igb_get_eeprom, + .set_eeprom = igb_set_eeprom, + .get_ringparam = igb_get_ringparam, + .set_ringparam = igb_set_ringparam, + .get_pauseparam = igb_get_pauseparam, + .set_pauseparam = igb_set_pauseparam, + .self_test = igb_diag_test, + .get_strings = igb_get_strings, + .set_phys_id = igb_set_phys_id, + .get_sset_count = igb_get_sset_count, + .get_ethtool_stats = igb_get_ethtool_stats, + .get_coalesce = igb_get_coalesce, + .set_coalesce = igb_set_coalesce, + .get_ts_info = igb_get_ts_info, + .get_rxnfc = igb_get_rxnfc, + .set_rxnfc = igb_set_rxnfc, + .get_eee = igb_get_eee, + .set_eee = igb_set_eee, + .get_module_info = igb_get_module_info, + .get_module_eeprom = igb_get_module_eeprom, + .get_rxfh_indir_size = igb_get_rxfh_indir_size, + .get_rxfh = igb_get_rxfh, + .set_rxfh = igb_set_rxfh, + .get_channels = igb_get_channels, + .set_channels = igb_set_channels, + .get_priv_flags = igb_get_priv_flags, + .set_priv_flags = igb_set_priv_flags, + .begin = igb_ethtool_begin, + .complete = igb_ethtool_complete, + .get_link_ksettings = igb_get_link_ksettings, + .set_link_ksettings = igb_set_link_ksettings, +}; + +void igb_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &igb_ethtool_ops; +} diff --git a/devices/igb/igb_ethtool-5.14-orig.c b/devices/igb/igb_ethtool-5.14-orig.c new file mode 100644 index 00000000..636a1b1f --- /dev/null +++ b/devices/igb/igb_ethtool-5.14-orig.c @@ -0,0 +1,3499 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +/* ethtool support for igb */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "igb.h" + +struct igb_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IGB_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct igb_adapter, _stat), \ + .stat_offset = offsetof(struct igb_adapter, _stat) \ +} +static const struct igb_stats igb_gstrings_stats[] = { + IGB_STAT("rx_packets", stats.gprc), + IGB_STAT("tx_packets", stats.gptc), + IGB_STAT("rx_bytes", stats.gorc), + IGB_STAT("tx_bytes", stats.gotc), + IGB_STAT("rx_broadcast", stats.bprc), + IGB_STAT("tx_broadcast", stats.bptc), + IGB_STAT("rx_multicast", stats.mprc), + IGB_STAT("tx_multicast", stats.mptc), + IGB_STAT("multicast", stats.mprc), + IGB_STAT("collisions", stats.colc), + IGB_STAT("rx_crc_errors", stats.crcerrs), + IGB_STAT("rx_no_buffer_count", stats.rnbc), + IGB_STAT("rx_missed_errors", stats.mpc), + IGB_STAT("tx_aborted_errors", stats.ecol), + IGB_STAT("tx_carrier_errors", stats.tncrs), + IGB_STAT("tx_window_errors", stats.latecol), + IGB_STAT("tx_abort_late_coll", stats.latecol), + IGB_STAT("tx_deferred_ok", stats.dc), + IGB_STAT("tx_single_coll_ok", stats.scc), + IGB_STAT("tx_multi_coll_ok", stats.mcc), + IGB_STAT("tx_timeout_count", tx_timeout_count), + IGB_STAT("rx_long_length_errors", stats.roc), + IGB_STAT("rx_short_length_errors", stats.ruc), + IGB_STAT("rx_align_errors", stats.algnerrc), + IGB_STAT("tx_tcp_seg_good", stats.tsctc), + IGB_STAT("tx_tcp_seg_failed", stats.tsctfc), + IGB_STAT("rx_flow_control_xon", stats.xonrxc), + IGB_STAT("rx_flow_control_xoff", stats.xoffrxc), + IGB_STAT("tx_flow_control_xon", stats.xontxc), + IGB_STAT("tx_flow_control_xoff", stats.xofftxc), + IGB_STAT("rx_long_byte_count", stats.gorc), + IGB_STAT("tx_dma_out_of_sync", stats.doosync), + IGB_STAT("tx_smbus", stats.mgptc), + IGB_STAT("rx_smbus", stats.mgprc), + IGB_STAT("dropped_smbus", stats.mgpdc), + IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGB_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc), + IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IGB_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +}; + +#define IGB_NETDEV_STAT(_net_stat) { \ + .stat_string = __stringify(_net_stat), \ + .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ +} +static const struct igb_stats igb_gstrings_net_stats[] = { + IGB_NETDEV_STAT(rx_errors), + IGB_NETDEV_STAT(tx_errors), + IGB_NETDEV_STAT(tx_dropped), + IGB_NETDEV_STAT(rx_length_errors), + IGB_NETDEV_STAT(rx_over_errors), + IGB_NETDEV_STAT(rx_frame_errors), + IGB_NETDEV_STAT(rx_fifo_errors), + IGB_NETDEV_STAT(tx_fifo_errors), + IGB_NETDEV_STAT(tx_heartbeat_errors) +}; + +#define IGB_GLOBAL_STATS_LEN \ + (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) +#define IGB_NETDEV_STATS_LEN \ + (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) +#define IGB_RX_QUEUE_STATS_LEN \ + (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) + +#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ + +#define IGB_QUEUE_STATS_LEN \ + ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGB_RX_QUEUE_STATS_LEN) + \ + (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \ + 
IGB_TX_QUEUE_STATS_LEN)) +#define IGB_STATS_LEN \ + (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) + +enum igb_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + +static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" +}; +#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) + +static const char igb_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IGB_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IGB_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igb_priv_flags_strings) + +static int igb_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; + u32 status; + u32 speed; + u32 supported, advertising; + + status = pm_runtime_suspended(&adapter->pdev->dev) ? + 0 : rd32(E1000_STATUS); + if (hw->phy.media_type == e1000_media_type_copper) { + + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP | + SUPPORTED_Pause); + advertising = ADVERTISED_TP; + + if (hw->mac.autoneg == 1) { + advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + advertising |= hw->phy.autoneg_advertised; + } + + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy.addr; + } else { + supported = (SUPPORTED_FIBRE | + SUPPORTED_1000baseKX_Full | + SUPPORTED_Autoneg | + SUPPORTED_Pause); + advertising = (ADVERTISED_FIBRE | + ADVERTISED_1000baseKX_Full); + if (hw->mac.type == e1000_i354) { + if ((hw->device_id == + E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + supported |= SUPPORTED_2500baseX_Full; + supported &= ~SUPPORTED_1000baseKX_Full; + advertising |= ADVERTISED_2500baseX_Full; + advertising &= ~ADVERTISED_1000baseKX_Full; + } + } + if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { + supported |= SUPPORTED_100baseT_Full; + advertising |= ADVERTISED_100baseT_Full; + } + if (hw->mac.autoneg == 1) + advertising |= ADVERTISED_Autoneg; + + cmd->base.port = PORT_FIBRE; + } + if (hw->mac.autoneg != 1) + advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + + switch (hw->fc.requested_mode) { + case e1000_fc_full: + advertising |= ADVERTISED_Pause; + break; + case e1000_fc_rx_pause: + advertising |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + break; + case e1000_fc_tx_pause: + advertising |= ADVERTISED_Asym_Pause; + break; + default: + advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + } + if (status & E1000_STATUS_LU) { + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + speed = SPEED_2500; + } else if (status & E1000_STATUS_SPEED_1000) { + speed = SPEED_1000; + } else if (status & E1000_STATUS_SPEED_100) { + speed = SPEED_100; + } else { + speed = SPEED_10; + } + if ((status & E1000_STATUS_FD) || + hw->phy.media_type != e1000_media_type_copper) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.speed = speed; + if 
((hw->phy.media_type == e1000_media_type_fiber) || + hw->mac.autoneg) + cmd->base.autoneg = AUTONEG_ENABLE; + else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == e1000_media_type_copper) + cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + + return 0; +} + +static int igb_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 advertising; + + /* When SoL/IDER sessions are active, autoneg/speed/duplex + * cannot be changed + */ + if (igb_check_reset_block(hw)) { + dev_err(&adapter->pdev->dev, + "Cannot change link characteristics when SoL/IDER is active.\n"); + return -EINVAL; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (hw->phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (cmd->base.autoneg != AUTONEG_ENABLE)) { + dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + if (hw->phy.media_type == e1000_media_type_fiber) { + hw->phy.autoneg_advertised = advertising | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + switch (adapter->link_speed) { + case SPEED_2500: + hw->phy.autoneg_advertised = + ADVERTISED_2500baseX_Full; + break; + case SPEED_1000: + hw->phy.autoneg_advertised = + ADVERTISED_1000baseT_Full; + break; + case SPEED_100: + hw->phy.autoneg_advertised = + ADVERTISED_100baseT_Full; + break; + default: + break; + } + } else { + hw->phy.autoneg_advertised = advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + } + advertising = hw->phy.autoneg_advertised; + if (adapter->fc_autoneg) + hw->fc.requested_mode = e1000_fc_default; + } else { + u32 speed = cmd->base.speed; + /* calling this overrides forced MDI setting */ + if (igb_set_spd_dplx(adapter, speed, cmd->base.duplex)) { + clear_bit(__IGB_RESETTING, &adapter->state); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + igb_down(adapter); + igb_up(adapter); + } else + igb_reset(adapter); + + clear_bit(__IGB_RESETTING, &adapter->state); + return 0; +} + +static u32 igb_get_link(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_mac_info *mac = &adapter->hw.mac; + + /* If the link is not reported up to netdev, 
interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + mac->get_link_status = 1; + + return igb_has_link(adapter); +} + +static void igb_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == e1000_fc_rx_pause) + pause->rx_pause = 1; + else if (hw->fc.current_mode == e1000_fc_tx_pause) + pause->tx_pause = 1; + else if (hw->fc.current_mode == e1000_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int igb_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + int i; + + /* 100basefx does not support setting link flow control */ + if (hw->dev_spec._82575.eth_flags.e100_base_fx) + return -EINVAL; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = e1000_fc_default; + if (netif_running(adapter->netdev)) { + igb_down(adapter); + igb_up(adapter); + } else { + igb_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + retval = ((hw->phy.media_type == e1000_media_type_copper) ? 
+ igb_force_mac_fc(hw) : igb_setup_link(hw)); + + /* Make sure SRRCTL considers new fc settings for each ring */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + + igb_setup_srrctl(adapter, ring); + } + } + + clear_bit(__IGB_RESETTING, &adapter->state); + return retval; +} + +static u32 igb_get_msglevel(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void igb_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int igb_get_regs_len(struct net_device *netdev) +{ +#define IGB_REGS_LEN 740 + return IGB_REGS_LEN * sizeof(u32); +} + +static void igb_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IGB_REGS_LEN * sizeof(u32)); + + regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ + regs_buff[0] = rd32(E1000_CTRL); + regs_buff[1] = rd32(E1000_STATUS); + regs_buff[2] = rd32(E1000_CTRL_EXT); + regs_buff[3] = rd32(E1000_MDIC); + regs_buff[4] = rd32(E1000_SCTL); + regs_buff[5] = rd32(E1000_CONNSW); + regs_buff[6] = rd32(E1000_VET); + regs_buff[7] = rd32(E1000_LEDCTL); + regs_buff[8] = rd32(E1000_PBA); + regs_buff[9] = rd32(E1000_PBS); + regs_buff[10] = rd32(E1000_FRTIMER); + regs_buff[11] = rd32(E1000_TCPTIMER); + + /* NVM Register */ + regs_buff[12] = rd32(E1000_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read the + * same but EICS does not clear on read + */ + regs_buff[13] = rd32(E1000_EICS); + regs_buff[14] = rd32(E1000_EICS); + regs_buff[15] = rd32(E1000_EIMS); + regs_buff[16] = rd32(E1000_EIMC); + regs_buff[17] = rd32(E1000_EIAC); + regs_buff[18] = rd32(E1000_EIAM); + /* Reading ICS for ICR because they read the + * same but ICS does not clear on read + */ + regs_buff[19] = rd32(E1000_ICS); + regs_buff[20] = rd32(E1000_ICS); + regs_buff[21] = rd32(E1000_IMS); + regs_buff[22] = rd32(E1000_IMC); + regs_buff[23] = rd32(E1000_IAC); + regs_buff[24] = rd32(E1000_IAM); + regs_buff[25] = rd32(E1000_IMIRVP); + + /* Flow Control */ + regs_buff[26] = rd32(E1000_FCAL); + regs_buff[27] = rd32(E1000_FCAH); + regs_buff[28] = rd32(E1000_FCTTV); + regs_buff[29] = rd32(E1000_FCRTL); + regs_buff[30] = rd32(E1000_FCRTH); + regs_buff[31] = rd32(E1000_FCRTV); + + /* Receive */ + regs_buff[32] = rd32(E1000_RCTL); + regs_buff[33] = rd32(E1000_RXCSUM); + regs_buff[34] = rd32(E1000_RLPML); + regs_buff[35] = rd32(E1000_RFCTL); + regs_buff[36] = rd32(E1000_MRQC); + regs_buff[37] = rd32(E1000_VT_CTL); + + /* Transmit */ + regs_buff[38] = rd32(E1000_TCTL); + regs_buff[39] = rd32(E1000_TCTL_EXT); + regs_buff[40] = rd32(E1000_TIPG); + regs_buff[41] = rd32(E1000_DTXCTL); + + /* Wake Up */ + regs_buff[42] = rd32(E1000_WUC); + regs_buff[43] = rd32(E1000_WUFC); + regs_buff[44] = rd32(E1000_WUS); + regs_buff[45] = rd32(E1000_IPAV); + regs_buff[46] = rd32(E1000_WUPL); + + /* MAC */ + regs_buff[47] = rd32(E1000_PCS_CFG0); + regs_buff[48] = rd32(E1000_PCS_LCTL); + regs_buff[49] = rd32(E1000_PCS_LSTAT); + regs_buff[50] = rd32(E1000_PCS_ANADV); + regs_buff[51] = rd32(E1000_PCS_LPAB); + regs_buff[52] = rd32(E1000_PCS_NPTX); + regs_buff[53] = rd32(E1000_PCS_LPABNP); + + /* Statistics */ + regs_buff[54] = adapter->stats.crcerrs; + regs_buff[55] = adapter->stats.algnerrc; + regs_buff[56] 
= adapter->stats.symerrs; + regs_buff[57] = adapter->stats.rxerrc; + regs_buff[58] = adapter->stats.mpc; + regs_buff[59] = adapter->stats.scc; + regs_buff[60] = adapter->stats.ecol; + regs_buff[61] = adapter->stats.mcc; + regs_buff[62] = adapter->stats.latecol; + regs_buff[63] = adapter->stats.colc; + regs_buff[64] = adapter->stats.dc; + regs_buff[65] = adapter->stats.tncrs; + regs_buff[66] = adapter->stats.sec; + regs_buff[67] = adapter->stats.htdpmc; + regs_buff[68] = adapter->stats.rlec; + regs_buff[69] = adapter->stats.xonrxc; + regs_buff[70] = adapter->stats.xontxc; + regs_buff[71] = adapter->stats.xoffrxc; + regs_buff[72] = adapter->stats.xofftxc; + regs_buff[73] = adapter->stats.fcruc; + regs_buff[74] = adapter->stats.prc64; + regs_buff[75] = adapter->stats.prc127; + regs_buff[76] = adapter->stats.prc255; + regs_buff[77] = adapter->stats.prc511; + regs_buff[78] = adapter->stats.prc1023; + regs_buff[79] = adapter->stats.prc1522; + regs_buff[80] = adapter->stats.gprc; + regs_buff[81] = adapter->stats.bprc; + regs_buff[82] = adapter->stats.mprc; + regs_buff[83] = adapter->stats.gptc; + regs_buff[84] = adapter->stats.gorc; + regs_buff[86] = adapter->stats.gotc; + regs_buff[88] = adapter->stats.rnbc; + regs_buff[89] = adapter->stats.ruc; + regs_buff[90] = adapter->stats.rfc; + regs_buff[91] = adapter->stats.roc; + regs_buff[92] = adapter->stats.rjc; + regs_buff[93] = adapter->stats.mgprc; + regs_buff[94] = adapter->stats.mgpdc; + regs_buff[95] = adapter->stats.mgptc; + regs_buff[96] = adapter->stats.tor; + regs_buff[98] = adapter->stats.tot; + regs_buff[100] = adapter->stats.tpr; + regs_buff[101] = adapter->stats.tpt; + regs_buff[102] = adapter->stats.ptc64; + regs_buff[103] = adapter->stats.ptc127; + regs_buff[104] = adapter->stats.ptc255; + regs_buff[105] = adapter->stats.ptc511; + regs_buff[106] = adapter->stats.ptc1023; + regs_buff[107] = adapter->stats.ptc1522; + regs_buff[108] = adapter->stats.mptc; + regs_buff[109] = adapter->stats.bptc; + regs_buff[110] = adapter->stats.tsctc; + regs_buff[111] = adapter->stats.iac; + regs_buff[112] = adapter->stats.rpthc; + regs_buff[113] = adapter->stats.hgptc; + regs_buff[114] = adapter->stats.hgorc; + regs_buff[116] = adapter->stats.hgotc; + regs_buff[118] = adapter->stats.lenerrs; + regs_buff[119] = adapter->stats.scvpc; + regs_buff[120] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) + regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[125 + i] = rd32(E1000_PSRTYPE(i)); + for (i = 0; i < 4; i++) + regs_buff[129 + i] = rd32(E1000_RDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[133 + i] = rd32(E1000_RDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[137 + i] = rd32(E1000_RDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[141 + i] = rd32(E1000_RDH(i)); + for (i = 0; i < 4; i++) + regs_buff[145 + i] = rd32(E1000_RDT(i)); + for (i = 0; i < 4; i++) + regs_buff[149 + i] = rd32(E1000_RXDCTL(i)); + + for (i = 0; i < 10; i++) + regs_buff[153 + i] = rd32(E1000_EITR(i)); + for (i = 0; i < 8; i++) + regs_buff[163 + i] = rd32(E1000_IMIR(i)); + for (i = 0; i < 8; i++) + regs_buff[171 + i] = rd32(E1000_IMIREXT(i)); + for (i = 0; i < 16; i++) + regs_buff[179 + i] = rd32(E1000_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[195 + i] = rd32(E1000_RAH(i)); + + for (i = 0; i < 4; i++) + regs_buff[211 + i] = rd32(E1000_TDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[215 + i] = rd32(E1000_TDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[219 + i] = rd32(E1000_TDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[223 + i] = 
rd32(E1000_TDH(i)); + for (i = 0; i < 4; i++) + regs_buff[227 + i] = rd32(E1000_TDT(i)); + for (i = 0; i < 4; i++) + regs_buff[231 + i] = rd32(E1000_TXDCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[235 + i] = rd32(E1000_TDWBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[239 + i] = rd32(E1000_TDWBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i)); + + for (i = 0; i < 4; i++) + regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i)); + for (i = 0; i < 4; i++) + regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i)); + for (i = 0; i < 32; i++) + regs_buff[255 + i] = rd32(E1000_WUPM_REG(i)); + for (i = 0; i < 128; i++) + regs_buff[287 + i] = rd32(E1000_FFMT_REG(i)); + for (i = 0; i < 128; i++) + regs_buff[415 + i] = rd32(E1000_FFVT_REG(i)); + for (i = 0; i < 4; i++) + regs_buff[543 + i] = rd32(E1000_FFLT_REG(i)); + + regs_buff[547] = rd32(E1000_TDFH); + regs_buff[548] = rd32(E1000_TDFT); + regs_buff[549] = rd32(E1000_TDFHS); + regs_buff[550] = rd32(E1000_TDFPC); + + if (hw->mac.type > e1000_82580) { + regs_buff[551] = adapter->stats.o2bgptc; + regs_buff[552] = adapter->stats.b2ospc; + regs_buff[553] = adapter->stats.o2bspc; + regs_buff[554] = adapter->stats.b2ogprc; + } + + if (hw->mac.type == e1000_82576) { + for (i = 0; i < 12; i++) + regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4)); + for (i = 0; i < 4; i++) + regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[607 + i] = rd32(E1000_RDH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[619 + i] = rd32(E1000_RDT(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4)); + + for (i = 0; i < 12; i++) + regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[679 + i] = rd32(E1000_TDH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[691 + i] = rd32(E1000_TDT(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4)); + } + + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) + regs_buff[739] = rd32(E1000_I210_RR2DCDELAY); +} + +static int igb_get_eeprom_len(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + return adapter->hw.nvm.word_size * 2; +} + +static int igb_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == e1000_nvm_eeprom_spi) + ret_val = hw->nvm.ops.read(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = 
hw->nvm.ops.read(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int igb_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if ((hw->mac.type >= e1000_i210) && + !igb_get_flash_presence_i210(hw)) { + return -EOPNOTSUPP; + } + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->nvm.ops.write(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) + hw->nvm.ops.update(hw); + + igb_set_fw_version(adapter); + kfree(eeprom_buff); + return ret_val; +} + +static void igb_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); + + /* EEPROM image version # is reported as firmware version # for + * 82575 controllers + */ + strlcpy(drvinfo->fw_version, adapter->fw_version, + sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN; +} + +static void igb_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IGB_MAX_RXD; + ring->tx_max_pending = IGB_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int igb_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct igb_ring *temp_ring; + int i, err = 0; + u16 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = 
min_t(u32, ring->tx_pending, IGB_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(array_size(sizeof(struct igb_ring), + adapter->num_tx_queues)); + else + temp_ring = vmalloc(array_size(sizeof(struct igb_ring), + adapter->num_rx_queues)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igb_down(adapter); + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. + */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct igb_ring)); + + temp_ring[i].count = new_tx_count; + err = igb_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igb_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + igb_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct igb_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct igb_ring)); + + /* Clear copied XDP RX-queue info */ + memset(&temp_ring[i].xdp_rxq, 0, + sizeof(temp_ring[i].xdp_rxq)); + + temp_ring[i].count = new_rx_count; + err = igb_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igb_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + igb_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct igb_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } +err_setup: + igb_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGB_RESETTING, &adapter->state); + return err; +} + +/* ethtool register test data */ +struct igb_reg_test { + u16 reg; + u16 reg_offset; + u16 array_len; + u16 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x100 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* i210 reg test */ +static struct igb_reg_test reg_test_i210[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* RDH is read-only for i210, only test RDT. */ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x900FFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0, 0 } +}; + +/* i350 reg test */ +static struct igb_reg_test reg_test_i350[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* RDH is read-only for i350, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0xC3FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 16, TABLE64_TEST_HI, + 0xC3FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82580 reg test */ +static struct igb_reg_test reg_test_82580[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + /* RDH is read-only for 82580, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82576 reg test */ +static struct igb_reg_test reg_test_82576[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + /* Enable all RX queues before testing. */ + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, + E1000_RXDCTL_QUEUE_ENABLE }, + { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, + E1000_RXDCTL_QUEUE_ENABLE }, + /* RDH is read-only for 82576, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, + { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82575 register test */ +static struct igb_reg_test reg_test_82575[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* Enable all four RX queues before testing. */ + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, + E1000_RXDCTL_QUEUE_ENABLE }, + /* RDH is read-only for 82575, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pat, val; + static const u32 _test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { + wr32(reg, (_test[pat] & write)); + val = rd32(reg) & mask; + if (val != (_test[pat] & write & mask)) { + dev_err(&adapter->pdev->dev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, (_test[pat] & write & mask)); + *data = reg; + return true; + } + } + + return false; +} + +static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u32 val; + + wr32(reg, write & mask); + val = rd32(reg); + if ((write & mask) != (val & mask)) { + dev_err(&adapter->pdev->dev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + return true; + } + + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +static int igb_reg_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct igb_reg_test *test; + u32 value, before, after; + u32 i, toggle; + + switch (adapter->hw.mac.type) { + case e1000_i350: + case e1000_i354: + test = reg_test_i350; + toggle = 0x7FEFF3FF; + break; + case e1000_i210: + case e1000_i211: + test = reg_test_i210; + toggle = 0x7FEFF3FF; + break; + case e1000_82580: + test = reg_test_82580; + toggle = 0x7FEFF3FF; + break; + case e1000_82576: + test = reg_test_82576; + toggle = 0x7FFFF3FF; + break; + default: + test = reg_test_82575; + toggle = 0x7FFFF3FF; + break; + } + + /* Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writable on newer MACs. 
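+ *
+ * The per-MAC toggle mask selected above restricts the write and
+ * read-back check to bits that may legally change on that controller.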
+ */ + before = rd32(E1000_STATUS); + value = (rd32(E1000_STATUS) & toggle); + wr32(E1000_STATUS, toggle); + after = rd32(E1000_STATUS) & toggle; + if (value != after) { + dev_err(&adapter->pdev->dev, + "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); + *data = 1; + return 1; + } + /* restore previous status */ + wr32(E1000_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + REG_PATTERN_TEST(test->reg + + (i * test->reg_offset), + test->mask, + test->write); + break; + case SET_READ_TEST: + REG_SET_AND_CHECK(test->reg + + (i * test->reg_offset), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + writel(test->write, + (adapter->hw.hw_addr + test->reg) + + (i * test->reg_offset)); + break; + case TABLE32_TEST: + REG_PATTERN_TEST(test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + REG_PATTERN_TEST(test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + REG_PATTERN_TEST((test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + } + test++; + } + + *data = 0; + return 0; +} + +static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* Validate eeprom on all parts but flashless */ + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (igb_get_flash_presence_i210(hw)) { + if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) + *data = 2; + } + break; + default: + if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) + *data = 2; + break; + } + + return *data; +} + +static irqreturn_t igb_test_intr(int irq, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *) data; + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= rd32(E1000_ICR); + + return IRQ_HANDLED; +} + +static int igb_intr_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 mask, ics_mask, i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + if (request_irq(adapter->msix_entries[0].vector, + igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; + return -1; + } + } else if (adapter->flags & IGB_FLAG_HAS_MSI) { + shared_int = false; + if (request_irq(irq, + igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED, + netdev->name, adapter)) { + shared_int = false; + } else if (request_irq(irq, igb_test_intr, IRQF_SHARED, + netdev->name, adapter)) { + *data = 1; + return -1; + } + dev_info(&adapter->pdev->dev, "testing %s interrupt\n", + (shared_int ? 
"shared" : "unshared")); + + /* Disable all the interrupts */ + wr32(E1000_IMC, ~0); + wrfl(); + usleep_range(10000, 11000); + + /* Define all writable bits for ICS */ + switch (hw->mac.type) { + case e1000_82575: + ics_mask = 0x37F47EDD; + break; + case e1000_82576: + ics_mask = 0x77D4FBFD; + break; + case e1000_82580: + ics_mask = 0x77DCFED5; + break; + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + ics_mask = 0x77DCFED5; + break; + default: + ics_mask = 0x7FFFFFFF; + break; + } + + /* Test each interrupt */ + for (; i < 31; i++) { + /* Interrupt to test */ + mask = BIT(i); + + if (!(mask & ics_mask)) + continue; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMC, mask); + wr32(E1000_ICS, mask); + wrfl(); + usleep_range(10000, 11000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMS, mask); + wr32(E1000_ICS, mask); + wrfl(); + usleep_range(10000, 11000); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. 
+ */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMC, ~mask); + wr32(E1000_ICS, ~mask); + wrfl(); + usleep_range(10000, 11000); + + if (adapter->test_icr & mask) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + wr32(E1000_IMC, ~0); + wrfl(); + usleep_range(10000, 11000); + + /* Unhook test interrupt handler */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) + free_irq(adapter->msix_entries[0].vector, adapter); + else + free_irq(irq, adapter); + + return *data; +} + +static void igb_free_desc_rings(struct igb_adapter *adapter) +{ + igb_free_tx_resources(&adapter->test_tx_ring); + igb_free_rx_resources(&adapter->test_rx_ring); +} + +static int igb_setup_desc_rings(struct igb_adapter *adapter) +{ + struct igb_ring *tx_ring = &adapter->test_tx_ring; + struct igb_ring *rx_ring = &adapter->test_rx_ring; + struct e1000_hw *hw = &adapter->hw; + int ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = IGB_DEFAULT_TXD; + tx_ring->dev = &adapter->pdev->dev; + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->vfs_allocated_count; + + if (igb_setup_tx_resources(tx_ring)) { + ret_val = 1; + goto err_nomem; + } + + igb_setup_tctl(adapter); + igb_configure_tx_ring(adapter, tx_ring); + + /* Setup Rx descriptor ring and Rx buffers */ + rx_ring->count = IGB_DEFAULT_RXD; + rx_ring->dev = &adapter->pdev->dev; + rx_ring->netdev = adapter->netdev; + rx_ring->reg_idx = adapter->vfs_allocated_count; + + if (igb_setup_rx_resources(rx_ring)) { + ret_val = 3; + goto err_nomem; + } + + /* set the default queue to queue 0 of PF */ + wr32(E1000_MRQC, adapter->vfs_allocated_count << 3); + + /* enable receive ring */ + igb_setup_rctl(adapter); + igb_configure_rx_ring(adapter, rx_ring); + + igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring)); + + return 0; + +err_nomem: + igb_free_desc_rings(adapter); + return ret_val; +} + +static void igb_phy_disable_receiver(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + igb_write_phy_reg(hw, 29, 0x001F); + igb_write_phy_reg(hw, 30, 0x8FFC); + igb_write_phy_reg(hw, 29, 0x001A); + igb_write_phy_reg(hw, 30, 0x8FF0); +} + +static int igb_integrated_phy_loopback(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + + hw->mac.autoneg = false; + + if (hw->phy.type == e1000_phy_m88) { + if (hw->phy.id != I210_I_PHY_ID) { + /* Auto-MDI/MDIX Off */ + igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); + /* autoneg off */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); + } else { + /* force 1000, set loopback */ + igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); + igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); + } + } else if (hw->phy.type == e1000_phy_82580) { + /* enable MII loopback */ + igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); + } + + /* add small delay to avoid loopback test failure */ + msleep(50); + + /* force 1000, set loopback */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = rd32(E1000_CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD | /* Force Duplex to FULL */ + E1000_CTRL_SLU); /* Set link up enable bit */ + + if (hw->phy.type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + + wr32(E1000_CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy.type == e1000_phy_m88) + igb_phy_disable_receiver(adapter); + + msleep(500); + return 0; +} + +static int igb_set_phy_loopback(struct igb_adapter *adapter) +{ + return igb_integrated_phy_loopback(adapter); +} + +static int igb_setup_loopback_test(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg; + + reg = rd32(E1000_CTRL_EXT); + + /* use CTRL_EXT to identify link type as SGMII can appear as copper */ + if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) { + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || + (hw->device_id == E1000_DEV_ID_I354_SGMII) || + (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) { + /* Enable DH89xxCC MPHY for near end loopback */ + reg = rd32(E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | + E1000_MPHY_PCS_CLK_REG_OFFSET; + wr32(E1000_MPHY_ADDR_CTL, reg); + + reg = rd32(E1000_MPHY_DATA); + reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN; + wr32(E1000_MPHY_DATA, reg); + } + + reg = rd32(E1000_RCTL); + reg |= E1000_RCTL_LBM_TCVR; + wr32(E1000_RCTL, reg); + + wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); + + reg = rd32(E1000_CTRL); + reg &= ~(E1000_CTRL_RFCE | + E1000_CTRL_TFCE | + E1000_CTRL_LRST); + reg |= E1000_CTRL_SLU | + E1000_CTRL_FD; + wr32(E1000_CTRL, reg); + + /* Unset switch control to serdes energy detect */ + reg = rd32(E1000_CONNSW); + reg &= ~E1000_CONNSW_ENRGSRC; + wr32(E1000_CONNSW, reg); + + /* Unset sigdetect for SERDES loopback on + * 82580 and newer devices. 
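+ *
+ * With signal detect ignored the PCS can report link up even though
+ * no external signal is present on the looped-back port.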
+ */ + if (hw->mac.type >= e1000_82580) { + reg = rd32(E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_IGN_SD; + wr32(E1000_PCS_CFG0, reg); + } + + /* Set PCS register for forced speed */ + reg = rd32(E1000_PCS_LCTL); + reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ + reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ + E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ + E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ + E1000_PCS_LCTL_FSD | /* Force Speed */ + E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ + wr32(E1000_PCS_LCTL, reg); + + return 0; + } + + return igb_set_phy_loopback(adapter); +} + +static void igb_loopback_cleanup(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || + (hw->device_id == E1000_DEV_ID_I354_SGMII)) { + u32 reg; + + /* Disable near end loopback on DH89xxCC */ + reg = rd32(E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | + E1000_MPHY_PCS_CLK_REG_OFFSET; + wr32(E1000_MPHY_ADDR_CTL, reg); + + reg = rd32(E1000_MPHY_DATA); + reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN; + wr32(E1000_MPHY_DATA, reg); + } + + rctl = rd32(E1000_RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + wr32(E1000_RCTL, rctl); + + hw->mac.autoneg = true; + igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + igb_write_phy_reg(hw, PHY_CONTROL, phy_reg); + igb_phy_sw_reset(hw); + } +} + +static void igb_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size /= 2; + memset(&skb->data[frame_size], 0xAA, frame_size - 1); + skb->data[frame_size + 10] = 0xBE; + skb->data[frame_size + 12] = 0xAF; +} + +static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer, + unsigned int frame_size) +{ + unsigned char *data; + bool match = true; + + frame_size >>= 1; + + data = kmap(rx_buffer->page); + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + + kunmap(rx_buffer->page); + + return match; +} + +static int igb_clean_test_rings(struct igb_ring *rx_ring, + struct igb_ring *tx_ring, + unsigned int size) +{ + union e1000_adv_rx_desc *rx_desc; + struct igb_rx_buffer *rx_buffer_info; + struct igb_tx_buffer *tx_buffer_info; + u16 rx_ntc, tx_ntc, count = 0; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); + + while (rx_desc->wb.upper.length) { + /* check Rx buffer */ + rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; + + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer_info->dma, + size, + DMA_FROM_DEVICE); + + /* verify contents of skb */ + if (igb_check_lbtest_frame(rx_buffer_info, size)) + count++; + + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer_info->dma, + size, + DMA_FROM_DEVICE); + + /* unmap buffer on Tx side */ + tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer_info->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer_info, dma), + dma_unmap_len(tx_buffer_info, len), + DMA_TO_DEVICE); + 
dma_unmap_len_set(tx_buffer_info, len, 0); + + /* increment Rx/Tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); + } + + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* re-map buffers to ring, store next to clean values */ + igb_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +static int igb_run_loopback_test(struct igb_adapter *adapter) +{ + struct igb_ring *tx_ring = &adapter->test_tx_ring; + struct igb_ring *rx_ring = &adapter->test_rx_ring; + u16 i, j, lc, good_cnt; + int ret_val = 0; + unsigned int size = IGB_RX_HDR_LEN; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; + + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; + + /* place data into test skb */ + igb_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + for (j = 0; j <= lc; j++) { /* loop count loop */ + /* reset count of good packets */ + good_cnt = 0; + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < 64; i++) { + skb_get(skb); + tx_ret_val = igb_xmit_frame_ring(skb, tx_ring); + if (tx_ret_val == NETDEV_TX_OK) + good_cnt++; + } + + if (good_cnt != 64) { + ret_val = 12; + break; + } + + /* allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); + if (good_cnt != 64) { + ret_val = 13; + break; + } + } /* end loop count loop */ + + /* free the original skb */ + kfree_skb(skb); + + return ret_val; +} + +static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) +{ + /* PHY loopback cannot be performed if SoL/IDER + * sessions are active + */ + if (igb_check_reset_block(&adapter->hw)) { + dev_err(&adapter->pdev->dev, + "Cannot do PHY loopback test when SoL/IDER is active.\n"); + *data = 0; + goto out; + } + + if (adapter->hw.mac.type == e1000_i354) { + dev_info(&adapter->pdev->dev, + "Loopback test not supported on i354.\n"); + *data = 0; + goto out; + } + *data = igb_setup_desc_rings(adapter); + if (*data) + goto out; + *data = igb_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = igb_run_loopback_test(adapter); + igb_loopback_cleanup(adapter); + +err_loopback: + igb_free_desc_rings(adapter); +out: + return *data; +} + +static int igb_link_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->phy.media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->mac.serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + hw->mac.ops.check_for_link(&adapter->hw); + if (hw->mac.serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + hw->mac.ops.check_for_link(&adapter->hw); + if (hw->mac.autoneg) + msleep(5000); + + if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static void igb_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igb_adapter *adapter = 
netdev_priv(netdev); + u16 autoneg_advertised; + u8 forced_speed_duplex, autoneg; + bool if_running = netif_running(netdev); + + set_bit(__IGB_TESTING, &adapter->state); + + /* can't do offline tests on media switching devices */ + if (adapter->hw.dev_spec._82575.mas_capable) + eth_test->flags &= ~ETH_TEST_FL_OFFLINE; + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + autoneg_advertised = adapter->hw.phy.autoneg_advertised; + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; + autoneg = adapter->hw.mac.autoneg; + + dev_info(&adapter->pdev->dev, "offline testing starting\n"); + + /* power up link for link test */ + igb_power_up_link(adapter); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (igb_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + igb_close(netdev); + else + igb_reset(adapter); + + if (igb_reg_test(adapter, &data[TEST_REG])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + if (igb_eeprom_test(adapter, &data[TEST_EEP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + if (igb_intr_test(adapter, &data[TEST_IRQ])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + /* power up link for loopback test */ + igb_power_up_link(adapter); + if (igb_loopback_test(adapter, &data[TEST_LOOP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + adapter->hw.phy.autoneg_advertised = autoneg_advertised; + adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; + adapter->hw.mac.autoneg = autoneg; + + /* force this routine to wait until autoneg complete/timeout */ + adapter->hw.phy.autoneg_wait_to_complete = true; + igb_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = false; + + clear_bit(__IGB_TESTING, &adapter->state); + if (if_running) + igb_open(netdev); + } else { + dev_info(&adapter->pdev->dev, "online testing starting\n"); + + /* PHY is powered down when interface is down */ + if (if_running && igb_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + else + data[TEST_LINK] = 0; + + /* Online tests aren't run; pass by default */ + data[TEST_REG] = 0; + data[TEST_EEP] = 0; + data[TEST_IRQ] = 0; + data[TEST_LOOP] = 0; + + clear_bit(__IGB_TESTING, &adapter->state); + } + msleep_interruptible(4 * 1000); +} + +static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + wol->wolopts = 0; + + if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + + /* apply any specific unsupported masks here */ + switch (adapter->hw.device_id) { + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & E1000_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EOPNOTSUPP; + + if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) + return 
wol->wolopts ? -EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= E1000_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +/* bit defines for adapter->led_status */ +#define IGB_LED_ON 0 + +static int igb_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + igb_blink_led(hw); + return 2; + case ETHTOOL_ID_ON: + igb_blink_led(hw); + break; + case ETHTOOL_ID_OFF: + igb_led_off(hw); + break; + case ETHTOOL_ID_INACTIVE: + igb_led_off(hw); + clear_bit(IGB_LED_ON, &adapter->led_status); + igb_cleanup_led(hw); + break; + } + + return 0; +} + +static int igb_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + + if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 3) && + (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->tx_coalesce_usecs > 3) && + (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) || + (ec->tx_coalesce_usecs == 2)) + return -EINVAL; + + if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGB_FLAG_DMAC) + adapter->flags &= ~IGB_FLAG_DMAC; + } + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + + /* convert to rate of irq's per second */ + if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + q_vector->tx.work_limit = adapter->tx_work_limit; + if (q_vector->rx.ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGB_START_ITR; + q_vector->set_itr = 1; + } + + return 0; +} + +static int igb_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (adapter->rx_itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) { + if (adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + } + + return 0; +} + +static int igb_nway_reset(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + igb_reinit_locked(adapter); + 
return 0; +} + +static int igb_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IGB_STATS_LEN; + case ETH_SS_TEST: + return IGB_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return IGB_PRIV_FLAGS_STR_LEN; + default: + return -ENOTSUPP; + } +} + +static void igb_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igb_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igb_gstrings_stats[i].stat_offset; + data[i] = (igb_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset; + data[i] = (igb_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i+1] = ring->tx_stats.bytes; + data[i+2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); + data[i+2] += restart2; + + i += IGB_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i+1] = ring->rx_stats.bytes; + data[i+2] = ring->rx_stats.drops; + data[i+3] = ring->rx_stats.csum_err; + data[i+4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + i += IGB_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + +static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igb_gstrings_test, + IGB_TEST_LEN*ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, + igb_gstrings_stats[i].stat_string); + for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) + ethtool_sprintf(&p, + igb_gstrings_net_stats[i].stat_string); + for (i = 0; i < adapter->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "tx_queue_%u_restart", i); + } + for (i = 0; i < adapter->num_rx_queues; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_drops", i); + ethtool_sprintf(&p, "rx_queue_%u_csum_err", i); + ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i); + } + /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, igb_priv_flags_strings, + IGB_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int igb_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct igb_adapter *adapter = netdev_priv(dev); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + 
info->phc_index = -1; + + switch (adapter->hw.mac.type) { + case e1000_82575: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + return 0; + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + + /* 82576 does not support timestamping all packets. */ + if (adapter->hw.mac.type >= e1000_82580) + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); + else + info->rx_filters |= + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; + default: + return -EOPNOTSUPP; + } +} + +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igb_nfc_filter *rule = NULL; + + /* report total rule count */ + cmd->data = IGB_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + if (rule->filter.match_flags) { + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + if (rule->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = rule->filter.etype; + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = rule->filter.vlan_tci; + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + + return 0; + } + return -EINVAL; +} + +static int igb_get_ethtool_nfc_all(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igb_nfc_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = IGB_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static int igb_get_rss_hash_opts(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on igb */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | 
RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igb_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = igb_get_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = igb_get_ethtool_nfc_all(adapter, cmd, rule_locs); + break; + case ETHTOOL_GRXFH: + ret = igb_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \ + IGB_FLAG_RSS_FIELD_IPV6_UDP) +static int igb_set_rss_hash_opt(struct igb_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct e1000_hw *hw = &adapter->hw; + u32 mrqc = rd32(E1000_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + dev_err(&adapter->pdev->dev, + "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP | + E1000_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & 
IGB_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + + wr32(E1000_MRQC, mrqc); + } + + return 0; +} + +static int igb_rxnfc_write_etype_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + u8 i; + u32 etqf; + u16 etype; + + /* find an empty etype filter register */ + for (i = 0; i < MAX_ETYPE_FILTER; ++i) { + if (!adapter->etype_bitmap[i]) + break; + } + if (i == MAX_ETYPE_FILTER) { + dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n"); + return -EINVAL; + } + + adapter->etype_bitmap[i] = true; + + etqf = rd32(E1000_ETQF(i)); + etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK); + + etqf |= E1000_ETQF_FILTER_ENABLE; + etqf &= ~E1000_ETQF_ETYPE_MASK; + etqf |= (etype & E1000_ETQF_ETYPE_MASK); + + etqf &= ~E1000_ETQF_QUEUE_MASK; + etqf |= ((input->action << E1000_ETQF_QUEUE_SHIFT) + & E1000_ETQF_QUEUE_MASK); + etqf |= E1000_ETQF_QUEUE_ENABLE; + + wr32(E1000_ETQF(i), etqf); + + input->etype_reg_index = i; + + return 0; +} + +static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + u8 vlan_priority; + u16 queue_index; + u32 vlapqf; + + vlapqf = rd32(E1000_VLAPQF); + vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK) + >> VLAN_PRIO_SHIFT; + queue_index = (vlapqf >> (vlan_priority * 4)) & E1000_VLAPQF_QUEUE_MASK; + + /* check whether this vlan prio is already set */ + if ((vlapqf & E1000_VLAPQF_P_VALID(vlan_priority)) && + (queue_index != input->action)) { + dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n"); + return -EEXIST; + } + + vlapqf |= E1000_VLAPQF_P_VALID(vlan_priority); + vlapqf |= E1000_VLAPQF_QUEUE_SEL(vlan_priority, input->action); + + wr32(E1000_VLAPQF, vlapqf); + + return 0; +} + +int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + int err = -EINVAL; + + if (hw->mac.type == e1000_i210 && + !(input->filter.match_flags & ~IGB_FILTER_FLAG_SRC_MAC_ADDR)) { + dev_err(&adapter->pdev->dev, + "i210 doesn't support flow classification rules specifying only source addresses.\n"); + return -EOPNOTSUPP; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { + err = igb_rxnfc_write_etype_filter(adapter, input); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) { + err = igb_add_mac_steering_filter(adapter, + input->filter.dst_addr, + input->action, 0); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) { + err = igb_add_mac_steering_filter(adapter, + input->filter.src_addr, + input->action, + IGB_MAC_STATE_SRC_ADDR); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) + err = igb_rxnfc_write_vlan_prio_filter(adapter, input); + + return err; +} + +static void igb_clear_etype_filter_regs(struct igb_adapter *adapter, + u16 reg_index) +{ + struct e1000_hw *hw = &adapter->hw; + u32 etqf = rd32(E1000_ETQF(reg_index)); + + etqf &= ~E1000_ETQF_QUEUE_ENABLE; + etqf &= ~E1000_ETQF_QUEUE_MASK; + etqf &= ~E1000_ETQF_FILTER_ENABLE; + + wr32(E1000_ETQF(reg_index), etqf); + + adapter->etype_bitmap[reg_index] = false; +} + +static void igb_clear_vlan_prio_filter(struct igb_adapter *adapter, + u16 vlan_tci) +{ + struct e1000_hw *hw = &adapter->hw; + u8 vlan_priority; + u32 vlapqf; + + vlan_priority = (vlan_tci & 
VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + + vlapqf = rd32(E1000_VLAPQF); + vlapqf &= ~E1000_VLAPQF_P_VALID(vlan_priority); + vlapqf &= ~E1000_VLAPQF_QUEUE_SEL(vlan_priority, + E1000_VLAPQF_QUEUE_MASK); + + wr32(E1000_VLAPQF, vlapqf); +} + +int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) +{ + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) + igb_clear_etype_filter_regs(adapter, + input->etype_reg_index); + + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) + igb_clear_vlan_prio_filter(adapter, + ntohs(input->filter.vlan_tci)); + + if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) + igb_del_mac_steering_filter(adapter, input->filter.src_addr, + input->action, + IGB_MAC_STATE_SRC_ADDR); + + if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) + igb_del_mac_steering_filter(adapter, input->filter.dst_addr, + input->action, 0); + + return 0; +} + +static int igb_update_ethtool_nfc_entry(struct igb_adapter *adapter, + struct igb_nfc_filter *input, + u16 sw_idx) +{ + struct igb_nfc_filter *rule, *parent; + int err = -EINVAL; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { + if (!input) + err = igb_erase_filter(adapter, rule); + + hlist_del(&rule->nfc_node); + kfree(rule); + adapter->nfc_filter_count--; + } + + /* If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; + + /* initialize node */ + INIT_HLIST_NODE(&input->nfc_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->nfc_node, &parent->nfc_node); + else + hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list); + + /* update counts */ + adapter->nfc_filter_count++; + + return 0; +} + +static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igb_nfc_filter *input, *rule; + int err = 0; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) + return -EOPNOTSUPP; + + /* Don't allow programming if the action is a queue greater than + * the number of online Rx queues. 
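+ *
+ * A drop action (RX_CLS_FLOW_DISC) is rejected here as well; these
+ * filters can only steer frames to one of the online queues.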
+ */ + if ((fsp->ring_cookie == RX_CLS_FLOW_DISC) || + (fsp->ring_cookie >= adapter->num_rx_queues)) { + dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n"); + return -EINVAL; + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= IGB_MAX_RXNFC_FILTERS) { + dev_err(&adapter->pdev->dev, "Location out of range\n"); + return -EINVAL; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) + return -EINVAL; + + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + input->filter.etype = fsp->h_u.ether_spec.h_proto; + input->filter.match_flags = IGB_FILTER_FLAG_ETHER_TYPE; + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + input->filter.match_flags |= IGB_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(input->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + input->filter.match_flags |= IGB_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(input->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { + err = -EINVAL; + goto err_out; + } + input->filter.vlan_tci = fsp->h_ext.vlan_tci; + input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; + } + + input->action = fsp->ring_cookie; + input->sw_idx = fsp->location; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (!memcmp(&input->filter, &rule->filter, + sizeof(input->filter))) { + err = -EEXIST; + dev_err(&adapter->pdev->dev, + "ethtool: this filter is already set\n"); + goto err_out_w_lock; + } + } + + err = igb_add_filter(adapter, input); + if (err) + goto err_out_w_lock; + + igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + + spin_unlock(&adapter->nfc_lock); + return 0; + +err_out_w_lock: + spin_unlock(&adapter->nfc_lock); +err_out: + kfree(input); + return err; +} + +static int igb_del_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->nfc_lock); + err = igb_update_ethtool_nfc_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->nfc_lock); + + return err; +} + +static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct igb_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = igb_set_rss_hash_opt(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + ret = igb_add_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = igb_del_ethtool_nfc_entry(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ret_val; + u16 phy_data; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + return -EOPNOTSUPP; + + edata->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full); + if (!hw->dev_spec._82575.eee_disable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + /* The IPCNFG and EEER registers are not 
supported on I354. */ + if (hw->mac.type == e1000_i354) { + igb_get_eee_status_i354(hw, (bool *)&edata->eee_active); + } else { + u32 eeer; + + eeer = rd32(E1000_EEER); + + /* EEE status on negotiated link */ + if (eeer & E1000_EEER_EEE_NEG) + edata->eee_active = true; + + if (eeer & E1000_EEER_TX_LPI_EN) + edata->tx_lpi_enabled = true; + } + + /* EEE Link Partner Advertised */ + switch (hw->mac.type) { + case e1000_i350: + ret_val = igb_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350, + &phy_data); + if (ret_val) + return -ENODATA; + + edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + break; + case e1000_i354: + case e1000_i210: + case e1000_i211: + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210, + E1000_EEE_LP_ADV_DEV_I210, + &phy_data); + if (ret_val) + return -ENODATA; + + edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + + break; + default: + break; + } + + edata->eee_enabled = !hw->dev_spec._82575.eee_disable; + + if ((hw->mac.type == e1000_i354) && + (edata->eee_enabled)) + edata->tx_lpi_enabled = true; + + /* Report correct negotiated EEE status for devices that + * wrongly report EEE at half-duplex + */ + if (adapter->link_duplex == HALF_DUPLEX) { + edata->eee_enabled = false; + edata->eee_active = false; + edata->tx_lpi_enabled = false; + edata->advertised &= ~edata->advertised; + } + + return 0; +} + +static int igb_set_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + bool adv1g_eee = true, adv100m_eee = true; + s32 ret_val; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + return -EOPNOTSUPP; + + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + + ret_val = igb_get_eee(netdev, &eee_curr); + if (ret_val) + return ret_val; + + if (eee_curr.eee_enabled) { + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + dev_err(&adapter->pdev->dev, + "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + /* Tx LPI timer is not implemented currently */ + if (edata->tx_lpi_timer) { + dev_err(&adapter->pdev->dev, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + + if (!edata->advertised || (edata->advertised & + ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) { + dev_err(&adapter->pdev->dev, + "EEE Advertisement supports only 100Tx and/or 100T full duplex\n"); + return -EINVAL; + } + adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL); + adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL); + + } else if (!edata->eee_enabled) { + dev_err(&adapter->pdev->dev, + "Setting EEE options are not supported with EEE disabled\n"); + return -EINVAL; + } + + adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { + hw->dev_spec._82575.eee_disable = !edata->eee_enabled; + adapter->flags |= IGB_FLAG_EEE; + + /* reset link */ + if (netif_running(netdev)) + igb_reinit_locked(adapter); + else + igb_reset(adapter); + } + + if (hw->mac.type == e1000_i354) + ret_val = igb_set_eee_i354(hw, adv1g_eee, adv100m_eee); + else + ret_val = igb_set_eee_i350(hw, adv1g_eee, adv100m_eee); + + if (ret_val) { + dev_err(&adapter->pdev->dev, + "Problem setting EEE advertisement options\n"); + return -EINVAL; + } + + return 0; +} + +static int igb_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct 
e1000_hw *hw = &adapter->hw; + u32 status = 0; + u16 sff8472_rev, addr_mode; + bool page_swap = false; + + if ((hw->phy.media_type == e1000_media_type_copper) || + (hw->phy.media_type == e1000_media_type_unknown)) + return -EOPNOTSUPP; + + /* Check whether we support SFF-8472 or not */ + status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); + if (status) + return -EIO; + + /* addressing mode is not supported */ + status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); + if (status) + return -EIO; + + /* addressing mode is not supported */ + if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) { + hw_dbg("Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); + page_swap = true; + } + + if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) { + /* We have an SFP, but it does not support SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have an SFP which supports a revision of SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int igb_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 status = 0; + u16 *dataword; + u16 first_word, last_word; + int i = 0; + + if (ee->len == 0) + return -EINVAL; + + first_word = ee->offset >> 1; + last_word = (ee->offset + ee->len - 1) >> 1; + + dataword = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!dataword) + return -ENOMEM; + + /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ + for (i = 0; i < last_word - first_word + 1; i++) { + status = igb_read_phy_reg_i2c(hw, (first_word + i) * 2, + &dataword[i]); + if (status) { + /* Error occurred while reading module */ + kfree(dataword); + return -EIO; + } + + be16_to_cpus(&dataword[i]); + } + + memcpy(data, (u8 *)dataword + (ee->offset & 1), ee->len); + kfree(dataword); + + return 0; +} + +static int igb_ethtool_begin(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; +} + +static void igb_ethtool_complete(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + pm_runtime_put(&adapter->pdev->dev); +} + +static u32 igb_get_rxfh_indir_size(struct net_device *netdev) +{ + return IGB_RETA_SIZE; +} + +static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < IGB_RETA_SIZE; i++) + indir[i] = adapter->rss_indir_tbl[i]; + + return 0; +} + +void igb_write_rss_indir_tbl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg = E1000_RETA(0); + u32 shift = 0; + int i = 0; + + switch (hw->mac.type) { + case e1000_82575: + shift = 6; + break; + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + shift = 3; + break; + default: + break; + } + + while (i < IGB_RETA_SIZE) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= adapter->rss_indir_tbl[i + j]; + } + + wr32(reg, val << shift); + reg += 4; + i += 4; + } +} + +static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + 
struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int i; + u32 num_queues; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + num_queues = adapter->rss_queues; + + switch (hw->mac.type) { + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + num_queues = 2; + break; + default: + break; + } + + /* Verify user input. */ + for (i = 0; i < IGB_RETA_SIZE; i++) + if (indir[i] >= num_queues) + return -EINVAL; + + + for (i = 0; i < IGB_RETA_SIZE; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + igb_write_rss_indir_tbl(adapter); + + return 0; +} + +static unsigned int igb_max_channels(struct igb_adapter *adapter) +{ + return igb_get_max_rss_queues(adapter); +} + +static void igb_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = igb_max_channels(adapter); + + /* Report info for other vector */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + ch->combined_count = adapter->rss_queues; +} + +static int igb_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; + unsigned int max_combined = 0; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* Verify other_count is valid and has not been changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ + max_combined = igb_max_channels(adapter); + if (count > max_combined) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; + igb_set_flag_queue_pairs(adapter, max_combined); + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. 
+ */ + return igb_reinit_queues(adapter); + } + + return 0; +} + +static u32 igb_get_priv_flags(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IGB_FLAG_RX_LEGACY) + priv_flags |= IGB_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int igb_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IGB_FLAG_RX_LEGACY; + if (priv_flags & IGB_PRIV_FLAGS_LEGACY_RX) + flags |= IGB_FLAG_RX_LEGACY; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + igb_reinit_locked(adapter); + } + + return 0; +} + +static const struct ethtool_ops igb_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_drvinfo = igb_get_drvinfo, + .get_regs_len = igb_get_regs_len, + .get_regs = igb_get_regs, + .get_wol = igb_get_wol, + .set_wol = igb_set_wol, + .get_msglevel = igb_get_msglevel, + .set_msglevel = igb_set_msglevel, + .nway_reset = igb_nway_reset, + .get_link = igb_get_link, + .get_eeprom_len = igb_get_eeprom_len, + .get_eeprom = igb_get_eeprom, + .set_eeprom = igb_set_eeprom, + .get_ringparam = igb_get_ringparam, + .set_ringparam = igb_set_ringparam, + .get_pauseparam = igb_get_pauseparam, + .set_pauseparam = igb_set_pauseparam, + .self_test = igb_diag_test, + .get_strings = igb_get_strings, + .set_phys_id = igb_set_phys_id, + .get_sset_count = igb_get_sset_count, + .get_ethtool_stats = igb_get_ethtool_stats, + .get_coalesce = igb_get_coalesce, + .set_coalesce = igb_set_coalesce, + .get_ts_info = igb_get_ts_info, + .get_rxnfc = igb_get_rxnfc, + .set_rxnfc = igb_set_rxnfc, + .get_eee = igb_get_eee, + .set_eee = igb_set_eee, + .get_module_info = igb_get_module_info, + .get_module_eeprom = igb_get_module_eeprom, + .get_rxfh_indir_size = igb_get_rxfh_indir_size, + .get_rxfh = igb_get_rxfh, + .set_rxfh = igb_set_rxfh, + .get_channels = igb_get_channels, + .set_channels = igb_set_channels, + .get_priv_flags = igb_get_priv_flags, + .set_priv_flags = igb_set_priv_flags, + .begin = igb_ethtool_begin, + .complete = igb_ethtool_complete, + .get_link_ksettings = igb_get_link_ksettings, + .set_link_ksettings = igb_set_link_ksettings, +}; + +void igb_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &igb_ethtool_ops; +} diff --git a/devices/igb/igb_ethtool-6.1-ethercat.c b/devices/igb/igb_ethtool-6.1-ethercat.c new file mode 100644 index 00000000..42bc6496 --- /dev/null +++ b/devices/igb/igb_ethtool-6.1-ethercat.c @@ -0,0 +1,3507 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +/* ethtool support for igb */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "igb-6.1-ethercat.h" + +struct igb_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IGB_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct igb_adapter, _stat), \ + .stat_offset = offsetof(struct igb_adapter, _stat) \ +} +static const struct igb_stats igb_gstrings_stats[] = { + IGB_STAT("rx_packets", stats.gprc), + IGB_STAT("tx_packets", stats.gptc), + IGB_STAT("rx_bytes", stats.gorc), + IGB_STAT("tx_bytes", stats.gotc), + IGB_STAT("rx_broadcast", stats.bprc), + IGB_STAT("tx_broadcast", stats.bptc), + IGB_STAT("rx_multicast", stats.mprc), + IGB_STAT("tx_multicast", stats.mptc), + IGB_STAT("multicast", stats.mprc), + IGB_STAT("collisions", stats.colc), + IGB_STAT("rx_crc_errors", stats.crcerrs), + IGB_STAT("rx_no_buffer_count", stats.rnbc), + IGB_STAT("rx_missed_errors", stats.mpc), + IGB_STAT("tx_aborted_errors", stats.ecol), + IGB_STAT("tx_carrier_errors", stats.tncrs), + IGB_STAT("tx_window_errors", stats.latecol), + IGB_STAT("tx_abort_late_coll", stats.latecol), + IGB_STAT("tx_deferred_ok", stats.dc), + IGB_STAT("tx_single_coll_ok", stats.scc), + IGB_STAT("tx_multi_coll_ok", stats.mcc), + IGB_STAT("tx_timeout_count", tx_timeout_count), + IGB_STAT("rx_long_length_errors", stats.roc), + IGB_STAT("rx_short_length_errors", stats.ruc), + IGB_STAT("rx_align_errors", stats.algnerrc), + IGB_STAT("tx_tcp_seg_good", stats.tsctc), + IGB_STAT("tx_tcp_seg_failed", stats.tsctfc), + IGB_STAT("rx_flow_control_xon", stats.xonrxc), + IGB_STAT("rx_flow_control_xoff", stats.xoffrxc), + IGB_STAT("tx_flow_control_xon", stats.xontxc), + IGB_STAT("tx_flow_control_xoff", stats.xofftxc), + IGB_STAT("rx_long_byte_count", stats.gorc), + IGB_STAT("tx_dma_out_of_sync", stats.doosync), + IGB_STAT("tx_smbus", stats.mgptc), + IGB_STAT("rx_smbus", stats.mgprc), + IGB_STAT("dropped_smbus", stats.mgpdc), + IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGB_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc), + IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IGB_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +}; + +#define IGB_NETDEV_STAT(_net_stat) { \ + .stat_string = __stringify(_net_stat), \ + .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ +} +static const struct igb_stats igb_gstrings_net_stats[] = { + IGB_NETDEV_STAT(rx_errors), + IGB_NETDEV_STAT(tx_errors), + IGB_NETDEV_STAT(tx_dropped), + IGB_NETDEV_STAT(rx_length_errors), + IGB_NETDEV_STAT(rx_over_errors), + IGB_NETDEV_STAT(rx_frame_errors), + IGB_NETDEV_STAT(rx_fifo_errors), + IGB_NETDEV_STAT(tx_fifo_errors), + IGB_NETDEV_STAT(tx_heartbeat_errors) +}; + +#define IGB_GLOBAL_STATS_LEN \ + (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) +#define IGB_NETDEV_STATS_LEN \ + (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) +#define IGB_RX_QUEUE_STATS_LEN \ + (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) + +#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ + +#define IGB_QUEUE_STATS_LEN \ + ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGB_RX_QUEUE_STATS_LEN) + \ + (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \ + 
IGB_TX_QUEUE_STATS_LEN)) +#define IGB_STATS_LEN \ + (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) + +enum igb_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + +static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" +}; +#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) + +static const char igb_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IGB_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IGB_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igb_priv_flags_strings) + +static int igb_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; + u32 status; + u32 speed; + u32 supported, advertising; + + status = pm_runtime_suspended(&adapter->pdev->dev) ? + 0 : rd32(E1000_STATUS); + if (hw->phy.media_type == e1000_media_type_copper) { + + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP | + SUPPORTED_Pause); + advertising = ADVERTISED_TP; + + if (hw->mac.autoneg == 1) { + advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + advertising |= hw->phy.autoneg_advertised; + } + + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy.addr; + } else { + supported = (SUPPORTED_FIBRE | + SUPPORTED_1000baseKX_Full | + SUPPORTED_Autoneg | + SUPPORTED_Pause); + advertising = (ADVERTISED_FIBRE | + ADVERTISED_1000baseKX_Full); + if (hw->mac.type == e1000_i354) { + if ((hw->device_id == + E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + supported |= SUPPORTED_2500baseX_Full; + supported &= ~SUPPORTED_1000baseKX_Full; + advertising |= ADVERTISED_2500baseX_Full; + advertising &= ~ADVERTISED_1000baseKX_Full; + } + } + if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { + supported |= SUPPORTED_100baseT_Full; + advertising |= ADVERTISED_100baseT_Full; + } + if (hw->mac.autoneg == 1) + advertising |= ADVERTISED_Autoneg; + + cmd->base.port = PORT_FIBRE; + } + if (hw->mac.autoneg != 1) + advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + + switch (hw->fc.requested_mode) { + case e1000_fc_full: + advertising |= ADVERTISED_Pause; + break; + case e1000_fc_rx_pause: + advertising |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + break; + case e1000_fc_tx_pause: + advertising |= ADVERTISED_Asym_Pause; + break; + default: + advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + } + if (status & E1000_STATUS_LU) { + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + speed = SPEED_2500; + } else if (status & E1000_STATUS_SPEED_1000) { + speed = SPEED_1000; + } else if (status & E1000_STATUS_SPEED_100) { + speed = SPEED_100; + } else { + speed = SPEED_10; + } + if ((status & E1000_STATUS_FD) || + hw->phy.media_type != e1000_media_type_copper) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.speed = speed; + if 
((hw->phy.media_type == e1000_media_type_fiber) || + hw->mac.autoneg) + cmd->base.autoneg = AUTONEG_ENABLE; + else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == e1000_media_type_copper) + cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + + return 0; +} + +static int igb_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 advertising; + + /* When SoL/IDER sessions are active, autoneg/speed/duplex + * cannot be changed + */ + if (igb_check_reset_block(hw)) { + dev_err(&adapter->pdev->dev, + "Cannot change link characteristics when SoL/IDER is active.\n"); + return -EINVAL; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (hw->phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (cmd->base.autoneg != AUTONEG_ENABLE)) { + dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + if (hw->phy.media_type == e1000_media_type_fiber) { + hw->phy.autoneg_advertised = advertising | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + switch (adapter->link_speed) { + case SPEED_2500: + hw->phy.autoneg_advertised = + ADVERTISED_2500baseX_Full; + break; + case SPEED_1000: + hw->phy.autoneg_advertised = + ADVERTISED_1000baseT_Full; + break; + case SPEED_100: + hw->phy.autoneg_advertised = + ADVERTISED_100baseT_Full; + break; + default: + break; + } + } else { + hw->phy.autoneg_advertised = advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + } + advertising = hw->phy.autoneg_advertised; + if (adapter->fc_autoneg) + hw->fc.requested_mode = e1000_fc_default; + } else { + u32 speed = cmd->base.speed; + /* calling this overrides forced MDI setting */ + if (igb_set_spd_dplx(adapter, speed, cmd->base.duplex)) { + clear_bit(__IGB_RESETTING, &adapter->state); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + igb_down(adapter); + igb_up(adapter); + } else + igb_reset(adapter); + + clear_bit(__IGB_RESETTING, &adapter->state); + return 0; +} + +static u32 igb_get_link(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_mac_info *mac = &adapter->hw.mac; + + /* If the link is not reported up to netdev, 
interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + mac->get_link_status = 1; + + return igb_has_link(adapter); +} + +static void igb_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == e1000_fc_rx_pause) + pause->rx_pause = 1; + else if (hw->fc.current_mode == e1000_fc_tx_pause) + pause->tx_pause = 1; + else if (hw->fc.current_mode == e1000_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int igb_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + int i; + + /* 100basefx does not support setting link flow control */ + if (hw->dev_spec._82575.eth_flags.e100_base_fx) + return -EINVAL; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = e1000_fc_default; + if (netif_running(adapter->netdev)) { + igb_down(adapter); + igb_up(adapter); + } else { + igb_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + retval = ((hw->phy.media_type == e1000_media_type_copper) ? 
+ igb_force_mac_fc(hw) : igb_setup_link(hw)); + + /* Make sure SRRCTL considers new fc settings for each ring */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + + igb_setup_srrctl(adapter, ring); + } + } + + clear_bit(__IGB_RESETTING, &adapter->state); + return retval; +} + +static u32 igb_get_msglevel(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void igb_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int igb_get_regs_len(struct net_device *netdev) +{ +#define IGB_REGS_LEN 740 + return IGB_REGS_LEN * sizeof(u32); +} + +static void igb_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IGB_REGS_LEN * sizeof(u32)); + + regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ + regs_buff[0] = rd32(E1000_CTRL); + regs_buff[1] = rd32(E1000_STATUS); + regs_buff[2] = rd32(E1000_CTRL_EXT); + regs_buff[3] = rd32(E1000_MDIC); + regs_buff[4] = rd32(E1000_SCTL); + regs_buff[5] = rd32(E1000_CONNSW); + regs_buff[6] = rd32(E1000_VET); + regs_buff[7] = rd32(E1000_LEDCTL); + regs_buff[8] = rd32(E1000_PBA); + regs_buff[9] = rd32(E1000_PBS); + regs_buff[10] = rd32(E1000_FRTIMER); + regs_buff[11] = rd32(E1000_TCPTIMER); + + /* NVM Register */ + regs_buff[12] = rd32(E1000_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read the + * same but EICS does not clear on read + */ + regs_buff[13] = rd32(E1000_EICS); + regs_buff[14] = rd32(E1000_EICS); + regs_buff[15] = rd32(E1000_EIMS); + regs_buff[16] = rd32(E1000_EIMC); + regs_buff[17] = rd32(E1000_EIAC); + regs_buff[18] = rd32(E1000_EIAM); + /* Reading ICS for ICR because they read the + * same but ICS does not clear on read + */ + regs_buff[19] = rd32(E1000_ICS); + regs_buff[20] = rd32(E1000_ICS); + regs_buff[21] = rd32(E1000_IMS); + regs_buff[22] = rd32(E1000_IMC); + regs_buff[23] = rd32(E1000_IAC); + regs_buff[24] = rd32(E1000_IAM); + regs_buff[25] = rd32(E1000_IMIRVP); + + /* Flow Control */ + regs_buff[26] = rd32(E1000_FCAL); + regs_buff[27] = rd32(E1000_FCAH); + regs_buff[28] = rd32(E1000_FCTTV); + regs_buff[29] = rd32(E1000_FCRTL); + regs_buff[30] = rd32(E1000_FCRTH); + regs_buff[31] = rd32(E1000_FCRTV); + + /* Receive */ + regs_buff[32] = rd32(E1000_RCTL); + regs_buff[33] = rd32(E1000_RXCSUM); + regs_buff[34] = rd32(E1000_RLPML); + regs_buff[35] = rd32(E1000_RFCTL); + regs_buff[36] = rd32(E1000_MRQC); + regs_buff[37] = rd32(E1000_VT_CTL); + + /* Transmit */ + regs_buff[38] = rd32(E1000_TCTL); + regs_buff[39] = rd32(E1000_TCTL_EXT); + regs_buff[40] = rd32(E1000_TIPG); + regs_buff[41] = rd32(E1000_DTXCTL); + + /* Wake Up */ + regs_buff[42] = rd32(E1000_WUC); + regs_buff[43] = rd32(E1000_WUFC); + regs_buff[44] = rd32(E1000_WUS); + regs_buff[45] = rd32(E1000_IPAV); + regs_buff[46] = rd32(E1000_WUPL); + + /* MAC */ + regs_buff[47] = rd32(E1000_PCS_CFG0); + regs_buff[48] = rd32(E1000_PCS_LCTL); + regs_buff[49] = rd32(E1000_PCS_LSTAT); + regs_buff[50] = rd32(E1000_PCS_ANADV); + regs_buff[51] = rd32(E1000_PCS_LPAB); + regs_buff[52] = rd32(E1000_PCS_NPTX); + regs_buff[53] = rd32(E1000_PCS_LPABNP); + + /* Statistics */ + regs_buff[54] = adapter->stats.crcerrs; + regs_buff[55] = adapter->stats.algnerrc; + regs_buff[56] 
= adapter->stats.symerrs; + regs_buff[57] = adapter->stats.rxerrc; + regs_buff[58] = adapter->stats.mpc; + regs_buff[59] = adapter->stats.scc; + regs_buff[60] = adapter->stats.ecol; + regs_buff[61] = adapter->stats.mcc; + regs_buff[62] = adapter->stats.latecol; + regs_buff[63] = adapter->stats.colc; + regs_buff[64] = adapter->stats.dc; + regs_buff[65] = adapter->stats.tncrs; + regs_buff[66] = adapter->stats.sec; + regs_buff[67] = adapter->stats.htdpmc; + regs_buff[68] = adapter->stats.rlec; + regs_buff[69] = adapter->stats.xonrxc; + regs_buff[70] = adapter->stats.xontxc; + regs_buff[71] = adapter->stats.xoffrxc; + regs_buff[72] = adapter->stats.xofftxc; + regs_buff[73] = adapter->stats.fcruc; + regs_buff[74] = adapter->stats.prc64; + regs_buff[75] = adapter->stats.prc127; + regs_buff[76] = adapter->stats.prc255; + regs_buff[77] = adapter->stats.prc511; + regs_buff[78] = adapter->stats.prc1023; + regs_buff[79] = adapter->stats.prc1522; + regs_buff[80] = adapter->stats.gprc; + regs_buff[81] = adapter->stats.bprc; + regs_buff[82] = adapter->stats.mprc; + regs_buff[83] = adapter->stats.gptc; + regs_buff[84] = adapter->stats.gorc; + regs_buff[86] = adapter->stats.gotc; + regs_buff[88] = adapter->stats.rnbc; + regs_buff[89] = adapter->stats.ruc; + regs_buff[90] = adapter->stats.rfc; + regs_buff[91] = adapter->stats.roc; + regs_buff[92] = adapter->stats.rjc; + regs_buff[93] = adapter->stats.mgprc; + regs_buff[94] = adapter->stats.mgpdc; + regs_buff[95] = adapter->stats.mgptc; + regs_buff[96] = adapter->stats.tor; + regs_buff[98] = adapter->stats.tot; + regs_buff[100] = adapter->stats.tpr; + regs_buff[101] = adapter->stats.tpt; + regs_buff[102] = adapter->stats.ptc64; + regs_buff[103] = adapter->stats.ptc127; + regs_buff[104] = adapter->stats.ptc255; + regs_buff[105] = adapter->stats.ptc511; + regs_buff[106] = adapter->stats.ptc1023; + regs_buff[107] = adapter->stats.ptc1522; + regs_buff[108] = adapter->stats.mptc; + regs_buff[109] = adapter->stats.bptc; + regs_buff[110] = adapter->stats.tsctc; + regs_buff[111] = adapter->stats.iac; + regs_buff[112] = adapter->stats.rpthc; + regs_buff[113] = adapter->stats.hgptc; + regs_buff[114] = adapter->stats.hgorc; + regs_buff[116] = adapter->stats.hgotc; + regs_buff[118] = adapter->stats.lenerrs; + regs_buff[119] = adapter->stats.scvpc; + regs_buff[120] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) + regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[125 + i] = rd32(E1000_PSRTYPE(i)); + for (i = 0; i < 4; i++) + regs_buff[129 + i] = rd32(E1000_RDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[133 + i] = rd32(E1000_RDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[137 + i] = rd32(E1000_RDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[141 + i] = rd32(E1000_RDH(i)); + for (i = 0; i < 4; i++) + regs_buff[145 + i] = rd32(E1000_RDT(i)); + for (i = 0; i < 4; i++) + regs_buff[149 + i] = rd32(E1000_RXDCTL(i)); + + for (i = 0; i < 10; i++) + regs_buff[153 + i] = rd32(E1000_EITR(i)); + for (i = 0; i < 8; i++) + regs_buff[163 + i] = rd32(E1000_IMIR(i)); + for (i = 0; i < 8; i++) + regs_buff[171 + i] = rd32(E1000_IMIREXT(i)); + for (i = 0; i < 16; i++) + regs_buff[179 + i] = rd32(E1000_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[195 + i] = rd32(E1000_RAH(i)); + + for (i = 0; i < 4; i++) + regs_buff[211 + i] = rd32(E1000_TDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[215 + i] = rd32(E1000_TDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[219 + i] = rd32(E1000_TDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[223 + i] = 
rd32(E1000_TDH(i)); + for (i = 0; i < 4; i++) + regs_buff[227 + i] = rd32(E1000_TDT(i)); + for (i = 0; i < 4; i++) + regs_buff[231 + i] = rd32(E1000_TXDCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[235 + i] = rd32(E1000_TDWBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[239 + i] = rd32(E1000_TDWBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i)); + + for (i = 0; i < 4; i++) + regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i)); + for (i = 0; i < 4; i++) + regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i)); + for (i = 0; i < 32; i++) + regs_buff[255 + i] = rd32(E1000_WUPM_REG(i)); + for (i = 0; i < 128; i++) + regs_buff[287 + i] = rd32(E1000_FFMT_REG(i)); + for (i = 0; i < 128; i++) + regs_buff[415 + i] = rd32(E1000_FFVT_REG(i)); + for (i = 0; i < 4; i++) + regs_buff[543 + i] = rd32(E1000_FFLT_REG(i)); + + regs_buff[547] = rd32(E1000_TDFH); + regs_buff[548] = rd32(E1000_TDFT); + regs_buff[549] = rd32(E1000_TDFHS); + regs_buff[550] = rd32(E1000_TDFPC); + + if (hw->mac.type > e1000_82580) { + regs_buff[551] = adapter->stats.o2bgptc; + regs_buff[552] = adapter->stats.b2ospc; + regs_buff[553] = adapter->stats.o2bspc; + regs_buff[554] = adapter->stats.b2ogprc; + } + + if (hw->mac.type == e1000_82576) { + for (i = 0; i < 12; i++) + regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4)); + for (i = 0; i < 4; i++) + regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[607 + i] = rd32(E1000_RDH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[619 + i] = rd32(E1000_RDT(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4)); + + for (i = 0; i < 12; i++) + regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[679 + i] = rd32(E1000_TDH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[691 + i] = rd32(E1000_TDT(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4)); + } + + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) + regs_buff[739] = rd32(E1000_I210_RR2DCDELAY); +} + +static int igb_get_eeprom_len(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + return adapter->hw.nvm.word_size * 2; +} + +static int igb_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == e1000_nvm_eeprom_spi) + ret_val = hw->nvm.ops.read(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = 
hw->nvm.ops.read(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int igb_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if ((hw->mac.type >= e1000_i210) && + !igb_get_flash_presence_i210(hw)) { + return -EOPNOTSUPP; + } + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + if (ret_val) + goto out; + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->nvm.ops.write(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) + hw->nvm.ops.update(hw); + + igb_set_fw_version(adapter); +out: + kfree(eeprom_buff); + return ret_val; +} + +static void igb_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + strscpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); + + /* EEPROM image version # is reported as firmware version # for + * 82575 controllers + */ + strscpy(drvinfo->fw_version, adapter->fw_version, + sizeof(drvinfo->fw_version)); + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN; +} + +static void igb_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IGB_MAX_RXD; + ring->tx_max_pending = IGB_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int igb_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct igb_ring *temp_ring; + int i, err = 0; + u16 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + 
new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(array_size(sizeof(struct igb_ring), + adapter->num_tx_queues)); + else + temp_ring = vmalloc(array_size(sizeof(struct igb_ring), + adapter->num_rx_queues)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igb_down(adapter); + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. + */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct igb_ring)); + + temp_ring[i].count = new_tx_count; + err = igb_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igb_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + igb_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct igb_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct igb_ring)); + + temp_ring[i].count = new_rx_count; + err = igb_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igb_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + igb_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct igb_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } +err_setup: + igb_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGB_RESETTING, &adapter->state); + return err; +} + +/* ethtool register test data */ +struct igb_reg_test { + u16 reg; + u16 reg_offset; + u16 array_len; + u16 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x100 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* i210 reg test */ +static struct igb_reg_test reg_test_i210[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* RDH is read-only for i210, only test RDT. */ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x900FFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0, 0 } +}; + +/* i350 reg test */ +static struct igb_reg_test reg_test_i350[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* RDH is read-only for i350, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0xC3FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 16, TABLE64_TEST_HI, + 0xC3FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82580 reg test */ +static struct igb_reg_test reg_test_82580[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + /* RDH is read-only for 82580, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82576 reg test */ +static struct igb_reg_test reg_test_82576[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + /* Enable all RX queues before testing. */ + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, + E1000_RXDCTL_QUEUE_ENABLE }, + { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, + E1000_RXDCTL_QUEUE_ENABLE }, + /* RDH is read-only for 82576, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, + { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82575 register test */ +static struct igb_reg_test reg_test_82575[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* Enable all four RX queues before testing. */ + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, + E1000_RXDCTL_QUEUE_ENABLE }, + /* RDH is read-only for 82575, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pat, val; + static const u32 _test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { + wr32(reg, (_test[pat] & write)); + val = rd32(reg) & mask; + if (val != (_test[pat] & write & mask)) { + dev_err(&adapter->pdev->dev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, (_test[pat] & write & mask)); + *data = reg; + return true; + } + } + + return false; +} + +static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u32 val; + + wr32(reg, write & mask); + val = rd32(reg); + if ((write & mask) != (val & mask)) { + dev_err(&adapter->pdev->dev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + return true; + } + + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +static int igb_reg_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct igb_reg_test *test; + u32 value, before, after; + u32 i, toggle; + + switch (adapter->hw.mac.type) { + case e1000_i350: + case e1000_i354: + test = reg_test_i350; + toggle = 0x7FEFF3FF; + break; + case e1000_i210: + case e1000_i211: + test = reg_test_i210; + toggle = 0x7FEFF3FF; + break; + case e1000_82580: + test = reg_test_82580; + toggle = 0x7FEFF3FF; + break; + case e1000_82576: + test = reg_test_82576; + toggle = 0x7FFFF3FF; + break; + default: + test = reg_test_82575; + toggle = 0x7FFFF3FF; + break; + } + + /* Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writable on newer MACs. 
+ */ + before = rd32(E1000_STATUS); + value = (rd32(E1000_STATUS) & toggle); + wr32(E1000_STATUS, toggle); + after = rd32(E1000_STATUS) & toggle; + if (value != after) { + dev_err(&adapter->pdev->dev, + "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); + *data = 1; + return 1; + } + /* restore previous status */ + wr32(E1000_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + REG_PATTERN_TEST(test->reg + + (i * test->reg_offset), + test->mask, + test->write); + break; + case SET_READ_TEST: + REG_SET_AND_CHECK(test->reg + + (i * test->reg_offset), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + writel(test->write, + (adapter->hw.hw_addr + test->reg) + + (i * test->reg_offset)); + break; + case TABLE32_TEST: + REG_PATTERN_TEST(test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + REG_PATTERN_TEST(test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + REG_PATTERN_TEST((test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + } + test++; + } + + *data = 0; + return 0; +} + +static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* Validate eeprom on all parts but flashless */ + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (igb_get_flash_presence_i210(hw)) { + if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) + *data = 2; + } + break; + default: + if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) + *data = 2; + break; + } + + return *data; +} + +static irqreturn_t igb_test_intr(int irq, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *) data; + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= rd32(E1000_ICR); + + return IRQ_HANDLED; +} + +static int igb_intr_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 mask, ics_mask, i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + if (request_irq(adapter->msix_entries[0].vector, + igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; + return -1; + } + wr32(E1000_IVAR_MISC, E1000_IVAR_VALID << 8); + wr32(E1000_EIMS, BIT(0)); + } else if (adapter->flags & IGB_FLAG_HAS_MSI) { + shared_int = false; + if (request_irq(irq, + igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED, + netdev->name, adapter)) { + shared_int = false; + } else if (request_irq(irq, igb_test_intr, IRQF_SHARED, + netdev->name, adapter)) { + *data = 1; + return -1; + } + dev_info(&adapter->pdev->dev, "testing %s interrupt\n", + (shared_int ? 
"shared" : "unshared")); + + /* Disable all the interrupts */ + wr32(E1000_IMC, ~0); + wrfl(); + usleep_range(10000, 11000); + + /* Define all writable bits for ICS */ + switch (hw->mac.type) { + case e1000_82575: + ics_mask = 0x37F47EDD; + break; + case e1000_82576: + ics_mask = 0x77D4FBFD; + break; + case e1000_82580: + ics_mask = 0x77DCFED5; + break; + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + ics_mask = 0x77DCFED5; + break; + default: + ics_mask = 0x7FFFFFFF; + break; + } + + /* Test each interrupt */ + for (; i < 31; i++) { + /* Interrupt to test */ + mask = BIT(i); + + if (!(mask & ics_mask)) + continue; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMC, mask); + wr32(E1000_ICS, mask); + wrfl(); + usleep_range(10000, 11000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMS, mask); + wr32(E1000_ICS, mask); + wrfl(); + usleep_range(10000, 11000); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. 
+ */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMC, ~mask); + wr32(E1000_ICS, ~mask); + wrfl(); + usleep_range(10000, 11000); + + if (adapter->test_icr & mask) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + wr32(E1000_IMC, ~0); + wrfl(); + usleep_range(10000, 11000); + + /* Unhook test interrupt handler */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) + free_irq(adapter->msix_entries[0].vector, adapter); + else + free_irq(irq, adapter); + + return *data; +} + +static void igb_free_desc_rings(struct igb_adapter *adapter) +{ + igb_free_tx_resources(&adapter->test_tx_ring); + igb_free_rx_resources(&adapter->test_rx_ring); +} + +static int igb_setup_desc_rings(struct igb_adapter *adapter) +{ + struct igb_ring *tx_ring = &adapter->test_tx_ring; + struct igb_ring *rx_ring = &adapter->test_rx_ring; + struct e1000_hw *hw = &adapter->hw; + int ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = IGB_DEFAULT_TXD; + tx_ring->dev = &adapter->pdev->dev; + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->vfs_allocated_count; + + if (igb_setup_tx_resources(tx_ring)) { + ret_val = 1; + goto err_nomem; + } + + igb_setup_tctl(adapter); + igb_configure_tx_ring(adapter, tx_ring); + + /* Setup Rx descriptor ring and Rx buffers */ + rx_ring->count = IGB_DEFAULT_RXD; + rx_ring->dev = &adapter->pdev->dev; + rx_ring->netdev = adapter->netdev; + rx_ring->reg_idx = adapter->vfs_allocated_count; + + if (igb_setup_rx_resources(rx_ring)) { + ret_val = 3; + goto err_nomem; + } + + /* set the default queue to queue 0 of PF */ + wr32(E1000_MRQC, adapter->vfs_allocated_count << 3); + + /* enable receive ring */ + igb_setup_rctl(adapter); + igb_configure_rx_ring(adapter, rx_ring); + + igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring)); + + return 0; + +err_nomem: + igb_free_desc_rings(adapter); + return ret_val; +} + +static void igb_phy_disable_receiver(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + igb_write_phy_reg(hw, 29, 0x001F); + igb_write_phy_reg(hw, 30, 0x8FFC); + igb_write_phy_reg(hw, 29, 0x001A); + igb_write_phy_reg(hw, 30, 0x8FF0); +} + +static int igb_integrated_phy_loopback(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + + hw->mac.autoneg = false; + + if (hw->phy.type == e1000_phy_m88) { + if (hw->phy.id != I210_I_PHY_ID) { + /* Auto-MDI/MDIX Off */ + igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); + /* autoneg off */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); + } else { + /* force 1000, set loopback */ + igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); + igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); + } + } else if (hw->phy.type == e1000_phy_82580) { + /* enable MII loopback */ + igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); + } + + /* add small delay to avoid loopback test failure */ + msleep(50); + + /* force 1000, set loopback */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = rd32(E1000_CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD | /* Force Duplex to FULL */ + E1000_CTRL_SLU); /* Set link up enable bit */ + + if (hw->phy.type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + + wr32(E1000_CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy.type == e1000_phy_m88) + igb_phy_disable_receiver(adapter); + + msleep(500); + return 0; +} + +static int igb_set_phy_loopback(struct igb_adapter *adapter) +{ + return igb_integrated_phy_loopback(adapter); +} + +static int igb_setup_loopback_test(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg; + + reg = rd32(E1000_CTRL_EXT); + + /* use CTRL_EXT to identify link type as SGMII can appear as copper */ + if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) { + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || + (hw->device_id == E1000_DEV_ID_I354_SGMII) || + (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) { + /* Enable DH89xxCC MPHY for near end loopback */ + reg = rd32(E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | + E1000_MPHY_PCS_CLK_REG_OFFSET; + wr32(E1000_MPHY_ADDR_CTL, reg); + + reg = rd32(E1000_MPHY_DATA); + reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN; + wr32(E1000_MPHY_DATA, reg); + } + + reg = rd32(E1000_RCTL); + reg |= E1000_RCTL_LBM_TCVR; + wr32(E1000_RCTL, reg); + + wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); + + reg = rd32(E1000_CTRL); + reg &= ~(E1000_CTRL_RFCE | + E1000_CTRL_TFCE | + E1000_CTRL_LRST); + reg |= E1000_CTRL_SLU | + E1000_CTRL_FD; + wr32(E1000_CTRL, reg); + + /* Unset switch control to serdes energy detect */ + reg = rd32(E1000_CONNSW); + reg &= ~E1000_CONNSW_ENRGSRC; + wr32(E1000_CONNSW, reg); + + /* Unset sigdetect for SERDES loopback on + * 82580 and newer devices. 
+ */ + if (hw->mac.type >= e1000_82580) { + reg = rd32(E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_IGN_SD; + wr32(E1000_PCS_CFG0, reg); + } + + /* Set PCS register for forced speed */ + reg = rd32(E1000_PCS_LCTL); + reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ + reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ + E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ + E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ + E1000_PCS_LCTL_FSD | /* Force Speed */ + E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ + wr32(E1000_PCS_LCTL, reg); + + return 0; + } + + return igb_set_phy_loopback(adapter); +} + +static void igb_loopback_cleanup(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || + (hw->device_id == E1000_DEV_ID_I354_SGMII)) { + u32 reg; + + /* Disable near end loopback on DH89xxCC */ + reg = rd32(E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | + E1000_MPHY_PCS_CLK_REG_OFFSET; + wr32(E1000_MPHY_ADDR_CTL, reg); + + reg = rd32(E1000_MPHY_DATA); + reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN; + wr32(E1000_MPHY_DATA, reg); + } + + rctl = rd32(E1000_RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + wr32(E1000_RCTL, rctl); + + hw->mac.autoneg = true; + igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + igb_write_phy_reg(hw, PHY_CONTROL, phy_reg); + igb_phy_sw_reset(hw); + } +} + +static void igb_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size /= 2; + memset(&skb->data[frame_size], 0xAA, frame_size - 1); + skb->data[frame_size + 10] = 0xBE; + skb->data[frame_size + 12] = 0xAF; +} + +static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer, + unsigned int frame_size) +{ + unsigned char *data; + bool match = true; + + frame_size >>= 1; + + data = kmap_local_page(rx_buffer->page); + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + + kunmap_local(data); + + return match; +} + +static int igb_clean_test_rings(struct igb_ring *rx_ring, + struct igb_ring *tx_ring, + unsigned int size) +{ + union e1000_adv_rx_desc *rx_desc; + struct igb_rx_buffer *rx_buffer_info; + struct igb_tx_buffer *tx_buffer_info; + u16 rx_ntc, tx_ntc, count = 0; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); + + while (rx_desc->wb.upper.length) { + /* check Rx buffer */ + rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; + + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer_info->dma, + size, + DMA_FROM_DEVICE); + + /* verify contents of skb */ + if (igb_check_lbtest_frame(rx_buffer_info, size)) + count++; + + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer_info->dma, + size, + DMA_FROM_DEVICE); + + /* unmap buffer on Tx side */ + tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer_info->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer_info, dma), + dma_unmap_len(tx_buffer_info, len), + DMA_TO_DEVICE); + 
dma_unmap_len_set(tx_buffer_info, len, 0); + + /* increment Rx/Tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); + } + + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* re-map buffers to ring, store next to clean values */ + igb_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +static int igb_run_loopback_test(struct igb_adapter *adapter) +{ + struct igb_ring *tx_ring = &adapter->test_tx_ring; + struct igb_ring *rx_ring = &adapter->test_rx_ring; + u16 i, j, lc, good_cnt; + int ret_val = 0; + unsigned int size = IGB_RX_HDR_LEN; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; + + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; + + /* place data into test skb */ + igb_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + for (j = 0; j <= lc; j++) { /* loop count loop */ + /* reset count of good packets */ + good_cnt = 0; + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < 64; i++) { + skb_get(skb); + tx_ret_val = igb_xmit_frame_ring(skb, tx_ring); + if (tx_ret_val == NETDEV_TX_OK) + good_cnt++; + } + + if (good_cnt != 64) { + ret_val = 12; + break; + } + + /* allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); + if (good_cnt != 64) { + ret_val = 13; + break; + } + } /* end loop count loop */ + + /* free the original skb */ + kfree_skb(skb); + + return ret_val; +} + +static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) +{ + /* PHY loopback cannot be performed if SoL/IDER + * sessions are active + */ + if (igb_check_reset_block(&adapter->hw)) { + dev_err(&adapter->pdev->dev, + "Cannot do PHY loopback test when SoL/IDER is active.\n"); + *data = 0; + goto out; + } + + if (adapter->hw.mac.type == e1000_i354) { + dev_info(&adapter->pdev->dev, + "Loopback test not supported on i354.\n"); + *data = 0; + goto out; + } + *data = igb_setup_desc_rings(adapter); + if (*data) + goto out; + *data = igb_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = igb_run_loopback_test(adapter); + igb_loopback_cleanup(adapter); + +err_loopback: + igb_free_desc_rings(adapter); +out: + return *data; +} + +static int igb_link_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->phy.media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->mac.serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + hw->mac.ops.check_for_link(&adapter->hw); + if (hw->mac.serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + hw->mac.ops.check_for_link(&adapter->hw); + if (hw->mac.autoneg) + msleep(5000); + + if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static void igb_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igb_adapter *adapter = 
netdev_priv(netdev); + u16 autoneg_advertised; + u8 forced_speed_duplex, autoneg; + bool if_running = netif_running(netdev); + + set_bit(__IGB_TESTING, &adapter->state); + + /* can't do offline tests on media switching devices */ + if (adapter->hw.dev_spec._82575.mas_capable) + eth_test->flags &= ~ETH_TEST_FL_OFFLINE; + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + autoneg_advertised = adapter->hw.phy.autoneg_advertised; + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; + autoneg = adapter->hw.mac.autoneg; + + dev_info(&adapter->pdev->dev, "offline testing starting\n"); + + /* power up link for link test */ + igb_power_up_link(adapter); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (igb_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + igb_close(netdev); + else + igb_reset(adapter); + + if (igb_reg_test(adapter, &data[TEST_REG])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + if (igb_eeprom_test(adapter, &data[TEST_EEP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + if (igb_intr_test(adapter, &data[TEST_IRQ])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + /* power up link for loopback test */ + igb_power_up_link(adapter); + if (igb_loopback_test(adapter, &data[TEST_LOOP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + adapter->hw.phy.autoneg_advertised = autoneg_advertised; + adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; + adapter->hw.mac.autoneg = autoneg; + + /* force this routine to wait until autoneg complete/timeout */ + adapter->hw.phy.autoneg_wait_to_complete = true; + igb_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = false; + + clear_bit(__IGB_TESTING, &adapter->state); + if (if_running) + igb_open(netdev); + } else { + dev_info(&adapter->pdev->dev, "online testing starting\n"); + + /* PHY is powered down when interface is down */ + if (if_running && igb_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + else + data[TEST_LINK] = 0; + + /* Online tests aren't run; pass by default */ + data[TEST_REG] = 0; + data[TEST_EEP] = 0; + data[TEST_IRQ] = 0; + data[TEST_LOOP] = 0; + + clear_bit(__IGB_TESTING, &adapter->state); + } + msleep_interruptible(4 * 1000); +} + +static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + wol->wolopts = 0; + + if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + + /* apply any specific unsupported masks here */ + switch (adapter->hw.device_id) { + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & E1000_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EOPNOTSUPP; + + if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) + return 
wol->wolopts ? -EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= E1000_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +/* bit defines for adapter->led_status */ +#define IGB_LED_ON 0 + +static int igb_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + igb_blink_led(hw); + return 2; + case ETHTOOL_ID_ON: + igb_blink_led(hw); + break; + case ETHTOOL_ID_OFF: + igb_led_off(hw); + break; + case ETHTOOL_ID_INACTIVE: + igb_led_off(hw); + clear_bit(IGB_LED_ON, &adapter->led_status); + igb_cleanup_led(hw); + break; + } + + return 0; +} + +static int igb_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + + if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 3) && + (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->tx_coalesce_usecs > 3) && + (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) || + (ec->tx_coalesce_usecs == 2)) + return -EINVAL; + + if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGB_FLAG_DMAC) + adapter->flags &= ~IGB_FLAG_DMAC; + } + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + + /* convert to rate of irq's per second */ + if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + q_vector->tx.work_limit = adapter->tx_work_limit; + if (q_vector->rx.ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGB_START_ITR; + q_vector->set_itr = 1; + } + + return 0; +} + +static int igb_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (adapter->rx_itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) { + if (adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + } + + return 0; +} + +static int 
igb_nway_reset(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + igb_reinit_locked(adapter); + return 0; +} + +static int igb_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IGB_STATS_LEN; + case ETH_SS_TEST: + return IGB_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return IGB_PRIV_FLAGS_STR_LEN; + default: + return -ENOTSUPP; + } +} + +static void igb_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igb_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igb_gstrings_stats[i].stat_offset; + data[i] = (igb_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset; + data[i] = (igb_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i+1] = ring->tx_stats.bytes; + data[i+2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry(&ring->tx_syncp2, start)); + data[i+2] += restart2; + + i += IGB_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i+1] = ring->rx_stats.bytes; + data[i+2] = ring->rx_stats.drops; + data[i+3] = ring->rx_stats.csum_err; + data[i+4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); + i += IGB_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + +static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, igb_gstrings_test, sizeof(igb_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, + igb_gstrings_stats[i].stat_string); + for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) + ethtool_sprintf(&p, + igb_gstrings_net_stats[i].stat_string); + for (i = 0; i < adapter->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "tx_queue_%u_restart", i); + } + for (i = 0; i < adapter->num_rx_queues; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_drops", i); + ethtool_sprintf(&p, "rx_queue_%u_csum_err", i); + ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i); + } + /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, igb_priv_flags_strings, + IGB_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int igb_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct 
igb_adapter *adapter = netdev_priv(dev); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + switch (adapter->hw.mac.type) { + case e1000_82575: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + return 0; + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + + /* 82576 does not support timestamping all packets. */ + if (adapter->hw.mac.type >= e1000_82580) + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); + else + info->rx_filters |= + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; + default: + return -EOPNOTSUPP; + } +} + +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igb_nfc_filter *rule = NULL; + + /* report total rule count */ + cmd->data = IGB_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + if (rule->filter.match_flags) { + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + if (rule->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = rule->filter.etype; + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = rule->filter.vlan_tci; + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + + return 0; + } + return -EINVAL; +} + +static int igb_get_ethtool_nfc_all(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igb_nfc_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = IGB_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static int igb_get_rss_hash_opts(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on igb */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + 
fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igb_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = igb_get_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = igb_get_ethtool_nfc_all(adapter, cmd, rule_locs); + break; + case ETHTOOL_GRXFH: + ret = igb_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \ + IGB_FLAG_RSS_FIELD_IPV6_UDP) +static int igb_set_rss_hash_opt(struct igb_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct e1000_hw *hw = &adapter->hw; + u32 mrqc = rd32(E1000_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + dev_err(&adapter->pdev->dev, + "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP | + 
E1000_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + + wr32(E1000_MRQC, mrqc); + } + + return 0; +} + +static int igb_rxnfc_write_etype_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + u8 i; + u32 etqf; + u16 etype; + + /* find an empty etype filter register */ + for (i = 0; i < MAX_ETYPE_FILTER; ++i) { + if (!adapter->etype_bitmap[i]) + break; + } + if (i == MAX_ETYPE_FILTER) { + dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n"); + return -EINVAL; + } + + adapter->etype_bitmap[i] = true; + + etqf = rd32(E1000_ETQF(i)); + etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK); + + etqf |= E1000_ETQF_FILTER_ENABLE; + etqf &= ~E1000_ETQF_ETYPE_MASK; + etqf |= (etype & E1000_ETQF_ETYPE_MASK); + + etqf &= ~E1000_ETQF_QUEUE_MASK; + etqf |= ((input->action << E1000_ETQF_QUEUE_SHIFT) + & E1000_ETQF_QUEUE_MASK); + etqf |= E1000_ETQF_QUEUE_ENABLE; + + wr32(E1000_ETQF(i), etqf); + + input->etype_reg_index = i; + + return 0; +} + +static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + u8 vlan_priority; + u16 queue_index; + u32 vlapqf; + + vlapqf = rd32(E1000_VLAPQF); + vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK) + >> VLAN_PRIO_SHIFT; + queue_index = (vlapqf >> (vlan_priority * 4)) & E1000_VLAPQF_QUEUE_MASK; + + /* check whether this vlan prio is already set */ + if ((vlapqf & E1000_VLAPQF_P_VALID(vlan_priority)) && + (queue_index != input->action)) { + dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n"); + return -EEXIST; + } + + vlapqf |= E1000_VLAPQF_P_VALID(vlan_priority); + vlapqf |= E1000_VLAPQF_QUEUE_SEL(vlan_priority, input->action); + + wr32(E1000_VLAPQF, vlapqf); + + return 0; +} + +int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + int err = -EINVAL; + + if (hw->mac.type == e1000_i210 && + !(input->filter.match_flags & ~IGB_FILTER_FLAG_SRC_MAC_ADDR)) { + dev_err(&adapter->pdev->dev, + "i210 doesn't support flow classification rules specifying only source addresses.\n"); + return -EOPNOTSUPP; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { + err = igb_rxnfc_write_etype_filter(adapter, input); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) { + err = igb_add_mac_steering_filter(adapter, + input->filter.dst_addr, + input->action, 0); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) { + err = igb_add_mac_steering_filter(adapter, + input->filter.src_addr, + input->action, + IGB_MAC_STATE_SRC_ADDR); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) + err = igb_rxnfc_write_vlan_prio_filter(adapter, input); + + return err; +} + +static void igb_clear_etype_filter_regs(struct igb_adapter *adapter, + u16 reg_index) +{ + struct e1000_hw *hw = &adapter->hw; + u32 etqf = rd32(E1000_ETQF(reg_index)); + + etqf &= ~E1000_ETQF_QUEUE_ENABLE; + etqf &= ~E1000_ETQF_QUEUE_MASK; + etqf &= ~E1000_ETQF_FILTER_ENABLE; + + wr32(E1000_ETQF(reg_index), etqf); + + adapter->etype_bitmap[reg_index] = false; +} + +static void igb_clear_vlan_prio_filter(struct igb_adapter 
*adapter, + u16 vlan_tci) +{ + struct e1000_hw *hw = &adapter->hw; + u8 vlan_priority; + u32 vlapqf; + + vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + + vlapqf = rd32(E1000_VLAPQF); + vlapqf &= ~E1000_VLAPQF_P_VALID(vlan_priority); + vlapqf &= ~E1000_VLAPQF_QUEUE_SEL(vlan_priority, + E1000_VLAPQF_QUEUE_MASK); + + wr32(E1000_VLAPQF, vlapqf); +} + +int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) +{ + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) + igb_clear_etype_filter_regs(adapter, + input->etype_reg_index); + + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) + igb_clear_vlan_prio_filter(adapter, + ntohs(input->filter.vlan_tci)); + + if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) + igb_del_mac_steering_filter(adapter, input->filter.src_addr, + input->action, + IGB_MAC_STATE_SRC_ADDR); + + if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) + igb_del_mac_steering_filter(adapter, input->filter.dst_addr, + input->action, 0); + + return 0; +} + +static int igb_update_ethtool_nfc_entry(struct igb_adapter *adapter, + struct igb_nfc_filter *input, + u16 sw_idx) +{ + struct igb_nfc_filter *rule, *parent; + int err = -EINVAL; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { + if (!input) + err = igb_erase_filter(adapter, rule); + + hlist_del(&rule->nfc_node); + kfree(rule); + adapter->nfc_filter_count--; + } + + /* If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; + + /* initialize node */ + INIT_HLIST_NODE(&input->nfc_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->nfc_node, &parent->nfc_node); + else + hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list); + + /* update counts */ + adapter->nfc_filter_count++; + + return 0; +} + +static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igb_nfc_filter *input, *rule; + int err = 0; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) + return -EOPNOTSUPP; + + /* Don't allow programming if the action is a queue greater than + * the number of online Rx queues. 
+ */ + if ((fsp->ring_cookie == RX_CLS_FLOW_DISC) || + (fsp->ring_cookie >= adapter->num_rx_queues)) { + dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n"); + return -EINVAL; + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= IGB_MAX_RXNFC_FILTERS) { + dev_err(&adapter->pdev->dev, "Location out of range\n"); + return -EINVAL; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) + return -EINVAL; + + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + input->filter.etype = fsp->h_u.ether_spec.h_proto; + input->filter.match_flags = IGB_FILTER_FLAG_ETHER_TYPE; + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + input->filter.match_flags |= IGB_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(input->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + input->filter.match_flags |= IGB_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(input->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { + err = -EINVAL; + goto err_out; + } + input->filter.vlan_tci = fsp->h_ext.vlan_tci; + input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; + } + + input->action = fsp->ring_cookie; + input->sw_idx = fsp->location; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (!memcmp(&input->filter, &rule->filter, + sizeof(input->filter))) { + err = -EEXIST; + dev_err(&adapter->pdev->dev, + "ethtool: this filter is already set\n"); + goto err_out_w_lock; + } + } + + err = igb_add_filter(adapter, input); + if (err) + goto err_out_w_lock; + + igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + + spin_unlock(&adapter->nfc_lock); + return 0; + +err_out_w_lock: + spin_unlock(&adapter->nfc_lock); +err_out: + kfree(input); + return err; +} + +static int igb_del_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->nfc_lock); + err = igb_update_ethtool_nfc_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->nfc_lock); + + return err; +} + +static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct igb_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = igb_set_rss_hash_opt(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + ret = igb_add_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = igb_del_ethtool_nfc_entry(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ret_val; + u16 phy_data; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + return -EOPNOTSUPP; + + edata->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full); + if (!hw->dev_spec._82575.eee_disable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + /* The IPCNFG and EEER registers are not 
supported on I354. */ + if (hw->mac.type == e1000_i354) { + igb_get_eee_status_i354(hw, (bool *)&edata->eee_active); + } else { + u32 eeer; + + eeer = rd32(E1000_EEER); + + /* EEE status on negotiated link */ + if (eeer & E1000_EEER_EEE_NEG) + edata->eee_active = true; + + if (eeer & E1000_EEER_TX_LPI_EN) + edata->tx_lpi_enabled = true; + } + + /* EEE Link Partner Advertised */ + switch (hw->mac.type) { + case e1000_i350: + ret_val = igb_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350, + &phy_data); + if (ret_val) + return -ENODATA; + + edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + break; + case e1000_i354: + case e1000_i210: + case e1000_i211: + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210, + E1000_EEE_LP_ADV_DEV_I210, + &phy_data); + if (ret_val) + return -ENODATA; + + edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + + break; + default: + break; + } + + edata->eee_enabled = !hw->dev_spec._82575.eee_disable; + + if ((hw->mac.type == e1000_i354) && + (edata->eee_enabled)) + edata->tx_lpi_enabled = true; + + /* Report correct negotiated EEE status for devices that + * wrongly report EEE at half-duplex + */ + if (adapter->link_duplex == HALF_DUPLEX) { + edata->eee_enabled = false; + edata->eee_active = false; + edata->tx_lpi_enabled = false; + edata->advertised &= ~edata->advertised; + } + + return 0; +} + +static int igb_set_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + bool adv1g_eee = true, adv100m_eee = true; + s32 ret_val; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + return -EOPNOTSUPP; + + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + + ret_val = igb_get_eee(netdev, &eee_curr); + if (ret_val) + return ret_val; + + if (eee_curr.eee_enabled) { + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + dev_err(&adapter->pdev->dev, + "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + /* Tx LPI timer is not implemented currently */ + if (edata->tx_lpi_timer) { + dev_err(&adapter->pdev->dev, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + + if (!edata->advertised || (edata->advertised & + ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) { + dev_err(&adapter->pdev->dev, + "EEE Advertisement supports only 100Tx and/or 100T full duplex\n"); + return -EINVAL; + } + adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL); + adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL); + + } else if (!edata->eee_enabled) { + dev_err(&adapter->pdev->dev, + "Setting EEE options are not supported with EEE disabled\n"); + return -EINVAL; + } + + adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { + hw->dev_spec._82575.eee_disable = !edata->eee_enabled; + adapter->flags |= IGB_FLAG_EEE; + + /* reset link */ + if (netif_running(netdev)) + igb_reinit_locked(adapter); + else + igb_reset(adapter); + } + + if (hw->mac.type == e1000_i354) + ret_val = igb_set_eee_i354(hw, adv1g_eee, adv100m_eee); + else + ret_val = igb_set_eee_i350(hw, adv1g_eee, adv100m_eee); + + if (ret_val) { + dev_err(&adapter->pdev->dev, + "Problem setting EEE advertisement options\n"); + return -EINVAL; + } + + return 0; +} + +static int igb_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct 
e1000_hw *hw = &adapter->hw; + u32 status = 0; + u16 sff8472_rev, addr_mode; + bool page_swap = false; + + if ((hw->phy.media_type == e1000_media_type_copper) || + (hw->phy.media_type == e1000_media_type_unknown)) + return -EOPNOTSUPP; + + /* Check whether we support SFF-8472 or not */ + status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); + if (status) + return -EIO; + + /* addressing mode is not supported */ + status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); + if (status) + return -EIO; + + /* addressing mode is not supported */ + if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) { + hw_dbg("Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); + page_swap = true; + } + + if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) { + /* We have an SFP, but it does not support SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have an SFP which supports a revision of SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int igb_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 status = 0; + u16 *dataword; + u16 first_word, last_word; + int i = 0; + + if (ee->len == 0) + return -EINVAL; + + first_word = ee->offset >> 1; + last_word = (ee->offset + ee->len - 1) >> 1; + + dataword = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!dataword) + return -ENOMEM; + + /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ + for (i = 0; i < last_word - first_word + 1; i++) { + status = igb_read_phy_reg_i2c(hw, (first_word + i) * 2, + &dataword[i]); + if (status) { + /* Error occurred while reading module */ + kfree(dataword); + return -EIO; + } + + be16_to_cpus(&dataword[i]); + } + + memcpy(data, (u8 *)dataword + (ee->offset & 1), ee->len); + kfree(dataword); + + return 0; +} + +static int igb_ethtool_begin(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; +} + +static void igb_ethtool_complete(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + pm_runtime_put(&adapter->pdev->dev); +} + +static u32 igb_get_rxfh_indir_size(struct net_device *netdev) +{ + return IGB_RETA_SIZE; +} + +static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < IGB_RETA_SIZE; i++) + indir[i] = adapter->rss_indir_tbl[i]; + + return 0; +} + +void igb_write_rss_indir_tbl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg = E1000_RETA(0); + u32 shift = 0; + int i = 0; + + switch (hw->mac.type) { + case e1000_82575: + shift = 6; + break; + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + shift = 3; + break; + default: + break; + } + + while (i < IGB_RETA_SIZE) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= adapter->rss_indir_tbl[i + j]; + } + + wr32(reg, val << shift); + reg += 4; + i += 4; + } +} + +static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + 
struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int i; + u32 num_queues; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + num_queues = adapter->rss_queues; + + switch (hw->mac.type) { + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + num_queues = 2; + break; + default: + break; + } + + /* Verify user input. */ + for (i = 0; i < IGB_RETA_SIZE; i++) + if (indir[i] >= num_queues) + return -EINVAL; + + + for (i = 0; i < IGB_RETA_SIZE; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + igb_write_rss_indir_tbl(adapter); + + return 0; +} + +static unsigned int igb_max_channels(struct igb_adapter *adapter) +{ + return igb_get_max_rss_queues(adapter); +} + +static void igb_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = igb_max_channels(adapter); + + /* Report info for other vector */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + ch->combined_count = adapter->rss_queues; +} + +static int igb_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; + unsigned int max_combined = 0; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* Verify other_count is valid and has not been changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ + max_combined = igb_max_channels(adapter); + if (count > max_combined) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; + igb_set_flag_queue_pairs(adapter, max_combined); + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. 
+ */ + return igb_reinit_queues(adapter); + } + + return 0; +} + +static u32 igb_get_priv_flags(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IGB_FLAG_RX_LEGACY) + priv_flags |= IGB_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int igb_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IGB_FLAG_RX_LEGACY; + if (priv_flags & IGB_PRIV_FLAGS_LEGACY_RX) + flags |= IGB_FLAG_RX_LEGACY; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + igb_reinit_locked(adapter); + } + + return 0; +} + +static const struct ethtool_ops igb_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_drvinfo = igb_get_drvinfo, + .get_regs_len = igb_get_regs_len, + .get_regs = igb_get_regs, + .get_wol = igb_get_wol, + .set_wol = igb_set_wol, + .get_msglevel = igb_get_msglevel, + .set_msglevel = igb_set_msglevel, + .nway_reset = igb_nway_reset, + .get_link = igb_get_link, + .get_eeprom_len = igb_get_eeprom_len, + .get_eeprom = igb_get_eeprom, + .set_eeprom = igb_set_eeprom, + .get_ringparam = igb_get_ringparam, + .set_ringparam = igb_set_ringparam, + .get_pauseparam = igb_get_pauseparam, + .set_pauseparam = igb_set_pauseparam, + .self_test = igb_diag_test, + .get_strings = igb_get_strings, + .set_phys_id = igb_set_phys_id, + .get_sset_count = igb_get_sset_count, + .get_ethtool_stats = igb_get_ethtool_stats, + .get_coalesce = igb_get_coalesce, + .set_coalesce = igb_set_coalesce, + .get_ts_info = igb_get_ts_info, + .get_rxnfc = igb_get_rxnfc, + .set_rxnfc = igb_set_rxnfc, + .get_eee = igb_get_eee, + .set_eee = igb_set_eee, + .get_module_info = igb_get_module_info, + .get_module_eeprom = igb_get_module_eeprom, + .get_rxfh_indir_size = igb_get_rxfh_indir_size, + .get_rxfh = igb_get_rxfh, + .set_rxfh = igb_set_rxfh, + .get_channels = igb_get_channels, + .set_channels = igb_set_channels, + .get_priv_flags = igb_get_priv_flags, + .set_priv_flags = igb_set_priv_flags, + .begin = igb_ethtool_begin, + .complete = igb_ethtool_complete, + .get_link_ksettings = igb_get_link_ksettings, + .set_link_ksettings = igb_set_link_ksettings, +}; + +void igb_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &igb_ethtool_ops; +} diff --git a/devices/igb/igb_ethtool-6.1-orig.c b/devices/igb/igb_ethtool-6.1-orig.c new file mode 100644 index 00000000..96fa1c42 --- /dev/null +++ b/devices/igb/igb_ethtool-6.1-orig.c @@ -0,0 +1,3507 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +/* ethtool support for igb */ + +#include <linux/vmalloc.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/if_ether.h> +#include <linux/ethtool.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/pm_runtime.h> +#include <linux/highmem.h> +#include <linux/mdio.h> + +#include "igb.h" + +struct igb_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IGB_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct igb_adapter, _stat), \ + .stat_offset = offsetof(struct igb_adapter, _stat) \ +} +static const struct igb_stats igb_gstrings_stats[] = { + IGB_STAT("rx_packets", stats.gprc), + IGB_STAT("tx_packets", stats.gptc), + IGB_STAT("rx_bytes", stats.gorc), + IGB_STAT("tx_bytes", stats.gotc), + IGB_STAT("rx_broadcast", stats.bprc), + IGB_STAT("tx_broadcast", stats.bptc), + IGB_STAT("rx_multicast", stats.mprc), + IGB_STAT("tx_multicast", stats.mptc), + IGB_STAT("multicast", stats.mprc), + IGB_STAT("collisions", stats.colc), + IGB_STAT("rx_crc_errors", stats.crcerrs), + IGB_STAT("rx_no_buffer_count", stats.rnbc), + IGB_STAT("rx_missed_errors", stats.mpc), + IGB_STAT("tx_aborted_errors", stats.ecol), + IGB_STAT("tx_carrier_errors", stats.tncrs), + IGB_STAT("tx_window_errors", stats.latecol), + IGB_STAT("tx_abort_late_coll", stats.latecol), + IGB_STAT("tx_deferred_ok", stats.dc), + IGB_STAT("tx_single_coll_ok", stats.scc), + IGB_STAT("tx_multi_coll_ok", stats.mcc), + IGB_STAT("tx_timeout_count", tx_timeout_count), + IGB_STAT("rx_long_length_errors", stats.roc), + IGB_STAT("rx_short_length_errors", stats.ruc), + IGB_STAT("rx_align_errors", stats.algnerrc), + IGB_STAT("tx_tcp_seg_good", stats.tsctc), + IGB_STAT("tx_tcp_seg_failed", stats.tsctfc), + IGB_STAT("rx_flow_control_xon", stats.xonrxc), + IGB_STAT("rx_flow_control_xoff", stats.xoffrxc), + IGB_STAT("tx_flow_control_xon", stats.xontxc), + IGB_STAT("tx_flow_control_xoff", stats.xofftxc), + IGB_STAT("rx_long_byte_count", stats.gorc), + IGB_STAT("tx_dma_out_of_sync", stats.doosync), + IGB_STAT("tx_smbus", stats.mgptc), + IGB_STAT("rx_smbus", stats.mgprc), + IGB_STAT("dropped_smbus", stats.mgpdc), + IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGB_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc), + IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IGB_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +}; + +#define IGB_NETDEV_STAT(_net_stat) { \ + .stat_string = __stringify(_net_stat), \ + .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ +} +static const struct igb_stats igb_gstrings_net_stats[] = { + IGB_NETDEV_STAT(rx_errors), + IGB_NETDEV_STAT(tx_errors), + IGB_NETDEV_STAT(tx_dropped), + IGB_NETDEV_STAT(rx_length_errors), + IGB_NETDEV_STAT(rx_over_errors), + IGB_NETDEV_STAT(rx_frame_errors), + IGB_NETDEV_STAT(rx_fifo_errors), + IGB_NETDEV_STAT(tx_fifo_errors), + IGB_NETDEV_STAT(tx_heartbeat_errors) +}; + +#define IGB_GLOBAL_STATS_LEN \ + (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) +#define IGB_NETDEV_STATS_LEN \ + (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) +#define IGB_RX_QUEUE_STATS_LEN \ + (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) + +#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ + +#define IGB_QUEUE_STATS_LEN \ + ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGB_RX_QUEUE_STATS_LEN) + \ + (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \ + 
IGB_TX_QUEUE_STATS_LEN)) +#define IGB_STATS_LEN \ + (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) + +enum igb_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + +static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" +}; +#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) + +static const char igb_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IGB_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IGB_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igb_priv_flags_strings) + +static int igb_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; + u32 status; + u32 speed; + u32 supported, advertising; + + status = pm_runtime_suspended(&adapter->pdev->dev) ? + 0 : rd32(E1000_STATUS); + if (hw->phy.media_type == e1000_media_type_copper) { + + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP | + SUPPORTED_Pause); + advertising = ADVERTISED_TP; + + if (hw->mac.autoneg == 1) { + advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + advertising |= hw->phy.autoneg_advertised; + } + + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy.addr; + } else { + supported = (SUPPORTED_FIBRE | + SUPPORTED_1000baseKX_Full | + SUPPORTED_Autoneg | + SUPPORTED_Pause); + advertising = (ADVERTISED_FIBRE | + ADVERTISED_1000baseKX_Full); + if (hw->mac.type == e1000_i354) { + if ((hw->device_id == + E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + supported |= SUPPORTED_2500baseX_Full; + supported &= ~SUPPORTED_1000baseKX_Full; + advertising |= ADVERTISED_2500baseX_Full; + advertising &= ~ADVERTISED_1000baseKX_Full; + } + } + if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) { + supported |= SUPPORTED_100baseT_Full; + advertising |= ADVERTISED_100baseT_Full; + } + if (hw->mac.autoneg == 1) + advertising |= ADVERTISED_Autoneg; + + cmd->base.port = PORT_FIBRE; + } + if (hw->mac.autoneg != 1) + advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + + switch (hw->fc.requested_mode) { + case e1000_fc_full: + advertising |= ADVERTISED_Pause; + break; + case e1000_fc_rx_pause: + advertising |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + break; + case e1000_fc_tx_pause: + advertising |= ADVERTISED_Asym_Pause; + break; + default: + advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + } + if (status & E1000_STATUS_LU) { + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + speed = SPEED_2500; + } else if (status & E1000_STATUS_SPEED_1000) { + speed = SPEED_1000; + } else if (status & E1000_STATUS_SPEED_100) { + speed = SPEED_100; + } else { + speed = SPEED_10; + } + if ((status & E1000_STATUS_FD) || + hw->phy.media_type != e1000_media_type_copper) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.speed = speed; + if 
((hw->phy.media_type == e1000_media_type_fiber) || + hw->mac.autoneg) + cmd->base.autoneg = AUTONEG_ENABLE; + else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == e1000_media_type_copper) + cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + + return 0; +} + +static int igb_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 advertising; + + /* When SoL/IDER sessions are active, autoneg/speed/duplex + * cannot be changed + */ + if (igb_check_reset_block(hw)) { + dev_err(&adapter->pdev->dev, + "Cannot change link characteristics when SoL/IDER is active.\n"); + return -EINVAL; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (hw->phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (cmd->base.autoneg != AUTONEG_ENABLE)) { + dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + if (hw->phy.media_type == e1000_media_type_fiber) { + hw->phy.autoneg_advertised = advertising | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + switch (adapter->link_speed) { + case SPEED_2500: + hw->phy.autoneg_advertised = + ADVERTISED_2500baseX_Full; + break; + case SPEED_1000: + hw->phy.autoneg_advertised = + ADVERTISED_1000baseT_Full; + break; + case SPEED_100: + hw->phy.autoneg_advertised = + ADVERTISED_100baseT_Full; + break; + default: + break; + } + } else { + hw->phy.autoneg_advertised = advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + } + advertising = hw->phy.autoneg_advertised; + if (adapter->fc_autoneg) + hw->fc.requested_mode = e1000_fc_default; + } else { + u32 speed = cmd->base.speed; + /* calling this overrides forced MDI setting */ + if (igb_set_spd_dplx(adapter, speed, cmd->base.duplex)) { + clear_bit(__IGB_RESETTING, &adapter->state); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + igb_down(adapter); + igb_up(adapter); + } else + igb_reset(adapter); + + clear_bit(__IGB_RESETTING, &adapter->state); + return 0; +} + +static u32 igb_get_link(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_mac_info *mac = &adapter->hw.mac; + + /* If the link is not reported up to netdev, 
interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + mac->get_link_status = 1; + + return igb_has_link(adapter); +} + +static void igb_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == e1000_fc_rx_pause) + pause->rx_pause = 1; + else if (hw->fc.current_mode == e1000_fc_tx_pause) + pause->tx_pause = 1; + else if (hw->fc.current_mode == e1000_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int igb_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + int i; + + /* 100basefx does not support setting link flow control */ + if (hw->dev_spec._82575.eth_flags.e100_base_fx) + return -EINVAL; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = e1000_fc_default; + if (netif_running(adapter->netdev)) { + igb_down(adapter); + igb_up(adapter); + } else { + igb_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + retval = ((hw->phy.media_type == e1000_media_type_copper) ? 
+ igb_force_mac_fc(hw) : igb_setup_link(hw)); + + /* Make sure SRRCTL considers new fc settings for each ring */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + + igb_setup_srrctl(adapter, ring); + } + } + + clear_bit(__IGB_RESETTING, &adapter->state); + return retval; +} + +static u32 igb_get_msglevel(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void igb_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int igb_get_regs_len(struct net_device *netdev) +{ +#define IGB_REGS_LEN 740 + return IGB_REGS_LEN * sizeof(u32); +} + +static void igb_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IGB_REGS_LEN * sizeof(u32)); + + regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ + regs_buff[0] = rd32(E1000_CTRL); + regs_buff[1] = rd32(E1000_STATUS); + regs_buff[2] = rd32(E1000_CTRL_EXT); + regs_buff[3] = rd32(E1000_MDIC); + regs_buff[4] = rd32(E1000_SCTL); + regs_buff[5] = rd32(E1000_CONNSW); + regs_buff[6] = rd32(E1000_VET); + regs_buff[7] = rd32(E1000_LEDCTL); + regs_buff[8] = rd32(E1000_PBA); + regs_buff[9] = rd32(E1000_PBS); + regs_buff[10] = rd32(E1000_FRTIMER); + regs_buff[11] = rd32(E1000_TCPTIMER); + + /* NVM Register */ + regs_buff[12] = rd32(E1000_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read the + * same but EICS does not clear on read + */ + regs_buff[13] = rd32(E1000_EICS); + regs_buff[14] = rd32(E1000_EICS); + regs_buff[15] = rd32(E1000_EIMS); + regs_buff[16] = rd32(E1000_EIMC); + regs_buff[17] = rd32(E1000_EIAC); + regs_buff[18] = rd32(E1000_EIAM); + /* Reading ICS for ICR because they read the + * same but ICS does not clear on read + */ + regs_buff[19] = rd32(E1000_ICS); + regs_buff[20] = rd32(E1000_ICS); + regs_buff[21] = rd32(E1000_IMS); + regs_buff[22] = rd32(E1000_IMC); + regs_buff[23] = rd32(E1000_IAC); + regs_buff[24] = rd32(E1000_IAM); + regs_buff[25] = rd32(E1000_IMIRVP); + + /* Flow Control */ + regs_buff[26] = rd32(E1000_FCAL); + regs_buff[27] = rd32(E1000_FCAH); + regs_buff[28] = rd32(E1000_FCTTV); + regs_buff[29] = rd32(E1000_FCRTL); + regs_buff[30] = rd32(E1000_FCRTH); + regs_buff[31] = rd32(E1000_FCRTV); + + /* Receive */ + regs_buff[32] = rd32(E1000_RCTL); + regs_buff[33] = rd32(E1000_RXCSUM); + regs_buff[34] = rd32(E1000_RLPML); + regs_buff[35] = rd32(E1000_RFCTL); + regs_buff[36] = rd32(E1000_MRQC); + regs_buff[37] = rd32(E1000_VT_CTL); + + /* Transmit */ + regs_buff[38] = rd32(E1000_TCTL); + regs_buff[39] = rd32(E1000_TCTL_EXT); + regs_buff[40] = rd32(E1000_TIPG); + regs_buff[41] = rd32(E1000_DTXCTL); + + /* Wake Up */ + regs_buff[42] = rd32(E1000_WUC); + regs_buff[43] = rd32(E1000_WUFC); + regs_buff[44] = rd32(E1000_WUS); + regs_buff[45] = rd32(E1000_IPAV); + regs_buff[46] = rd32(E1000_WUPL); + + /* MAC */ + regs_buff[47] = rd32(E1000_PCS_CFG0); + regs_buff[48] = rd32(E1000_PCS_LCTL); + regs_buff[49] = rd32(E1000_PCS_LSTAT); + regs_buff[50] = rd32(E1000_PCS_ANADV); + regs_buff[51] = rd32(E1000_PCS_LPAB); + regs_buff[52] = rd32(E1000_PCS_NPTX); + regs_buff[53] = rd32(E1000_PCS_LPABNP); + + /* Statistics */ + regs_buff[54] = adapter->stats.crcerrs; + regs_buff[55] = adapter->stats.algnerrc; + regs_buff[56] 
= adapter->stats.symerrs; + regs_buff[57] = adapter->stats.rxerrc; + regs_buff[58] = adapter->stats.mpc; + regs_buff[59] = adapter->stats.scc; + regs_buff[60] = adapter->stats.ecol; + regs_buff[61] = adapter->stats.mcc; + regs_buff[62] = adapter->stats.latecol; + regs_buff[63] = adapter->stats.colc; + regs_buff[64] = adapter->stats.dc; + regs_buff[65] = adapter->stats.tncrs; + regs_buff[66] = adapter->stats.sec; + regs_buff[67] = adapter->stats.htdpmc; + regs_buff[68] = adapter->stats.rlec; + regs_buff[69] = adapter->stats.xonrxc; + regs_buff[70] = adapter->stats.xontxc; + regs_buff[71] = adapter->stats.xoffrxc; + regs_buff[72] = adapter->stats.xofftxc; + regs_buff[73] = adapter->stats.fcruc; + regs_buff[74] = adapter->stats.prc64; + regs_buff[75] = adapter->stats.prc127; + regs_buff[76] = adapter->stats.prc255; + regs_buff[77] = adapter->stats.prc511; + regs_buff[78] = adapter->stats.prc1023; + regs_buff[79] = adapter->stats.prc1522; + regs_buff[80] = adapter->stats.gprc; + regs_buff[81] = adapter->stats.bprc; + regs_buff[82] = adapter->stats.mprc; + regs_buff[83] = adapter->stats.gptc; + regs_buff[84] = adapter->stats.gorc; + regs_buff[86] = adapter->stats.gotc; + regs_buff[88] = adapter->stats.rnbc; + regs_buff[89] = adapter->stats.ruc; + regs_buff[90] = adapter->stats.rfc; + regs_buff[91] = adapter->stats.roc; + regs_buff[92] = adapter->stats.rjc; + regs_buff[93] = adapter->stats.mgprc; + regs_buff[94] = adapter->stats.mgpdc; + regs_buff[95] = adapter->stats.mgptc; + regs_buff[96] = adapter->stats.tor; + regs_buff[98] = adapter->stats.tot; + regs_buff[100] = adapter->stats.tpr; + regs_buff[101] = adapter->stats.tpt; + regs_buff[102] = adapter->stats.ptc64; + regs_buff[103] = adapter->stats.ptc127; + regs_buff[104] = adapter->stats.ptc255; + regs_buff[105] = adapter->stats.ptc511; + regs_buff[106] = adapter->stats.ptc1023; + regs_buff[107] = adapter->stats.ptc1522; + regs_buff[108] = adapter->stats.mptc; + regs_buff[109] = adapter->stats.bptc; + regs_buff[110] = adapter->stats.tsctc; + regs_buff[111] = adapter->stats.iac; + regs_buff[112] = adapter->stats.rpthc; + regs_buff[113] = adapter->stats.hgptc; + regs_buff[114] = adapter->stats.hgorc; + regs_buff[116] = adapter->stats.hgotc; + regs_buff[118] = adapter->stats.lenerrs; + regs_buff[119] = adapter->stats.scvpc; + regs_buff[120] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) + regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[125 + i] = rd32(E1000_PSRTYPE(i)); + for (i = 0; i < 4; i++) + regs_buff[129 + i] = rd32(E1000_RDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[133 + i] = rd32(E1000_RDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[137 + i] = rd32(E1000_RDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[141 + i] = rd32(E1000_RDH(i)); + for (i = 0; i < 4; i++) + regs_buff[145 + i] = rd32(E1000_RDT(i)); + for (i = 0; i < 4; i++) + regs_buff[149 + i] = rd32(E1000_RXDCTL(i)); + + for (i = 0; i < 10; i++) + regs_buff[153 + i] = rd32(E1000_EITR(i)); + for (i = 0; i < 8; i++) + regs_buff[163 + i] = rd32(E1000_IMIR(i)); + for (i = 0; i < 8; i++) + regs_buff[171 + i] = rd32(E1000_IMIREXT(i)); + for (i = 0; i < 16; i++) + regs_buff[179 + i] = rd32(E1000_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[195 + i] = rd32(E1000_RAH(i)); + + for (i = 0; i < 4; i++) + regs_buff[211 + i] = rd32(E1000_TDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[215 + i] = rd32(E1000_TDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[219 + i] = rd32(E1000_TDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[223 + i] = 
rd32(E1000_TDH(i)); + for (i = 0; i < 4; i++) + regs_buff[227 + i] = rd32(E1000_TDT(i)); + for (i = 0; i < 4; i++) + regs_buff[231 + i] = rd32(E1000_TXDCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[235 + i] = rd32(E1000_TDWBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[239 + i] = rd32(E1000_TDWBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i)); + + for (i = 0; i < 4; i++) + regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i)); + for (i = 0; i < 4; i++) + regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i)); + for (i = 0; i < 32; i++) + regs_buff[255 + i] = rd32(E1000_WUPM_REG(i)); + for (i = 0; i < 128; i++) + regs_buff[287 + i] = rd32(E1000_FFMT_REG(i)); + for (i = 0; i < 128; i++) + regs_buff[415 + i] = rd32(E1000_FFVT_REG(i)); + for (i = 0; i < 4; i++) + regs_buff[543 + i] = rd32(E1000_FFLT_REG(i)); + + regs_buff[547] = rd32(E1000_TDFH); + regs_buff[548] = rd32(E1000_TDFT); + regs_buff[549] = rd32(E1000_TDFHS); + regs_buff[550] = rd32(E1000_TDFPC); + + if (hw->mac.type > e1000_82580) { + regs_buff[551] = adapter->stats.o2bgptc; + regs_buff[552] = adapter->stats.b2ospc; + regs_buff[553] = adapter->stats.o2bspc; + regs_buff[554] = adapter->stats.b2ogprc; + } + + if (hw->mac.type == e1000_82576) { + for (i = 0; i < 12; i++) + regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4)); + for (i = 0; i < 4; i++) + regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[607 + i] = rd32(E1000_RDH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[619 + i] = rd32(E1000_RDT(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4)); + + for (i = 0; i < 12; i++) + regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[679 + i] = rd32(E1000_TDH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[691 + i] = rd32(E1000_TDT(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4)); + } + + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) + regs_buff[739] = rd32(E1000_I210_RR2DCDELAY); +} + +static int igb_get_eeprom_len(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + return adapter->hw.nvm.word_size * 2; +} + +static int igb_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == e1000_nvm_eeprom_spi) + ret_val = hw->nvm.ops.read(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = 
hw->nvm.ops.read(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int igb_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if ((hw->mac.type >= e1000_i210) && + !igb_get_flash_presence_i210(hw)) { + return -EOPNOTSUPP; + } + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + if (ret_val) + goto out; + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->nvm.ops.write(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) + hw->nvm.ops.update(hw); + + igb_set_fw_version(adapter); +out: + kfree(eeprom_buff); + return ret_val; +} + +static void igb_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + strscpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); + + /* EEPROM image version # is reported as firmware version # for + * 82575 controllers + */ + strscpy(drvinfo->fw_version, adapter->fw_version, + sizeof(drvinfo->fw_version)); + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN; +} + +static void igb_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IGB_MAX_RXD; + ring->tx_max_pending = IGB_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int igb_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct igb_ring *temp_ring; + int i, err = 0; + u16 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + 
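/* Editor's note (illustrative sketch, not part of the upstream driver):
 * this handler backs `ethtool -G <iface> rx <n> tx <n>`. The requested
 * counts are first clamped to the supported range and rounded up to the
 * hardware descriptor multiple before the rings are reallocated while the
 * device is quiesced under __IGB_RESETTING. Assuming a descriptor multiple
 * of 8, the clamping below is equivalent to:
 *
 *   new_rx_count = ALIGN(clamp_t(u32, ring->rx_pending,
 *                                 IGB_MIN_RXD, IGB_MAX_RXD), 8);
 *
 * so, for example, a request of 500 Rx descriptors would be rounded up
 * to 504.
 */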
new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(array_size(sizeof(struct igb_ring), + adapter->num_tx_queues)); + else + temp_ring = vmalloc(array_size(sizeof(struct igb_ring), + adapter->num_rx_queues)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igb_down(adapter); + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. + */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct igb_ring)); + + temp_ring[i].count = new_tx_count; + err = igb_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igb_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + igb_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct igb_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct igb_ring)); + + temp_ring[i].count = new_rx_count; + err = igb_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igb_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + igb_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct igb_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } +err_setup: + igb_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGB_RESETTING, &adapter->state); + return err; +} + +/* ethtool register test data */ +struct igb_reg_test { + u16 reg; + u16 reg_offset; + u16 array_len; + u16 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x100 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* i210 reg test */ +static struct igb_reg_test reg_test_i210[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* RDH is read-only for i210, only test RDT. */ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x900FFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0, 0 } +}; + +/* i350 reg test */ +static struct igb_reg_test reg_test_i350[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* RDH is read-only for i350, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0xC3FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 16, TABLE64_TEST_HI, + 0xC3FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82580 reg test */ +static struct igb_reg_test reg_test_82580[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + /* RDH is read-only for 82580, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82576 reg test */ +static struct igb_reg_test reg_test_82576[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + /* Enable all RX queues before testing. */ + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, + E1000_RXDCTL_QUEUE_ENABLE }, + { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, + E1000_RXDCTL_QUEUE_ENABLE }, + /* RDH is read-only for 82576, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, + { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82575 register test */ +static struct igb_reg_test reg_test_82575[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* Enable all four RX queues before testing. */ + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, + E1000_RXDCTL_QUEUE_ENABLE }, + /* RDH is read-only for 82575, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pat, val; + static const u32 _test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { + wr32(reg, (_test[pat] & write)); + val = rd32(reg) & mask; + if (val != (_test[pat] & write & mask)) { + dev_err(&adapter->pdev->dev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, (_test[pat] & write & mask)); + *data = reg; + return true; + } + } + + return false; +} + +static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u32 val; + + wr32(reg, write & mask); + val = rd32(reg); + if ((write & mask) != (val & mask)) { + dev_err(&adapter->pdev->dev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + return true; + } + + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +static int igb_reg_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct igb_reg_test *test; + u32 value, before, after; + u32 i, toggle; + + switch (adapter->hw.mac.type) { + case e1000_i350: + case e1000_i354: + test = reg_test_i350; + toggle = 0x7FEFF3FF; + break; + case e1000_i210: + case e1000_i211: + test = reg_test_i210; + toggle = 0x7FEFF3FF; + break; + case e1000_82580: + test = reg_test_82580; + toggle = 0x7FEFF3FF; + break; + case e1000_82576: + test = reg_test_82576; + toggle = 0x7FFFF3FF; + break; + default: + test = reg_test_82575; + toggle = 0x7FFFF3FF; + break; + } + + /* Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writable on newer MACs. 
+ */ + before = rd32(E1000_STATUS); + value = (rd32(E1000_STATUS) & toggle); + wr32(E1000_STATUS, toggle); + after = rd32(E1000_STATUS) & toggle; + if (value != after) { + dev_err(&adapter->pdev->dev, + "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); + *data = 1; + return 1; + } + /* restore previous status */ + wr32(E1000_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + REG_PATTERN_TEST(test->reg + + (i * test->reg_offset), + test->mask, + test->write); + break; + case SET_READ_TEST: + REG_SET_AND_CHECK(test->reg + + (i * test->reg_offset), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + writel(test->write, + (adapter->hw.hw_addr + test->reg) + + (i * test->reg_offset)); + break; + case TABLE32_TEST: + REG_PATTERN_TEST(test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + REG_PATTERN_TEST(test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + REG_PATTERN_TEST((test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + } + test++; + } + + *data = 0; + return 0; +} + +static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* Validate eeprom on all parts but flashless */ + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (igb_get_flash_presence_i210(hw)) { + if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) + *data = 2; + } + break; + default: + if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) + *data = 2; + break; + } + + return *data; +} + +static irqreturn_t igb_test_intr(int irq, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *) data; + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= rd32(E1000_ICR); + + return IRQ_HANDLED; +} + +static int igb_intr_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 mask, ics_mask, i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + if (request_irq(adapter->msix_entries[0].vector, + igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; + return -1; + } + wr32(E1000_IVAR_MISC, E1000_IVAR_VALID << 8); + wr32(E1000_EIMS, BIT(0)); + } else if (adapter->flags & IGB_FLAG_HAS_MSI) { + shared_int = false; + if (request_irq(irq, + igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED, + netdev->name, adapter)) { + shared_int = false; + } else if (request_irq(irq, igb_test_intr, IRQF_SHARED, + netdev->name, adapter)) { + *data = 1; + return -1; + } + dev_info(&adapter->pdev->dev, "testing %s interrupt\n", + (shared_int ? 
"shared" : "unshared")); + + /* Disable all the interrupts */ + wr32(E1000_IMC, ~0); + wrfl(); + usleep_range(10000, 11000); + + /* Define all writable bits for ICS */ + switch (hw->mac.type) { + case e1000_82575: + ics_mask = 0x37F47EDD; + break; + case e1000_82576: + ics_mask = 0x77D4FBFD; + break; + case e1000_82580: + ics_mask = 0x77DCFED5; + break; + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + ics_mask = 0x77DCFED5; + break; + default: + ics_mask = 0x7FFFFFFF; + break; + } + + /* Test each interrupt */ + for (; i < 31; i++) { + /* Interrupt to test */ + mask = BIT(i); + + if (!(mask & ics_mask)) + continue; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMC, mask); + wr32(E1000_ICS, mask); + wrfl(); + usleep_range(10000, 11000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMS, mask); + wr32(E1000_ICS, mask); + wrfl(); + usleep_range(10000, 11000); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. 
+ */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMC, ~mask); + wr32(E1000_ICS, ~mask); + wrfl(); + usleep_range(10000, 11000); + + if (adapter->test_icr & mask) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + wr32(E1000_IMC, ~0); + wrfl(); + usleep_range(10000, 11000); + + /* Unhook test interrupt handler */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) + free_irq(adapter->msix_entries[0].vector, adapter); + else + free_irq(irq, adapter); + + return *data; +} + +static void igb_free_desc_rings(struct igb_adapter *adapter) +{ + igb_free_tx_resources(&adapter->test_tx_ring); + igb_free_rx_resources(&adapter->test_rx_ring); +} + +static int igb_setup_desc_rings(struct igb_adapter *adapter) +{ + struct igb_ring *tx_ring = &adapter->test_tx_ring; + struct igb_ring *rx_ring = &adapter->test_rx_ring; + struct e1000_hw *hw = &adapter->hw; + int ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = IGB_DEFAULT_TXD; + tx_ring->dev = &adapter->pdev->dev; + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->vfs_allocated_count; + + if (igb_setup_tx_resources(tx_ring)) { + ret_val = 1; + goto err_nomem; + } + + igb_setup_tctl(adapter); + igb_configure_tx_ring(adapter, tx_ring); + + /* Setup Rx descriptor ring and Rx buffers */ + rx_ring->count = IGB_DEFAULT_RXD; + rx_ring->dev = &adapter->pdev->dev; + rx_ring->netdev = adapter->netdev; + rx_ring->reg_idx = adapter->vfs_allocated_count; + + if (igb_setup_rx_resources(rx_ring)) { + ret_val = 3; + goto err_nomem; + } + + /* set the default queue to queue 0 of PF */ + wr32(E1000_MRQC, adapter->vfs_allocated_count << 3); + + /* enable receive ring */ + igb_setup_rctl(adapter); + igb_configure_rx_ring(adapter, rx_ring); + + igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring)); + + return 0; + +err_nomem: + igb_free_desc_rings(adapter); + return ret_val; +} + +static void igb_phy_disable_receiver(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + igb_write_phy_reg(hw, 29, 0x001F); + igb_write_phy_reg(hw, 30, 0x8FFC); + igb_write_phy_reg(hw, 29, 0x001A); + igb_write_phy_reg(hw, 30, 0x8FF0); +} + +static int igb_integrated_phy_loopback(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + + hw->mac.autoneg = false; + + if (hw->phy.type == e1000_phy_m88) { + if (hw->phy.id != I210_I_PHY_ID) { + /* Auto-MDI/MDIX Off */ + igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); + /* autoneg off */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); + } else { + /* force 1000, set loopback */ + igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); + igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); + } + } else if (hw->phy.type == e1000_phy_82580) { + /* enable MII loopback */ + igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); + } + + /* add small delay to avoid loopback test failure */ + msleep(50); + + /* force 1000, set loopback */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = rd32(E1000_CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD | /* Force Duplex to FULL */ + E1000_CTRL_SLU); /* Set link up enable bit */ + + if (hw->phy.type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + + wr32(E1000_CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy.type == e1000_phy_m88) + igb_phy_disable_receiver(adapter); + + msleep(500); + return 0; +} + +static int igb_set_phy_loopback(struct igb_adapter *adapter) +{ + return igb_integrated_phy_loopback(adapter); +} + +static int igb_setup_loopback_test(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg; + + reg = rd32(E1000_CTRL_EXT); + + /* use CTRL_EXT to identify link type as SGMII can appear as copper */ + if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) { + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || + (hw->device_id == E1000_DEV_ID_I354_SGMII) || + (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) { + /* Enable DH89xxCC MPHY for near end loopback */ + reg = rd32(E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | + E1000_MPHY_PCS_CLK_REG_OFFSET; + wr32(E1000_MPHY_ADDR_CTL, reg); + + reg = rd32(E1000_MPHY_DATA); + reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN; + wr32(E1000_MPHY_DATA, reg); + } + + reg = rd32(E1000_RCTL); + reg |= E1000_RCTL_LBM_TCVR; + wr32(E1000_RCTL, reg); + + wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); + + reg = rd32(E1000_CTRL); + reg &= ~(E1000_CTRL_RFCE | + E1000_CTRL_TFCE | + E1000_CTRL_LRST); + reg |= E1000_CTRL_SLU | + E1000_CTRL_FD; + wr32(E1000_CTRL, reg); + + /* Unset switch control to serdes energy detect */ + reg = rd32(E1000_CONNSW); + reg &= ~E1000_CONNSW_ENRGSRC; + wr32(E1000_CONNSW, reg); + + /* Unset sigdetect for SERDES loopback on + * 82580 and newer devices. 
+ */ + if (hw->mac.type >= e1000_82580) { + reg = rd32(E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_IGN_SD; + wr32(E1000_PCS_CFG0, reg); + } + + /* Set PCS register for forced speed */ + reg = rd32(E1000_PCS_LCTL); + reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ + reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ + E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ + E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ + E1000_PCS_LCTL_FSD | /* Force Speed */ + E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ + wr32(E1000_PCS_LCTL, reg); + + return 0; + } + + return igb_set_phy_loopback(adapter); +} + +static void igb_loopback_cleanup(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || + (hw->device_id == E1000_DEV_ID_I354_SGMII)) { + u32 reg; + + /* Disable near end loopback on DH89xxCC */ + reg = rd32(E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | + E1000_MPHY_PCS_CLK_REG_OFFSET; + wr32(E1000_MPHY_ADDR_CTL, reg); + + reg = rd32(E1000_MPHY_DATA); + reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN; + wr32(E1000_MPHY_DATA, reg); + } + + rctl = rd32(E1000_RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + wr32(E1000_RCTL, rctl); + + hw->mac.autoneg = true; + igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + igb_write_phy_reg(hw, PHY_CONTROL, phy_reg); + igb_phy_sw_reset(hw); + } +} + +static void igb_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size /= 2; + memset(&skb->data[frame_size], 0xAA, frame_size - 1); + skb->data[frame_size + 10] = 0xBE; + skb->data[frame_size + 12] = 0xAF; +} + +static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer, + unsigned int frame_size) +{ + unsigned char *data; + bool match = true; + + frame_size >>= 1; + + data = kmap_local_page(rx_buffer->page); + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + + kunmap_local(data); + + return match; +} + +static int igb_clean_test_rings(struct igb_ring *rx_ring, + struct igb_ring *tx_ring, + unsigned int size) +{ + union e1000_adv_rx_desc *rx_desc; + struct igb_rx_buffer *rx_buffer_info; + struct igb_tx_buffer *tx_buffer_info; + u16 rx_ntc, tx_ntc, count = 0; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); + + while (rx_desc->wb.upper.length) { + /* check Rx buffer */ + rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; + + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer_info->dma, + size, + DMA_FROM_DEVICE); + + /* verify contents of skb */ + if (igb_check_lbtest_frame(rx_buffer_info, size)) + count++; + + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer_info->dma, + size, + DMA_FROM_DEVICE); + + /* unmap buffer on Tx side */ + tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer_info->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer_info, dma), + dma_unmap_len(tx_buffer_info, len), + DMA_TO_DEVICE); + 
dma_unmap_len_set(tx_buffer_info, len, 0); + + /* increment Rx/Tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); + } + + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* re-map buffers to ring, store next to clean values */ + igb_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +static int igb_run_loopback_test(struct igb_adapter *adapter) +{ + struct igb_ring *tx_ring = &adapter->test_tx_ring; + struct igb_ring *rx_ring = &adapter->test_rx_ring; + u16 i, j, lc, good_cnt; + int ret_val = 0; + unsigned int size = IGB_RX_HDR_LEN; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; + + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; + + /* place data into test skb */ + igb_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + for (j = 0; j <= lc; j++) { /* loop count loop */ + /* reset count of good packets */ + good_cnt = 0; + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < 64; i++) { + skb_get(skb); + tx_ret_val = igb_xmit_frame_ring(skb, tx_ring); + if (tx_ret_val == NETDEV_TX_OK) + good_cnt++; + } + + if (good_cnt != 64) { + ret_val = 12; + break; + } + + /* allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); + if (good_cnt != 64) { + ret_val = 13; + break; + } + } /* end loop count loop */ + + /* free the original skb */ + kfree_skb(skb); + + return ret_val; +} + +static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) +{ + /* PHY loopback cannot be performed if SoL/IDER + * sessions are active + */ + if (igb_check_reset_block(&adapter->hw)) { + dev_err(&adapter->pdev->dev, + "Cannot do PHY loopback test when SoL/IDER is active.\n"); + *data = 0; + goto out; + } + + if (adapter->hw.mac.type == e1000_i354) { + dev_info(&adapter->pdev->dev, + "Loopback test not supported on i354.\n"); + *data = 0; + goto out; + } + *data = igb_setup_desc_rings(adapter); + if (*data) + goto out; + *data = igb_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = igb_run_loopback_test(adapter); + igb_loopback_cleanup(adapter); + +err_loopback: + igb_free_desc_rings(adapter); +out: + return *data; +} + +static int igb_link_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->phy.media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->mac.serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + hw->mac.ops.check_for_link(&adapter->hw); + if (hw->mac.serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + hw->mac.ops.check_for_link(&adapter->hw); + if (hw->mac.autoneg) + msleep(5000); + + if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static void igb_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igb_adapter *adapter = 
netdev_priv(netdev); + u16 autoneg_advertised; + u8 forced_speed_duplex, autoneg; + bool if_running = netif_running(netdev); + + set_bit(__IGB_TESTING, &adapter->state); + + /* can't do offline tests on media switching devices */ + if (adapter->hw.dev_spec._82575.mas_capable) + eth_test->flags &= ~ETH_TEST_FL_OFFLINE; + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + autoneg_advertised = adapter->hw.phy.autoneg_advertised; + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; + autoneg = adapter->hw.mac.autoneg; + + dev_info(&adapter->pdev->dev, "offline testing starting\n"); + + /* power up link for link test */ + igb_power_up_link(adapter); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (igb_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + igb_close(netdev); + else + igb_reset(adapter); + + if (igb_reg_test(adapter, &data[TEST_REG])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + if (igb_eeprom_test(adapter, &data[TEST_EEP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + if (igb_intr_test(adapter, &data[TEST_IRQ])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + /* power up link for loopback test */ + igb_power_up_link(adapter); + if (igb_loopback_test(adapter, &data[TEST_LOOP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + adapter->hw.phy.autoneg_advertised = autoneg_advertised; + adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; + adapter->hw.mac.autoneg = autoneg; + + /* force this routine to wait until autoneg complete/timeout */ + adapter->hw.phy.autoneg_wait_to_complete = true; + igb_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = false; + + clear_bit(__IGB_TESTING, &adapter->state); + if (if_running) + igb_open(netdev); + } else { + dev_info(&adapter->pdev->dev, "online testing starting\n"); + + /* PHY is powered down when interface is down */ + if (if_running && igb_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + else + data[TEST_LINK] = 0; + + /* Online tests aren't run; pass by default */ + data[TEST_REG] = 0; + data[TEST_EEP] = 0; + data[TEST_IRQ] = 0; + data[TEST_LOOP] = 0; + + clear_bit(__IGB_TESTING, &adapter->state); + } + msleep_interruptible(4 * 1000); +} + +static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + wol->wolopts = 0; + + if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + + /* apply any specific unsupported masks here */ + switch (adapter->hw.device_id) { + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & E1000_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EOPNOTSUPP; + + if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) + return 
wol->wolopts ? -EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= E1000_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +/* bit defines for adapter->led_status */ +#define IGB_LED_ON 0 + +static int igb_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + igb_blink_led(hw); + return 2; + case ETHTOOL_ID_ON: + igb_blink_led(hw); + break; + case ETHTOOL_ID_OFF: + igb_led_off(hw); + break; + case ETHTOOL_ID_INACTIVE: + igb_led_off(hw); + clear_bit(IGB_LED_ON, &adapter->led_status); + igb_cleanup_led(hw); + break; + } + + return 0; +} + +static int igb_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + + if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 3) && + (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->tx_coalesce_usecs > 3) && + (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) || + (ec->tx_coalesce_usecs == 2)) + return -EINVAL; + + if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGB_FLAG_DMAC) + adapter->flags &= ~IGB_FLAG_DMAC; + } + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + + /* convert to rate of irq's per second */ + if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + q_vector->tx.work_limit = adapter->tx_work_limit; + if (q_vector->rx.ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGB_START_ITR; + q_vector->set_itr = 1; + } + + return 0; +} + +static int igb_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (adapter->rx_itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) { + if (adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + } + + return 0; +} + +static int 
igb_nway_reset(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + igb_reinit_locked(adapter); + return 0; +} + +static int igb_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IGB_STATS_LEN; + case ETH_SS_TEST: + return IGB_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return IGB_PRIV_FLAGS_STR_LEN; + default: + return -ENOTSUPP; + } +} + +static void igb_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igb_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igb_gstrings_stats[i].stat_offset; + data[i] = (igb_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset; + data[i] = (igb_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i+1] = ring->tx_stats.bytes; + data[i+2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); + data[i+2] += restart2; + + i += IGB_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i+1] = ring->rx_stats.bytes; + data[i+2] = ring->rx_stats.drops; + data[i+3] = ring->rx_stats.csum_err; + data[i+4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + i += IGB_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + +static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, igb_gstrings_test, sizeof(igb_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, + igb_gstrings_stats[i].stat_string); + for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) + ethtool_sprintf(&p, + igb_gstrings_net_stats[i].stat_string); + for (i = 0; i < adapter->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "tx_queue_%u_restart", i); + } + for (i = 0; i < adapter->num_rx_queues; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_drops", i); + ethtool_sprintf(&p, "rx_queue_%u_csum_err", i); + ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i); + } + /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, igb_priv_flags_strings, + IGB_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int igb_get_ts_info(struct net_device *dev, + struct ethtool_ts_info 
*info) +{ + struct igb_adapter *adapter = netdev_priv(dev); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + switch (adapter->hw.mac.type) { + case e1000_82575: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + return 0; + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + + /* 82576 does not support timestamping all packets. */ + if (adapter->hw.mac.type >= e1000_82580) + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); + else + info->rx_filters |= + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; + default: + return -EOPNOTSUPP; + } +} + +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igb_nfc_filter *rule = NULL; + + /* report total rule count */ + cmd->data = IGB_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + if (rule->filter.match_flags) { + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + if (rule->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = rule->filter.etype; + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = rule->filter.vlan_tci; + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + + return 0; + } + return -EINVAL; +} + +static int igb_get_ethtool_nfc_all(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igb_nfc_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = IGB_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static int igb_get_rss_hash_opts(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on igb */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | 
RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igb_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = igb_get_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = igb_get_ethtool_nfc_all(adapter, cmd, rule_locs); + break; + case ETHTOOL_GRXFH: + ret = igb_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \ + IGB_FLAG_RSS_FIELD_IPV6_UDP) +static int igb_set_rss_hash_opt(struct igb_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct e1000_hw *hw = &adapter->hw; + u32 mrqc = rd32(E1000_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + dev_err(&adapter->pdev->dev, + "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP | 
+ E1000_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + + wr32(E1000_MRQC, mrqc); + } + + return 0; +} + +static int igb_rxnfc_write_etype_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + u8 i; + u32 etqf; + u16 etype; + + /* find an empty etype filter register */ + for (i = 0; i < MAX_ETYPE_FILTER; ++i) { + if (!adapter->etype_bitmap[i]) + break; + } + if (i == MAX_ETYPE_FILTER) { + dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n"); + return -EINVAL; + } + + adapter->etype_bitmap[i] = true; + + etqf = rd32(E1000_ETQF(i)); + etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK); + + etqf |= E1000_ETQF_FILTER_ENABLE; + etqf &= ~E1000_ETQF_ETYPE_MASK; + etqf |= (etype & E1000_ETQF_ETYPE_MASK); + + etqf &= ~E1000_ETQF_QUEUE_MASK; + etqf |= ((input->action << E1000_ETQF_QUEUE_SHIFT) + & E1000_ETQF_QUEUE_MASK); + etqf |= E1000_ETQF_QUEUE_ENABLE; + + wr32(E1000_ETQF(i), etqf); + + input->etype_reg_index = i; + + return 0; +} + +static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + u8 vlan_priority; + u16 queue_index; + u32 vlapqf; + + vlapqf = rd32(E1000_VLAPQF); + vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK) + >> VLAN_PRIO_SHIFT; + queue_index = (vlapqf >> (vlan_priority * 4)) & E1000_VLAPQF_QUEUE_MASK; + + /* check whether this vlan prio is already set */ + if ((vlapqf & E1000_VLAPQF_P_VALID(vlan_priority)) && + (queue_index != input->action)) { + dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n"); + return -EEXIST; + } + + vlapqf |= E1000_VLAPQF_P_VALID(vlan_priority); + vlapqf |= E1000_VLAPQF_QUEUE_SEL(vlan_priority, input->action); + + wr32(E1000_VLAPQF, vlapqf); + + return 0; +} + +int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + int err = -EINVAL; + + if (hw->mac.type == e1000_i210 && + !(input->filter.match_flags & ~IGB_FILTER_FLAG_SRC_MAC_ADDR)) { + dev_err(&adapter->pdev->dev, + "i210 doesn't support flow classification rules specifying only source addresses.\n"); + return -EOPNOTSUPP; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { + err = igb_rxnfc_write_etype_filter(adapter, input); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) { + err = igb_add_mac_steering_filter(adapter, + input->filter.dst_addr, + input->action, 0); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) { + err = igb_add_mac_steering_filter(adapter, + input->filter.src_addr, + input->action, + IGB_MAC_STATE_SRC_ADDR); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) + err = igb_rxnfc_write_vlan_prio_filter(adapter, input); + + return err; +} + +static void igb_clear_etype_filter_regs(struct igb_adapter *adapter, + u16 reg_index) +{ + struct e1000_hw *hw = &adapter->hw; + u32 etqf = rd32(E1000_ETQF(reg_index)); + + etqf &= ~E1000_ETQF_QUEUE_ENABLE; + etqf &= ~E1000_ETQF_QUEUE_MASK; + etqf &= ~E1000_ETQF_FILTER_ENABLE; + + wr32(E1000_ETQF(reg_index), etqf); + + adapter->etype_bitmap[reg_index] = false; +} + +static void igb_clear_vlan_prio_filter(struct 
igb_adapter *adapter, + u16 vlan_tci) +{ + struct e1000_hw *hw = &adapter->hw; + u8 vlan_priority; + u32 vlapqf; + + vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + + vlapqf = rd32(E1000_VLAPQF); + vlapqf &= ~E1000_VLAPQF_P_VALID(vlan_priority); + vlapqf &= ~E1000_VLAPQF_QUEUE_SEL(vlan_priority, + E1000_VLAPQF_QUEUE_MASK); + + wr32(E1000_VLAPQF, vlapqf); +} + +int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) +{ + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) + igb_clear_etype_filter_regs(adapter, + input->etype_reg_index); + + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) + igb_clear_vlan_prio_filter(adapter, + ntohs(input->filter.vlan_tci)); + + if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) + igb_del_mac_steering_filter(adapter, input->filter.src_addr, + input->action, + IGB_MAC_STATE_SRC_ADDR); + + if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) + igb_del_mac_steering_filter(adapter, input->filter.dst_addr, + input->action, 0); + + return 0; +} + +static int igb_update_ethtool_nfc_entry(struct igb_adapter *adapter, + struct igb_nfc_filter *input, + u16 sw_idx) +{ + struct igb_nfc_filter *rule, *parent; + int err = -EINVAL; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { + if (!input) + err = igb_erase_filter(adapter, rule); + + hlist_del(&rule->nfc_node); + kfree(rule); + adapter->nfc_filter_count--; + } + + /* If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; + + /* initialize node */ + INIT_HLIST_NODE(&input->nfc_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->nfc_node, &parent->nfc_node); + else + hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list); + + /* update counts */ + adapter->nfc_filter_count++; + + return 0; +} + +static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igb_nfc_filter *input, *rule; + int err = 0; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) + return -EOPNOTSUPP; + + /* Don't allow programming if the action is a queue greater than + * the number of online Rx queues. 
+ */ + if ((fsp->ring_cookie == RX_CLS_FLOW_DISC) || + (fsp->ring_cookie >= adapter->num_rx_queues)) { + dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n"); + return -EINVAL; + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= IGB_MAX_RXNFC_FILTERS) { + dev_err(&adapter->pdev->dev, "Location out of range\n"); + return -EINVAL; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) + return -EINVAL; + + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + input->filter.etype = fsp->h_u.ether_spec.h_proto; + input->filter.match_flags = IGB_FILTER_FLAG_ETHER_TYPE; + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + input->filter.match_flags |= IGB_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(input->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + input->filter.match_flags |= IGB_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(input->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { + err = -EINVAL; + goto err_out; + } + input->filter.vlan_tci = fsp->h_ext.vlan_tci; + input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; + } + + input->action = fsp->ring_cookie; + input->sw_idx = fsp->location; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (!memcmp(&input->filter, &rule->filter, + sizeof(input->filter))) { + err = -EEXIST; + dev_err(&adapter->pdev->dev, + "ethtool: this filter is already set\n"); + goto err_out_w_lock; + } + } + + err = igb_add_filter(adapter, input); + if (err) + goto err_out_w_lock; + + igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + + spin_unlock(&adapter->nfc_lock); + return 0; + +err_out_w_lock: + spin_unlock(&adapter->nfc_lock); +err_out: + kfree(input); + return err; +} + +static int igb_del_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->nfc_lock); + err = igb_update_ethtool_nfc_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->nfc_lock); + + return err; +} + +static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct igb_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = igb_set_rss_hash_opt(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + ret = igb_add_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = igb_del_ethtool_nfc_entry(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ret_val; + u16 phy_data; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + return -EOPNOTSUPP; + + edata->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full); + if (!hw->dev_spec._82575.eee_disable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + /* The IPCNFG and EEER registers are not 
supported on I354. */ + if (hw->mac.type == e1000_i354) { + igb_get_eee_status_i354(hw, (bool *)&edata->eee_active); + } else { + u32 eeer; + + eeer = rd32(E1000_EEER); + + /* EEE status on negotiated link */ + if (eeer & E1000_EEER_EEE_NEG) + edata->eee_active = true; + + if (eeer & E1000_EEER_TX_LPI_EN) + edata->tx_lpi_enabled = true; + } + + /* EEE Link Partner Advertised */ + switch (hw->mac.type) { + case e1000_i350: + ret_val = igb_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350, + &phy_data); + if (ret_val) + return -ENODATA; + + edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + break; + case e1000_i354: + case e1000_i210: + case e1000_i211: + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210, + E1000_EEE_LP_ADV_DEV_I210, + &phy_data); + if (ret_val) + return -ENODATA; + + edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + + break; + default: + break; + } + + edata->eee_enabled = !hw->dev_spec._82575.eee_disable; + + if ((hw->mac.type == e1000_i354) && + (edata->eee_enabled)) + edata->tx_lpi_enabled = true; + + /* Report correct negotiated EEE status for devices that + * wrongly report EEE at half-duplex + */ + if (adapter->link_duplex == HALF_DUPLEX) { + edata->eee_enabled = false; + edata->eee_active = false; + edata->tx_lpi_enabled = false; + edata->advertised &= ~edata->advertised; + } + + return 0; +} + +static int igb_set_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + bool adv1g_eee = true, adv100m_eee = true; + s32 ret_val; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + return -EOPNOTSUPP; + + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + + ret_val = igb_get_eee(netdev, &eee_curr); + if (ret_val) + return ret_val; + + if (eee_curr.eee_enabled) { + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + dev_err(&adapter->pdev->dev, + "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + /* Tx LPI timer is not implemented currently */ + if (edata->tx_lpi_timer) { + dev_err(&adapter->pdev->dev, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + + if (!edata->advertised || (edata->advertised & + ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) { + dev_err(&adapter->pdev->dev, + "EEE Advertisement supports only 100Tx and/or 100T full duplex\n"); + return -EINVAL; + } + adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL); + adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL); + + } else if (!edata->eee_enabled) { + dev_err(&adapter->pdev->dev, + "Setting EEE options are not supported with EEE disabled\n"); + return -EINVAL; + } + + adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { + hw->dev_spec._82575.eee_disable = !edata->eee_enabled; + adapter->flags |= IGB_FLAG_EEE; + + /* reset link */ + if (netif_running(netdev)) + igb_reinit_locked(adapter); + else + igb_reset(adapter); + } + + if (hw->mac.type == e1000_i354) + ret_val = igb_set_eee_i354(hw, adv1g_eee, adv100m_eee); + else + ret_val = igb_set_eee_i350(hw, adv1g_eee, adv100m_eee); + + if (ret_val) { + dev_err(&adapter->pdev->dev, + "Problem setting EEE advertisement options\n"); + return -EINVAL; + } + + return 0; +} + +static int igb_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct 
e1000_hw *hw = &adapter->hw; + u32 status = 0; + u16 sff8472_rev, addr_mode; + bool page_swap = false; + + if ((hw->phy.media_type == e1000_media_type_copper) || + (hw->phy.media_type == e1000_media_type_unknown)) + return -EOPNOTSUPP; + + /* Check whether we support SFF-8472 or not */ + status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); + if (status) + return -EIO; + + /* addressing mode is not supported */ + status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); + if (status) + return -EIO; + + /* addressing mode is not supported */ + if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) { + hw_dbg("Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); + page_swap = true; + } + + if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) { + /* We have an SFP, but it does not support SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have an SFP which supports a revision of SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int igb_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 status = 0; + u16 *dataword; + u16 first_word, last_word; + int i = 0; + + if (ee->len == 0) + return -EINVAL; + + first_word = ee->offset >> 1; + last_word = (ee->offset + ee->len - 1) >> 1; + + dataword = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!dataword) + return -ENOMEM; + + /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ + for (i = 0; i < last_word - first_word + 1; i++) { + status = igb_read_phy_reg_i2c(hw, (first_word + i) * 2, + &dataword[i]); + if (status) { + /* Error occurred while reading module */ + kfree(dataword); + return -EIO; + } + + be16_to_cpus(&dataword[i]); + } + + memcpy(data, (u8 *)dataword + (ee->offset & 1), ee->len); + kfree(dataword); + + return 0; +} + +static int igb_ethtool_begin(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; +} + +static void igb_ethtool_complete(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + pm_runtime_put(&adapter->pdev->dev); +} + +static u32 igb_get_rxfh_indir_size(struct net_device *netdev) +{ + return IGB_RETA_SIZE; +} + +static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < IGB_RETA_SIZE; i++) + indir[i] = adapter->rss_indir_tbl[i]; + + return 0; +} + +void igb_write_rss_indir_tbl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg = E1000_RETA(0); + u32 shift = 0; + int i = 0; + + switch (hw->mac.type) { + case e1000_82575: + shift = 6; + break; + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + shift = 3; + break; + default: + break; + } + + while (i < IGB_RETA_SIZE) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= adapter->rss_indir_tbl[i + j]; + } + + wr32(reg, val << shift); + reg += 4; + i += 4; + } +} + +static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + 
struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int i; + u32 num_queues; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + num_queues = adapter->rss_queues; + + switch (hw->mac.type) { + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + num_queues = 2; + break; + default: + break; + } + + /* Verify user input. */ + for (i = 0; i < IGB_RETA_SIZE; i++) + if (indir[i] >= num_queues) + return -EINVAL; + + + for (i = 0; i < IGB_RETA_SIZE; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + igb_write_rss_indir_tbl(adapter); + + return 0; +} + +static unsigned int igb_max_channels(struct igb_adapter *adapter) +{ + return igb_get_max_rss_queues(adapter); +} + +static void igb_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = igb_max_channels(adapter); + + /* Report info for other vector */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + ch->combined_count = adapter->rss_queues; +} + +static int igb_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; + unsigned int max_combined = 0; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* Verify other_count is valid and has not been changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ + max_combined = igb_max_channels(adapter); + if (count > max_combined) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; + igb_set_flag_queue_pairs(adapter, max_combined); + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. 
+ */ + return igb_reinit_queues(adapter); + } + + return 0; +} + +static u32 igb_get_priv_flags(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IGB_FLAG_RX_LEGACY) + priv_flags |= IGB_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int igb_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IGB_FLAG_RX_LEGACY; + if (priv_flags & IGB_PRIV_FLAGS_LEGACY_RX) + flags |= IGB_FLAG_RX_LEGACY; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + igb_reinit_locked(adapter); + } + + return 0; +} + +static const struct ethtool_ops igb_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_drvinfo = igb_get_drvinfo, + .get_regs_len = igb_get_regs_len, + .get_regs = igb_get_regs, + .get_wol = igb_get_wol, + .set_wol = igb_set_wol, + .get_msglevel = igb_get_msglevel, + .set_msglevel = igb_set_msglevel, + .nway_reset = igb_nway_reset, + .get_link = igb_get_link, + .get_eeprom_len = igb_get_eeprom_len, + .get_eeprom = igb_get_eeprom, + .set_eeprom = igb_set_eeprom, + .get_ringparam = igb_get_ringparam, + .set_ringparam = igb_set_ringparam, + .get_pauseparam = igb_get_pauseparam, + .set_pauseparam = igb_set_pauseparam, + .self_test = igb_diag_test, + .get_strings = igb_get_strings, + .set_phys_id = igb_set_phys_id, + .get_sset_count = igb_get_sset_count, + .get_ethtool_stats = igb_get_ethtool_stats, + .get_coalesce = igb_get_coalesce, + .set_coalesce = igb_set_coalesce, + .get_ts_info = igb_get_ts_info, + .get_rxnfc = igb_get_rxnfc, + .set_rxnfc = igb_set_rxnfc, + .get_eee = igb_get_eee, + .set_eee = igb_set_eee, + .get_module_info = igb_get_module_info, + .get_module_eeprom = igb_get_module_eeprom, + .get_rxfh_indir_size = igb_get_rxfh_indir_size, + .get_rxfh = igb_get_rxfh, + .set_rxfh = igb_set_rxfh, + .get_channels = igb_get_channels, + .set_channels = igb_set_channels, + .get_priv_flags = igb_get_priv_flags, + .set_priv_flags = igb_set_priv_flags, + .begin = igb_ethtool_begin, + .complete = igb_ethtool_complete, + .get_link_ksettings = igb_get_link_ksettings, + .set_link_ksettings = igb_set_link_ksettings, +}; + +void igb_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &igb_ethtool_ops; +} diff --git a/devices/igb/igb_hwmon-5.14-ethercat.c b/devices/igb/igb_hwmon-5.14-ethercat.c new file mode 100644 index 00000000..355497e3 --- /dev/null +++ b/devices/igb/igb_hwmon-5.14-ethercat.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#include "igb-5.14-ethercat.h" +#include "e1000_82575-5.14-ethercat.h" +#include "e1000_hw-5.14-ethercat.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_IGB_HWMON +static struct i2c_board_info i350_sensor_info = { + I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), +}; + +/* hwmon callback functions */ +static ssize_t igb_hwmon_show_location(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + return sprintf(buf, "loc%u\n", + igb_attr->sensor->location); +} + +static ssize_t igb_hwmon_show_temp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value; + + /* reset the temp field */ + igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw); + + value = igb_attr->sensor->temp; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = igb_attr->sensor->caution_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = igb_attr->sensor->max_op_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +/* igb_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. + * @ adapter: pointer to the adapter structure + * @ offset: offset in the eeprom sensor data table + * @ type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a device_attribute + * This is included in our hwmon_attr struct that contains the references to + * the data structures we need to get the data to display. 
+ */ +static int igb_add_hwmon_attr(struct igb_adapter *adapter, + unsigned int offset, int type) +{ + int rc; + unsigned int n_attr; + struct hwmon_attr *igb_attr; + + n_attr = adapter->igb_hwmon_buff->n_hwmon; + igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr]; + + switch (type) { + case IGB_HWMON_TYPE_LOC: + igb_attr->dev_attr.show = igb_hwmon_show_location; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_label", offset + 1); + break; + case IGB_HWMON_TYPE_TEMP: + igb_attr->dev_attr.show = igb_hwmon_show_temp; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_input", offset + 1); + break; + case IGB_HWMON_TYPE_CAUTION: + igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_max", offset + 1); + break; + case IGB_HWMON_TYPE_MAX: + igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_crit", offset + 1); + break; + default: + rc = -EPERM; + return rc; + } + + /* These always the same regardless of type */ + igb_attr->sensor = + &adapter->hw.mac.thermal_sensor_data.sensor[offset]; + igb_attr->hw = &adapter->hw; + igb_attr->dev_attr.store = NULL; + igb_attr->dev_attr.attr.mode = 0444; + igb_attr->dev_attr.attr.name = igb_attr->name; + sysfs_attr_init(&igb_attr->dev_attr.attr); + + adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr; + + ++adapter->igb_hwmon_buff->n_hwmon; + + return 0; +} + +static void igb_sysfs_del_adapter(struct igb_adapter *adapter) +{ +} + +/* called from igb_main.c */ +void igb_sysfs_exit(struct igb_adapter *adapter) +{ + igb_sysfs_del_adapter(adapter); +} + +/* called from igb_main.c */ +int igb_sysfs_init(struct igb_adapter *adapter) +{ + struct hwmon_buff *igb_hwmon; + struct i2c_client *client; + struct device *hwmon_dev; + unsigned int i; + int rc = 0; + + /* If this method isn't defined we don't support thermals */ + if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) + goto exit; + + /* Don't create thermal hwmon interface if no sensors present */ + rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)); + if (rc) + goto exit; + + igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon), + GFP_KERNEL); + if (!igb_hwmon) { + rc = -ENOMEM; + goto exit; + } + adapter->igb_hwmon_buff = igb_hwmon; + + for (i = 0; i < E1000_MAX_SENSORS; i++) { + + /* Only create hwmon sysfs entries for sensors that have + * meaningful data. 
+ */ + if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0) + continue; + + /* Bail if any hwmon attr struct fails to initialize */ + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX); + if (rc) + goto exit; + } + + /* init i2c_client */ + client = i2c_new_client_device(&adapter->i2c_adap, &i350_sensor_info); + if (IS_ERR(client)) { + dev_info(&adapter->pdev->dev, + "Failed to create new i2c device.\n"); + rc = PTR_ERR(client); + goto exit; + } + adapter->i2c_client = client; + + igb_hwmon->groups[0] = &igb_hwmon->group; + igb_hwmon->group.attrs = igb_hwmon->attrs; + + hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev, + client->name, + igb_hwmon, + igb_hwmon->groups); + if (IS_ERR(hwmon_dev)) { + rc = PTR_ERR(hwmon_dev); + goto err; + } + + goto exit; + +err: + igb_sysfs_del_adapter(adapter); +exit: + return rc; +} +#endif diff --git a/devices/igb/igb_hwmon-5.14-orig.c b/devices/igb/igb_hwmon-5.14-orig.c new file mode 100644 index 00000000..21a29a0c --- /dev/null +++ b/devices/igb/igb_hwmon-5.14-orig.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#include "igb.h" +#include "e1000_82575.h" +#include "e1000_hw.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_IGB_HWMON +static struct i2c_board_info i350_sensor_info = { + I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), +}; + +/* hwmon callback functions */ +static ssize_t igb_hwmon_show_location(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + return sprintf(buf, "loc%u\n", + igb_attr->sensor->location); +} + +static ssize_t igb_hwmon_show_temp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value; + + /* reset the temp field */ + igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw); + + value = igb_attr->sensor->temp; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = igb_attr->sensor->caution_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = igb_attr->sensor->max_op_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +/* igb_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. + * @ adapter: pointer to the adapter structure + * @ offset: offset in the eeprom sensor data table + * @ type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a device_attribute + * This is included in our hwmon_attr struct that contains the references to + * the data structures we need to get the data to display. 
+ */ +static int igb_add_hwmon_attr(struct igb_adapter *adapter, + unsigned int offset, int type) +{ + int rc; + unsigned int n_attr; + struct hwmon_attr *igb_attr; + + n_attr = adapter->igb_hwmon_buff->n_hwmon; + igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr]; + + switch (type) { + case IGB_HWMON_TYPE_LOC: + igb_attr->dev_attr.show = igb_hwmon_show_location; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_label", offset + 1); + break; + case IGB_HWMON_TYPE_TEMP: + igb_attr->dev_attr.show = igb_hwmon_show_temp; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_input", offset + 1); + break; + case IGB_HWMON_TYPE_CAUTION: + igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_max", offset + 1); + break; + case IGB_HWMON_TYPE_MAX: + igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_crit", offset + 1); + break; + default: + rc = -EPERM; + return rc; + } + + /* These always the same regardless of type */ + igb_attr->sensor = + &adapter->hw.mac.thermal_sensor_data.sensor[offset]; + igb_attr->hw = &adapter->hw; + igb_attr->dev_attr.store = NULL; + igb_attr->dev_attr.attr.mode = 0444; + igb_attr->dev_attr.attr.name = igb_attr->name; + sysfs_attr_init(&igb_attr->dev_attr.attr); + + adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr; + + ++adapter->igb_hwmon_buff->n_hwmon; + + return 0; +} + +static void igb_sysfs_del_adapter(struct igb_adapter *adapter) +{ +} + +/* called from igb_main.c */ +void igb_sysfs_exit(struct igb_adapter *adapter) +{ + igb_sysfs_del_adapter(adapter); +} + +/* called from igb_main.c */ +int igb_sysfs_init(struct igb_adapter *adapter) +{ + struct hwmon_buff *igb_hwmon; + struct i2c_client *client; + struct device *hwmon_dev; + unsigned int i; + int rc = 0; + + /* If this method isn't defined we don't support thermals */ + if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) + goto exit; + + /* Don't create thermal hwmon interface if no sensors present */ + rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)); + if (rc) + goto exit; + + igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon), + GFP_KERNEL); + if (!igb_hwmon) { + rc = -ENOMEM; + goto exit; + } + adapter->igb_hwmon_buff = igb_hwmon; + + for (i = 0; i < E1000_MAX_SENSORS; i++) { + + /* Only create hwmon sysfs entries for sensors that have + * meaningful data. 
+ */ + if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0) + continue; + + /* Bail if any hwmon attr struct fails to initialize */ + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX); + if (rc) + goto exit; + } + + /* init i2c_client */ + client = i2c_new_client_device(&adapter->i2c_adap, &i350_sensor_info); + if (IS_ERR(client)) { + dev_info(&adapter->pdev->dev, + "Failed to create new i2c device.\n"); + rc = PTR_ERR(client); + goto exit; + } + adapter->i2c_client = client; + + igb_hwmon->groups[0] = &igb_hwmon->group; + igb_hwmon->group.attrs = igb_hwmon->attrs; + + hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev, + client->name, + igb_hwmon, + igb_hwmon->groups); + if (IS_ERR(hwmon_dev)) { + rc = PTR_ERR(hwmon_dev); + goto err; + } + + goto exit; + +err: + igb_sysfs_del_adapter(adapter); +exit: + return rc; +} +#endif diff --git a/devices/igb/igb_hwmon-6.1-ethercat.c b/devices/igb/igb_hwmon-6.1-ethercat.c new file mode 100644 index 00000000..c759d408 --- /dev/null +++ b/devices/igb/igb_hwmon-6.1-ethercat.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#include "igb-6.1-ethercat.h" +#include "e1000_82575-6.1-ethercat.h" +#include "e1000_hw-6.1-ethercat.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_IGB_HWMON +static struct i2c_board_info i350_sensor_info = { + I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), +}; + +/* hwmon callback functions */ +static ssize_t igb_hwmon_show_location(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + return sprintf(buf, "loc%u\n", + igb_attr->sensor->location); +} + +static ssize_t igb_hwmon_show_temp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value; + + /* reset the temp field */ + igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw); + + value = igb_attr->sensor->temp; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = igb_attr->sensor->caution_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = igb_attr->sensor->max_op_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +/* igb_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. 
+ * @ adapter: pointer to the adapter structure + * @ offset: offset in the eeprom sensor data table + * @ type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a device_attribute + * This is included in our hwmon_attr struct that contains the references to + * the data structures we need to get the data to display. + */ +static int igb_add_hwmon_attr(struct igb_adapter *adapter, + unsigned int offset, int type) +{ + int rc; + unsigned int n_attr; + struct hwmon_attr *igb_attr; + + n_attr = adapter->igb_hwmon_buff->n_hwmon; + igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr]; + + switch (type) { + case IGB_HWMON_TYPE_LOC: + igb_attr->dev_attr.show = igb_hwmon_show_location; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_label", offset + 1); + break; + case IGB_HWMON_TYPE_TEMP: + igb_attr->dev_attr.show = igb_hwmon_show_temp; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_input", offset + 1); + break; + case IGB_HWMON_TYPE_CAUTION: + igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_max", offset + 1); + break; + case IGB_HWMON_TYPE_MAX: + igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_crit", offset + 1); + break; + default: + rc = -EPERM; + return rc; + } + + /* These always the same regardless of type */ + igb_attr->sensor = + &adapter->hw.mac.thermal_sensor_data.sensor[offset]; + igb_attr->hw = &adapter->hw; + igb_attr->dev_attr.store = NULL; + igb_attr->dev_attr.attr.mode = 0444; + igb_attr->dev_attr.attr.name = igb_attr->name; + sysfs_attr_init(&igb_attr->dev_attr.attr); + + adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr; + + ++adapter->igb_hwmon_buff->n_hwmon; + + return 0; +} + +static void igb_sysfs_del_adapter(struct igb_adapter *adapter) +{ +} + +/* called from igb_main.c */ +void igb_sysfs_exit(struct igb_adapter *adapter) +{ + igb_sysfs_del_adapter(adapter); +} + +/* called from igb_main.c */ +int igb_sysfs_init(struct igb_adapter *adapter) +{ + struct hwmon_buff *igb_hwmon; + struct i2c_client *client; + struct device *hwmon_dev; + unsigned int i; + int rc = 0; + + /* If this method isn't defined we don't support thermals */ + if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) + goto exit; + + /* Don't create thermal hwmon interface if no sensors present */ + rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)); + if (rc) + goto exit; + + igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon), + GFP_KERNEL); + if (!igb_hwmon) { + rc = -ENOMEM; + goto exit; + } + adapter->igb_hwmon_buff = igb_hwmon; + + for (i = 0; i < E1000_MAX_SENSORS; i++) { + + /* Only create hwmon sysfs entries for sensors that have + * meaningful data. 
+ */ + if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0) + continue; + + /* Bail if any hwmon attr struct fails to initialize */ + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX); + if (rc) + goto exit; + } + + /* init i2c_client */ + client = i2c_new_client_device(&adapter->i2c_adap, &i350_sensor_info); + if (IS_ERR(client)) { + dev_info(&adapter->pdev->dev, + "Failed to create new i2c device.\n"); + rc = PTR_ERR(client); + goto exit; + } + adapter->i2c_client = client; + + igb_hwmon->groups[0] = &igb_hwmon->group; + igb_hwmon->group.attrs = igb_hwmon->attrs; + + hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev, + client->name, + igb_hwmon, + igb_hwmon->groups); + if (IS_ERR(hwmon_dev)) { + rc = PTR_ERR(hwmon_dev); + goto err; + } + + goto exit; + +err: + igb_sysfs_del_adapter(adapter); +exit: + return rc; +} +#endif diff --git a/devices/igb/igb_hwmon-6.1-orig.c b/devices/igb/igb_hwmon-6.1-orig.c new file mode 100644 index 00000000..21a29a0c --- /dev/null +++ b/devices/igb/igb_hwmon-6.1-orig.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#include "igb.h" +#include "e1000_82575.h" +#include "e1000_hw.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_IGB_HWMON +static struct i2c_board_info i350_sensor_info = { + I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), +}; + +/* hwmon callback functions */ +static ssize_t igb_hwmon_show_location(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + return sprintf(buf, "loc%u\n", + igb_attr->sensor->location); +} + +static ssize_t igb_hwmon_show_temp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value; + + /* reset the temp field */ + igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw); + + value = igb_attr->sensor->temp; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = igb_attr->sensor->caution_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = igb_attr->sensor->max_op_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +/* igb_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. + * @ adapter: pointer to the adapter structure + * @ offset: offset in the eeprom sensor data table + * @ type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a device_attribute + * This is included in our hwmon_attr struct that contains the references to + * the data structures we need to get the data to display. 
+ */ +static int igb_add_hwmon_attr(struct igb_adapter *adapter, + unsigned int offset, int type) +{ + int rc; + unsigned int n_attr; + struct hwmon_attr *igb_attr; + + n_attr = adapter->igb_hwmon_buff->n_hwmon; + igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr]; + + switch (type) { + case IGB_HWMON_TYPE_LOC: + igb_attr->dev_attr.show = igb_hwmon_show_location; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_label", offset + 1); + break; + case IGB_HWMON_TYPE_TEMP: + igb_attr->dev_attr.show = igb_hwmon_show_temp; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_input", offset + 1); + break; + case IGB_HWMON_TYPE_CAUTION: + igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_max", offset + 1); + break; + case IGB_HWMON_TYPE_MAX: + igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_crit", offset + 1); + break; + default: + rc = -EPERM; + return rc; + } + + /* These always the same regardless of type */ + igb_attr->sensor = + &adapter->hw.mac.thermal_sensor_data.sensor[offset]; + igb_attr->hw = &adapter->hw; + igb_attr->dev_attr.store = NULL; + igb_attr->dev_attr.attr.mode = 0444; + igb_attr->dev_attr.attr.name = igb_attr->name; + sysfs_attr_init(&igb_attr->dev_attr.attr); + + adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr; + + ++adapter->igb_hwmon_buff->n_hwmon; + + return 0; +} + +static void igb_sysfs_del_adapter(struct igb_adapter *adapter) +{ +} + +/* called from igb_main.c */ +void igb_sysfs_exit(struct igb_adapter *adapter) +{ + igb_sysfs_del_adapter(adapter); +} + +/* called from igb_main.c */ +int igb_sysfs_init(struct igb_adapter *adapter) +{ + struct hwmon_buff *igb_hwmon; + struct i2c_client *client; + struct device *hwmon_dev; + unsigned int i; + int rc = 0; + + /* If this method isn't defined we don't support thermals */ + if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) + goto exit; + + /* Don't create thermal hwmon interface if no sensors present */ + rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)); + if (rc) + goto exit; + + igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon), + GFP_KERNEL); + if (!igb_hwmon) { + rc = -ENOMEM; + goto exit; + } + adapter->igb_hwmon_buff = igb_hwmon; + + for (i = 0; i < E1000_MAX_SENSORS; i++) { + + /* Only create hwmon sysfs entries for sensors that have + * meaningful data. 
+ */ + if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0) + continue; + + /* Bail if any hwmon attr struct fails to initialize */ + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX); + if (rc) + goto exit; + } + + /* init i2c_client */ + client = i2c_new_client_device(&adapter->i2c_adap, &i350_sensor_info); + if (IS_ERR(client)) { + dev_info(&adapter->pdev->dev, + "Failed to create new i2c device.\n"); + rc = PTR_ERR(client); + goto exit; + } + adapter->i2c_client = client; + + igb_hwmon->groups[0] = &igb_hwmon->group; + igb_hwmon->group.attrs = igb_hwmon->attrs; + + hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev, + client->name, + igb_hwmon, + igb_hwmon->groups); + if (IS_ERR(hwmon_dev)) { + rc = PTR_ERR(hwmon_dev); + goto err; + } + + goto exit; + +err: + igb_sysfs_del_adapter(adapter); +exit: + return rc; +} +#endif diff --git a/devices/igb/igb_main-5.14-ethercat.c b/devices/igb/igb_main-5.14-ethercat.c new file mode 100644 index 00000000..29c5656c --- /dev/null +++ b/devices/igb/igb_main-5.14-ethercat.c @@ -0,0 +1,10140 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_IGB_DCA +#include +#endif +#include +#include "igb-5.14-ethercat.h" + +#ifdef CONFIG_SUSE_KERNEL +#include +#else +# ifndef SUSE_VERSION +# define SUSE_VERSION 0 +# endif +# ifndef SUSE_PATCHLEVEL +# define SUSE_PATCHLEVEL 0 +# endif +#endif + +enum queue_mode { + QUEUE_MODE_STRICT_PRIORITY, + QUEUE_MODE_STREAM_RESERVATION, +}; + +enum tx_queue_prio { + TX_QUEUE_PRIO_HIGH, + TX_QUEUE_PRIO_LOW, +}; + +char igb_driver_name[] = "ec_igb"; +static const char igb_driver_string[] = + "Intel(R) Gigabit Ethernet Network Driver (EtherCAT enabled)"; +static const char igb_copyright[] = + "Copyright (c) 2007-2014 Intel Corporation."; + +static const struct e1000_info *igb_info_tbl[] = { + [board_82575] = &e1000_82575_info, +}; + +static const struct pci_device_id igb_pci_tbl[] = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, + 
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, + /* required last entry */ + {0, } +}; + +// MODULE_DEVICE_TABLE(pci, igb_pci_tbl); + +static int igb_setup_all_tx_resources(struct igb_adapter *); +static int igb_setup_all_rx_resources(struct igb_adapter *); +static void igb_free_all_tx_resources(struct igb_adapter *); +static void igb_free_all_rx_resources(struct igb_adapter *); +static void igb_setup_mrqc(struct igb_adapter *); +static int igb_probe(struct pci_dev *, const struct pci_device_id *); +static void igb_remove(struct pci_dev *pdev); +static int igb_sw_init(struct igb_adapter *); +int igb_open(struct net_device *); +int igb_close(struct net_device *); +static void igb_configure(struct igb_adapter *); +static void igb_configure_tx(struct igb_adapter *); +static void igb_configure_rx(struct igb_adapter *); +static void igb_clean_all_tx_rings(struct igb_adapter *); +static void igb_clean_all_rx_rings(struct igb_adapter *); +static void igb_clean_tx_ring(struct igb_ring *); +static void igb_clean_rx_ring(struct igb_ring *); +static void igb_set_rx_mode(struct net_device *); +static void igb_update_phy_info(struct timer_list *); +static void igb_watchdog(struct timer_list *); +static void igb_watchdog_task(struct work_struct *); +static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); +static void igb_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); +static int igb_change_mtu(struct net_device *, int); +static int igb_set_mac(struct net_device *, void *); +static void igb_set_uta(struct igb_adapter *adapter, bool set); +static irqreturn_t igb_intr(int irq, void *); +static irqreturn_t igb_intr_msi(int irq, void *); +static irqreturn_t igb_msix_other(int irq, void *); +static irqreturn_t igb_msix_ring(int irq, void *); +#ifdef CONFIG_IGB_DCA +static void igb_update_dca(struct igb_q_vector *); +static void igb_setup_dca(struct igb_adapter *); +#endif /* CONFIG_IGB_DCA */ +static int igb_poll(struct napi_struct *, int); +static bool igb_clean_tx_irq(struct igb_q_vector *, int); +static int igb_clean_rx_irq(struct igb_q_vector *, int); +static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); +static void igb_tx_timeout(struct net_device *, 
unsigned int txqueue); +static void igb_reset_task(struct work_struct *); +static void igb_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16); +static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16); +static void igb_restore_vlan(struct igb_adapter *); +static void igb_rar_set_index(struct igb_adapter *, u32); +static void igb_ping_all_vfs(struct igb_adapter *); +static void igb_msg_task(struct igb_adapter *); +static void igb_vmm_control(struct igb_adapter *); +static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); +static void igb_flush_mac_table(struct igb_adapter *); +static int igb_available_rars(struct igb_adapter *, u8); +static void igb_set_default_mac_filter(struct igb_adapter *); +static int igb_uc_sync(struct net_device *, const unsigned char *); +static int igb_uc_unsync(struct net_device *, const unsigned char *); +static void igb_restore_vf_multicasts(struct igb_adapter *adapter); +static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); +static int igb_ndo_set_vf_vlan(struct net_device *netdev, + int vf, u16 vlan, u8 qos, __be16 vlan_proto); +static int igb_ndo_set_vf_bw(struct net_device *, int, int, int); +static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, + bool setting); +static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, + bool setting); +static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi); +static void igb_check_vf_rate_limit(struct igb_adapter *); +static void igb_nfc_filter_exit(struct igb_adapter *adapter); +static void igb_nfc_filter_restore(struct igb_adapter *adapter); + +#ifdef CONFIG_PCI_IOV +static int igb_vf_configure(struct igb_adapter *adapter, int vf); +static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); +static int igb_disable_sriov(struct pci_dev *dev); +static int igb_pci_disable_sriov(struct pci_dev *dev); +#endif + +static int igb_suspend(struct device *); +static int igb_resume(struct device *); +static int igb_runtime_suspend(struct device *dev); +static int igb_runtime_resume(struct device *dev); +static int igb_runtime_idle(struct device *dev); +static const struct dev_pm_ops igb_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) + SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, + igb_runtime_idle) +}; +static void igb_shutdown(struct pci_dev *); +static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +#ifdef CONFIG_IGB_DCA +static int igb_notify_dca(struct notifier_block *, unsigned long, void *); +static struct notifier_block dca_notifier = { + .notifier_call = igb_notify_dca, + .next = NULL, + .priority = 0 +}; +#endif +#ifdef CONFIG_PCI_IOV +static unsigned int max_vfs; +module_param(max_vfs, uint, 0); +MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function"); +#endif /* CONFIG_PCI_IOV */ + +static pci_ers_result_t igb_io_error_detected(struct pci_dev *, + pci_channel_state_t); +static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); +static void igb_io_resume(struct pci_dev *); + +static const struct pci_error_handlers igb_err_handler = { + .error_detected = igb_io_error_detected, + .slot_reset = igb_io_slot_reset, + .resume = igb_io_resume, +}; + +static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); + +static struct pci_driver igb_driver = { + .name = igb_driver_name, + .id_table = igb_pci_tbl, + .probe = igb_probe, + .remove = 
igb_remove, +#ifdef CONFIG_PM + .driver.pm = &igb_pm_ops, +#endif + .shutdown = igb_shutdown, + .sriov_configure = igb_pci_sriov_configure, + .err_handler = &igb_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); +MODULE_LICENSE("GPL v2"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +struct igb_reg_info { + u32 ofs; + char *name; +}; + +static const struct igb_reg_info igb_reg_info_tbl[] = { + + /* General Registers */ + {E1000_CTRL, "CTRL"}, + {E1000_STATUS, "STATUS"}, + {E1000_CTRL_EXT, "CTRL_EXT"}, + + /* Interrupt Registers */ + {E1000_ICR, "ICR"}, + + /* RX Registers */ + {E1000_RCTL, "RCTL"}, + {E1000_RDLEN(0), "RDLEN"}, + {E1000_RDH(0), "RDH"}, + {E1000_RDT(0), "RDT"}, + {E1000_RXDCTL(0), "RXDCTL"}, + {E1000_RDBAL(0), "RDBAL"}, + {E1000_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {E1000_TCTL, "TCTL"}, + {E1000_TDBAL(0), "TDBAL"}, + {E1000_TDBAH(0), "TDBAH"}, + {E1000_TDLEN(0), "TDLEN"}, + {E1000_TDH(0), "TDH"}, + {E1000_TDT(0), "TDT"}, + {E1000_TXDCTL(0), "TXDCTL"}, + {E1000_TDFH, "TDFH"}, + {E1000_TDFT, "TDFT"}, + {E1000_TDFHS, "TDFHS"}, + {E1000_TDFPC, "TDFPC"}, + + /* List Terminator */ + {} +}; + +/* igb_regdump - register printout routine */ +static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) +{ + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case E1000_RDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDLEN(n)); + break; + case E1000_RDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDH(n)); + break; + case E1000_RDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDT(n)); + break; + case E1000_RXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RXDCTL(n)); + break; + case E1000_RDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDBAL(n)); + break; + case E1000_RDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDBAH(n)); + break; + case E1000_TDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDBAL(n)); + break; + case E1000_TDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDBAH(n)); + break; + case E1000_TDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDLEN(n)); + break; + case E1000_TDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDH(n)); + break; + case E1000_TDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDT(n)); + break; + case E1000_TXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TXDCTL(n)); + break; + default: + pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); + pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], + regs[2], regs[3]); +} + +/* igb_dump - Print registers, Tx-rings and Rx-rings */ +static void igb_dump(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct igb_reg_info *reginfo; + struct igb_ring *tx_ring; + union e1000_adv_tx_desc *tx_desc; + struct my_u0 { __le64 a; __le64 b; } *u0; + struct igb_ring *rx_ring; + union e1000_adv_rx_desc *rx_desc; + u32 staterr; + u16 i, n; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + pr_info("Device Name state trans_start\n"); + pr_info("%-15s %016lX %016lX\n", netdev->name, + 
netdev->state, dev_trans_start(netdev)); + } + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl; + reginfo->name; reginfo++) { + igb_regdump(hw, reginfo); + } + + /* Print TX Ring Summary */ + if (!netdev || !netif_running(netdev)) + goto exit; + + dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + for (n = 0; n < adapter->num_tx_queues; n++) { + struct igb_tx_buffer *buffer_info; + tx_ring = adapter->tx_ring[n]; + buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 15 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + struct igb_tx_buffer *buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, i); + buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", + i, le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + dma_unmap_len(buffer_info, len), + true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info(" %5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * 
+-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + struct igb_rx_buffer *buffer_info; + buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IGB_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + next_desc); + } else { + pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)buffer_info->dma, + next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->page) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + page_address(buffer_info->page) + + buffer_info->page_offset, + igb_rx_bufsz(rx_ring), true); + } + } + } + } + +exit: + return; +} + +/** + * igb_get_i2c_data - Reads the I2C SDA data bit + * @data: opaque pointer to adapter struct + * + * Returns the I2C data bit value + **/ +static int igb_get_i2c_data(void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + return !!(i2cctl & E1000_I2C_DATA_IN); +} + +/** + * igb_set_i2c_data - Sets the I2C data bit + * @data: pointer to hardware structure + * @state: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + **/ +static void igb_set_i2c_data(void *data, int state) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + if (state) + i2cctl |= E1000_I2C_DATA_OUT; + else + i2cctl &= ~E1000_I2C_DATA_OUT; + + i2cctl &= ~E1000_I2C_DATA_OE_N; + i2cctl |= E1000_I2C_CLK_OE_N; + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); + +} + +/** + * igb_set_i2c_clk - Sets the I2C SCL clock + * @data: pointer to hardware structure + * @state: state to set clock + * + * Sets the I2C clock line to state + **/ +static void igb_set_i2c_clk(void *data, int state) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + if (state) { + i2cctl |= E1000_I2C_CLK_OUT; + i2cctl &= ~E1000_I2C_CLK_OE_N; + } else { + i2cctl &= ~E1000_I2C_CLK_OUT; + i2cctl &= ~E1000_I2C_CLK_OE_N; + } + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); +} + 
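/* The three callbacks above, together with igb_get_i2c_clk() below, implement
 * an open-drain I2C bus purely in software by toggling bits of the I2CPARAMS
 * register. They are consumed through the i2c_algo_bit_data table that
 * follows; elsewhere in the driver (not part of this hunk) that table is
 * copied into the adapter, given its 'data' back-pointer and registered with
 * the kernel's bit-banging core. A minimal sketch of that wiring, assuming
 * the field names used by the upstream igb driver:
 *
 *     adapter->i2c_adap.owner = THIS_MODULE;
 *     adapter->i2c_algo = igb_i2c_algo;            // copy the template
 *     adapter->i2c_algo.data = adapter;            // passed to the callbacks
 *     adapter->i2c_adap.algo_data = &adapter->i2c_algo;
 *     adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
 *     err = i2c_bit_add_bus(&adapter->i2c_adap);   // register the adapter
 *
 * i2c_bit_add_bus() is the standard registration entry point declared in
 * <linux/i2c-algo-bit.h>; the snippet is illustrative only and not taken
 * from this patch.
 */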
+/** + * igb_get_i2c_clk - Gets the I2C SCL clock state + * @data: pointer to hardware structure + * + * Gets the I2C clock state + **/ +static int igb_get_i2c_clk(void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + return !!(i2cctl & E1000_I2C_CLK_IN); +} + +static const struct i2c_algo_bit_data igb_i2c_algo = { + .setsda = igb_set_i2c_data, + .setscl = igb_set_i2c_clk, + .getsda = igb_get_i2c_data, + .getscl = igb_get_i2c_clk, + .udelay = 5, + .timeout = 20, +}; + +/** + * igb_get_hw_dev - return device + * @hw: pointer to hardware structure + * + * used by hardware layer to print debugging information + **/ +struct net_device *igb_get_hw_dev(struct e1000_hw *hw) +{ + struct igb_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * igb_init_module - Driver Registration Routine + * + * igb_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init igb_init_module(void) +{ + int ret; + + pr_info("%s\n", igb_driver_string); + pr_info("%s\n", igb_copyright); + +#ifdef CONFIG_IGB_DCA + dca_register_notify(&dca_notifier); +#endif + ret = pci_register_driver(&igb_driver); + return ret; +} + +module_init(igb_init_module); + +/** + * igb_exit_module - Driver Exit Cleanup Routine + * + * igb_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit igb_exit_module(void) +{ +#ifdef CONFIG_IGB_DCA + dca_unregister_notify(&dca_notifier); +#endif + pci_unregister_driver(&igb_driver); +} + +module_exit(igb_exit_module); + +#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) +/** + * igb_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + **/ +static void igb_cache_ring_register(struct igb_adapter *adapter) +{ + int i = 0, j = 0; + u32 rbase_offset = adapter->vfs_allocated_count; + + switch (adapter->hw.mac.type) { + case e1000_82576: + /* The queues are allocated for virtualization such that VF 0 + * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc. 
+ * In order to avoid collision we start at the first free queue + * and continue consuming queues in the same sequence + */ + if (adapter->vfs_allocated_count) { + for (; i < adapter->rss_queues; i++) + adapter->rx_ring[i]->reg_idx = rbase_offset + + Q_IDX_82576(i); + } + fallthrough; + case e1000_82575: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + default: + for (; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = rbase_offset + i; + for (; j < adapter->num_tx_queues; j++) + adapter->tx_ring[j]->reg_idx = rbase_offset + j; + break; + } +} + +u32 igb_rd32(struct e1000_hw *hw, u32 reg) +{ + struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); + u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); + u32 value = 0; + + if (E1000_REMOVED(hw_addr)) + return ~value; + + value = readl(&hw_addr[reg]); + + /* reads should not return all F's */ + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct net_device *netdev = igb->netdev; + hw->hw_addr = NULL; + netdev_err(netdev, "PCIe link lost\n"); + WARN(pci_device_is_present(igb->pdev), + "igb: Failed to read reg 0x%x!\n", reg); + } + + return value; +} + +/** + * igb_write_ivar - configure ivar for given MSI-X vector + * @hw: pointer to the HW structure + * @msix_vector: vector number we are allocating to a given ring + * @index: row index of IVAR register to write within IVAR table + * @offset: column offset of in IVAR, should be multiple of 8 + * + * This function is intended to handle the writing of the IVAR register + * for adapters 82576 and newer. The IVAR table consists of 2 columns, + * each containing an cause allocation for an Rx and Tx ring, and a + * variable number of rows depending on the number of queues supported. + **/ +static void igb_write_ivar(struct e1000_hw *hw, int msix_vector, + int index, int offset) +{ + u32 ivar = array_rd32(E1000_IVAR0, index); + + /* clear any bits that are currently set */ + ivar &= ~((u32)0xFF << offset); + + /* write vector and valid bit */ + ivar |= (msix_vector | E1000_IVAR_VALID) << offset; + + array_wr32(E1000_IVAR0, index, ivar); +} + +#define IGB_N0_QUEUE -1 +static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + int rx_queue = IGB_N0_QUEUE; + int tx_queue = IGB_N0_QUEUE; + u32 msixbm = 0; + + if (q_vector->rx.ring) + rx_queue = q_vector->rx.ring->reg_idx; + if (q_vector->tx.ring) + tx_queue = q_vector->tx.ring->reg_idx; + + switch (hw->mac.type) { + case e1000_82575: + /* The 82575 assigns vectors using a bitmask, which matches the + * bitmask for the EICR/EIMS/EIMC registers. To assign one + * or more queues to a vector, we write the appropriate bits + * into the MSIXBM register for that vector. + */ + if (rx_queue > IGB_N0_QUEUE) + msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; + if (tx_queue > IGB_N0_QUEUE) + msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; + if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0) + msixbm |= E1000_EIMS_OTHER; + array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); + q_vector->eims_value = msixbm; + break; + case e1000_82576: + /* 82576 uses a table that essentially consists of 2 columns + * with 8 rows. The ordering is column-major so we use the + * lower 3 bits as the row index, and the 4th bit as the + * column offset. 
+ */ + if (rx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + rx_queue & 0x7, + (rx_queue & 0x8) << 1); + if (tx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + tx_queue & 0x7, + ((tx_queue & 0x8) << 1) + 8); + q_vector->eims_value = BIT(msix_vector); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + /* On 82580 and newer adapters the scheme is similar to 82576 + * however instead of ordering column-major we have things + * ordered row-major. So we traverse the table by using + * bit 0 as the column offset, and the remaining bits as the + * row index. + */ + if (rx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + rx_queue >> 1, + (rx_queue & 0x1) << 4); + if (tx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + tx_queue >> 1, + ((tx_queue & 0x1) << 4) + 8); + q_vector->eims_value = BIT(msix_vector); + break; + default: + BUG(); + break; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + + /* configure q_vector to set itr on first interrupt */ + q_vector->set_itr = 1; +} + +/** + * igb_configure_msix - Configure MSI-X hardware + * @adapter: board private structure to initialize + * + * igb_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + **/ +static void igb_configure_msix(struct igb_adapter *adapter) +{ + u32 tmp; + int i, vector = 0; + struct e1000_hw *hw = &adapter->hw; + + adapter->eims_enable_mask = 0; + + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case e1000_82575: + tmp = rd32(E1000_CTRL_EXT); + /* enable MSI-X PBA support*/ + tmp |= E1000_CTRL_EXT_PBA_CLR; + + /* Auto-Mask interrupts upon ICR read. */ + tmp |= E1000_CTRL_EXT_EIAME; + tmp |= E1000_CTRL_EXT_IRCA; + + wr32(E1000_CTRL_EXT, tmp); + + /* enable msix_other interrupt */ + array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); + adapter->eims_other = E1000_EIMS_OTHER; + + break; + + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. + */ + wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | + E1000_GPIE_PBA | E1000_GPIE_EIAME | + E1000_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = BIT(vector); + tmp = (vector++ | E1000_IVAR_VALID) << 8; + + wr32(E1000_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) + igb_assign_vector(adapter->q_vector[i], vector++); + + wrfl(); +} + +/** + * igb_request_msix - Initialize MSI-X interrupts + * @adapter: board private structure to initialize + * + * igb_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. 
+ **/ +static int igb_request_msix(struct igb_adapter *adapter) +{ + unsigned int num_q_vectors = adapter->num_q_vectors; + struct net_device *netdev = adapter->netdev; + int i, err = 0, vector = 0, free_vector = 0; + + if (adapter->ecdev) { + return 0; + } + + err = request_irq(adapter->msix_entries[vector].vector, + igb_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + + vector++; + + q_vector->itr_register = adapter->io_addr + E1000_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else if (q_vector->tx.ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx.ring->queue_index); + else if (q_vector->rx.ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else + sprintf(q_vector->name, "%s-unused", netdev->name); + + err = request_irq(adapter->msix_entries[vector].vector, + igb_msix_ring, 0, q_vector->name, + q_vector); + if (err) + goto err_free; + } + + igb_configure_msix(adapter); + return 0; + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: + return err; +} + +/** + * igb_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + **/ +static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + adapter->q_vector[v_idx] = NULL; + + /* igb_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + if (q_vector) + kfree_rcu(q_vector, rcu); +} + +/** + * igb_reset_q_vector - Reset config for interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be reset + * + * If NAPI is enabled it will delete any references to the + * NAPI struct. This is preparation for igb_free_q_vector. + **/ +static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + /* Coming from igb_set_interrupt_capability, the vectors are not yet + * allocated. So, q_vector is NULL so we should stop here. 
+ */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; + + if (!adapter->ecdev) + netif_napi_del(&q_vector->napi); + +} + +static void igb_reset_interrupt_capability(struct igb_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) + pci_disable_msix(adapter->pdev); + else if (adapter->flags & IGB_FLAG_HAS_MSI) + pci_disable_msi(adapter->pdev); + + while (v_idx--) + igb_reset_q_vector(adapter, v_idx); +} + +/** + * igb_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void igb_free_q_vectors(struct igb_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) { + igb_reset_q_vector(adapter, v_idx); + igb_free_q_vector(adapter, v_idx); + } +} + +/** + * igb_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: board private structure to initialize + * + * This function resets the device so that it has 0 Rx queues, Tx queues, and + * MSI-X interrupts allocated. + */ +static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) +{ + igb_free_q_vectors(adapter); + igb_reset_interrupt_capability(adapter); +} + +/** + * igb_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: board private structure to initialize + * @msix: boolean value of MSIX capability + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) +{ + int err; + int numvecs, i; + + if (!msix) + goto msi_only; + adapter->flags |= IGB_FLAG_HAS_MSIX; + + /* Number of supported queues. 
*/ + adapter->num_rx_queues = adapter->rss_queues; + if (adapter->vfs_allocated_count) + adapter->num_tx_queues = 1; + else + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every Rx queue */ + numvecs = adapter->num_rx_queues; + + /* if Tx handler is separate add 1 for every Tx queue */ + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) + return; + + igb_reset_interrupt_capability(adapter); + + /* If we can't do MSI-X, try MSI */ +msi_only: + adapter->flags &= ~IGB_FLAG_HAS_MSIX; +#ifdef CONFIG_PCI_IOV + /* disable SR-IOV for non MSI-X configurations */ + if (adapter->vf_data) { + struct e1000_hw *hw = &adapter->hw; + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); + msleep(500); + + kfree(adapter->vf_mac_list); + adapter->vf_mac_list = NULL; + kfree(adapter->vf_data); + adapter->vf_data = NULL; + wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + wrfl(); + msleep(100); + dev_info(&adapter->pdev->dev, "IOV Disabled\n"); + } +#endif + adapter->vfs_allocated_count = 0; + adapter->rss_queues = 1; + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGB_FLAG_HAS_MSI; +} + +static void igb_add_ring(struct igb_ring *ring, + struct igb_ring_container *head) +{ + head->ring = ring; + head->count++; +} + +/** + * igb_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
+ **/ +static int igb_alloc_q_vector(struct igb_adapter *adapter, + int v_count, int v_idx, + int txr_count, int txr_idx, + int rxr_count, int rxr_idx) +{ + struct igb_q_vector *q_vector; + struct igb_ring *ring; + int ring_count; + size_t size; + + /* igb only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + size = struct_size(q_vector, ring, ring_count); + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; + if (!q_vector) { + q_vector = kzalloc(size, GFP_KERNEL); + } else if (size > ksize(q_vector)) { + kfree_rcu(q_vector, rcu); + q_vector = kzalloc(size, GFP_KERNEL); + } else { + memset(q_vector, 0, size); + } + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + if (!adapter->ecdev) { + netif_napi_add(adapter->netdev, &q_vector->napi, + igb_poll, 64); + } + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ + q_vector->itr_register = adapter->io_addr + E1000_EITR(0); + q_vector->itr_val = IGB_START_ITR; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* intialize ITR */ + if (rxr_count) { + /* rx or rx/tx vector */ + if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) + q_vector->itr_val = adapter->rx_itr_setting; + } else { + /* tx only vector */ + if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) + q_vector->itr_val = adapter->tx_itr_setting; + } + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + igb_add_ring(ring, &q_vector->tx); + + /* For 82575, context index must be unique per ring. */ + if (adapter->hw.mac.type == e1000_82575) + set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + ring->cbs_enable = false; + ring->idleslope = 0; + ring->sendslope = 0; + ring->hicredit = 0; + ring->locredit = 0; + + u64_stats_init(&ring->tx_syncp); + u64_stats_init(&ring->tx_syncp2); + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + igb_add_ring(ring, &q_vector->rx); + + /* set flag indicating ring supports SCTP checksum offload */ + if (adapter->hw.mac.type >= e1000_82576) + set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); + + /* On i350, i354, i210, and i211, loopback VLAN packets + * have the tag byte-swapped. + */ + if (adapter->hw.mac.type >= e1000_i350) + set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + u64_stats_init(&ring->rx_syncp); + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; +} + + +/** + * igb_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
+ **/ +static int igb_alloc_q_vectors(struct igb_adapter *adapter) +{ + int q_vectors = adapter->num_q_vectors; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igb_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * @adapter: board private structure to initialize + * @msix: boolean value of MSIX capability + * + * This function initializes the interrupts and allocates all of the queues. + **/ +static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) +{ + struct pci_dev *pdev = adapter->pdev; + int err; + + igb_set_interrupt_capability(adapter, msix); + + err = igb_alloc_q_vectors(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + igb_cache_ring_register(adapter); + + return 0; + +err_alloc_q_vectors: + igb_reset_interrupt_capability(adapter); + return err; +} + +/** + * igb_request_irq - initialize interrupts + * @adapter: board private structure to initialize + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
+ **/ +static int igb_request_irq(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + err = igb_request_msix(adapter); + if (!err) + goto request_done; + /* fall back to MSI */ + igb_free_all_tx_resources(adapter); + igb_free_all_rx_resources(adapter); + + igb_clear_interrupt_scheme(adapter); + err = igb_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; + + igb_setup_all_tx_resources(adapter); + igb_setup_all_rx_resources(adapter); + igb_configure(adapter); + } + + igb_assign_vector(adapter->q_vector[0], 0); + + if (!adapter->ecdev && adapter->flags & IGB_FLAG_HAS_MSI) { + err = request_irq(pdev->irq, igb_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; + + /* fall back to legacy interrupts */ + igb_reset_interrupt_capability(adapter); + adapter->flags &= ~IGB_FLAG_HAS_MSI; + } + + if (!adapter->ecdev) { + err = request_irq(pdev->irq, igb_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + dev_err(&pdev->dev, "Error %d getting interrupt\n", + err); + } + +request_done: + return err; +} + +static void igb_free_irq(struct igb_adapter *adapter) +{ + if (adapter->ecdev) + return; + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) + free_irq(adapter->msix_entries[vector++].vector, + adapter->q_vector[i]); + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * igb_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void igb_irq_disable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* we need to be careful when disabling interrupts. 
The VFs are also + * mapped into these registers and so clearing the bits can cause + * issues on the VF drivers so we only need to clear what we set + */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + u32 regval = rd32(E1000_EIAM); + + wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); + wr32(E1000_EIMC, adapter->eims_enable_mask); + regval = rd32(E1000_EIAC); + wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); + } + + wr32(E1000_IAM, 0); + wr32(E1000_IMC, ~0); + wrfl(); + + if (adapter->ecdev) { + /* skip synchronizing IRQs */ + return; + } + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + int i; + + for (i = 0; i < adapter->num_q_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * igb_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void igb_irq_enable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->ecdev) + return; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA; + u32 regval = rd32(E1000_EIAC); + + wr32(E1000_EIAC, regval | adapter->eims_enable_mask); + regval = rd32(E1000_EIAM); + wr32(E1000_EIAM, regval | adapter->eims_enable_mask); + wr32(E1000_EIMS, adapter->eims_enable_mask); + if (adapter->vfs_allocated_count) { + wr32(E1000_MBVFIMR, 0xFF); + ims |= E1000_IMS_VMMB; + } + wr32(E1000_IMS, ims); + } else { + wr32(E1000_IMS, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + wr32(E1000_IAM, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + } +} + +static void igb_update_mng_vlan(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 pf_id = adapter->vfs_allocated_count; + u16 vid = adapter->hw.mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + /* add VID to filter table */ + igb_vfta_set(hw, vid, pf_id, true, true); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; + } + + if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) { + /* remove VID from filter table */ + igb_vfta_set(hw, vid, pf_id, false, true); + } +} + +/** + * igb_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + **/ +static void igb_release_hw_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware take over control of h/w */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); +} + +/** + * igb_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. 
+ **/ +static void igb_get_hw_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); +} + +static void enable_fqtss(struct igb_adapter *adapter, bool enable) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + + WARN_ON(hw->mac.type != e1000_i210); + + if (enable) + adapter->flags |= IGB_FLAG_FQTSS; + else + adapter->flags &= ~IGB_FLAG_FQTSS; + + if (netif_running(netdev)) + schedule_work(&adapter->reset_task); +} + +static bool is_fqtss_enabled(struct igb_adapter *adapter) +{ + return (adapter->flags & IGB_FLAG_FQTSS) ? true : false; +} + +static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue, + enum tx_queue_prio prio) +{ + u32 val; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 4); + + val = rd32(E1000_I210_TXDCTL(queue)); + + if (prio == TX_QUEUE_PRIO_HIGH) + val |= E1000_TXDCTL_PRIORITY; + else + val &= ~E1000_TXDCTL_PRIORITY; + + wr32(E1000_I210_TXDCTL(queue), val); +} + +static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode) +{ + u32 val; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 1); + + val = rd32(E1000_I210_TQAVCC(queue)); + + if (mode == QUEUE_MODE_STREAM_RESERVATION) + val |= E1000_TQAVCC_QUEUEMODE; + else + val &= ~E1000_TQAVCC_QUEUEMODE; + + wr32(E1000_I210_TQAVCC(queue), val); +} + +static bool is_any_cbs_enabled(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]->cbs_enable) + return true; + } + + return false; +} + +static bool is_any_txtime_enabled(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]->launchtime_enable) + return true; + } + + return false; +} + +/** + * igb_config_tx_modes - Configure "Qav Tx mode" features on igb + * @adapter: pointer to adapter struct + * @queue: queue number + * + * Configure CBS and Launchtime for a given hardware queue. + * Parameters are retrieved from the correct Tx ring, so + * igb_save_cbs_params() and igb_save_txtime_params() should be used + * for setting those correctly prior to this function being called. + **/ +static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct igb_ring *ring; + u32 tqavcc, tqavctrl; + u16 value; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 1); + ring = adapter->tx_ring[queue]; + + /* If any of the Qav features is enabled, configure queues as SR and + * with HIGH PRIO. If none is, then configure them with LOW PRIO and + * as SP. + */ + if (ring->cbs_enable || ring->launchtime_enable) { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); + set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); + } else { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW); + set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY); + } + + /* If CBS is enabled, set DataTranARB and config its parameters. */ + if (ring->cbs_enable || queue == 0) { + /* i210 does not allow the queue 0 to be in the Strict + * Priority mode while the Qav mode is enabled, so, + * instead of disabling strict priority mode, we give + * queue 0 the maximum of credits possible. 
+ * + * See section 8.12.19 of the i210 datasheet, "Note: + * Queue0 QueueMode must be set to 1b when + * TransmitMode is set to Qav." + */ + if (queue == 0 && !ring->cbs_enable) { + /* max "linkspeed" idleslope in kbps */ + ring->idleslope = 1000000; + ring->hicredit = ETH_FRAME_LEN; + } + + /* Always set data transfer arbitration to credit-based + * shaper algorithm on TQAVCTRL if CBS is enabled for any of + * the queues. + */ + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl |= E1000_TQAVCTRL_DATATRANARB; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + + /* According to i210 datasheet section 7.2.7.7, we should set + * the 'idleSlope' field from TQAVCC register following the + * equation: + * + * For 100 Mbps link speed: + * + * value = BW * 0x7735 * 0.2 (E1) + * + * For 1000Mbps link speed: + * + * value = BW * 0x7735 * 2 (E2) + * + * E1 and E2 can be merged into one equation as shown below. + * Note that 'link-speed' is in Mbps. + * + * value = BW * 0x7735 * 2 * link-speed + * -------------- (E3) + * 1000 + * + * 'BW' is the percentage bandwidth out of full link speed + * which can be found with the following equation. Note that + * idleSlope here is the parameter from this function which + * is in kbps. + * + * BW = idleSlope + * ----------------- (E4) + * link-speed * 1000 + * + * That said, we can come up with a generic equation to + * calculate the value we should set it TQAVCC register by + * replacing 'BW' in E3 by E4. The resulting equation is: + * + * value = idleSlope * 0x7735 * 2 * link-speed + * ----------------- -------------- (E5) + * link-speed * 1000 1000 + * + * 'link-speed' is present in both sides of the fraction so + * it is canceled out. The final equation is the following: + * + * value = idleSlope * 61034 + * ----------------- (E6) + * 1000000 + * + * NOTE: For i210, given the above, we can see that idleslope + * is represented in 16.38431 kbps units by the value at + * the TQAVCC register (1Gbps / 61034), which reduces + * the granularity for idleslope increments. + * For instance, if you want to configure a 2576kbps + * idleslope, the value to be written on the register + * would have to be 157.23. If rounded down, you end + * up with less bandwidth available than originally + * required (~2572 kbps). If rounded up, you end up + * with a higher bandwidth (~2589 kbps). Below the + * approach we take is to always round up the + * calculated value, so the resulting bandwidth might + * be slightly higher for some configurations. + */ + value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000); + + tqavcc = rd32(E1000_I210_TQAVCC(queue)); + tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; + tqavcc |= value; + wr32(E1000_I210_TQAVCC(queue), tqavcc); + + wr32(E1000_I210_TQAVHC(queue), + 0x80000000 + ring->hicredit * 0x7735); + } else { + + /* Set idleSlope to zero. */ + tqavcc = rd32(E1000_I210_TQAVCC(queue)); + tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; + wr32(E1000_I210_TQAVCC(queue), tqavcc); + + /* Set hiCredit to zero. */ + wr32(E1000_I210_TQAVHC(queue), 0); + + /* If CBS is not enabled for any queues anymore, then return to + * the default state of Data Transmission Arbitration on + * TQAVCTRL. + */ + if (!is_any_cbs_enabled(adapter)) { + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } + } + + /* If LaunchTime is enabled, set DataTranTIM. */ + if (ring->launchtime_enable) { + /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled + * for any of the SR queues, and configure fetchtime delta. 
+ * XXX NOTE: + * - LaunchTime will be enabled for all SR queues. + * - A fixed offset can be added relative to the launch + * time of all packets if configured at reg LAUNCH_OS0. + * We are keeping it as 0 for now (default value). + */ + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl |= E1000_TQAVCTRL_DATATRANTIM | + E1000_TQAVCTRL_FETCHTIME_DELTA; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } else { + /* If Launchtime is not enabled for any SR queues anymore, + * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta, + * effectively disabling Launchtime. + */ + if (!is_any_txtime_enabled(adapter)) { + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM; + tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } + } + + /* XXX: In i210 controller the sendSlope and loCredit parameters from + * CBS are not configurable by software so we don't do any 'controller + * configuration' in respect to these parameters. + */ + + netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n", + ring->cbs_enable ? "enabled" : "disabled", + ring->launchtime_enable ? "enabled" : "disabled", + queue, + ring->idleslope, ring->sendslope, + ring->hicredit, ring->locredit); +} + +static int igb_save_txtime_params(struct igb_adapter *adapter, int queue, + bool enable) +{ + struct igb_ring *ring; + + if (queue < 0 || queue > adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + ring->launchtime_enable = enable; + + return 0; +} + +static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + struct igb_ring *ring; + + if (queue < 0 || queue > adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +/** + * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable + * @adapter: pointer to adapter struct + * + * Configure TQAVCTRL register switching the controller's Tx mode + * if FQTSS mode is enabled or disabled. Additionally, will issue + * a call to igb_config_tx_modes() per queue so any previously saved + * Tx parameters are applied. + **/ +static void igb_setup_tx_mode(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 val; + + /* Only i210 controller supports changing the transmission mode. */ + if (hw->mac.type != e1000_i210) + return; + + if (is_fqtss_enabled(adapter)) { + int i, max_queue; + + /* Configure TQAVCTRL register: set transmit mode to 'Qav', + * set data fetch arbitration to 'round robin', set SP_WAIT_SR + * so SP queues wait for SR ones. + */ + val = rd32(E1000_I210_TQAVCTRL); + val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR; + val &= ~E1000_TQAVCTRL_DATAFETCHARB; + wr32(E1000_I210_TQAVCTRL, val); + + /* Configure Tx and Rx packet buffers sizes as described in + * i210 datasheet section 7.2.7.7. 
+ */ + val = rd32(E1000_TXPBS); + val &= ~I210_TXPBSIZE_MASK; + val |= I210_TXPBSIZE_PB0_6KB | I210_TXPBSIZE_PB1_6KB | + I210_TXPBSIZE_PB2_6KB | I210_TXPBSIZE_PB3_6KB; + wr32(E1000_TXPBS, val); + + val = rd32(E1000_RXPBS); + val &= ~I210_RXPBSIZE_MASK; + val |= I210_RXPBSIZE_PB_30KB; + wr32(E1000_RXPBS, val); + + /* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ + * register should not exceed the buffer size programmed in + * TXPBS. The smallest buffer size programmed in TXPBS is 4kB + * so according to the datasheet we should set MAX_TPKT_SIZE to + * 4kB / 64. + * + * However, when we do so, no frame from queue 2 and 3 are + * transmitted. It seems the MAX_TPKT_SIZE should not be great + * or _equal_ to the buffer size programmed in TXPBS. For this + * reason, we set set MAX_ TPKT_SIZE to (4kB - 1) / 64. + */ + val = (4096 - 1) / 64; + wr32(E1000_I210_DTXMXPKTSZ, val); + + /* Since FQTSS mode is enabled, apply any CBS configuration + * previously set. If no previous CBS configuration has been + * done, then the initial configuration is applied, which means + * CBS is disabled. + */ + max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ? + adapter->num_tx_queues : I210_SR_QUEUES_NUM; + + for (i = 0; i < max_queue; i++) { + igb_config_tx_modes(adapter, i); + } + } else { + wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); + wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); + wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT); + + val = rd32(E1000_I210_TQAVCTRL); + /* According to Section 8.12.21, the other flags we've set when + * enabling FQTSS are not relevant when disabling FQTSS so we + * don't set they here. + */ + val &= ~E1000_TQAVCTRL_XMIT_MODE; + wr32(E1000_I210_TQAVCTRL, val); + } + + netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ? 
+ "enabled" : "disabled"); +} + +/** + * igb_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void igb_configure(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + igb_get_hw_control(adapter); + igb_set_rx_mode(netdev); + igb_setup_tx_mode(adapter); + + igb_restore_vlan(adapter); + + igb_setup_tctl(adapter); + igb_setup_mrqc(adapter); + igb_setup_rctl(adapter); + + igb_nfc_filter_restore(adapter); + igb_configure_tx(adapter); + igb_configure_rx(adapter); + + igb_rx_fifo_flush_82575(&adapter->hw); + + /* call igb_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); + } +} + +/** + * igb_power_up_link - Power up the phy/serdes link + * @adapter: address of board private structure + **/ +void igb_power_up_link(struct igb_adapter *adapter) +{ + igb_reset_phy(&adapter->hw); + + if (adapter->hw.phy.media_type == e1000_media_type_copper) + igb_power_up_phy_copper(&adapter->hw); + else + igb_power_up_serdes_link_82575(&adapter->hw); + + igb_setup_link(&adapter->hw); +} + +/** + * igb_power_down_link - Power down the phy/serdes link + * @adapter: address of board private structure + */ +static void igb_power_down_link(struct igb_adapter *adapter) +{ + if (adapter->hw.phy.media_type == e1000_media_type_copper) + igb_power_down_phy_copper_82575(&adapter->hw); + else + igb_shutdown_serdes_link_82575(&adapter->hw); +} + +/** + * igb_check_swap_media - Detect and switch function for Media Auto Sense + * @adapter: address of the board private structure + **/ +static void igb_check_swap_media(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext, connsw; + bool swap_now = false; + + ctrl_ext = rd32(E1000_CTRL_EXT); + connsw = rd32(E1000_CONNSW); + + /* need to live swap if current media is copper and we have fiber/serdes + * to go to. 
+ */ + + if ((hw->phy.media_type == e1000_media_type_copper) && + (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) { + swap_now = true; + } else if ((hw->phy.media_type != e1000_media_type_copper) && + !(connsw & E1000_CONNSW_SERDESD)) { + /* copper signal takes time to appear */ + if (adapter->copper_tries < 4) { + adapter->copper_tries++; + connsw |= E1000_CONNSW_AUTOSENSE_CONF; + wr32(E1000_CONNSW, connsw); + return; + } else { + adapter->copper_tries = 0; + if ((connsw & E1000_CONNSW_PHYSD) && + (!(connsw & E1000_CONNSW_PHY_PDN))) { + swap_now = true; + connsw &= ~E1000_CONNSW_AUTOSENSE_CONF; + wr32(E1000_CONNSW, connsw); + } + } + } + + if (!swap_now) + return; + + switch (hw->phy.media_type) { + case e1000_media_type_copper: + netdev_info(adapter->netdev, + "MAS: changing media to fiber/serdes\n"); + ctrl_ext |= + E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + adapter->copper_tries = 0; + break; + case e1000_media_type_internal_serdes: + case e1000_media_type_fiber: + netdev_info(adapter->netdev, + "MAS: changing media to copper\n"); + ctrl_ext &= + ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + break; + default: + /* shouldn't get here during regular operation */ + netdev_err(adapter->netdev, + "AMS: Invalid media type found, returning\n"); + break; + } + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +/** + * igb_up - Open the interface and prepare it to handle traffic + * @adapter: board private structure + **/ +int igb_up(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + /* hardware has been reset, we need to reload some things */ + igb_configure(adapter); + + clear_bit(__IGB_DOWN, &adapter->state); + + if (!adapter->ecdev) { + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&(adapter->q_vector[i]->napi)); + } + + if (adapter->flags & IGB_FLAG_HAS_MSIX) + igb_configure_msix(adapter); + else + igb_assign_vector(adapter->q_vector[0], 0); + + /* Clear any pending interrupts. */ + rd32(E1000_TSICR); + rd32(E1000_ICR); + igb_irq_enable(adapter); + + /* notify VFs that reset has been completed */ + if (adapter->vfs_allocated_count) { + u32 reg_data = rd32(E1000_CTRL_EXT); + + reg_data |= E1000_CTRL_EXT_PFRSTD; + wr32(E1000_CTRL_EXT, reg_data); + } + + if (!adapter->ecdev) { + netif_tx_start_all_queues(adapter->netdev); + + /* start the watchdog. 
*/ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + } + + if ((adapter->flags & IGB_FLAG_EEE) && + (!hw->dev_spec._82575.eee_disable)) + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; + + return 0; +} + +void igb_down(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tctl, rctl; + int i; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer + */ + set_bit(__IGB_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rctl = rd32(E1000_RCTL); + wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + igb_nfc_filter_exit(adapter); + + if (!adapter->ecdev) { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + + /* disable transmits in the hardware */ + tctl = rd32(E1000_TCTL); + tctl &= ~E1000_TCTL_EN; + wr32(E1000_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 11000); + + igb_irq_disable(adapter); + + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + + for (i = 0; i < adapter->num_q_vectors; i++) { + if (!adapter->ecdev && adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } + } + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + /* record the stats before reset*/ + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + igb_reset(adapter); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; + + igb_clean_all_tx_rings(adapter); + igb_clean_all_rx_rings(adapter); +#ifdef CONFIG_IGB_DCA + + /* since we reset the hardware DCA settings were cleared */ + igb_setup_dca(adapter); +#endif +} + +void igb_reinit_locked(struct igb_adapter *adapter) +{ + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + igb_down(adapter); + igb_up(adapter); + clear_bit(__IGB_RESETTING, &adapter->state); +} + +/** igb_enable_mas - Media Autosense re-enable after swap + * + * @adapter: adapter struct + **/ +static void igb_enable_mas(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 connsw = rd32(E1000_CONNSW); + + /* configure for SerDes media detect */ + if ((hw->phy.media_type == e1000_media_type_copper) && + (!(connsw & E1000_CONNSW_SERDESD))) { + connsw |= E1000_CONNSW_ENRGSRC; + connsw |= E1000_CONNSW_AUTOSENSE_EN; + wr32(E1000_CONNSW, connsw); + wrfl(); + } +} + +void igb_reset(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_fc_info *fc = &hw->fc; + u32 pba, hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. 
+ */ + switch (mac->type) { + case e1000_i350: + case e1000_i354: + case e1000_82580: + pba = rd32(E1000_RXPBS); + pba = igb_rxpbs_adjust_82580(pba); + break; + case e1000_82576: + pba = rd32(E1000_RXPBS); + pba &= E1000_RXPBS_SIZE_MASK_82576; + break; + case e1000_82575: + case e1000_i210: + case e1000_i211: + default: + pba = E1000_PBA_34K; + break; + } + + if (mac->type == e1000_82575) { + u32 min_rx_space, min_tx_space, needed_tx_space; + + /* write Rx PBA so that hardware can report correct Tx PBA */ + wr32(E1000_PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024); + + /* The Tx FIFO also stores 16 bytes of information about the Tx + * but don't include Ethernet FCS because hardware appends it. + * We only need to round down to the nearest 512 byte block + * count since the value we care about is 2 frames, not 1. + */ + min_tx_space = adapter->max_frame_size; + min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN; + min_tx_space = DIV_ROUND_UP(min_tx_space, 512); + + /* upper 16 bits has Tx packet buffer allocation size in KB */ + needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16); + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation. + */ + if (needed_tx_space < pba) { + pba -= needed_tx_space; + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + + /* adjust PBA for jumbo frames */ + wr32(E1000_PBA, pba); + } + + /* flow control settings + * The high water mark must be low enough to fit one full frame + * after transmitting the pause frame. As such we must have enough + * space to allow for us to complete our current transmit and then + * receive the frame that is in progress from the link partner. 
+ * Set it to: + * - the full Rx FIFO size minus one full Tx plus one full Rx frame + */ + hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); + + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + fc->pause_time = 0xFFFF; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + /* disable receive for all VFs and wait one second */ + if (adapter->vfs_allocated_count) { + int i; + + for (i = 0 ; i < adapter->vfs_allocated_count; i++) + adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; + + /* ping all the active vfs to let them know we are going down */ + igb_ping_all_vfs(adapter); + + /* disable transmits and receives */ + wr32(E1000_VFRE, 0); + wr32(E1000_VFTE, 0); + } + + /* Allow time for pending master requests to run */ + hw->mac.ops.reset_hw(hw); + wr32(E1000_WUC, 0); + + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + /* need to resetup here after media swap */ + adapter->ei.get_invariants(hw); + adapter->flags &= ~IGB_FLAG_MEDIA_RESET; + } + if ((mac->type == e1000_82575 || mac->type == e1000_i350) && + (adapter->flags & IGB_FLAG_MAS_ENABLE)) { + igb_enable_mas(adapter); + } + if (hw->mac.ops.init_hw(hw)) + dev_err(&pdev->dev, "Hardware Error\n"); + + /* RAR registers were cleared during init_hw, clear mac table */ + igb_flush_mac_table(adapter); + __dev_uc_unsync(adapter->netdev, NULL); + + /* Recover default RAR entry */ + igb_set_default_mac_filter(adapter); + + /* Flow control settings reset on hardware reset, so guarantee flow + * control is off when forcing speed. + */ + if (!hw->mac.autoneg) + igb_force_mac_fc(hw); + + igb_init_dmac(adapter, pba); +#ifdef CONFIG_IGB_HWMON + /* Re-initialize the thermal sensor on i350 devices. */ + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (mac->type == e1000_i350 && hw->bus.func == 0) { + /* If present, re-initialize the external thermal sensor + * interface. + */ + if (adapter->ets) + mac->ops.init_thermal_sensor_thresh(hw); + } + } +#endif + /* Re-establish EEE setting */ + if (hw->phy.media_type == e1000_media_type_copper) { + switch (mac->type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + igb_set_eee_i350(hw, true, true); + break; + case e1000_i354: + igb_set_eee_i354(hw, true, true); + break; + default: + break; + } + } + if (!adapter->ecdev && !netif_running(adapter->netdev)) + igb_power_down_link(adapter); + + igb_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); + + /* Re-enable PTP, where applicable. */ + if (adapter->ptp_flags & IGB_PTP_ENABLED) + igb_ptp_reset(adapter); + + igb_get_phy_info(hw); +} + +static netdev_features_t igb_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. 
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int igb_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct igb_adapter *adapter = netdev_priv(netdev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + igb_vlan_mode(netdev, features); + + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + if (!(features & NETIF_F_NTUPLE)) { + struct hlist_node *node2; + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + hlist_for_each_entry_safe(rule, node2, + &adapter->nfc_filter_list, nfc_node) { + igb_erase_filter(adapter, rule); + hlist_del(&rule->nfc_node); + kfree(rule); + } + spin_unlock(&adapter->nfc_lock); + adapter->nfc_filter_count = 0; + } + + netdev->features = features; + + if (netif_running(netdev)) + igb_reinit_locked(adapter); + else + igb_reset(adapter); + + return 1; +} + +static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + u16 flags, + struct netlink_ext_ack *extack) +{ + /* guarantee we can provide a unique filter for the unicast address */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + struct igb_adapter *adapter = netdev_priv(dev); + int vfn = adapter->vfs_allocated_count; + + if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn)) + return -ENOMEM; + } + + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +} + +#define IGB_MAX_MAC_HDR_LEN 127 +#define IGB_MAX_NETWORK_HDR_LEN 511 + +static netdev_features_t +igb_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_GSO_UDP_L4 | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_GSO_UDP_L4 | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + +static void igb_offload_apply(struct igb_adapter *adapter, s32 queue) +{ + if (!is_fqtss_enabled(adapter)) { + enable_fqtss(adapter, true); + return; + } + + igb_config_tx_modes(adapter, queue); + + if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter)) + enable_fqtss(adapter, false); +} + +static int igb_offload_cbs(struct igb_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct e1000_hw *hw = &adapter->hw; + int err; + + /* CBS offloading is only supported by i210 controller. */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + /* CBS offloading is only supported by queue 0 and queue 1. 
*/ + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + igb_offload_apply(adapter, qopt->queue); + + return 0; +} + +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +#define VLAN_PRIO_FULL_MASK (0x07) + +static int igb_parse_cls_flower(struct igb_adapter *adapter, + struct flow_cls_offload *f, + int traffic_class, + struct igb_nfc_filter *input) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; + struct netlink_ext_ack *extack = f->common.extack; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN))) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported"); + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + if (!is_zero_ether_addr(match.mask->dst)) { + if (!is_broadcast_ether_addr(match.mask->dst)) { + NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address"); + return -EINVAL; + } + + input->filter.match_flags |= + IGB_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(input->filter.dst_addr, match.key->dst); + } + + if (!is_zero_ether_addr(match.mask->src)) { + if (!is_broadcast_ether_addr(match.mask->src)) { + NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address"); + return -EINVAL; + } + + input->filter.match_flags |= + IGB_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(input->filter.src_addr, match.key->src); + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + if (match.mask->n_proto) { + if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) { + NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter"); + return -EINVAL; + } + + input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE; + input->filter.etype = match.key->n_proto; + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_priority) { + if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) { + NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); + return -EINVAL; + } + + input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; + input->filter.vlan_tci = + (__force __be16)match.key->vlan_priority; + } + } + + input->action = traffic_class; + input->cookie = f->cookie; + + return 0; +} + +static int igb_configure_clsflower(struct igb_adapter *adapter, + struct flow_cls_offload *cls_flower) +{ + struct netlink_ext_ack *extack = cls_flower->common.extack; + struct igb_nfc_filter *filter, *f; + int err, tc; + + tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); + if (tc < 0) { + NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class"); + return -EINVAL; + } + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + err = igb_parse_cls_flower(adapter, cls_flower, tc, filter); + if (err < 0) + goto err_parse; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) { + if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { + 
err = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, + "This filter is already set in ethtool"); + goto err_locked; + } + } + + hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) { + if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { + err = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, + "This filter is already set in cls_flower"); + goto err_locked; + } + } + + err = igb_add_filter(adapter, filter); + if (err < 0) { + NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter"); + goto err_locked; + } + + hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list); + + spin_unlock(&adapter->nfc_lock); + + return 0; + +err_locked: + spin_unlock(&adapter->nfc_lock); + +err_parse: + kfree(filter); + + return err; +} + +static int igb_delete_clsflower(struct igb_adapter *adapter, + struct flow_cls_offload *cls_flower) +{ + struct igb_nfc_filter *filter; + int err; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node) + if (filter->cookie == cls_flower->cookie) + break; + + if (!filter) { + err = -ENOENT; + goto out; + } + + err = igb_erase_filter(adapter, filter); + if (err < 0) + goto out; + + hlist_del(&filter->nfc_node); + kfree(filter); + +out: + spin_unlock(&adapter->nfc_lock); + + return err; +} + +static int igb_setup_tc_cls_flower(struct igb_adapter *adapter, + struct flow_cls_offload *cls_flower) +{ + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return igb_configure_clsflower(adapter, cls_flower); + case FLOW_CLS_DESTROY: + return igb_delete_clsflower(adapter, cls_flower); + case FLOW_CLS_STATS: + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } +} + +static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct igb_adapter *adapter = cb_priv; + + if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return igb_setup_tc_cls_flower(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igb_offload_txtime(struct igb_adapter *adapter, + struct tc_etf_qopt_offload *qopt) +{ + struct e1000_hw *hw = &adapter->hw; + int err; + + /* Launchtime offloading is only supported by i210 controller. */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + /* Launchtime offloading is only supported by queues 0 and 1. 
*/ + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable); + if (err) + return err; + + igb_offload_apply(adapter, qopt->queue); + + return 0; +} + +static LIST_HEAD(igb_block_cb_list); + +static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct igb_adapter *adapter = netdev_priv(dev); + + switch (type) { + case TC_SETUP_QDISC_CBS: + return igb_offload_cbs(adapter, type_data); + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple(type_data, + &igb_block_cb_list, + igb_setup_tc_block_cb, + adapter, adapter, true); + + case TC_SETUP_QDISC_ETF: + return igb_offload_txtime(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf) +{ + int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD; + struct igb_adapter *adapter = netdev_priv(dev); + struct bpf_prog *prog = bpf->prog, *old_prog; + bool running = netif_running(dev); + bool need_reset; + + /* verify igb ring attributes are sufficient for XDP */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + + if (frame_size > igb_rx_bufsz(ring)) { + NL_SET_ERR_MSG_MOD(bpf->extack, + "The RX buffer size is too small for the frame size"); + netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n", + igb_rx_bufsz(ring), frame_size); + return -EINVAL; + } + } + + old_prog = xchg(&adapter->xdp_prog, prog); + need_reset = (!!prog != !!old_prog); + + /* device is up and bpf is added/removed, must setup the RX queues */ + if (need_reset && running) { + igb_close(dev); + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + (void)xchg(&adapter->rx_ring[i]->xdp_prog, + adapter->xdp_prog); + } + + if (old_prog) + bpf_prog_put(old_prog); + + /* bpf is just replaced, RXQ and MTU are already setup */ + if (!need_reset) + return 0; + + if (running) + igb_open(dev); + + return 0; +} + +static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct igb_adapter *adapter = netdev_priv(dev); + if (adapter->ecdev) + return -EBUSY; + + + switch (xdp->command) { + case XDP_SETUP_PROG: + return igb_xdp_setup(dev, xdp); + default: + return -EINVAL; + } +} + +static void igb_xdp_ring_update_tail(struct igb_ring *ring) +{ + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter) +{ + unsigned int r_idx = smp_processor_id(); + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct igb_ring *tx_ring; + struct netdev_queue *nq; + u32 ret; + + if (unlikely(!xdpf)) + return IGB_XDP_CONSUMED; + + /* During program transitions its possible adapter->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + tx_ring = adapter->xdp_prog ? 
igb_xdp_tx_queue_mapping(adapter) : NULL; + if (unlikely(!tx_ring)) + return IGB_XDP_CONSUMED; + + nq = txring_txq(tx_ring); + __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + nq->trans_start = jiffies; + ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); + __netif_tx_unlock(nq); + + return ret; +} + +static int igb_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct igb_adapter *adapter = netdev_priv(dev); + int cpu = smp_processor_id(); + struct igb_ring *tx_ring; + struct netdev_queue *nq; + int nxmit = 0; + int i; + + if (adapter->ecdev) + return -EBUSY; + + if (unlikely(test_bit(__IGB_DOWN, &adapter->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + /* During program transitions its possible adapter->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; + if (unlikely(!tx_ring)) + return -ENXIO; + + nq = txring_txq(tx_ring); + __netif_tx_lock(nq, cpu); + + /* Avoid transmit queue timeout since we share it with the slow path */ + nq->trans_start = jiffies; + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; + + err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); + if (err != IGB_XDP_TX) + break; + nxmit++; + } + + __netif_tx_unlock(nq); + + if (unlikely(flags & XDP_XMIT_FLUSH)) + igb_xdp_ring_update_tail(tx_ring); + + return nxmit; +} + +static const struct net_device_ops igb_netdev_ops = { + .ndo_open = igb_open, + .ndo_stop = igb_close, + .ndo_start_xmit = igb_xmit_frame, + .ndo_get_stats64 = igb_get_stats64, + .ndo_set_rx_mode = igb_set_rx_mode, + .ndo_set_mac_address = igb_set_mac, + .ndo_change_mtu = igb_change_mtu, + .ndo_do_ioctl = igb_ioctl, + .ndo_tx_timeout = igb_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, + .ndo_set_vf_mac = igb_ndo_set_vf_mac, + .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, + .ndo_set_vf_rate = igb_ndo_set_vf_bw, + .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, + .ndo_set_vf_trust = igb_ndo_set_vf_trust, + .ndo_get_vf_config = igb_ndo_get_vf_config, + .ndo_fix_features = igb_fix_features, + .ndo_set_features = igb_set_features, + .ndo_fdb_add = igb_ndo_fdb_add, + .ndo_features_check = igb_features_check, + .ndo_setup_tc = igb_setup_tc, + .ndo_bpf = igb_xdp, + .ndo_xdp_xmit = igb_xdp_xmit, +}; + +/** +* ec_poll - EtherCAT poll routine +* @netdev: net device structure +* +* This function can never fail. 
+* +**/ +void ec_poll(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + int budget = 64; + + if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) { + struct e1000_hw *hw = &adapter->hw; + bool link; + hw->mac.get_link_status = true; + link = igb_has_link(adapter); + ecdev_set_link(adapter->ecdev, link); + adapter->ec_watchdog_jiffies = jiffies; + } + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + if (q_vector->tx.ring) { + igb_clean_tx_irq(q_vector, budget); + } + + if (q_vector->rx.ring) { + igb_clean_rx_irq(q_vector, budget); + } + } +} + +/** + * igb_set_fw_version - Configure version string for ethtool + * @adapter: adapter struct + **/ +void igb_set_fw_version(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_fw_version fw; + + igb_get_fw_version(hw, &fw); + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (!(igb_get_flash_presence_i210(hw))) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, + fw.invm_img_type); + break; + } + fallthrough; + default: + /* if option is rom valid, display its version too */ + if (fw.or_valid) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x, %d.%d.%d", + fw.eep_major, fw.eep_minor, fw.etrack_id, + fw.or_major, fw.or_build, fw.or_patch); + /* no option rom */ + } else if (fw.etrack_id != 0X0000) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, fw.etrack_id); + } else { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d.%d", + fw.eep_major, fw.eep_minor, fw.eep_build); + } + break; + } +} + +/** + * igb_init_mas - init Media Autosense feature if enabled in the NVM + * + * @adapter: adapter struct + **/ +static void igb_init_mas(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 eeprom_data; + + hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data); + switch (hw->bus.func) { + case E1000_FUNC_0: + if (eeprom_data & IGB_MAS_ENABLE_0) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_1: + if (eeprom_data & IGB_MAS_ENABLE_1) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_2: + if (eeprom_data & IGB_MAS_ENABLE_2) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_3: + if (eeprom_data & IGB_MAS_ENABLE_3) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + default: + /* Shouldn't get here */ + netdev_err(adapter->netdev, + "MAS: Invalid port configuration, returning\n"); + break; + } +} + +/** + * igb_init_i2c - Init I2C interface + * @adapter: pointer to adapter structure + **/ +static s32 igb_init_i2c(struct igb_adapter *adapter) +{ + s32 status = 0; + + /* I2C interface supported on i350 devices */ + if (adapter->hw.mac.type != e1000_i350) + return 0; + + /* Initialize the i2c bus which is controlled by the registers. + * This bus will use the i2c_algo_bit structure that implements + * the protocol through toggling of the 4 bits in the register. 
+ */ + adapter->i2c_adap.owner = THIS_MODULE; + adapter->i2c_algo = igb_i2c_algo; + adapter->i2c_algo.data = adapter; + adapter->i2c_adap.algo_data = &adapter->i2c_algo; + adapter->i2c_adap.dev.parent = &adapter->pdev->dev; + strlcpy(adapter->i2c_adap.name, "igb BB", + sizeof(adapter->i2c_adap.name)); + status = i2c_bit_add_bus(&adapter->i2c_adap); + return status; +} + +/** + * igb_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igb_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igb_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct igb_adapter *adapter; + struct e1000_hw *hw; + u16 eeprom_data = 0; + s32 ret_val; + static int global_quad_port_a; /* global quad port a indication */ + const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; + int err, pci_using_dac; + u8 part_str[E1000_PBANUM_LENGTH]; + + /* Catch broken hardware that put the wrong VF device ID in + * the PCIe SR-IOV capability. + */ + if (pdev->is_virtfn) { + WARN(1, KERN_ERR "%s (%x:%x) should not be a VF!\n", + pci_name(pdev), pdev->vendor, pdev->device); + return -EINVAL; + } + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + + err = pci_request_mem_regions(pdev, igb_driver_name); + if (err) + goto err_pci_reg; + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + pci_save_state(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), + IGB_MAX_TX_QUEUES); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + err = -EIO; + adapter->io_addr = pci_iomap(pdev, 0, 0); + if (!adapter->io_addr) + goto err_ioremap; + /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; + + netdev->netdev_ops = &igb_netdev_ops; + igb_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Copy the default MAC, PHY and NVM function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); + /* Initialize skew-specific constants */ + err = ei->get_invariants(hw); + if (err) + goto err_sw_init; + + /* setup the private structure */ + err = igb_sw_init(adapter); + if (err) + goto err_sw_init; + + igb_get_bus_info_pcie(hw); + + 
hw->phy.autoneg_wait_to_complete = false; + + /* Copper options */ + if (hw->phy.media_type == e1000_media_type_copper) { + hw->phy.mdix = AUTO_ALL_MODES; + hw->phy.disable_polarity_correction = false; + hw->phy.ms_type = e1000_ms_hw_default; + } + + if (igb_check_reset_block(hw)) + dev_info(&pdev->dev, + "PHY reset is blocked due to SOL/IDER session.\n"); + + /* features is initialized to 0 in allocation, it might have bits + * set by igb_sw_init so we should use an or instead of an + * assignment. + */ + netdev->features |= NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM; + + if (hw->mac.type >= e1000_82576) + netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4; + + if (hw->mac.type >= e1000_i350) + netdev->features |= NETIF_F_HW_TC; + +#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_RXALL; + + if (hw->mac.type >= e1000_i350) + netdev->hw_features |= NETIF_F_NTUPLE; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; + + /* set this bit last since it cannot be part of vlan_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* MTU range: 68 - 9216 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; + + adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ + hw->mac.ops.reset_hw(hw); + + /* make sure the NVM is good , i211/i210 parts can have special NVM + * that doesn't contain a checksum + */ + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (igb_get_flash_presence_i210(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, + "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + break; + default: + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + break; + } + + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + } + + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + igb_set_default_mac_filter(adapter); + + /* get firmware version for ethtool -i */ + igb_set_fw_version(adapter); + + /* configure RXPBSIZE and TXPBSIZE */ + if (hw->mac.type == e1000_i210) { + wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); + wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); + } + + timer_setup(&adapter->watchdog_timer, igb_watchdog, 0); + timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0); + + INIT_WORK(&adapter->reset_task, 
igb_reset_task); + INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = 0x2f; + + hw->fc.requested_mode = e1000_fc_default; + hw->fc.current_mode = e1000_fc_default; + + igb_validate_mdi_setting(hw); + + /* By default, support wake on port A */ + if (hw->bus.func == 0) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + + /* Check the NVM for wake support on non-port A ports */ + if (hw->mac.type >= e1000_82580) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &eeprom_data); + else if (hw->bus.func == 1) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + + if (eeprom_data & IGB_EEPROM_APME) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + + /* now that we have the eeprom settings, apply the special cases where + * the eeprom may be wrong or the board simply won't support wake on + * lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82575GB_QUAD_COPPER: + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + break; + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + break; + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + else + adapter->flags |= IGB_FLAG_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + default: + /* If the device can't wake, don't set software support */ + if (!device_can_wakeup(&adapter->pdev->dev)) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + } + + /* initialize the wol settings based on the eeprom settings */ + if (adapter->flags & IGB_FLAG_WOL_SUPPORTED) + adapter->wol |= E1000_WUFC_MAG; + + /* Some vendors want WoL disabled by default, but still supported */ + if ((hw->mac.type == e1000_i350) && + (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + + /* Some vendors want the ability to Use the EEPROM setting as + * enable/disable only, and not for capability + */ + if (((hw->mac.type == e1000_i350) || + (hw->mac.type == e1000_i354)) && + (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + if (hw->mac.type == e1000_i350) { + if (((pdev->subsystem_device == 0x5001) || + (pdev->subsystem_device == 0x5002)) && + (hw->bus.func == 0)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + if (pdev->subsystem_device == 0x1F52) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + } + + device_set_wakeup_enable(&adapter->pdev->dev, + adapter->flags & IGB_FLAG_WOL_SUPPORTED); + + /* reset the hardware with the new settings */ + igb_reset(adapter); + + /* Init the I2C interface */ + err = igb_init_i2c(adapter); + if (err) { + dev_err(&pdev->dev, "failed to init i2c interface\n"); + goto err_eeprom; + } + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igb_get_hw_control(adapter); + + + adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); + if (adapter->ecdev) { + err = ecdev_open(adapter->ecdev); + if (err) { + ecdev_withdraw(adapter->ecdev); + goto err_register; + } + adapter->ec_watchdog_jiffies = jiffies; + } else { + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + } + +#ifdef CONFIG_IGB_DCA + if (dca_add_requester(&pdev->dev) == 0) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; + dev_info(&pdev->dev, "DCA enabled\n"); + igb_setup_dca(adapter); + } + +#endif +#ifdef CONFIG_IGB_HWMON + /* Initialize the thermal sensor on i350 devices. */ + if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { + u16 ets_word; + + /* Read the NVM to determine if this i350 device supports an + * external thermal sensor. + */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); + if (ets_word != 0x0000 && ets_word != 0xFFFF) + adapter->ets = true; + else + adapter->ets = false; + if (igb_sysfs_init(adapter)) + dev_err(&pdev->dev, + "failed to allocate sysfs resources\n"); + } else { + adapter->ets = false; + } +#endif + /* Check if Media Autosense is enabled */ + adapter->ei = *ei; + if (hw->dev_spec._82575.mas_capable) + igb_init_mas(adapter); + + /* do hw tstamp init after resetting */ + igb_ptp_init(adapter); + + dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); + /* print bus type/speed/width info, not applicable to i354 */ + if (hw->mac.type != e1000_i354) { + dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", + netdev->name, + ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : + (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : + "unknown"), + ((hw->bus.width == e1000_bus_width_pcie_x4) ? + "Width x4" : + (hw->bus.width == e1000_bus_width_pcie_x2) ? + "Width x2" : + (hw->bus.width == e1000_bus_width_pcie_x1) ? + "Width x1" : "unknown"), netdev->dev_addr); + } + + if ((hw->mac.type == e1000_82576 && + rd32(E1000_EECD) & E1000_EECD_PRES) || + (hw->mac.type >= e1000_i210 || + igb_get_flash_presence_i210(hw))) { + ret_val = igb_read_part_string(hw, part_str, + E1000_PBANUM_LENGTH); + } else { + ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND; + } + + if (ret_val) + strcpy(part_str, "Unknown"); + dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); + dev_info(&pdev->dev, + "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", + (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : + (adapter->flags & IGB_FLAG_HAS_MSI) ? 
"MSI" : "legacy", + adapter->num_rx_queues, adapter->num_tx_queues); + if (hw->phy.media_type == e1000_media_type_copper) { + switch (hw->mac.type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + /* Enable EEE for internal copper PHY devices */ + err = igb_set_eee_i350(hw, true, true); + if ((!err) && + (!hw->dev_spec._82575.eee_disable)) { + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + adapter->flags |= IGB_FLAG_EEE; + } + break; + case e1000_i354: + if ((rd32(E1000_CTRL_EXT) & + E1000_CTRL_EXT_LINK_MODE_SGMII)) { + err = igb_set_eee_i354(hw, true, true); + if ((!err) && + (!hw->dev_spec._82575.eee_disable)) { + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + adapter->flags |= IGB_FLAG_EEE; + } + } + break; + default: + break; + } + } + + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + + pm_runtime_put_noidle(&pdev->dev); + return 0; + +err_register: + igb_release_hw_control(adapter); + memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); +err_eeprom: + if (!igb_check_reset_block(hw)) + igb_reset_phy(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); +err_sw_init: + kfree(adapter->mac_table); + kfree(adapter->shadow_vfta); + igb_clear_interrupt_scheme(adapter); +#ifdef CONFIG_PCI_IOV + igb_disable_sriov(pdev); +#endif + pci_iounmap(pdev, adapter->io_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +#ifdef CONFIG_PCI_IOV +static int igb_disable_sriov(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* reclaim resources allocated to VFs */ + if (adapter->vf_data) { + /* disable iov and allow time for transactions to clear */ + if (pci_vfs_assigned(pdev)) { + dev_warn(&pdev->dev, + "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n"); + return -EPERM; + } else { + pci_disable_sriov(pdev); + msleep(500); + } + + kfree(adapter->vf_mac_list); + adapter->vf_mac_list = NULL; + kfree(adapter->vf_data); + adapter->vf_data = NULL; + adapter->vfs_allocated_count = 0; + wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + wrfl(); + msleep(100); + dev_info(&pdev->dev, "IOV Disabled\n"); + + /* Re-enable DMA Coalescing flag since IOV is turned off */ + adapter->flags |= IGB_FLAG_DMAC; + } + + return 0; +} + +static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + int old_vfs = pci_num_vf(pdev); + struct vf_mac_filter *mac_list; + int err = 0; + int num_vf_mac_filters, i; + + if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) { + err = -EPERM; + goto out; + } + if (!num_vfs) + goto out; + + if (old_vfs) { + dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n", + old_vfs, max_vfs); + adapter->vfs_allocated_count = old_vfs; + } else + adapter->vfs_allocated_count = num_vfs; + + adapter->vf_data = kcalloc(adapter->vfs_allocated_count, + sizeof(struct vf_data_storage), GFP_KERNEL); + + /* if allocation failed then we do not support SR-IOV */ + if (!adapter->vf_data) { + adapter->vfs_allocated_count = 0; + err = -ENOMEM; + goto out; + } + + /* Due to the limited number of RAR entries calculate potential + * number of MAC filters available for the VFs. 
Reserve entries + * for PF default MAC, PF MAC filters and at least one RAR entry + * for each VF for VF MAC. + */ + num_vf_mac_filters = adapter->hw.mac.rar_entry_count - + (1 + IGB_PF_MAC_FILTERS_RESERVED + + adapter->vfs_allocated_count); + + adapter->vf_mac_list = kcalloc(num_vf_mac_filters, + sizeof(struct vf_mac_filter), + GFP_KERNEL); + + mac_list = adapter->vf_mac_list; + INIT_LIST_HEAD(&adapter->vf_macs.l); + + if (adapter->vf_mac_list) { + /* Initialize list of VF MAC filters */ + for (i = 0; i < num_vf_mac_filters; i++) { + mac_list->vf = -1; + mac_list->free = true; + list_add(&mac_list->l, &adapter->vf_macs.l); + mac_list++; + } + } else { + /* If we could not allocate memory for the VF MAC filters + * we can continue without this feature but warn the user. + */ + dev_err(&pdev->dev, + "Unable to allocate memory for VF MAC filter list\n"); + } + + /* only call pci_enable_sriov() if no VFs are allocated already */ + if (!old_vfs) { + err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); + if (err) + goto err_out; + } + dev_info(&pdev->dev, "%d VFs allocated\n", + adapter->vfs_allocated_count); + for (i = 0; i < adapter->vfs_allocated_count; i++) + igb_vf_configure(adapter, i); + + /* DMA Coalescing is not supported in IOV mode. */ + adapter->flags &= ~IGB_FLAG_DMAC; + goto out; + +err_out: + kfree(adapter->vf_mac_list); + adapter->vf_mac_list = NULL; + kfree(adapter->vf_data); + adapter->vf_data = NULL; + adapter->vfs_allocated_count = 0; +out: + return err; +} + +#endif +/** + * igb_remove_i2c - Cleanup I2C interface + * @adapter: pointer to adapter structure + **/ +static void igb_remove_i2c(struct igb_adapter *adapter) +{ + /* free the adapter bus structure */ + i2c_del_adapter(&adapter->i2c_adap); +} + +/** + * igb_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igb_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void igb_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (adapter->ecdev) { + ecdev_close(adapter->ecdev); + ecdev_withdraw(adapter->ecdev); + } + + pm_runtime_get_noresume(&pdev->dev); +#ifdef CONFIG_IGB_HWMON + igb_sysfs_exit(adapter); +#endif + igb_remove_i2c(adapter); + igb_ptp_stop(adapter); + /* The watchdog timer may be rescheduled, so explicitly + * disable watchdog from being rescheduled. + */ + set_bit(__IGB_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + +#ifdef CONFIG_IGB_DCA + if (adapter->flags & IGB_FLAG_DCA_ENABLED) { + dev_info(&pdev->dev, "DCA disabled\n"); + dca_remove_requester(&pdev->dev); + adapter->flags &= ~IGB_FLAG_DCA_ENABLED; + wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); + } +#endif + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant.
+ */ + igb_release_hw_control(adapter); + +#ifdef CONFIG_PCI_IOV + igb_disable_sriov(pdev); +#endif + + if (!adapter->ecdev) { + unregister_netdev(netdev); + } + + igb_clear_interrupt_scheme(adapter); + + pci_iounmap(pdev, adapter->io_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_mem_regions(pdev); + + kfree(adapter->mac_table); + kfree(adapter->shadow_vfta); + free_netdev(netdev); + + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +/** + * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space + * @adapter: board private structure to initialize + * + * This function initializes the vf specific data storage and then attempts to + * allocate the VFs. The reason for ordering it this way is because it is much + * more expensive time wise to disable SR-IOV than it is to allocate and free + * the memory for the VFs. + **/ +static void igb_probe_vfs(struct igb_adapter *adapter) +{ +#ifdef CONFIG_PCI_IOV + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + + /* Virtualization features not supported on i210 family. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) + return; + + /* Of the below we really only want the effect of getting + * IGB_FLAG_HAS_MSIX set (if available), without which + * igb_enable_sriov() has no effect. + */ + igb_set_interrupt_capability(adapter, true); + igb_reset_interrupt_capability(adapter); + + pci_sriov_set_totalvfs(pdev, 7); + igb_enable_sriov(pdev, max_vfs); + +#endif /* CONFIG_PCI_IOV */ +} + +unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned int max_rss_queues; + + /* Determine the maximum number of RSS queues supported. */ + switch (hw->mac.type) { + case e1000_i211: + max_rss_queues = IGB_MAX_RX_QUEUES_I211; + break; + case e1000_82575: + case e1000_i210: + max_rss_queues = IGB_MAX_RX_QUEUES_82575; + break; + case e1000_i350: + /* I350 cannot do RSS and SR-IOV at the same time */ + if (!!adapter->vfs_allocated_count) { + max_rss_queues = 1; + break; + } + fallthrough; + case e1000_82576: + if (!!adapter->vfs_allocated_count) { + max_rss_queues = 2; + break; + } + fallthrough; + case e1000_82580: + case e1000_i354: + default: + max_rss_queues = IGB_MAX_RX_QUEUES; + break; + } + + return max_rss_queues; +} + +static void igb_init_queue_configuration(struct igb_adapter *adapter) +{ + u32 max_rss_queues; + + max_rss_queues = igb_get_max_rss_queues(adapter); + adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + + igb_set_flag_queue_pairs(adapter, max_rss_queues); +} + +void igb_set_flag_queue_pairs(struct igb_adapter *adapter, + const u32 max_rss_queues) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Determine if we need to pair queues. */ + switch (hw->mac.type) { + case e1000_82575: + case e1000_i211: + /* Device supports enough interrupts without queue pairing. */ + break; + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + default: + /* If rss_queues > half of max_rss_queues, pair the queues in + * order to conserve interrupts due to limited supply.
+ */ + if (adapter->rss_queues > (max_rss_queues / 2)) + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + else + adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS; + break; + } +} + +/** + * igb_sw_init - Initialize general software structures (struct igb_adapter) + * @adapter: board private structure to initialize + * + * igb_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int igb_sw_init(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGB_DEFAULT_TXD; + adapter->rx_ring_count = IGB_DEFAULT_RXD; + + /* set default ITR values */ + adapter->rx_itr_setting = IGB_DEFAULT_ITR; + adapter->tx_itr_setting = IGB_DEFAULT_ITR; + + /* set default work limits */ + adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; + + adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + spin_lock_init(&adapter->nfc_lock); + spin_lock_init(&adapter->stats64_lock); +#ifdef CONFIG_PCI_IOV + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + if (max_vfs > 7) { + dev_warn(&pdev->dev, + "Maximum of 7 VFs per PF, using max\n"); + max_vfs = adapter->vfs_allocated_count = 7; + } else + adapter->vfs_allocated_count = max_vfs; + if (adapter->vfs_allocated_count) + dev_warn(&pdev->dev, + "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n"); + break; + default: + break; + } +#endif /* CONFIG_PCI_IOV */ + + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ + adapter->flags |= IGB_FLAG_HAS_MSIX; + + adapter->mac_table = kcalloc(hw->mac.rar_entry_count, + sizeof(struct igb_mac_addr), + GFP_KERNEL); + if (!adapter->mac_table) + return -ENOMEM; + + igb_probe_vfs(adapter); + + igb_init_queue_configuration(adapter); + + /* Setup and initialize a copy of the hw vlan table array */ + adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), + GFP_KERNEL); + if (!adapter->shadow_vfta) + return -ENOMEM; + + /* This call may decrease the number of queues */ + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igb_irq_disable(adapter); + + if (hw->mac.type >= e1000_i350) + adapter->flags &= ~IGB_FLAG_DMAC; + + set_bit(__IGB_DOWN, &adapter->state); + return 0; +} + +/** + * __igb_open - Called when a network interface is made active + * @netdev: network interface device structure + * @resuming: indicates whether we are in a resume call + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
+ **/ +static int __igb_open(struct net_device *netdev, bool resuming) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + int i; + + /* disallow open during test */ + if (test_bit(__IGB_TESTING, &adapter->state)) { + WARN_ON(resuming); + return -EBUSY; + } + + if (!resuming) + pm_runtime_get_sync(&pdev->dev); + + if (adapter->ecdev) { + ecdev_set_link(adapter->ecdev, 0); + } else { + netif_carrier_off(netdev); + } + + /* allocate transmit descriptors */ + err = igb_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igb_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + igb_power_up_link(adapter); + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + igb_configure(adapter); + + err = igb_request_irq(adapter); + if (err) + goto err_req_irq; + + if (!adapter->ecdev) { + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(adapter->netdev, + adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(adapter->netdev, + adapter->num_rx_queues); + if (err) + goto err_set_queues; + } + + /* From here on the code is the same as igb_up() */ + clear_bit(__IGB_DOWN, &adapter->state); + + if (!adapter->ecdev) { + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&(adapter->q_vector[i]->napi)); + } + + /* Clear any pending interrupts. */ + rd32(E1000_TSICR); + rd32(E1000_ICR); + + igb_irq_enable(adapter); + + /* notify VFs that reset has been completed */ + if (adapter->vfs_allocated_count) { + u32 reg_data = rd32(E1000_CTRL_EXT); + + reg_data |= E1000_CTRL_EXT_PFRSTD; + wr32(E1000_CTRL_EXT, reg_data); + } + + if (!adapter->ecdev) { + netif_tx_start_all_queues(netdev); + } + + if (!resuming) + pm_runtime_put(&pdev->dev); + + if (!adapter->ecdev) { + /* start the watchdog. */ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + } + + return 0; + +err_set_queues: + igb_free_irq(adapter); +err_req_irq: + igb_release_hw_control(adapter); + igb_power_down_link(adapter); + igb_free_all_rx_resources(adapter); +err_setup_rx: + igb_free_all_tx_resources(adapter); +err_setup_tx: + igb_reset(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); + + return err; +} + +int igb_open(struct net_device *netdev) +{ + return __igb_open(netdev, false); +} + +/** + * __igb_close - Disables a network interface + * @netdev: network interface device structure + * @suspending: indicates we are in a suspend call + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ **/ +static int __igb_close(struct net_device *netdev, bool suspending) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); + + if (!suspending) + pm_runtime_get_sync(&pdev->dev); + + igb_down(adapter); + igb_free_irq(adapter); + + igb_free_all_tx_resources(adapter); + igb_free_all_rx_resources(adapter); + + if (!suspending) + pm_runtime_put_sync(&pdev->dev); + return 0; +} + +int igb_close(struct net_device *netdev) +{ + if (netif_device_present(netdev) || netdev->dismantle) + return __igb_close(netdev, false); + return 0; +} + +/** + * igb_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int igb_setup_tx_resources(struct igb_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int size; + + size = sizeof(struct igb_tx_buffer) * tx_ring->count; + + tx_ring->tx_buffer_info = vmalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igb_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int igb_setup_all_tx_resources(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igb_setup_tx_resources(adapter->tx_ring[i]); + if (err) { + dev_err(&pdev->dev, + "Allocation for Tx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_tx_resources(adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * igb_setup_tctl - configure the transmit control registers + * @adapter: Board private structure + **/ +void igb_setup_tctl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tctl; + + /* disable queue 0 which is enabled by default on 82575 and 82576 */ + wr32(E1000_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ + tctl = rd32(E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + igb_config_collision_dist(hw); + + /* Enable transmits */ + tctl |= E1000_TCTL_EN; + + wr32(E1000_TCTL, tctl); +} + +/** + * igb_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. 
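+ *
+ * This programs the descriptor base and length registers, resets the head
+ * and tail pointers, sets the prefetch, host and write-back thresholds in
+ * TXDCTL and finally re-enables the queue.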
+ **/ +void igb_configure_tx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + u32 txdctl = 0; + u64 tdba = ring->dma; + int reg_idx = ring->reg_idx; + + wr32(E1000_TDLEN(reg_idx), + ring->count * sizeof(union e1000_adv_tx_desc)); + wr32(E1000_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(E1000_TDBAH(reg_idx), tdba >> 32); + + ring->tail = adapter->io_addr + E1000_TDT(reg_idx); + wr32(E1000_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGB_TX_PTHRESH; + txdctl |= IGB_TX_HTHRESH << 8; + txdctl |= IGB_TX_WTHRESH << 16; + + /* reinitialize tx_buffer_info */ + memset(ring->tx_buffer_info, 0, + sizeof(struct igb_tx_buffer) * ring->count); + + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + wr32(E1000_TXDCTL(reg_idx), txdctl); +} + +/** + * igb_configure_tx - Configure transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void igb_configure_tx(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + /* disable the queues */ + for (i = 0; i < adapter->num_tx_queues; i++) + wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0); + + wrfl(); + usleep_range(10000, 20000); + + for (i = 0; i < adapter->num_tx_queues; i++) + igb_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +/** + * igb_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: Rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int igb_setup_rx_resources(struct igb_ring *rx_ring) +{ + struct igb_adapter *adapter = netdev_priv(rx_ring->netdev); + struct device *dev = rx_ring->dev; + int size; + + size = sizeof(struct igb_rx_buffer) * rx_ring->count; + + rx_ring->rx_buffer_info = vmalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + if (!adapter->ecdev) + rx_ring->xdp_prog = adapter->xdp_prog; + + /* XDP RX-queue info */ + if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, + rx_ring->queue_index, 0) < 0) + goto err; + + return 0; + +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igb_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int igb_setup_all_rx_resources(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igb_setup_rx_resources(adapter->rx_ring[i]); + if (err) { + dev_err(&pdev->dev, + "Allocation for Rx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_rx_resources(adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * igb_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + **/ +static void igb_setup_mrqc(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 mrqc, rxcsum; + u32 j, num_rx_queues; + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, 
sizeof(rss_key)); + for (j = 0; j < 10; j++) + wr32(E1000_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + switch (hw->mac.type) { + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + num_rx_queues = 2; + break; + default: + break; + } + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGB_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGB_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igb_write_rss_indir_tbl(adapter); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(E1000_RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + if (adapter->hw.mac.type >= e1000_82576) + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= E1000_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(E1000_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + + /* If VMDq is enabled then we set the appropriate mode for that, else + * we default to RSS so that an RSS hash is calculated per packet even + * if we are only using one queue + */ + if (adapter->vfs_allocated_count) { + if (hw->mac.type > e1000_82575) { + /* Set the default pool for the PF's first queue */ + u32 vtctl = rd32(E1000_VT_CTL); + + vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | + E1000_VT_CTL_DISABLE_DEF_POOL); + vtctl |= adapter->vfs_allocated_count << + E1000_VT_CTL_DEFAULT_POOL_SHIFT; + wr32(E1000_VT_CTL, vtctl); + } + if (adapter->rss_queues > 1) + mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ; + else + mrqc |= E1000_MRQC_ENABLE_VMDQ; + } else { + mrqc |= E1000_MRQC_ENABLE_RSS_MQ; + } + igb_vmm_control(adapter); + + wr32(E1000_MRQC, mrqc); +} + +/** + * igb_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +void igb_setup_rctl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = rd32(E1000_RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* enable stripping of CRC. It's unlikely this will break BMC + * redirection as it did with e1000. Newer features require + * that the HW strips the CRC. + */ + rctl |= E1000_RCTL_SECRC; + + /* disable store bad packets and clear size bits. */ + rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); + + /* enable LPE to allow for reception of jumbo frames */ + rctl |= E1000_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ + wr32(E1000_RXDCTL(0), 0); + + /* Attention!!! For SR-IOV PF driver operations you must enable + * queue drop for all VF and PF queues to prevent head of line blocking + * if an un-trusted VF does not provide descriptors to hardware. 
+ */ + if (adapter->vfs_allocated_count) { + /* set all queue drop enable bits */ + wr32(E1000_QDE, ALL_QUEUES); + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + wr32(E1000_RCTL, rctl); +} + +static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, + int vfn) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + + if (size > MAX_JUMBO_FRAME_SIZE) + size = MAX_JUMBO_FRAME_SIZE; + + vmolr = rd32(E1000_VMOLR(vfn)); + vmolr &= ~E1000_VMOLR_RLPML_MASK; + vmolr |= size | E1000_VMOLR_LPE; + wr32(E1000_VMOLR(vfn), vmolr); + + return 0; +} + +static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter, + int vfn, bool enable) +{ + struct e1000_hw *hw = &adapter->hw; + u32 val, reg; + + if (hw->mac.type < e1000_82576) + return; + + if (hw->mac.type == e1000_i350) + reg = E1000_DVMOLR(vfn); + else + reg = E1000_VMOLR(vfn); + + val = rd32(reg); + if (enable) + val |= E1000_VMOLR_STRVLAN; + else + val &= ~(E1000_VMOLR_STRVLAN); + wr32(reg, val); +} + +static inline void igb_set_vmolr(struct igb_adapter *adapter, + int vfn, bool aupe) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + + /* This register exists only on 82576 and newer so if we are older then + * we should exit and do nothing + */ + if (hw->mac.type < e1000_82576) + return; + + vmolr = rd32(E1000_VMOLR(vfn)); + if (aupe) + vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ + else + vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ + + /* clear all bits that might not be set */ + vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); + + if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) + vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ + /* for VMDq only allow the VFs and pool 0 to accept broadcast and + * multicast packets + */ + if (vfn <= adapter->vfs_allocated_count) + vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ + + wr32(E1000_VMOLR(vfn), vmolr); +} + +/** + * igb_setup_srrctl - configure the split and replication receive control + * registers + * @adapter: Board private structure + * @ring: receive ring to be configured + **/ +void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + int reg_idx = ring->reg_idx; + u32 srrctl = 0; + + srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + if (ring_uses_large_buffer(ring)) + srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + if (hw->mac.type >= e1000_82580) + srrctl |= E1000_SRRCTL_TIMESTAMP; + /* Only set Drop Enable if VFs allocated, or we are supporting multiple + * queues and rx flow control is disabled + */ + if (adapter->vfs_allocated_count || + (!(hw->fc.current_mode & e1000_fc_rx_pause) && + adapter->num_rx_queues > 1)) + srrctl |= E1000_SRRCTL_DROP_EN; + + wr32(E1000_SRRCTL(reg_idx), srrctl); +} + +/** + * igb_configure_rx_ring - Configure a receive ring after Reset + * @adapter: board private structure + * @ring: receive ring to be 
configured + * + * Configure the Rx unit of the MAC after a reset. + **/ +void igb_configure_rx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + union e1000_adv_rx_desc *rx_desc; + u64 rdba = ring->dma; + int reg_idx = ring->reg_idx; + u32 rxdctl = 0; + + if (!adapter->ecdev) { + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL)); + } + + /* disable the queue */ + wr32(E1000_RXDCTL(reg_idx), 0); + + /* Set DMA base address registers */ + wr32(E1000_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(E1000_RDBAH(reg_idx), rdba >> 32); + wr32(E1000_RDLEN(reg_idx), + ring->count * sizeof(union e1000_adv_rx_desc)); + + /* initialize head and tail */ + ring->tail = adapter->io_addr + E1000_RDT(reg_idx); + wr32(E1000_RDH(reg_idx), 0); + writel(0, ring->tail); + + /* set descriptor configuration */ + igb_setup_srrctl(adapter, ring); + + /* set filtering for VMDQ pools */ + igb_set_vmolr(adapter, reg_idx & 0x7, true); + + rxdctl |= IGB_RX_PTHRESH; + rxdctl |= IGB_RX_HTHRESH << 8; + rxdctl |= IGB_RX_WTHRESH << 16; + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct igb_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IGB_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* enable receive descriptor fetching */ + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + wr32(E1000_RXDCTL(reg_idx), rxdctl); +} + +static void igb_set_rx_buffer_len(struct igb_adapter *adapter, + struct igb_ring *rx_ring) +{ + /* set build_skb and buffer size flags */ + clear_ring_build_skb_enabled(rx_ring); + clear_ring_uses_large_buffer(rx_ring); + + if (adapter->flags & IGB_FLAG_RX_LEGACY) + return; + + set_ring_build_skb_enabled(rx_ring); + +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) + return; + + set_ring_uses_large_buffer(rx_ring); +#endif +} + +/** + * igb_configure_rx - Configure receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
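+ *
+ * Entry 0 of the receive address table is claimed for the PF default MAC
+ * address before the per-ring buffer sizes and descriptor rings are set up.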
+ **/ +static void igb_configure_rx(struct igb_adapter *adapter) +{ + int i; + + /* set the correct pool for the PF default MAC address in entry 0 */ + igb_set_default_mac_filter(adapter); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *rx_ring = adapter->rx_ring[i]; + + igb_set_rx_buffer_len(adapter, rx_ring); + igb_configure_rx_ring(adapter, rx_ring); + } +} + +/** + * igb_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void igb_free_tx_resources(struct igb_ring *tx_ring) +{ + igb_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igb_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void igb_free_all_tx_resources(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igb_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * igb_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void igb_clean_tx_ring(struct igb_ring *tx_ring) +{ + u16 i = tx_ring->next_to_clean; + struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + + while (i != tx_ring->next_to_use) { + union e1000_adv_tx_desc *eop_desc, *tx_desc; + + /* Free all the Tx ring sk_buffs */ + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); + if (!adapter->ecdev) { + dev_kfree_skb_any(tx_buffer->skb); + } + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IGB_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + + tx_buffer->next_to_watch = NULL; + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * igb_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void igb_clean_all_tx_rings(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igb_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * igb_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void igb_free_rx_resources(struct igb_ring *rx_ring) +{ + igb_clean_rx_ring(rx_ring); + + 
rx_ring->xdp_prog = NULL; + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * igb_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void igb_free_all_rx_resources(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igb_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * igb_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void igb_clean_rx_ring(struct igb_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, + buffer_info->dma, + buffer_info->page_offset, + igb_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, + buffer_info->dma, + igb_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGB_RX_DMA_ATTR); + __page_frag_cache_drain(buffer_info->page, + buffer_info->pagecnt_bias); + + i++; + if (i == rx_ring->count) + i = 0; + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * igb_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void igb_clean_all_rx_rings(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igb_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * igb_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int igb_set_mac(struct net_device *netdev, void *p) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + /* set the correct pool for the new PF MAC address in entry 0 */ + igb_set_default_mac_filter(adapter); + + return 0; +} + +/** + * igb_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. 
+ * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int igb_write_mc_addr_list(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + igb_update_mc_addr_list(hw, NULL, 0); + igb_restore_vf_multicasts(adapter); + return 0; + } + + mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* The shared function expects a packed array of only addresses. */ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + igb_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +static int igb_vlan_promisc_enable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 i, pf_id; + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + case e1000_i350: + /* VLAN filtering needed for VLAN prio filter */ + if (adapter->netdev->features & NETIF_F_NTUPLE) + break; + fallthrough; + case e1000_82576: + case e1000_82580: + case e1000_i354: + /* VLAN filtering needed for pool filtering */ + if (adapter->vfs_allocated_count) + break; + fallthrough; + default: + return 1; + } + + /* We are already in VLAN promisc, nothing to do */ + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + return 0; + + if (!adapter->vfs_allocated_count) + goto set_vfta; + + /* Add PF to all active pools */ + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + + for (i = E1000_VLVF_ARRAY_SIZE; --i;) { + u32 vlvf = rd32(E1000_VLVF(i)); + + vlvf |= BIT(pf_id); + wr32(E1000_VLVF(i), vlvf); + } + +set_vfta: + /* Set all bits in the VLAN filter table array */ + for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;) + hw->mac.ops.write_vfta(hw, i, ~0U); + + /* Set flag so we don't redo unnecessary work */ + adapter->flags |= IGB_FLAG_VLAN_PROMISC; + + return 0; +} + +#define VFTA_BLOCK_SIZE 8 +static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; + u32 vid_start = vfta_offset * 32; + u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); + u32 i, vid, word, bits, pf_id; + + /* guarantee that we don't scrub out management VLAN */ + vid = adapter->mng_vlan_id; + if (vid >= vid_start && vid < vid_end) + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); + + if (!adapter->vfs_allocated_count) + goto set_vfta; + + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + + for (i = E1000_VLVF_ARRAY_SIZE; --i;) { + u32 vlvf = rd32(E1000_VLVF(i)); + + /* pull VLAN ID from VLVF */ + vid = vlvf & VLAN_VID_MASK; + + /* only concern ourselves with a certain range */ + if (vid < vid_start || vid >= vid_end) + continue; + + if (vlvf & E1000_VLVF_VLANID_ENABLE) { + /* record VLAN ID in VFTA */ + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); + + /* if PF is part of this then continue */ + if (test_bit(vid, adapter->active_vlans)) + continue; + } + + /* remove PF from the pool */ + bits = ~BIT(pf_id); + bits &= rd32(E1000_VLVF(i)); + wr32(E1000_VLVF(i), bits); + } + +set_vfta: + /* extract values from active_vlans and write back to VFTA */ + for (i = VFTA_BLOCK_SIZE; i--;) { + vid = (vfta_offset + i) * 32; + word = vid / BITS_PER_LONG; + bits = vid % BITS_PER_LONG; + + vfta[i] |= adapter->active_vlans[word] >> bits; + + 
hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]); + } +} + +static void igb_vlan_promisc_disable(struct igb_adapter *adapter) +{ + u32 i; + + /* We are not in VLAN promisc, nothing to do */ + if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; + + for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE) + igb_scrub_vfta(adapter, i); +} + +/** + * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void igb_set_rx_mode(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned int vfn = adapter->vfs_allocated_count; + u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE; + int count; + + /* Check for Promiscuous and All Multicast modes */ + if (netdev->flags & IFF_PROMISC) { + rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + + /* enable use of UTA filter to force packets to default pool */ + if (hw->mac.type == e1000_82576) + vmolr |= E1000_VMOLR_ROPE; + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = igb_write_mc_addr_list(netdev); + if (count < 0) { + rctl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else if (count) { + vmolr |= E1000_VMOLR_ROMPE; + } + } + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) { + rctl |= E1000_RCTL_UPE; + vmolr |= E1000_VMOLR_ROPE; + } + + /* enable VLAN filtering by default */ + rctl |= E1000_RCTL_VFE; + + /* disable VLAN filtering for modes that require it */ + if ((netdev->flags & IFF_PROMISC) || + (netdev->features & NETIF_F_RXALL)) { + /* if we fail to set all rules then just clear VFE */ + if (igb_vlan_promisc_enable(adapter)) + rctl &= ~E1000_RCTL_VFE; + } else { + igb_vlan_promisc_disable(adapter); + } + + /* update state of unicast, multicast, and VLAN filtering modes */ + rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE | + E1000_RCTL_VFE); + wr32(E1000_RCTL, rctl); + +#if (PAGE_SIZE < 8192) + if (!adapter->vfs_allocated_count) { + if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) + rlpml = IGB_MAX_FRAME_BUILD_SKB; + } +#endif + wr32(E1000_RLPML, rlpml); + + /* In order to support SR-IOV and eventually VMDq it is necessary to set + * the VMOLR to enable the appropriate modes. 
Without this workaround + * we will have issues with VLAN tag stripping not being done for frames + * that are only arriving because we are the default pool + */ + if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) + return; + + /* set UTA to appropriate mode */ + igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE)); + + vmolr |= rd32(E1000_VMOLR(vfn)) & + ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); + + /* enable Rx jumbo frames, restrict as needed to support build_skb */ + vmolr &= ~E1000_VMOLR_RLPML_MASK; +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) + vmolr |= IGB_MAX_FRAME_BUILD_SKB; + else +#endif + vmolr |= MAX_JUMBO_FRAME_SIZE; + vmolr |= E1000_VMOLR_LPE; + + wr32(E1000_VMOLR(vfn), vmolr); + + igb_restore_vf_multicasts(adapter); +} + +static void igb_check_wvbr(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 wvbr = 0; + + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + wvbr = rd32(E1000_WVBR); + if (!wvbr) + return; + break; + default: + break; + } + + adapter->wvbr |= wvbr; +} + +#define IGB_STAGGERED_QUEUE_OFFSET 8 + +static void igb_spoof_check(struct igb_adapter *adapter) +{ + int j; + + if (!adapter->wvbr) + return; + + for (j = 0; j < adapter->vfs_allocated_count; j++) { + if (adapter->wvbr & BIT(j) || + adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) { + dev_warn(&adapter->pdev->dev, + "Spoof event(s) detected on VF %d\n", j); + adapter->wvbr &= + ~(BIT(j) | + BIT(j + IGB_STAGGERED_QUEUE_OFFSET)); + } + } +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void igb_update_phy_info(struct timer_list *t) +{ + struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer); + igb_get_phy_info(&adapter->hw); +} + +/** + * igb_has_link - check shared code for link and determine up/down + * @adapter: pointer to driver private info + **/ +bool igb_has_link(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay + * false until the e1000_check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + if (!hw->mac.get_link_status) + return true; + fallthrough; + case e1000_media_type_internal_serdes: + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + break; + default: + case e1000_media_type_unknown: + break; + } + + if (((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) && + (hw->phy.id == I210_I_PHY_ID)) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + + return link_active; +} + +static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) +{ + bool ret = false; + u32 ctrl_ext, thstat; + + /* check for thermal sensor event on i350 copper only */ + if (hw->mac.type == e1000_i350) { + thstat = rd32(E1000_THSTAT); + ctrl_ext = rd32(E1000_CTRL_EXT); + + if ((hw->phy.media_type == e1000_media_type_copper) && + !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) + ret = !!(thstat & event); + } + + return ret; +} + +/** + * igb_check_lvmmc - check for malformed packets received + * and indicated in LVMMC register + * @adapter: pointer to adapter + **/ +static void igb_check_lvmmc(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 lvmmc; + + lvmmc = rd32(E1000_LVMMC); + if (lvmmc) { + if (unlikely(net_ratelimit())) { + netdev_warn(adapter->netdev, + "malformed Tx packet detected and dropped, LVMMC:0x%08x\n", + lvmmc); + } + } +} + +/** + * igb_watchdog - Timer Call-back + * @t: pointer to timer_list containing our private info pointer + **/ +static void igb_watchdog(struct timer_list *t) +{ + struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer); + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igb_watchdog_task(struct work_struct *work) +{ + struct igb_adapter *adapter = container_of(work, + struct igb_adapter, + watchdog_task); + struct e1000_hw *hw = &adapter->hw; + struct e1000_phy_info *phy = &hw->phy; + struct net_device *netdev = adapter->netdev; + u32 link; + int i; + u32 connsw; + u16 phy_data, retry_count = 20; + + link = igb_has_link(adapter); + + if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + + /* Force link down if we have fiber to swap to */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + if (hw->phy.media_type == e1000_media_type_copper) { + connsw = rd32(E1000_CONNSW); + if (!(connsw & E1000_CONNSW_AUTOSENSE_EN)) + link = 0; + } + } + if (link) { + /* Perform a reset if the media type changed. */ + if (hw->dev_spec._82575.media_changed) { + hw->dev_spec._82575.media_changed = false; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + igb_reset(adapter); + } + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = rd32(E1000_CTRL); + /* Links status message must follow this format */ + netdev_info(netdev, + "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? 
+ "Full" : "Half", + (ctrl & E1000_CTRL_TFCE) && + (ctrl & E1000_CTRL_RFCE) ? "RX/TX" : + (ctrl & E1000_CTRL_RFCE) ? "RX" : + (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); + + /* disable EEE if enabled */ + if ((adapter->flags & IGB_FLAG_EEE) && + (adapter->link_duplex == HALF_DUPLEX)) { + dev_info(&adapter->pdev->dev, + "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n"); + adapter->hw.dev_spec._82575.eee_disable = true; + adapter->flags &= ~IGB_FLAG_EEE; + } + + /* check if SmartSpeed worked */ + igb_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); + + /* check for thermal sensor event */ + if (igb_thermal_sensor_event(hw, + E1000_THSTAT_LINK_THROTTLE)) + netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n"); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + /* maybe add some timeout factor ? */ + break; + } + + if (adapter->link_speed != SPEED_1000) + goto no_wait; + + /* wait for Remote receiver status OK */ +retry_read_status: + if (!igb_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data)) { + if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && + retry_count) { + msleep(100); + retry_count--; + goto retry_read_status; + } else if (!retry_count) { + dev_err(&adapter->pdev->dev, "exceed max 2 second\n"); + } + } else { + dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n"); + } +no_wait: + netif_carrier_on(netdev); + + igb_ping_all_vfs(adapter); + igb_check_vf_rate_limit(adapter); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* check for thermal sensor event */ + if (igb_thermal_sensor_event(hw, + E1000_THSTAT_PWR_DOWN)) { + netdev_err(netdev, "The network adapter was stopped because it overheated\n"); + } + + /* Links status message must follow this format */ + netdev_info(netdev, "igb: %s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); + + igb_ping_all_vfs(adapter); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + /* link is down, time to check for alternate media */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + igb_check_swap_media(adapter); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + pm_schedule_suspend(netdev->dev.parent, + MSEC_PER_SEC * 5); + + /* also check for alternate media here */ + } else if (!netif_carrier_ok(netdev) && + (adapter->flags & IGB_FLAG_MAS_ENABLE)) { + igb_check_swap_media(adapter); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + } + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *tx_ring = adapter->tx_ring[i]; + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset 
controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + } + + /* Force detection of hung controller every watchdog period */ + set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + u32 eics = 0; + + for (i = 0; i < adapter->num_q_vectors; i++) + eics |= adapter->q_vector[i]->eims_value; + wr32(E1000_EICS, eics); + } else { + wr32(E1000_ICS, E1000_ICS_RXDMT0); + } + + igb_spoof_check(adapter); + igb_ptp_rx_hang(adapter); + igb_ptp_tx_hang(adapter); + + /* Check LVMMC register on i350/i354 only */ + if ((adapter->hw.mac.type == e1000_i350) || + (adapter->hw.mac.type == e1000_i354)) + igb_check_lvmmc(adapter); + + /* Reset the timer */ + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + HZ)); + else + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); + } +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * igb_update_ring_itr - update the dynamic ITR value based on packet size + * @q_vector: pointer to q_vector + * + * Stores a new ITR value based on strictly on packet size. This + * algorithm is less sophisticated than that used in igb_update_itr, + * due to the difficulty of synchronizing statistics across multiple + * receive rings. The divisors and thresholds used by this function + * were determined based on theoretical maximum wire speed and testing + * data, in order to minimize response time while increasing bulk + * throughput. + * This functionality is controlled by ethtool's coalescing settings. + * NOTE: This function is called only when operating in a multiqueue + * receive environment. + **/ +static void igb_update_ring_itr(struct igb_q_vector *q_vector) +{ + int new_val = q_vector->itr_val; + int avg_wire_size = 0; + struct igb_adapter *adapter = q_vector->adapter; + unsigned int packets; + + /* For non-gigabit speeds, just fix the interrupt rate at 4000 + * ints/sec - ITR timer value of 120 ticks. 
+ */ + if (adapter->link_speed != SPEED_1000) { + new_val = IGB_4K_ITR; + goto set_itr_val; + } + + packets = q_vector->rx.total_packets; + if (packets) + avg_wire_size = q_vector->rx.total_bytes / packets; + + packets = q_vector->tx.total_packets; + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + q_vector->tx.total_bytes / packets); + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); + + /* Give a little boost to mid-size frames */ + if ((avg_wire_size > 300) && (avg_wire_size < 1200)) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (new_val < IGB_20K_ITR && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + new_val = IGB_20K_ITR; + +set_itr_val: + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; + } +clear_counts: + q_vector->rx.total_bytes = 0; + q_vector->rx.total_packets = 0; + q_vector->tx.total_bytes = 0; + q_vector->tx.total_packets = 0; +} + +/** + * igb_update_itr - update the dynamic ITR value based on statistics + * @q_vector: pointer to q_vector + * @ring_container: ring info to update the itr for + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * This functionality is controlled by ethtool's coalescing settings. + * NOTE: These calculations are only valid when operating in a single- + * queue environment. 
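+ *
+ * The latency class computed here is only a hint: igb_set_itr() maps it to
+ * one of the fixed interrupt rates (70k, 20k or 4k interrupts per second)
+ * and writes the new ITR value at the start of the next interrupt instead
+ * of immediately, so the adapter's internal timer is not reset.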
+ **/ +static void igb_update_itr(struct igb_q_vector *q_vector, + struct igb_ring_container *ring_container) +{ + unsigned int packets = ring_container->total_packets; + unsigned int bytes = ring_container->total_bytes; + u8 itrval = ring_container->itr; + + /* no packets, exit with status unchanged */ + if (packets == 0) + return; + + switch (itrval) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes/packets > 8000) + itrval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + itrval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes/packets > 8000) + itrval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + itrval = bulk_latency; + else if ((packets > 35)) + itrval = lowest_latency; + } else if (bytes/packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + itrval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + itrval = low_latency; + } else if (bytes < 1500) { + itrval = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itrval; +} + +static void igb_set_itr(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + u32 new_itr = q_vector->itr_val; + u8 current_itr = 0; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (adapter->link_speed != SPEED_1000) { + current_itr = 0; + new_itr = IGB_4K_ITR; + goto set_itr_now; + } + + igb_update_itr(q_vector, &q_vector->tx); + igb_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (current_itr == lowest_latency && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + current_itr = low_latency; + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IGB_70K_ITR; /* 70,000 ints/sec */ + break; + case low_latency: + new_itr = IGB_20K_ITR; /* 20,000 ints/sec */ + break; + case bulk_latency: + new_itr = IGB_4K_ITR; /* 4,000 ints/sec */ + break; + default: + break; + } + +set_itr_now: + if (new_itr != q_vector->itr_val) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR + * value at the beginning of the next interrupt so the timing + * ends up being correct. + */ + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; + } +} + +static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct e1000_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + struct timespec64 ts; + + context_desc = IGB_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT; + + /* For 82575, context index must be unique per ring. */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + mss_l4len_idx |= tx_ring->reg_idx << 4; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + /* We assume there is always a valid tx time available. Invalid times + * should have been handled by the upper layers. + */ + if (tx_ring->launchtime_enable) { + ts = ktime_to_timespec64(first->skb->tstamp); + skb_txtime_consumed(first->skb); + context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); + } else { + context_desc->seqnum_seed = 0; + } +} + +static int igb_tso(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? + E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP; + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); + type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first->tx_flags |= IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_CSUM | + IGB_TX_FLAGS_IPV4; + } else { + ip.v6->payload_len = 0; + first->tx_flags |= IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_CSUM; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) { + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + } else { + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* MSS L4LEN IDX */ + mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; + + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; + + igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, + type_tucmd, mss_l4len_idx); + + return 1; +} + +static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; 
+ u32 vlan_macip_lens = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) && + !tx_ring->launchtime_enable) + return; + goto no_csum; + } + + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; + fallthrough; + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (skb_csum_is_sctp(skb)) { + type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; + break; + } + fallthrough; + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= IGB_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: + vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; + + igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); +} + +#define IGB_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? \ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = E1000_ADVTXD_DTYP_DATA | + E1000_ADVTXD_DCMD_DEXT | + E1000_ADVTXD_DCMD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN, + (E1000_ADVTXD_DCMD_VLE)); + + /* set segmentation bits for TSO */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO, + (E1000_ADVTXD_DCMD_TSE)); + + /* set timestamp bit if present */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP, + (E1000_ADVTXD_MAC_TSTAMP)); + + /* insert frame checksum */ + cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS); + + return cmd_type; +} + +static void igb_tx_olinfo_status(struct igb_ring *tx_ring, + union e1000_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT; + + /* 82575 requires a unique index per ring */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + olinfo_status |= tx_ring->reg_idx << 4; + + /* insert L4 checksum */ + olinfo_status |= IGB_SET_FLAG(tx_flags, + IGB_TX_FLAGS_CSUM, + (E1000_TXD_POPTS_TXSM << 8)); + + /* insert IPv4 checksum */ + olinfo_status |= IGB_SET_FLAG(tx_flags, + IGB_TX_FLAGS_IPV4, + (E1000_TXD_POPTS_IXSM << 8)); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + struct net_device *netdev = tx_ring->netdev; + struct igb_adapter *adapter = netdev_priv(netdev); + + if (!adapter->ecdev) { + netif_stop_subqueue(netdev, tx_ring->queue_index); + } + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (igb_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! 
*/ + if (!adapter->ecdev) { + netif_wake_subqueue(netdev, tx_ring->queue_index); + } + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + if (igb_desc_unused(tx_ring) >= size) + return 0; + return __igb_maybe_stop_tx(tx_ring, size); +} + +static int igb_tx_map(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct igb_tx_buffer *tx_buffer; + union e1000_adv_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u32 cmd_type = igb_tx_cmd_type(skb, tx_flags); + u16 i = tx_ring->next_to_use; + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); + + tx_desc = IGB_TX_DESC(tx_ring, i); + + igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > IGB_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGB_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IGB_MAX_DATA_PER_TXD; + size -= IGB_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGB_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, + size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IGB_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + dma_wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. 
*/ + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); + } + return 0; + +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; + + /* clear dma mappings for failed tx_buffer_info map */ + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + if (i-- == 0) + i += tx_ring->count; + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + if (!adapter->ecdev) { + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + } + + tx_ring->next_to_use = i; + + return -1; +} + +int igb_xmit_xdp_ring(struct igb_adapter *adapter, + struct igb_ring *tx_ring, + struct xdp_frame *xdpf) +{ + union e1000_adv_tx_desc *tx_desc; + u32 len, cmd_type, olinfo_status; + struct igb_tx_buffer *tx_buffer; + dma_addr_t dma; + u16 i; + + len = xdpf->len; + + if (unlikely(!igb_desc_unused(tx_ring))) + return IGB_XDP_CONSUMED; + + dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + return IGB_XDP_CONSUMED; + + /* record the location of the first descriptor for this packet */ + tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + tx_buffer->bytecount = len; + tx_buffer->gso_segs = 1; + tx_buffer->protocol = 0; + + i = tx_ring->next_to_use; + tx_desc = IGB_TX_DESC(tx_ring, i); + + dma_unmap_len_set(tx_buffer, len, len); + dma_unmap_addr_set(tx_buffer, dma, dma); + tx_buffer->type = IGB_TYPE_XDP; + tx_buffer->xdpf = xdpf; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + /* put descriptor type bits */ + cmd_type = E1000_ADVTXD_DTYP_DATA | + E1000_ADVTXD_DCMD_DEXT | + E1000_ADVTXD_DCMD_IFCS; + cmd_type |= len | IGB_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT; + /* 82575 requires a unique index per ring */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + olinfo_status |= tx_ring->reg_idx << 4; + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + + netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount); + + /* set the timestamp */ + tx_buffer->time_stamp = jiffies; + + /* Avoid any potential race with xdp_xmit and cleanup */ + smp_wmb(); + + /* set next_to_watch value indicating a packet is present */ + i++; + if (i == tx_ring->count) + i = 0; + + tx_buffer->next_to_watch = tx_desc; + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. 
*/ + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) + writel(i, tx_ring->tail); + + return IGB_XDP_TX; +} + +netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, + struct igb_ring *tx_ring) +{ + struct igb_tx_buffer *first; + int tso; + u32 tx_flags = 0; + unsigned short f; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + u8 hdr_len = 0; + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); + + /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); + + if (igb_maybe_stop_tx(tx_ring, count + 3)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->type = IGB_TYPE_SKB; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + if (unlikely(!adapter->ecdev && + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + + if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && + !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGB_TX_FLAGS_TSTAMP; + + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + if (adapter->hw.mac.type == e1000_82576) + schedule_work(&adapter->ptp_tx_work); + } else { + adapter->tx_hwtstamp_skipped++; + } + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= IGB_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + tso = igb_tso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + igb_tx_csum(tx_ring, first); + + if (igb_tx_map(tx_ring, first, hdr_len)) + goto cleanup_tx_tstamp; + + return NETDEV_TX_OK; + +out_drop: + if (!adapter->ecdev) { + dev_kfree_skb_any(first->skb); + first->skb = NULL; + } +cleanup_tx_tstamp: + if (unlikely(!adapter->ecdev && tx_flags & IGB_TX_FLAGS_TSTAMP)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + if (adapter->hw.mac.type == e1000_82576) + cancel_work_sync(&adapter->ptp_tx_work); + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + } + + return NETDEV_TX_OK; +} + +static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, + struct sk_buff *skb) +{ + unsigned int r_idx = skb->queue_mapping; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + /* The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size requirement. 
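+ * skb_put_padto() frees the skb on failure, so returning
+ * NETDEV_TX_OK below does not leak the buffer.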
+ */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); +} + +/** + * igb_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: number of the Tx queue that hung (unused) + **/ +static void igb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + + if (hw->mac.type >= e1000_82580) + hw->dev_spec._82575.global_device_reset = true; + + schedule_work(&adapter->reset_task); + wr32(E1000_EICS, + (adapter->eims_enable_mask & ~adapter->eims_other)); +} + +static void igb_reset_task(struct work_struct *work) +{ + struct igb_adapter *adapter; + adapter = container_of(work, struct igb_adapter, reset_task); + + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGB_DOWN, &adapter->state) || + test_bit(__IGB_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + + igb_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); + igb_reinit_locked(adapter); + rtnl_unlock(); +} + +/** + * igb_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + **/ +static void igb_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); +} + +/** + * igb_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int igb_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD; + + if (adapter->xdp_prog) { + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + + if (max_frame > igb_rx_bufsz(ring)) { + netdev_warn(adapter->netdev, + "Requested MTU size is not supported with XDP. Max frame size is %d\n", + max_frame); + return -EINVAL; + } + } + } + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + /* igb_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + + if (netif_running(netdev)) + igb_down(adapter); + + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igb_up(adapter); + else + igb_reset(adapter); + + clear_bit(__IGB_RESETTING, &adapter->state); + + return 0; +} + +/** + * igb_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void igb_update_stats(struct igb_adapter *adapter) +{ + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + u32 reg, mpc; + int i; + u64 bytes, packets; + unsigned int start; + u64 _bytes, _packets; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
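+ * link_speed is set to zero in igb_down(), and
+ * pci_channel_offline() covers a lost PCI connection.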
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + bytes = 0; + packets = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(E1000_RQDPC(i)); + if (hw->mac.type >= e1000_i210) + wr32(E1000_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *ring = adapter->tx_ring[i]; + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(E1000_CRCERRS); + adapter->stats.gprc += rd32(E1000_GPRC); + adapter->stats.gorc += rd32(E1000_GORCL); + rd32(E1000_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(E1000_BPRC); + adapter->stats.mprc += rd32(E1000_MPRC); + adapter->stats.roc += rd32(E1000_ROC); + + adapter->stats.prc64 += rd32(E1000_PRC64); + adapter->stats.prc127 += rd32(E1000_PRC127); + adapter->stats.prc255 += rd32(E1000_PRC255); + adapter->stats.prc511 += rd32(E1000_PRC511); + adapter->stats.prc1023 += rd32(E1000_PRC1023); + adapter->stats.prc1522 += rd32(E1000_PRC1522); + adapter->stats.symerrs += rd32(E1000_SYMERRS); + adapter->stats.sec += rd32(E1000_SEC); + + mpc = rd32(E1000_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(E1000_SCC); + adapter->stats.ecol += rd32(E1000_ECOL); + adapter->stats.mcc += rd32(E1000_MCC); + adapter->stats.latecol += rd32(E1000_LATECOL); + adapter->stats.dc += rd32(E1000_DC); + adapter->stats.rlec += rd32(E1000_RLEC); + adapter->stats.xonrxc += rd32(E1000_XONRXC); + adapter->stats.xontxc += rd32(E1000_XONTXC); + adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); + adapter->stats.xofftxc += rd32(E1000_XOFFTXC); + adapter->stats.fcruc += rd32(E1000_FCRUC); + adapter->stats.gptc += rd32(E1000_GPTC); + adapter->stats.gotc += rd32(E1000_GOTCL); + rd32(E1000_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(E1000_RNBC); + adapter->stats.ruc += rd32(E1000_RUC); + adapter->stats.rfc += rd32(E1000_RFC); + adapter->stats.rjc += rd32(E1000_RJC); + adapter->stats.tor += rd32(E1000_TORH); + adapter->stats.tot += rd32(E1000_TOTH); + adapter->stats.tpr += rd32(E1000_TPR); + + adapter->stats.ptc64 += rd32(E1000_PTC64); + adapter->stats.ptc127 += rd32(E1000_PTC127); + adapter->stats.ptc255 += rd32(E1000_PTC255); + adapter->stats.ptc511 += rd32(E1000_PTC511); + adapter->stats.ptc1023 += rd32(E1000_PTC1023); + adapter->stats.ptc1522 += rd32(E1000_PTC1522); + + adapter->stats.mptc += rd32(E1000_MPTC); + adapter->stats.bptc += rd32(E1000_BPTC); + + adapter->stats.tpt += rd32(E1000_TPT); + adapter->stats.colc += rd32(E1000_COLC); + + adapter->stats.algnerrc += rd32(E1000_ALGNERRC); + /* read internal phy specific stats */ + reg = rd32(E1000_CTRL_EXT); + if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { + 
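+ /* A link-mode field of zero selects the internal copper PHY;
+  * these counters are only read in that mode.
+  */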
adapter->stats.rxerrc += rd32(E1000_RXERRC); + + /* this stat has invalid values on i210/i211 */ + if ((hw->mac.type != e1000_i210) && + (hw->mac.type != e1000_i211)) + adapter->stats.tncrs += rd32(E1000_TNCRS); + } + + adapter->stats.tsctc += rd32(E1000_TSCTC); + adapter->stats.tsctfc += rd32(E1000_TSCTFC); + + adapter->stats.iac += rd32(E1000_IAC); + adapter->stats.icrxoc += rd32(E1000_ICRXOC); + adapter->stats.icrxptc += rd32(E1000_ICRXPTC); + adapter->stats.icrxatc += rd32(E1000_ICRXATC); + adapter->stats.ictxptc += rd32(E1000_ICTXPTC); + adapter->stats.ictxatc += rd32(E1000_ICTXATC); + adapter->stats.ictxqec += rd32(E1000_ICTXQEC); + adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); + adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += rd32(E1000_MGTPTC); + adapter->stats.mgprc += rd32(E1000_MGTPRC); + adapter->stats.mgpdc += rd32(E1000_MGTPDC); + + /* OS2BMC Stats */ + reg = rd32(E1000_MANC); + if (reg & E1000_MANC_EN_BMC2OS) { + adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); + adapter->stats.o2bspc += rd32(E1000_O2BSPC); + adapter->stats.b2ospc += rd32(E1000_B2OSPC); + adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); + } +} + +static void igb_tsync_interrupt(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct ptp_clock_event event; + struct timespec64 ts; + u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR); + + if (tsicr & TSINTR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); + ack |= TSINTR_SYS_WRAP; + } + + if (tsicr & E1000_TSICR_TXTS) { + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + ack |= E1000_TSICR_TXTS; + } + + if (tsicr & TSINTR_TT0) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[0].start, + adapter->perout[0].period); + /* u32 conversion of tv_sec is safe until y2106 */ + wr32(E1000_TRGTTIML0, ts.tv_nsec); + wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(E1000_TSAUXC); + tsauxc |= TSAUXC_EN_TT0; + wr32(E1000_TSAUXC, tsauxc); + adapter->perout[0].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= TSINTR_TT0; + } + + if (tsicr & TSINTR_TT1) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[1].start, + adapter->perout[1].period); + wr32(E1000_TRGTTIML1, ts.tv_nsec); + wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec); + tsauxc = rd32(E1000_TSAUXC); + tsauxc |= TSAUXC_EN_TT1; + wr32(E1000_TSAUXC, tsauxc); + adapter->perout[1].start = ts; + spin_unlock(&adapter->tmreg_lock); 
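+ /* Same re-arm sequence as for TT0 above; TSINTR_TT1 is collected
+  * in 'ack' and cleared by the final TSICR write.
+  */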
+ ack |= TSINTR_TT1; + } + + if (tsicr & TSINTR_AUTT0) { + nsec = rd32(E1000_AUXSTMPL0); + sec = rd32(E1000_AUXSTMPH0); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = sec * 1000000000ULL + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= TSINTR_AUTT0; + } + + if (tsicr & TSINTR_AUTT1) { + nsec = rd32(E1000_AUXSTMPL1); + sec = rd32(E1000_AUXSTMPH1); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = sec * 1000000000ULL + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= TSINTR_AUTT1; + } + + /* acknowledge the interrupts */ + wr32(E1000_TSICR, ack); +} + +static irqreturn_t igb_msix_other(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct e1000_hw *hw = &adapter->hw; + u32 icr = rd32(E1000_ICR); + /* reading ICR causes bit 31 of EICR to be cleared */ + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + /* The DMA Out of Sync is also indication of a spoof event + * in IOV mode. Check the Wrong VM Behavior register to + * see if it is really a spoof event. + */ + igb_check_wvbr(adapter); + } + + /* Check for a mailbox event */ + if (icr & E1000_ICR_VMMB) + igb_msg_task(adapter); + + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & E1000_ICR_TS) + igb_tsync_interrupt(adapter); + + wr32(E1000_EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static void igb_write_itr(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + u32 itr_val = q_vector->itr_val & 0x7FFC; + + if (!q_vector->set_itr) + return; + + if (!itr_val) + itr_val = 0x4; + + if (adapter->hw.mac.type == e1000_82575) + itr_val |= itr_val << 16; + else + itr_val |= E1000_EITR_CNT_IGNR; + + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; +} + +static irqreturn_t igb_msix_ring(int irq, void *data) +{ + struct igb_q_vector *q_vector = data; + + /* Write the ITR value calculated from the previous interrupt. */ + igb_write_itr(q_vector); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_IGB_DCA +static void igb_update_tx_dca(struct igb_adapter *adapter, + struct igb_ring *tx_ring, + int cpu) +{ + struct e1000_hw *hw = &adapter->hw; + u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); + + if (hw->mac.type != e1000_82575) + txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; + + /* We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ + txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN | + E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_DESC_DCA_EN; + + wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); +} + +static void igb_update_rx_dca(struct igb_adapter *adapter, + struct igb_ring *rx_ring, + int cpu) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); + + if (hw->mac.type != e1000_82575) + rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; + + /* We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. 
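+ * Only descriptor read relaxed ordering is therefore enabled below,
+ * together with descriptor DCA for this Rx ring.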
+ */ + rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN | + E1000_DCA_RXCTRL_DESC_DCA_EN; + + wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); +} + +static void igb_update_dca(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + int cpu = get_cpu(); + + if (q_vector->cpu == cpu) + goto out_no_update; + + if (q_vector->tx.ring) + igb_update_tx_dca(adapter, q_vector->tx.ring, cpu); + + if (q_vector->rx.ring) + igb_update_rx_dca(adapter, q_vector->rx.ring, cpu); + + q_vector->cpu = cpu; +out_no_update: + put_cpu(); +} + +static void igb_setup_dca(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + if (!(adapter->flags & IGB_FLAG_DCA_ENABLED)) + return; + + /* Always use CB2 mode, difference is masked in the CB driver. */ + wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); + + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->cpu = -1; + igb_update_dca(adapter->q_vector[i]); + } +} + +static int __igb_notify_dca(struct device *dev, void *data) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + unsigned long event = *(unsigned long *)data; + + switch (event) { + case DCA_PROVIDER_ADD: + /* if already enabled, don't do it again */ + if (adapter->flags & IGB_FLAG_DCA_ENABLED) + break; + if (dca_add_requester(dev) == 0) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; + dev_info(&pdev->dev, "DCA enabled\n"); + igb_setup_dca(adapter); + break; + } + fallthrough; /* since DCA is disabled. */ + case DCA_PROVIDER_REMOVE: + if (adapter->flags & IGB_FLAG_DCA_ENABLED) { + /* without this a class_device is left + * hanging around in the sysfs model + */ + dca_remove_requester(dev); + dev_info(&pdev->dev, "DCA disabled\n"); + adapter->flags &= ~IGB_FLAG_DCA_ENABLED; + wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); + } + break; + } + + return 0; +} + +static int igb_notify_dca(struct notifier_block *nb, unsigned long event, + void *p) +{ + int ret_val; + + ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, + __igb_notify_dca); + + return ret_val ? 
NOTIFY_BAD : NOTIFY_DONE; +} +#endif /* CONFIG_IGB_DCA */ + +#ifdef CONFIG_PCI_IOV +static int igb_vf_configure(struct igb_adapter *adapter, int vf) +{ + unsigned char mac_addr[ETH_ALEN]; + + eth_zero_addr(mac_addr); + igb_set_vf_mac(adapter, vf, mac_addr); + + /* By default spoof check is enabled for all VFs */ + adapter->vf_data[vf].spoofchk_enabled = true; + + /* By default VFs are not trusted */ + adapter->vf_data[vf].trusted = false; + + return 0; +} + +#endif +static void igb_ping_all_vfs(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ping; + int i; + + for (i = 0 ; i < adapter->vfs_allocated_count; i++) { + ping = E1000_PF_CONTROL_MSG; + if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) + ping |= E1000_VT_MSGTYPE_CTS; + igb_write_mbx(hw, &ping, 1, i); + } +} + +static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr = rd32(E1000_VMOLR(vf)); + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + + vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | + IGB_VF_FLAG_MULTI_PROMISC); + vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); + + if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { + vmolr |= E1000_VMOLR_MPME; + vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; + *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; + } else { + /* if we have hashes and we are clearing a multicast promisc + * flag we need to write the hashes to the MTA as this step + * was previously skipped + */ + if (vf_data->num_vf_mc_hashes > 30) { + vmolr |= E1000_VMOLR_MPME; + } else if (vf_data->num_vf_mc_hashes) { + int j; + + vmolr |= E1000_VMOLR_ROMPE; + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) + igb_mta_set(hw, vf_data->vf_mc_hashes[j]); + } + } + + wr32(E1000_VMOLR(vf), vmolr); + + /* there are flags left unprocessed, likely not supported */ + if (*msgbuf & E1000_VT_MSGINFO_MASK) + return -EINVAL; + + return 0; +} + +static int igb_set_vf_multicasts(struct igb_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + int i; + + /* salt away the number of multicast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vf_data->num_vf_mc_hashes = n; + + /* only up to 30 hash values supported */ + if (n > 30) + n = 30; + + /* store the hashes for later use */ + for (i = 0; i < n; i++) + vf_data->vf_mc_hashes[i] = hash_list[i]; + + /* Flush and reset the mta with the new values */ + igb_set_rx_mode(adapter->netdev); + + return 0; +} + +static void igb_restore_vf_multicasts(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data; + int i, j; + + for (i = 0; i < adapter->vfs_allocated_count; i++) { + u32 vmolr = rd32(E1000_VMOLR(i)); + + vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); + + vf_data = &adapter->vf_data[i]; + + if ((vf_data->num_vf_mc_hashes > 30) || + (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) { + vmolr |= E1000_VMOLR_MPME; + } else if (vf_data->num_vf_mc_hashes) { + vmolr |= E1000_VMOLR_ROMPE; + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) + igb_mta_set(hw, vf_data->vf_mc_hashes[j]); + } + wr32(E1000_VMOLR(i), vmolr); + } +} + +static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pool_mask, vlvf_mask, i; + + /* create mask for VF and other pools */ + pool_mask = 
E1000_VLVF_POOLSEL_MASK; + vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf); + + /* drop PF from pool bits */ + pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT + + adapter->vfs_allocated_count); + + /* Find the vlan filter for this id */ + for (i = E1000_VLVF_ARRAY_SIZE; i--;) { + u32 vlvf = rd32(E1000_VLVF(i)); + u32 vfta_mask, vid, vfta; + + /* remove the vf from the pool */ + if (!(vlvf & vlvf_mask)) + continue; + + /* clear out bit from VLVF */ + vlvf ^= vlvf_mask; + + /* if other pools are present, just remove ourselves */ + if (vlvf & pool_mask) + goto update_vlvfb; + + /* if PF is present, leave VFTA */ + if (vlvf & E1000_VLVF_POOLSEL_MASK) + goto update_vlvf; + + vid = vlvf & E1000_VLVF_VLANID_MASK; + vfta_mask = BIT(vid % 32); + + /* clear bit from VFTA */ + vfta = adapter->shadow_vfta[vid / 32]; + if (vfta & vfta_mask) + hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask); +update_vlvf: + /* clear pool selection enable */ + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + vlvf &= E1000_VLVF_POOLSEL_MASK; + else + vlvf = 0; +update_vlvfb: + /* clear pool bits */ + wr32(E1000_VLVF(i), vlvf); + } +} + +static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan) +{ + u32 vlvf; + int idx; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the VLAN id in the VLVF entries */ + for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) { + vlvf = rd32(E1000_VLVF(idx)); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + return idx; +} + +static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) +{ + struct e1000_hw *hw = &adapter->hw; + u32 bits, pf_id; + int idx; + + idx = igb_find_vlvf_entry(hw, vid); + if (!idx) + return; + + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK; + bits &= rd32(E1000_VLVF(idx)); + + /* Disable the filter so this falls into the default pool. */ + if (!bits) { + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + wr32(E1000_VLVF(idx), BIT(pf_id)); + else + wr32(E1000_VLVF(idx), 0); + } +} + +static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid, + bool add, u32 vf) +{ + int pf_id = adapter->vfs_allocated_count; + struct e1000_hw *hw = &adapter->hw; + int err; + + /* If VLAN overlaps with one the PF is currently monitoring make + * sure that we are able to allocate a VLVF entry. This may be + * redundant but it guarantees PF will maintain visibility to + * the VLAN. + */ + if (add && test_bit(vid, adapter->active_vlans)) { + err = igb_vfta_set(hw, vid, pf_id, true, false); + if (err) + return err; + } + + err = igb_vfta_set(hw, vid, vf, add, false); + + if (add && !err) + return err; + + /* If we failed to add the VF VLAN or we are removing the VF VLAN + * we may need to drop the PF pool bit in order to allow us to free + * up the VLVF resources. 
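+ * igb_update_pf_vlvf() below only releases the shared VLVF entry
+ * once no other pool still references it.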
+ */ + if (test_bit(vid, adapter->active_vlans) || + (adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_update_pf_vlvf(adapter, vid); + + return err; +} + +static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + + if (vid) + wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); + else + wr32(E1000_VMVIR(vf), 0); +} + +static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf, + u16 vlan, u8 qos) +{ + int err; + + err = igb_set_vf_vlan(adapter, vlan, true, vf); + if (err) + return err; + + igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vlan); + + /* revoke access to previous VLAN */ + if (vlan != adapter->vf_data[vf].pf_vlan) + igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); + + adapter->vf_data[vf].pf_vlan = vlan; + adapter->vf_data[vf].pf_qos = qos; + igb_set_vf_vlan_strip(adapter, vf, true); + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + + return err; +} + +static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf) +{ + /* Restore tagless access via VLAN 0 */ + igb_set_vf_vlan(adapter, 0, true, vf); + + igb_set_vmvir(adapter, 0, vf); + igb_set_vmolr(adapter, vf, true); + + /* Remove any PF assigned VLAN */ + if (adapter->vf_data[vf].pf_vlan) + igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); + + adapter->vf_data[vf].pf_vlan = 0; + adapter->vf_data[vf].pf_qos = 0; + igb_set_vf_vlan_strip(adapter, vf, false); + + return 0; +} + +static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf, + u16 vlan, u8 qos, __be16 vlan_proto) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) + return -EINVAL; + + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + return (vlan || qos) ? 
igb_enable_port_vlan(adapter, vf, vlan, qos) : + igb_disable_port_vlan(adapter, vf); +} + +static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +{ + int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); + int ret; + + if (adapter->vf_data[vf].pf_vlan) + return -1; + + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + ret = igb_set_vf_vlan(adapter, vid, !!add, vf); + if (!ret) + igb_set_vf_vlan_strip(adapter, vf, !!vid); + return ret; +} + +static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) +{ + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + + /* clear flags - except flag that indicates PF has set the MAC */ + vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC; + vf_data->last_nack = jiffies; + + /* reset vlans for device */ + igb_clear_vf_vfta(adapter, vf); + igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf); + igb_set_vmvir(adapter, vf_data->pf_vlan | + (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vf_data->pf_vlan); + igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan)); + + /* reset multicast table array for vf */ + adapter->vf_data[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + igb_set_rx_mode(adapter->netdev); +} + +static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) +{ + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; + + /* clear mac address as we were hotplug removed/added */ + if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) + eth_zero_addr(vf_mac); + + /* process remaining reset events */ + igb_vf_reset(adapter, vf); +} + +static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; + u32 reg, msgbuf[3]; + u8 *addr = (u8 *)(&msgbuf[1]); + + /* process all the same items cleared in a function level reset */ + igb_vf_reset(adapter, vf); + + /* set vf mac address */ + igb_set_vf_mac(adapter, vf, vf_mac); + + /* enable transmit and receive for vf */ + reg = rd32(E1000_VFTE); + wr32(E1000_VFTE, reg | BIT(vf)); + reg = rd32(E1000_VFRE); + wr32(E1000_VFRE, reg | BIT(vf)); + + adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; + + /* reply to reset with ack and vf mac address */ + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK; + } + igb_write_mbx(hw, msgbuf, 3, vf); +} + +static void igb_flush_mac_table(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < hw->mac.rar_entry_count; i++) { + adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; + eth_zero_addr(adapter->mac_table[i].addr); + adapter->mac_table[i].queue = 0; + igb_rar_set_index(adapter, i); + } +} + +static int igb_available_rars(struct igb_adapter *adapter, u8 queue) +{ + struct e1000_hw *hw = &adapter->hw; + /* do not count rar entries reserved for VFs MAC addresses */ + int rar_entries = hw->mac.rar_entry_count - + adapter->vfs_allocated_count; + int i, count = 0; + + for (i = 0; i < rar_entries; i++) { + /* do not count default entries */ + if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) + continue; + + /* do not count "in use" entries for different queues */ + if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) && + (adapter->mac_table[i].queue != 
queue)) + continue; + + count++; + } + + return count; +} + +/* Set default MAC address for the PF in the first RAR entry */ +static void igb_set_default_mac_filter(struct igb_adapter *adapter) +{ + struct igb_mac_addr *mac_table = &adapter->mac_table[0]; + + ether_addr_copy(mac_table->addr, adapter->hw.mac.addr); + mac_table->queue = adapter->vfs_allocated_count; + mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; + + igb_rar_set_index(adapter, 0); +} + +/* If the filter to be added and an already existing filter express + * the same address and address type, it should be possible to only + * override the other configurations, for example the queue to steer + * traffic. + */ +static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry, + const u8 *addr, const u8 flags) +{ + if (!(entry->state & IGB_MAC_STATE_IN_USE)) + return true; + + if ((entry->state & IGB_MAC_STATE_SRC_ADDR) != + (flags & IGB_MAC_STATE_SRC_ADDR)) + return false; + + if (!ether_addr_equal(addr, entry->addr)) + return false; + + return true; +} + +/* Add a MAC filter for 'addr' directing matching traffic to 'queue', + * 'flags' is used to indicate what kind of match is made, match is by + * default for the destination address, if matching by source address + * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used. + */ +static int igb_add_mac_filter_flags(struct igb_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) +{ + struct e1000_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count - + adapter->vfs_allocated_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for the first empty entry in the MAC table. + * Do not touch entries at the end of the table reserved for the VF MAC + * addresses. + */ + for (i = 0; i < rar_entries; i++) { + if (!igb_mac_entry_can_be_used(&adapter->mac_table[i], + addr, flags)) + continue; + + ether_addr_copy(adapter->mac_table[i].addr, addr); + adapter->mac_table[i].queue = queue; + adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags; + + igb_rar_set_index(adapter, i); + return i; + } + + return -ENOSPC; +} + +static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, + const u8 queue) +{ + return igb_add_mac_filter_flags(adapter, addr, queue, 0); +} + +/* Remove a MAC filter for 'addr' directing matching traffic to + * 'queue', 'flags' is used to indicate what kind of match need to be + * removed, match is by default for the destination address, if + * matching by source address is to be removed the flag + * IGB_MAC_STATE_SRC_ADDR can be used. + */ +static int igb_del_mac_filter_flags(struct igb_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) +{ + struct e1000_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count - + adapter->vfs_allocated_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for matching entry in the MAC table based on given address + * and queue. Do not touch entries at the end of the table reserved + * for the VF MAC addresses. 
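+ * A matching default (PF) entry is reset to its initial state
+ * rather than being cleared completely.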
+ */ + for (i = 0; i < rar_entries; i++) { + if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)) + continue; + if ((adapter->mac_table[i].state & flags) != flags) + continue; + if (adapter->mac_table[i].queue != queue) + continue; + if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) + continue; + + /* When a filter for the default address is "deleted", + * we return it to its initial configuration + */ + if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) { + adapter->mac_table[i].state = + IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; + adapter->mac_table[i].queue = + adapter->vfs_allocated_count; + } else { + adapter->mac_table[i].state = 0; + adapter->mac_table[i].queue = 0; + eth_zero_addr(adapter->mac_table[i].addr); + } + + igb_rar_set_index(adapter, i); + return 0; + } + + return -ENOENT; +} + +static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, + const u8 queue) +{ + return igb_del_mac_filter_flags(adapter, addr, queue, 0); +} + +int igb_add_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + struct e1000_hw *hw = &adapter->hw; + + /* In theory, this should be supported on 82575 as well, but + * that part wasn't easily accessible during development. + */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + return igb_add_mac_filter_flags(adapter, addr, queue, + IGB_MAC_STATE_QUEUE_STEERING | flags); +} + +int igb_del_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + return igb_del_mac_filter_flags(adapter, addr, queue, + IGB_MAC_STATE_QUEUE_STEERING | flags); +} + +static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int ret; + + ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count); + + return min_t(int, ret, 0); +} + +static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count); + + return 0; +} + +static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, + const u32 info, const u8 *addr) +{ + struct pci_dev *pdev = adapter->pdev; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + struct list_head *pos; + struct vf_mac_filter *entry = NULL; + int ret = 0; + + switch (info) { + case E1000_VF_MAC_FILTER_CLR: + /* remove all unicast MAC filters related to the current VF */ + list_for_each(pos, &adapter->vf_macs.l) { + entry = list_entry(pos, struct vf_mac_filter, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + igb_del_mac_filter(adapter, entry->vf_mac, vf); + } + } + break; + case E1000_VF_MAC_FILTER_ADD: + if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && + !vf_data->trusted) { + dev_warn(&pdev->dev, + "VF %d requested MAC filter but is administratively denied\n", + vf); + return -EINVAL; + } + if (!is_valid_ether_addr(addr)) { + dev_warn(&pdev->dev, + "VF %d attempted to set invalid MAC filter\n", + vf); + return -EINVAL; + } + + /* try to find empty slot in the list */ + list_for_each(pos, &adapter->vf_macs.l) { + entry = list_entry(pos, struct vf_mac_filter, l); + if (entry->free) + break; + } + + if (entry && entry->free) { + entry->free = false; + entry->vf = vf; + ether_addr_copy(entry->vf_mac, addr); + + ret = igb_add_mac_filter(adapter, addr, vf); + ret = min_t(int, ret, 0); + } else { + ret = -ENOSPC; + } + + if (ret == -ENOSPC) + dev_warn(&pdev->dev, + "VF %d 
has requested MAC filter but there is no space for it\n", + vf); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) +{ + struct pci_dev *pdev = adapter->pdev; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + u32 info = msg[0] & E1000_VT_MSGINFO_MASK; + + /* The VF MAC Address is stored in a packed array of bytes + * starting at the second 32 bit word of the msg array + */ + unsigned char *addr = (unsigned char *)&msg[1]; + int ret = 0; + + if (!info) { + if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && + !vf_data->trusted) { + dev_warn(&pdev->dev, + "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", + vf); + return -EINVAL; + } + + if (!is_valid_ether_addr(addr)) { + dev_warn(&pdev->dev, + "VF %d attempted to set invalid MAC\n", + vf); + return -EINVAL; + } + + ret = igb_set_vf_mac(adapter, vf, addr); + } else { + ret = igb_set_vf_mac_filter(adapter, vf, info, addr); + } + + return ret; +} + +static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + u32 msg = E1000_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!(vf_data->flags & IGB_VF_FLAG_CTS) && + time_after(jiffies, vf_data->last_nack + (2 * HZ))) { + igb_write_mbx(hw, &msg, 1, vf); + vf_data->last_nack = jiffies; + } +} + +static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) +{ + struct pci_dev *pdev = adapter->pdev; + u32 msgbuf[E1000_VFMAILBOX_SIZE]; + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + s32 retval; + + retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false); + + if (retval) { + /* if receive failed revoke VF CTS stats and restart init */ + dev_err(&pdev->dev, "Error receiving message from VF\n"); + vf_data->flags &= ~IGB_VF_FLAG_CTS; + if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) + goto unlock; + goto out; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) + goto unlock; + + /* until the vf completes a reset it should not be + * allowed to start any configuration. 
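+ * Only an E1000_VF_RESET message is honoured before the CTS flag
+ * is set; other requests are NACKed or dropped further below.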
+ */ + if (msgbuf[0] == E1000_VF_RESET) { + /* unlocks mailbox */ + igb_vf_reset_msg(adapter, vf); + return; + } + + if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { + if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) + goto unlock; + retval = -1; + goto out; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case E1000_VF_SET_MAC_ADDR: + retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); + break; + case E1000_VF_SET_PROMISC: + retval = igb_set_vf_promisc(adapter, msgbuf, vf); + break; + case E1000_VF_SET_MULTICAST: + retval = igb_set_vf_multicasts(adapter, msgbuf, vf); + break; + case E1000_VF_SET_LPE: + retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); + break; + case E1000_VF_SET_VLAN: + retval = -1; + if (vf_data->pf_vlan) + dev_warn(&pdev->dev, + "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", + vf); + else + retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + default: + dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); + retval = -1; + break; + } + + msgbuf[0] |= E1000_VT_MSGTYPE_CTS; +out: + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= E1000_VT_MSGTYPE_NACK; + else + msgbuf[0] |= E1000_VT_MSGTYPE_ACK; + + /* unlocks mailbox */ + igb_write_mbx(hw, msgbuf, 1, vf); + return; + +unlock: + igb_unlock_mbx(hw, vf); +} + +static void igb_msg_task(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vf; + + for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { + /* process any reset requests */ + if (!igb_check_for_rst(hw, vf)) + igb_vf_reset_event(adapter, vf); + + /* process any messages pending */ + if (!igb_check_for_msg(hw, vf)) + igb_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!igb_check_for_ack(hw, vf)) + igb_rcv_ack_from_vf(adapter, vf); + } +} + +/** + * igb_set_uta - Set unicast filter table address + * @adapter: board private structure + * @set: boolean indicating if we are setting or clearing bits + * + * The unicast table address is a register array of 32-bit registers. + * The table is meant to be used in a way similar to how the MTA is used + * however due to certain limitations in the hardware it is necessary to + * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous + * enable bit to allow vlan tag stripping when promiscuous mode is enabled + **/ +static void igb_set_uta(struct igb_adapter *adapter, bool set) +{ + struct e1000_hw *hw = &adapter->hw; + u32 uta = set ? 
~0 : 0; + int i; + + /* we only need to do this if VMDq is enabled */ + if (!adapter->vfs_allocated_count) + return; + + for (i = hw->mac.uta_reg_count; i--;) + array_wr32(E1000_UTA, i, uta); +} + +/** + * igb_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t igb_intr_msi(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct igb_q_vector *q_vector = adapter->q_vector[0]; + struct e1000_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ + u32 icr = rd32(E1000_ICR); + + igb_write_itr(q_vector); + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & E1000_ICR_TS) + igb_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igb_intr - Legacy Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t igb_intr(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct igb_q_vector *q_vector = adapter->q_vector[0]; + struct e1000_hw *hw = &adapter->hw; + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No + * need for the IMC write + */ + u32 icr = rd32(E1000_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & E1000_ICR_INT_ASSERTED)) + return IRQ_NONE; + + igb_write_itr(q_vector); + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & E1000_ICR_TS) + igb_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void igb_ring_irq_enable(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { + if ((adapter->num_q_vectors == 1) && !adapter->vf_data) + igb_set_itr(q_vector); + else + igb_update_ring_itr(q_vector); + } + + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (adapter->flags & IGB_FLAG_HAS_MSIX) + wr32(E1000_EIMS, q_vector->eims_value); + else + igb_irq_enable(adapter); + } +} + +/** + * igb_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + **/ +static int igb_poll(struct napi_struct *napi, int budget) +{ + struct igb_q_vector *q_vector = container_of(napi, + struct igb_q_vector, + napi); + bool clean_complete = true; + int work_done = 0; + +#ifdef CONFIG_IGB_DCA + if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) + igb_update_dca(q_vector); +#endif + if (q_vector->tx.ring) + clean_complete = igb_clean_tx_irq(q_vector, budget); + + if (q_vector->rx.ring) { + int cleaned = igb_clean_rx_irq(q_vector, budget); + + work_done += 
cleaned; + if (cleaned >= budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + igb_ring_irq_enable(q_vector); + + return min(work_done, budget - 1); +} + +/** + * igb_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * @napi_budget: Used to determine if we are in netpoll + * + * returns true if ring is completely cleaned + **/ +static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct igb_ring *tx_ring = q_vector->tx.ring; + struct igb_tx_buffer *tx_buffer; + union e1000_adv_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__IGB_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IGB_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + if (!adapter->ecdev) { + if (tx_buffer->type == IGB_TYPE_SKB) + napi_consume_skb(tx_buffer->skb, napi_budget); + else + xdp_return_frame(tx_buffer->xdpf); + } + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + dma_unmap_len_set(tx_buffer, len, 0); + + /* clear last DMA location and unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + if (!adapter->ecdev) { + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + } + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.bytes += total_bytes; + tx_ring->tx_stats.packets += total_packets; + u64_stats_update_end(&tx_ring->tx_syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (!adapter->ecdev && + 
test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { + struct e1000_hw *hw = &adapter->hw; + + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + dev_err(tx_ring->dev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%p>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + rd32(E1000_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_buffer->time_stamp, + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + /* we are about to reset, no point in enabling stuff */ + return true; + } + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(!adapter->ecdev && total_packets && + netif_carrier_ok(tx_ring->netdev) && + igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !(test_bit(__IGB_DOWN, &adapter->state))) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.restart_queue++; + u64_stats_update_end(&tx_ring->tx_syncp); + } + } + + return !!budget; +} + +/** + * igb_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void igb_reuse_rx_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *old_buff) +{ + struct igb_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls. + */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, + int rx_buf_pgcnt) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote and pfmemalloc pages */ + if (!dev_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) + return false; +#else +#define IGB_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048) + + if (rx_buffer->page_offset > IGB_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. 
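+ * Taking USHRT_MAX - 1 references in one go keeps the atomic
+ * page_ref_add() off the per-packet hot path.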
+ */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * igb_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: size of buffer to be added + * + * This function will add the data contained in rx_buffer->page to the skb. + **/ +static void igb_add_rx_frag(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IGB_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + ktime_t timestamp) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + unsigned int size = xdp->data_end - xdp->data; + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data); + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); + if (unlikely(!skb)) + return NULL; + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IGB_RX_HDR_LEN) + headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (xdp->data + headlen) - page_address(rx_buffer->page), + size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + ktime_t timestamp) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data_meta); + + /* build an skb around the page buffer */ + skb = build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, xdp->data_end - xdp->data); + + if (metasize) + skb_metadata_set(skb, metasize); + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return skb; 
+} + +static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter, + struct igb_ring *rx_ring, + struct xdp_buff *xdp) +{ + int err, result = IGB_XDP_PASS; + struct bpf_prog *xdp_prog; + u32 act; + + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + + if (!xdp_prog) + goto xdp_out; + + prefetchw(xdp->data_hard_start); /* xdp_frame write */ + + act = bpf_prog_run_xdp(xdp_prog, xdp); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + result = igb_xdp_xmit_back(adapter, xdp); + if (result == IGB_XDP_CONSUMED) + goto out_failure; + break; + case XDP_REDIRECT: + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); + if (err) + goto out_failure; + result = IGB_XDP_REDIR; + break; + default: +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL == 5 + bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act); +#else + bpf_warn_invalid_xdp_action(act); +#endif + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_DROP: + result = IGB_XDP_CONSUMED; + break; + } +xdp_out: + return ERR_PTR(-result); +} + +static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ +#else + truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IGB_SKB_PAD + size) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : + SKB_DATA_ALIGN(size); +#endif + return truesize; +} + +static void igb_rx_buffer_flip(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + unsigned int size) +{ + unsigned int truesize = igb_rx_frame_truesize(rx_ring, size); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static inline void igb_rx_checksum(struct igb_ring *ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set */ + if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM)) + return; + + /* Rx checksum disabled via ethtool */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (igb_test_staterr(rx_desc, + E1000_RXDEXT_STATERR_TCPE | + E1000_RXDEXT_STATERR_IPE)) { + /* work around errata with sctp packets where the TCPE aka + * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) + * packets, (aka let the stack check the crc32c) + */ + if (!((skb->len == 60) && + test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.csum_err++; + u64_stats_update_end(&ring->rx_syncp); + } + /* let the stack verify checksum errors */ + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | + E1000_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + dev_dbg(ring->dev, "cksum success: bits %08X\n", + le32_to_cpu(rx_desc->wb.upper.status_error)); +} + +static inline void igb_rx_hash(struct igb_ring *ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (ring->netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, + le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + PKT_HASH_TYPE_L3); +} + +/** + * igb_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * This function updates next to clean. 
If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool igb_is_non_eop(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGB_RX_DESC(rx_ring, ntc)); + + if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))) + return false; + + return true; +} + +/** + * igb_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool igb_cleanup_headers(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + + if (unlikely((igb_test_staterr(rx_desc, + E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { + struct net_device *netdev = rx_ring->netdev; + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * igb_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +static void igb_process_skb_fields(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + + igb_rx_hash(rx_ring, rx_desc, skb); + + igb_rx_checksum(rx_ring, rx_desc, skb); + + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && + !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) + igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { + u16 vid; + + if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && + test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) + vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); + else + vid = le16_to_cpu(rx_desc->wb.upper.vlan); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static unsigned int igb_rx_offset(struct igb_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? 
IGB_SKB_PAD : 0;
+}
+
+static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
+					       const unsigned int size, int *rx_buf_pgcnt)
+{
+	struct igb_rx_buffer *rx_buffer;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	*rx_buf_pgcnt =
+#if (PAGE_SIZE < 8192)
+		page_count(rx_buffer->page);
+#else
+		0;
+#endif
+	prefetchw(rx_buffer->page);
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      size,
+				      DMA_FROM_DEVICE);
+
+	rx_buffer->pagecnt_bias--;
+
+	return rx_buffer;
+}
+
+static void igb_put_rx_buffer(struct igb_ring *rx_ring,
+			      struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
+{
+	if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
+		/* hand second half of page back to the ring */
+		igb_reuse_rx_page(rx_ring, rx_buffer);
+	} else {
+		/* We are not reusing the buffer so unmap it and free
+		 * any references we are holding to it
+		 */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
+				     IGB_RX_DMA_ATTR);
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+	}
+
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+}
+
+static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct igb_ring *rx_ring = q_vector->rx.ring;
+	struct sk_buff *skb = rx_ring->skb;
+	unsigned int total_bytes = 0, total_packets = 0;
+	u16 cleaned_count = igb_desc_unused(rx_ring);
+	unsigned int xdp_xmit = 0;
+	struct xdp_buff xdp;
+	u32 frame_sz = 0;
+	int rx_buf_pgcnt;
+
+	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+	frame_sz = igb_rx_frame_truesize(rx_ring, 0);
+#endif
+	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+
+	while (likely(total_packets < budget)) {
+		union e1000_adv_rx_desc *rx_desc;
+		struct igb_rx_buffer *rx_buffer;
+		ktime_t timestamp = 0;
+		int pkt_offset = 0;
+		unsigned int size;
+		void *pktbuf;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
+			igb_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
+		size = le16_to_cpu(rx_desc->wb.upper.length);
+		if (!size)
+			break;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		dma_rmb();
+
+		rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
+		pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
+
+		if (adapter->ecdev) {
+			unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+			ecdev_receive(adapter->ecdev, pktbuf, size);
+			adapter->ec_watchdog_jiffies = jiffies;
+			igb_reuse_rx_page(rx_ring, rx_buffer);
+		} else {
+			/* pull rx packet timestamp if available and valid */
+			if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+				int ts_hdr_len;
+
+				ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+								 pktbuf, &timestamp);
+
+				pkt_offset += ts_hdr_len;
+				size -= ts_hdr_len;
+			}
+
+			/* retrieve a buffer from the ring */
+			if (!skb) {
+				unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
+				unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
+
+				xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+	#if (PAGE_SIZE > 4096)
+				/* At larger PAGE_SIZE, frame_sz depend on len size */
+				xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
+	#endif
+				skb = 
igb_run_xdp(adapter, rx_ring, &xdp); + } + + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) { + xdp_xmit |= xdp_res; + igb_rx_buffer_flip(rx_ring, rx_buffer, size); + } else { + rx_buffer->pagecnt_bias++; + } + total_packets++; + total_bytes += size; + } else if (skb) + igb_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = igb_build_skb(rx_ring, rx_buffer, &xdp, + timestamp); + else + skb = igb_construct_skb(rx_ring, rx_buffer, + &xdp, timestamp); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt); + } + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (igb_is_non_eop(rx_ring, rx_desc)) + continue; + + if (adapter->ecdev) { + total_packets++; + continue; + } + + /* verify the packet layout is correct */ + if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + igb_process_skb_fields(rx_ring, rx_desc, skb); + + napi_gro_receive(&q_vector->napi, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + if (xdp_xmit & IGB_XDP_REDIR) + xdp_do_flush(); + + if (xdp_xmit & IGB_XDP_TX) { + struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter); + + igb_xdp_ring_update_tail(tx_ring); + } + + u64_stats_update_begin(&rx_ring->rx_syncp); + rx_ring->rx_stats.packets += total_packets; + rx_ring->rx_stats.bytes += total_bytes; + u64_stats_update_end(&rx_ring->rx_syncp); + q_vector->rx.total_packets += total_packets; + q_vector->rx.total_bytes += total_bytes; + + if (cleaned_count) + igb_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_packets; +} + +static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(igb_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + igb_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGB_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, igb_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = igb_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +/** + * igb_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: rx descriptor ring to allocate new receive buffers + * @cleaned_count: count of buffers to allocate + **/ +void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) +{ + union e1000_adv_rx_desc *rx_desc; + struct igb_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + u16 bufsz; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = IGB_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i 
-= rx_ring->count; + + bufsz = igb_rx_bufsz(rx_ring); + + do { + if (!igb_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IGB_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + writel(i, rx_ring->tail); + } +} + +/** + * igb_mii_ioctl - + * @netdev: pointer to netdev struct + * @ifr: interface structure + * @cmd: ioctl command to execute + **/ +static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + + if (adapter->hw.phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: + if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) + return -EIO; + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } + return 0; +} + +/** + * igb_ioctl - + * @netdev: pointer to netdev struct + * @ifr: interface structure + * @cmd: ioctl command to execute + **/ +static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return igb_mii_ioctl(netdev, ifr, cmd); + case SIOCGHWTSTAMP: + return igb_ptp_get_ts_config(netdev, ifr); + case SIOCSHWTSTAMP: + return igb_ptp_set_ts_config(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + +s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + if (pcie_capability_read_word(adapter->pdev, reg, value)) + return -E1000_ERR_CONFIG; + + return 0; +} + +s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + if (pcie_capability_write_word(adapter->pdev, reg, *value)) + return -E1000_ERR_CONFIG; + + return 0; +} + +static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl; + bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + + if (enable) { + /* enable VLAN tag insert/strip */ + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_VME; + wr32(E1000_CTRL, ctrl); + + 
/* Disable CFI check */ + rctl = rd32(E1000_RCTL); + rctl &= ~E1000_RCTL_CFIEN; + wr32(E1000_RCTL, rctl); + } else { + /* disable VLAN tag insert/strip */ + ctrl = rd32(E1000_CTRL); + ctrl &= ~E1000_CTRL_VME; + wr32(E1000_CTRL, ctrl); + } + + igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable); +} + +static int igb_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int pf_id = adapter->vfs_allocated_count; + + /* add the filter since PF can receive vlans w/o entry in vlvf */ + if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_vfta_set(hw, vid, pf_id, true, !!vid); + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int igb_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int pf_id = adapter->vfs_allocated_count; + struct e1000_hw *hw = &adapter->hw; + + /* remove VID from filter table */ + if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_vfta_set(hw, vid, pf_id, false, true); + + clear_bit(vid, adapter->active_vlans); + + return 0; +} + +static void igb_restore_vlan(struct igb_adapter *adapter) +{ + u16 vid = 1; + + igb_vlan_mode(adapter->netdev, adapter->netdev->features); + igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); + + for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) + igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NIC's only allow 1000 gbps Full duplex + * and 100Mbps Full duplex for 100baseFx sfp + */ + if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + case SPEED_10 + DUPLEX_FULL: + case SPEED_100 + DUPLEX_HALF: + goto err_inval; + default: + break; + } + } + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; + break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl, status; + u32 wufc = runtime ? 
E1000_WUFC_LNKC : adapter->wol; + bool wake; + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) + __igb_close(netdev, true); + + igb_ptp_suspend(adapter); + + igb_clear_interrupt_scheme(adapter); + rtnl_unlock(); + + status = rd32(E1000_STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + igb_setup_rctl(adapter); + igb_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { + rctl = rd32(E1000_RCTL); + rctl |= E1000_RCTL_MPE; + wr32(E1000_RCTL, rctl); + } + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_ADVD3WUC; + wr32(E1000_CTRL, ctrl); + + /* Allow time for pending master requests to run */ + igb_disable_pcie_master(hw); + + wr32(E1000_WUC, E1000_WUC_PME_EN); + wr32(E1000_WUFC, wufc); + } else { + wr32(E1000_WUC, 0); + wr32(E1000_WUFC, 0); + } + + wake = wufc || adapter->en_mng_pt; + if (!wake) + igb_power_down_link(adapter); + else + igb_power_up_link(adapter); + + if (enable_wake) + *enable_wake = wake; + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igb_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +static void igb_deliver_wake_packet(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sk_buff *skb; + u32 wupl; + + wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK; + + /* WUPM stores only the first 128 bytes of the wake packet. + * Read the packet only if we have the whole thing. + */ + if ((wupl == 0) || (wupl > E1000_WUPM_BYTES)) + return; + + skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES); + if (!skb) + return; + + skb_put(skb, wupl); + + /* Ensure reads are 32-bit aligned */ + wupl = roundup(wupl, 4); + + memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl); + + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); +} + +static int __maybe_unused igb_suspend(struct device *dev) +{ + return __igb_shutdown(to_pci_dev(dev), NULL, 0); +} + +static int __maybe_unused igb_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err, val; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!pci_device_is_present(pdev)) + return -ENODEV; + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, + "igb: Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igb_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igb_get_hw_control(adapter); + + val = rd32(E1000_WUS); + if (val & WAKE_PKT_WUS) + igb_deliver_wake_packet(netdev); + + wr32(E1000_WUS, ~0); + + rtnl_lock(); + if (!err && netif_running(netdev)) + err = __igb_open(netdev, true); + + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + + return err; +} + +static int __maybe_unused igb_runtime_idle(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + + if (!igb_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC * 5); + + return -EBUSY; +} + +static int __maybe_unused igb_runtime_suspend(struct device *dev) +{ + return __igb_shutdown(to_pci_dev(dev), NULL, 1); +} + +static int __maybe_unused igb_runtime_resume(struct device *dev) +{ + return igb_resume(dev); +} + +static void igb_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __igb_shutdown(pdev, &wake, 0); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_PCI_IOV +static int igb_sriov_reinit(struct pci_dev *dev) +{ + struct net_device *netdev = pci_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + rtnl_lock(); + + if (netif_running(netdev)) + igb_close(netdev); + else + igb_reset(adapter); + + igb_clear_interrupt_scheme(adapter); + + igb_init_queue_configuration(adapter); + + if (igb_init_interrupt_scheme(adapter, true)) { + rtnl_unlock(); + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + igb_open(netdev); + + rtnl_unlock(); + + return 0; +} + +static int igb_pci_disable_sriov(struct pci_dev *dev) +{ + int err = igb_disable_sriov(dev); + + if (!err) + err = igb_sriov_reinit(dev); + + return err; +} + +static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs) +{ + int err = igb_enable_sriov(dev, num_vfs); + + if (err) + goto out; + + err = igb_sriov_reinit(dev); + if (!err) + return num_vfs; + +out: + return err; +} + +#endif +static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + if (num_vfs == 0) + return igb_pci_disable_sriov(dev); + else + return igb_pci_enable_sriov(dev, num_vfs); +#endif + return 0; +} + +/** + * igb_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + **/ +static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igb_down(adapter); + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igb_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the igb_resume routine. 
+ **/ +static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + pci_ers_result_t result; + + if (pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + /* In case of PCI error, adapter lose its HW address + * so we should re-assign it here. + */ + hw->hw_addr = adapter->io_addr; + + igb_reset(adapter); + wr32(E1000_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + return result; +} + +/** + * igb_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the igb_resume routine. + */ +static void igb_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (igb_up(adapter)) { + dev_err(&pdev->dev, "igb_up failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igb_get_hw_control(adapter); +} + +/** + * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table + * @adapter: Pointer to adapter structure + * @index: Index of the RAR entry which need to be synced with MAC table + **/ +static void igb_rar_set_index(struct igb_adapter *adapter, u32 index) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rar_low, rar_high; + u8 *addr = adapter->mac_table[index].addr; + + /* HW expects these to be in network order when they are plugged + * into the registers which are little endian. In order to guarantee + * that ordering we need to do an leXX_to_cpup here in order to be + * ready for the byteswap that occurs with writel + */ + rar_low = le32_to_cpup((__le32 *)(addr)); + rar_high = le16_to_cpup((__le16 *)(addr + 4)); + + /* Indicate to hardware the Address is Valid. 
*/ + if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) { + if (is_valid_ether_addr(addr)) + rar_high |= E1000_RAH_AV; + + if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR) + rar_high |= E1000_RAH_ASEL_SRC_ADDR; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_i210: + if (adapter->mac_table[index].state & + IGB_MAC_STATE_QUEUE_STEERING) + rar_high |= E1000_RAH_QSEL_ENABLE; + + rar_high |= E1000_RAH_POOL_1 * + adapter->mac_table[index].queue; + break; + default: + rar_high |= E1000_RAH_POOL_1 << + adapter->mac_table[index].queue; + break; + } + } + + wr32(E1000_RAL(index), rar_low); + wrfl(); + wr32(E1000_RAH(index), rar_high); + wrfl(); +} + +static int igb_set_vf_mac(struct igb_adapter *adapter, + int vf, unsigned char *mac_addr) +{ + struct e1000_hw *hw = &adapter->hw; + /* VF MAC addresses start at end of receive addresses and moves + * towards the first, as a result a collision should not be possible + */ + int rar_entry = hw->mac.rar_entry_count - (vf + 1); + unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses; + + ether_addr_copy(vf_mac_addr, mac_addr); + ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr); + adapter->mac_table[rar_entry].queue = vf; + adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE; + igb_rar_set_index(adapter, rar_entry); + + return 0; +} + +static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + + /* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC + * flag and allows to overwrite the MAC via VF netdev. This + * is necessary to allow libvirt a way to restore the original + * MAC after unbinding vfio-pci and reloading igbvf after shutting + * down a VM. + */ + if (is_zero_ether_addr(mac)) { + adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, + "remove administratively set MAC on VF %d\n", + vf); + } else if (is_valid_ether_addr(mac)) { + adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", + mac, vf); + dev_info(&adapter->pdev->dev, + "Reload the VF driver to make this change effective."); + /* Generate additional warning if PF is down */ + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF MAC address has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + } else { + return -EINVAL; + } + return igb_set_vf_mac(adapter, vf, mac); +} + +static int igb_link_mbps(int internal_link_speed) +{ + switch (internal_link_speed) { + case SPEED_100: + return 100; + case SPEED_1000: + return 1000; + default: + return 0; + } +} + +static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, + int link_speed) +{ + int rf_dec, rf_int; + u32 bcnrc_val; + + if (tx_rate != 0) { + /* Calculate the rate factor values to set */ + rf_int = link_speed / tx_rate; + rf_dec = (link_speed - (rf_int * tx_rate)); + rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) / + tx_rate; + + bcnrc_val = E1000_RTTBCNRC_RS_ENA; + bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) & + E1000_RTTBCNRC_RF_INT_MASK); + bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); + } else { + bcnrc_val = 0; + } + + wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ + /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. 
MMW_SIZE=0x014 if 9728-byte jumbo is supported. + */ + wr32(E1000_RTTBCNRM, 0x14); + wr32(E1000_RTTBCNRC, bcnrc_val); +} + +static void igb_check_vf_rate_limit(struct igb_adapter *adapter) +{ + int actual_link_speed, i; + bool reset_rate = false; + + /* VF TX rate limit was not set or not supported */ + if ((adapter->vf_rate_link_speed == 0) || + (adapter->hw.mac.type != e1000_82576)) + return; + + actual_link_speed = igb_link_mbps(adapter->link_speed); + if (actual_link_speed != adapter->vf_rate_link_speed) { + reset_rate = true; + adapter->vf_rate_link_speed = 0; + dev_info(&adapter->pdev->dev, + "Link speed has been changed. VF Transmit rate is disabled\n"); + } + + for (i = 0; i < adapter->vfs_allocated_count; i++) { + if (reset_rate) + adapter->vf_data[i].tx_rate = 0; + + igb_set_vf_rate_limit(&adapter->hw, i, + adapter->vf_data[i].tx_rate, + actual_link_speed); + } +} + +static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, + int min_tx_rate, int max_tx_rate) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int actual_link_speed; + + if (hw->mac.type != e1000_82576) + return -EOPNOTSUPP; + + if (min_tx_rate) + return -EINVAL; + + actual_link_speed = igb_link_mbps(adapter->link_speed); + if ((vf >= adapter->vfs_allocated_count) || + (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || + (max_tx_rate < 0) || + (max_tx_rate > actual_link_speed)) + return -EINVAL; + + adapter->vf_rate_link_speed = actual_link_speed; + adapter->vf_data[vf].tx_rate = (u16)max_tx_rate; + igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed); + + return 0; +} + +static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, + bool setting) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 reg_val, reg_offset; + + if (!adapter->vfs_allocated_count) + return -EOPNOTSUPP; + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + + reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC; + reg_val = rd32(reg_offset); + if (setting) + reg_val |= (BIT(vf) | + BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); + else + reg_val &= ~(BIT(vf) | + BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); + wr32(reg_offset, reg_val); + + adapter->vf_data[vf].spoofchk_enabled = setting; + return 0; +} + +static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + if (adapter->vf_data[vf].trusted == setting) + return 0; + + adapter->vf_data[vf].trusted = setting; + + dev_info(&adapter->pdev->dev, "VF %u is %strusted\n", + vf, setting ? 
"" : "not "); + return 0; +} + +static int igb_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); + ivi->max_tx_rate = adapter->vf_data[vf].tx_rate; + ivi->min_tx_rate = 0; + ivi->vlan = adapter->vf_data[vf].pf_vlan; + ivi->qos = adapter->vf_data[vf].pf_qos; + ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; + ivi->trusted = adapter->vf_data[vf].trusted; + return 0; +} + +static void igb_vmm_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_i210: + case e1000_i211: + case e1000_i354: + default: + /* replication is not supported for 82575 */ + return; + case e1000_82576: + /* notify HW that the MAC is adding vlan tags */ + reg = rd32(E1000_DTXCTL); + reg |= E1000_DTXCTL_VLAN_ADDED; + wr32(E1000_DTXCTL, reg); + fallthrough; + case e1000_82580: + /* enable replication vlan tag stripping */ + reg = rd32(E1000_RPLOLR); + reg |= E1000_RPLOLR_STRVLAN; + wr32(E1000_RPLOLR, reg); + fallthrough; + case e1000_i350: + /* none of the above registers are supported by i350 */ + break; + } + + if (adapter->vfs_allocated_count) { + igb_vmdq_set_loopback_pf(hw, true); + igb_vmdq_set_replication_pf(hw, true); + igb_vmdq_set_anti_spoofing_pf(hw, true, + adapter->vfs_allocated_count); + } else { + igb_vmdq_set_loopback_pf(hw, false); + igb_vmdq_set_replication_pf(hw, false); + } +} + +static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) +{ + struct e1000_hw *hw = &adapter->hw; + u32 dmac_thr; + u16 hwm; + + if (hw->mac.type > e1000_82580) { + if (adapter->flags & IGB_FLAG_DMAC) { + u32 reg; + + /* force threshold to 0. */ + wr32(E1000_DMCTXTH, 0); + + /* DMA Coalescing high water mark needs to be greater + * than the Rx threshold. Set hwm to PBA - max frame + * size in 16B units, capping it at PBA - 6KB. + */ + hwm = 64 * (pba - 6); + reg = rd32(E1000_FCRTC); + reg &= ~E1000_FCRTC_RTH_COAL_MASK; + reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) + & E1000_FCRTC_RTH_COAL_MASK); + wr32(E1000_FCRTC, reg); + + /* Set the DMA Coalescing Rx threshold to PBA - 2 * max + * frame size, capping it at PBA - 10KB. 
+ */ + dmac_thr = pba - 10; + reg = rd32(E1000_DMACR); + reg &= ~E1000_DMACR_DMACTHR_MASK; + reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) + & E1000_DMACR_DMACTHR_MASK); + + /* transition to L0x or L1 if available..*/ + reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); + + /* watchdog timer= +-1000 usec in 32usec intervals */ + reg |= (1000 >> 5); + + /* Disable BMC-to-OS Watchdog Enable */ + if (hw->mac.type != e1000_i354) + reg &= ~E1000_DMACR_DC_BMC2OSW_EN; + + wr32(E1000_DMACR, reg); + + /* no lower threshold to disable + * coalescing(smart fifb)-UTRESH=0 + */ + wr32(E1000_DMCRTRH, 0); + + reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4); + + wr32(E1000_DMCTLX, reg); + + /* free space in tx packet buffer to wake from + * DMA coal + */ + wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - + (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); + + /* make low power state decision controlled + * by DMA coal + */ + reg = rd32(E1000_PCIEMISC); + reg &= ~E1000_PCIEMISC_LX_DECISION; + wr32(E1000_PCIEMISC, reg); + } /* endif adapter->dmac is not disabled */ + } else if (hw->mac.type == e1000_82580) { + u32 reg = rd32(E1000_PCIEMISC); + + wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION); + wr32(E1000_DMACR, 0); + } +} + +/** + * igb_read_i2c_byte - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: device address + * @data: value read + * + * Performs byte read operation over I2C interface at + * a specified device address. + **/ +s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); + struct i2c_client *this_client = adapter->i2c_client; + s32 status; + u16 swfw_mask = 0; + + if (!this_client) + return E1000_ERR_I2C; + + swfw_mask = E1000_SWFW_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return E1000_ERR_SWFW_SYNC; + + status = i2c_smbus_read_byte_data(this_client, byte_offset); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + if (status < 0) + return E1000_ERR_I2C; + else { + *data = status; + return 0; + } +} + +/** + * igb_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: value to write + * + * Performs byte write operation over I2C interface at + * a specified device address. 
+ **/ +s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); + struct i2c_client *this_client = adapter->i2c_client; + s32 status; + u16 swfw_mask = E1000_SWFW_PHY0_SM; + + if (!this_client) + return E1000_ERR_I2C; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return E1000_ERR_SWFW_SYNC; + status = i2c_smbus_write_byte_data(this_client, byte_offset, data); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + if (status) + return E1000_ERR_I2C; + else + return 0; + +} + +int igb_reinit_queues(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (netif_running(netdev)) + igb_close(netdev); + + igb_reset_interrupt_capability(adapter); + + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + err = igb_open(netdev); + + return err; +} + +static void igb_nfc_filter_exit(struct igb_adapter *adapter) +{ + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igb_erase_filter(adapter, rule); + + hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node) + igb_erase_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); +} + +static void igb_nfc_filter_restore(struct igb_adapter *adapter) +{ + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igb_add_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); +} +/* igb_main.c */ diff --git a/devices/igb/igb_main-5.14-orig.c b/devices/igb/igb_main-5.14-orig.c new file mode 100644 index 00000000..171a7a62 --- /dev/null +++ b/devices/igb/igb_main-5.14-orig.c @@ -0,0 +1,9996 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_IGB_DCA +#include +#endif +#include +#include "igb.h" + +enum queue_mode { + QUEUE_MODE_STRICT_PRIORITY, + QUEUE_MODE_STREAM_RESERVATION, +}; + +enum tx_queue_prio { + TX_QUEUE_PRIO_HIGH, + TX_QUEUE_PRIO_LOW, +}; + +char igb_driver_name[] = "igb"; +static const char igb_driver_string[] = + "Intel(R) Gigabit Ethernet Network Driver"; +static const char igb_copyright[] = + "Copyright (c) 2007-2014 Intel Corporation."; + +static const struct e1000_info *igb_info_tbl[] = { + [board_82575] = &e1000_82575_info, +}; + +static const struct pci_device_id igb_pci_tbl[] = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, igb_pci_tbl); + +static int igb_setup_all_tx_resources(struct igb_adapter *); +static int igb_setup_all_rx_resources(struct igb_adapter *); +static void igb_free_all_tx_resources(struct igb_adapter *); 
+static void igb_free_all_rx_resources(struct igb_adapter *); +static void igb_setup_mrqc(struct igb_adapter *); +static int igb_probe(struct pci_dev *, const struct pci_device_id *); +static void igb_remove(struct pci_dev *pdev); +static int igb_sw_init(struct igb_adapter *); +int igb_open(struct net_device *); +int igb_close(struct net_device *); +static void igb_configure(struct igb_adapter *); +static void igb_configure_tx(struct igb_adapter *); +static void igb_configure_rx(struct igb_adapter *); +static void igb_clean_all_tx_rings(struct igb_adapter *); +static void igb_clean_all_rx_rings(struct igb_adapter *); +static void igb_clean_tx_ring(struct igb_ring *); +static void igb_clean_rx_ring(struct igb_ring *); +static void igb_set_rx_mode(struct net_device *); +static void igb_update_phy_info(struct timer_list *); +static void igb_watchdog(struct timer_list *); +static void igb_watchdog_task(struct work_struct *); +static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); +static void igb_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); +static int igb_change_mtu(struct net_device *, int); +static int igb_set_mac(struct net_device *, void *); +static void igb_set_uta(struct igb_adapter *adapter, bool set); +static irqreturn_t igb_intr(int irq, void *); +static irqreturn_t igb_intr_msi(int irq, void *); +static irqreturn_t igb_msix_other(int irq, void *); +static irqreturn_t igb_msix_ring(int irq, void *); +#ifdef CONFIG_IGB_DCA +static void igb_update_dca(struct igb_q_vector *); +static void igb_setup_dca(struct igb_adapter *); +#endif /* CONFIG_IGB_DCA */ +static int igb_poll(struct napi_struct *, int); +static bool igb_clean_tx_irq(struct igb_q_vector *, int); +static int igb_clean_rx_irq(struct igb_q_vector *, int); +static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); +static void igb_tx_timeout(struct net_device *, unsigned int txqueue); +static void igb_reset_task(struct work_struct *); +static void igb_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16); +static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16); +static void igb_restore_vlan(struct igb_adapter *); +static void igb_rar_set_index(struct igb_adapter *, u32); +static void igb_ping_all_vfs(struct igb_adapter *); +static void igb_msg_task(struct igb_adapter *); +static void igb_vmm_control(struct igb_adapter *); +static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); +static void igb_flush_mac_table(struct igb_adapter *); +static int igb_available_rars(struct igb_adapter *, u8); +static void igb_set_default_mac_filter(struct igb_adapter *); +static int igb_uc_sync(struct net_device *, const unsigned char *); +static int igb_uc_unsync(struct net_device *, const unsigned char *); +static void igb_restore_vf_multicasts(struct igb_adapter *adapter); +static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); +static int igb_ndo_set_vf_vlan(struct net_device *netdev, + int vf, u16 vlan, u8 qos, __be16 vlan_proto); +static int igb_ndo_set_vf_bw(struct net_device *, int, int, int); +static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, + bool setting); +static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, + bool setting); +static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi); +static void igb_check_vf_rate_limit(struct igb_adapter *); +static void igb_nfc_filter_exit(struct 
igb_adapter *adapter); +static void igb_nfc_filter_restore(struct igb_adapter *adapter); + +#ifdef CONFIG_PCI_IOV +static int igb_vf_configure(struct igb_adapter *adapter, int vf); +static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); +static int igb_disable_sriov(struct pci_dev *dev); +static int igb_pci_disable_sriov(struct pci_dev *dev); +#endif + +static int igb_suspend(struct device *); +static int igb_resume(struct device *); +static int igb_runtime_suspend(struct device *dev); +static int igb_runtime_resume(struct device *dev); +static int igb_runtime_idle(struct device *dev); +static const struct dev_pm_ops igb_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) + SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, + igb_runtime_idle) +}; +static void igb_shutdown(struct pci_dev *); +static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +#ifdef CONFIG_IGB_DCA +static int igb_notify_dca(struct notifier_block *, unsigned long, void *); +static struct notifier_block dca_notifier = { + .notifier_call = igb_notify_dca, + .next = NULL, + .priority = 0 +}; +#endif +#ifdef CONFIG_PCI_IOV +static unsigned int max_vfs; +module_param(max_vfs, uint, 0); +MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function"); +#endif /* CONFIG_PCI_IOV */ + +static pci_ers_result_t igb_io_error_detected(struct pci_dev *, + pci_channel_state_t); +static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); +static void igb_io_resume(struct pci_dev *); + +static const struct pci_error_handlers igb_err_handler = { + .error_detected = igb_io_error_detected, + .slot_reset = igb_io_slot_reset, + .resume = igb_io_resume, +}; + +static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); + +static struct pci_driver igb_driver = { + .name = igb_driver_name, + .id_table = igb_pci_tbl, + .probe = igb_probe, + .remove = igb_remove, +#ifdef CONFIG_PM + .driver.pm = &igb_pm_ops, +#endif + .shutdown = igb_shutdown, + .sriov_configure = igb_pci_sriov_configure, + .err_handler = &igb_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); +MODULE_LICENSE("GPL v2"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +struct igb_reg_info { + u32 ofs; + char *name; +}; + +static const struct igb_reg_info igb_reg_info_tbl[] = { + + /* General Registers */ + {E1000_CTRL, "CTRL"}, + {E1000_STATUS, "STATUS"}, + {E1000_CTRL_EXT, "CTRL_EXT"}, + + /* Interrupt Registers */ + {E1000_ICR, "ICR"}, + + /* RX Registers */ + {E1000_RCTL, "RCTL"}, + {E1000_RDLEN(0), "RDLEN"}, + {E1000_RDH(0), "RDH"}, + {E1000_RDT(0), "RDT"}, + {E1000_RXDCTL(0), "RXDCTL"}, + {E1000_RDBAL(0), "RDBAL"}, + {E1000_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {E1000_TCTL, "TCTL"}, + {E1000_TDBAL(0), "TDBAL"}, + {E1000_TDBAH(0), "TDBAH"}, + {E1000_TDLEN(0), "TDLEN"}, + {E1000_TDH(0), "TDH"}, + {E1000_TDT(0), "TDT"}, + {E1000_TXDCTL(0), "TXDCTL"}, + {E1000_TDFH, "TDFH"}, + {E1000_TDFT, "TDFT"}, + {E1000_TDFHS, "TDFHS"}, + {E1000_TDFPC, "TDFPC"}, + + /* List Terminator */ + {} +}; + +/* igb_regdump - register printout routine */ +static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) +{ + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case E1000_RDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDLEN(n)); + break; 
+ case E1000_RDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDH(n)); + break; + case E1000_RDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDT(n)); + break; + case E1000_RXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RXDCTL(n)); + break; + case E1000_RDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDBAL(n)); + break; + case E1000_RDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDBAH(n)); + break; + case E1000_TDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDBAL(n)); + break; + case E1000_TDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDBAH(n)); + break; + case E1000_TDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDLEN(n)); + break; + case E1000_TDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDH(n)); + break; + case E1000_TDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDT(n)); + break; + case E1000_TXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TXDCTL(n)); + break; + default: + pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); + pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], + regs[2], regs[3]); +} + +/* igb_dump - Print registers, Tx-rings and Rx-rings */ +static void igb_dump(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct igb_reg_info *reginfo; + struct igb_ring *tx_ring; + union e1000_adv_tx_desc *tx_desc; + struct my_u0 { __le64 a; __le64 b; } *u0; + struct igb_ring *rx_ring; + union e1000_adv_rx_desc *rx_desc; + u32 staterr; + u16 i, n; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + pr_info("Device Name state trans_start\n"); + pr_info("%-15s %016lX %016lX\n", netdev->name, + netdev->state, dev_trans_start(netdev)); + } + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl; + reginfo->name; reginfo++) { + igb_regdump(hw, reginfo); + } + + /* Print TX Ring Summary */ + if (!netdev || !netif_running(netdev)) + goto exit; + + dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + for (n = 0; n < adapter->num_tx_queues; n++) { + struct igb_tx_buffer *buffer_info; + tx_ring = adapter->tx_ring[n]; + buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 15 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = 
%d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + struct igb_tx_buffer *buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, i); + buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", + i, le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + dma_unmap_len(buffer_info, len), + true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info(" %5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + struct igb_rx_buffer *buffer_info; + buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IGB_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + next_desc); + } else { + pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, + le64_to_cpu(u0->a), 
+ le64_to_cpu(u0->b), + (u64)buffer_info->dma, + next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->page) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + page_address(buffer_info->page) + + buffer_info->page_offset, + igb_rx_bufsz(rx_ring), true); + } + } + } + } + +exit: + return; +} + +/** + * igb_get_i2c_data - Reads the I2C SDA data bit + * @data: opaque pointer to adapter struct + * + * Returns the I2C data bit value + **/ +static int igb_get_i2c_data(void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + return !!(i2cctl & E1000_I2C_DATA_IN); +} + +/** + * igb_set_i2c_data - Sets the I2C data bit + * @data: pointer to hardware structure + * @state: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + **/ +static void igb_set_i2c_data(void *data, int state) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + if (state) + i2cctl |= E1000_I2C_DATA_OUT; + else + i2cctl &= ~E1000_I2C_DATA_OUT; + + i2cctl &= ~E1000_I2C_DATA_OE_N; + i2cctl |= E1000_I2C_CLK_OE_N; + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); + +} + +/** + * igb_set_i2c_clk - Sets the I2C SCL clock + * @data: pointer to hardware structure + * @state: state to set clock + * + * Sets the I2C clock line to state + **/ +static void igb_set_i2c_clk(void *data, int state) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + if (state) { + i2cctl |= E1000_I2C_CLK_OUT; + i2cctl &= ~E1000_I2C_CLK_OE_N; + } else { + i2cctl &= ~E1000_I2C_CLK_OUT; + i2cctl &= ~E1000_I2C_CLK_OE_N; + } + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); +} + +/** + * igb_get_i2c_clk - Gets the I2C SCL clock state + * @data: pointer to hardware structure + * + * Gets the I2C clock state + **/ +static int igb_get_i2c_clk(void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + return !!(i2cctl & E1000_I2C_CLK_IN); +} + +static const struct i2c_algo_bit_data igb_i2c_algo = { + .setsda = igb_set_i2c_data, + .setscl = igb_set_i2c_clk, + .getsda = igb_get_i2c_data, + .getscl = igb_get_i2c_clk, + .udelay = 5, + .timeout = 20, +}; + +/** + * igb_get_hw_dev - return device + * @hw: pointer to hardware structure + * + * used by hardware layer to print debugging information + **/ +struct net_device *igb_get_hw_dev(struct e1000_hw *hw) +{ + struct igb_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * igb_init_module - Driver Registration Routine + * + * igb_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init igb_init_module(void) +{ + int ret; + + pr_info("%s\n", igb_driver_string); + pr_info("%s\n", igb_copyright); + +#ifdef CONFIG_IGB_DCA + dca_register_notify(&dca_notifier); +#endif + ret = pci_register_driver(&igb_driver); + return ret; +} + +module_init(igb_init_module); + +/** + * igb_exit_module - Driver Exit Cleanup Routine + * + * igb_exit_module is called just before the driver is removed + * from memory. 
+ **/ +static void __exit igb_exit_module(void) +{ +#ifdef CONFIG_IGB_DCA + dca_unregister_notify(&dca_notifier); +#endif + pci_unregister_driver(&igb_driver); +} + +module_exit(igb_exit_module); + +#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) +/** + * igb_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + **/ +static void igb_cache_ring_register(struct igb_adapter *adapter) +{ + int i = 0, j = 0; + u32 rbase_offset = adapter->vfs_allocated_count; + + switch (adapter->hw.mac.type) { + case e1000_82576: + /* The queues are allocated for virtualization such that VF 0 + * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc. + * In order to avoid collision we start at the first free queue + * and continue consuming queues in the same sequence + */ + if (adapter->vfs_allocated_count) { + for (; i < adapter->rss_queues; i++) + adapter->rx_ring[i]->reg_idx = rbase_offset + + Q_IDX_82576(i); + } + fallthrough; + case e1000_82575: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + default: + for (; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = rbase_offset + i; + for (; j < adapter->num_tx_queues; j++) + adapter->tx_ring[j]->reg_idx = rbase_offset + j; + break; + } +} + +u32 igb_rd32(struct e1000_hw *hw, u32 reg) +{ + struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); + u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); + u32 value = 0; + + if (E1000_REMOVED(hw_addr)) + return ~value; + + value = readl(&hw_addr[reg]); + + /* reads should not return all F's */ + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct net_device *netdev = igb->netdev; + hw->hw_addr = NULL; + netdev_err(netdev, "PCIe link lost\n"); + WARN(pci_device_is_present(igb->pdev), + "igb: Failed to read reg 0x%x!\n", reg); + } + + return value; +} + +/** + * igb_write_ivar - configure ivar for given MSI-X vector + * @hw: pointer to the HW structure + * @msix_vector: vector number we are allocating to a given ring + * @index: row index of IVAR register to write within IVAR table + * @offset: column offset of in IVAR, should be multiple of 8 + * + * This function is intended to handle the writing of the IVAR register + * for adapters 82576 and newer. The IVAR table consists of 2 columns, + * each containing an cause allocation for an Rx and Tx ring, and a + * variable number of rows depending on the number of queues supported. + **/ +static void igb_write_ivar(struct e1000_hw *hw, int msix_vector, + int index, int offset) +{ + u32 ivar = array_rd32(E1000_IVAR0, index); + + /* clear any bits that are currently set */ + ivar &= ~((u32)0xFF << offset); + + /* write vector and valid bit */ + ivar |= (msix_vector | E1000_IVAR_VALID) << offset; + + array_wr32(E1000_IVAR0, index, ivar); +} + +#define IGB_N0_QUEUE -1 +static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + int rx_queue = IGB_N0_QUEUE; + int tx_queue = IGB_N0_QUEUE; + u32 msixbm = 0; + + if (q_vector->rx.ring) + rx_queue = q_vector->rx.ring->reg_idx; + if (q_vector->tx.ring) + tx_queue = q_vector->tx.ring->reg_idx; + + switch (hw->mac.type) { + case e1000_82575: + /* The 82575 assigns vectors using a bitmask, which matches the + * bitmask for the EICR/EIMS/EIMC registers. 
To assign one + * or more queues to a vector, we write the appropriate bits + * into the MSIXBM register for that vector. + */ + if (rx_queue > IGB_N0_QUEUE) + msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; + if (tx_queue > IGB_N0_QUEUE) + msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; + if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0) + msixbm |= E1000_EIMS_OTHER; + array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); + q_vector->eims_value = msixbm; + break; + case e1000_82576: + /* 82576 uses a table that essentially consists of 2 columns + * with 8 rows. The ordering is column-major so we use the + * lower 3 bits as the row index, and the 4th bit as the + * column offset. + */ + if (rx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + rx_queue & 0x7, + (rx_queue & 0x8) << 1); + if (tx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + tx_queue & 0x7, + ((tx_queue & 0x8) << 1) + 8); + q_vector->eims_value = BIT(msix_vector); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + /* On 82580 and newer adapters the scheme is similar to 82576 + * however instead of ordering column-major we have things + * ordered row-major. So we traverse the table by using + * bit 0 as the column offset, and the remaining bits as the + * row index. + */ + if (rx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + rx_queue >> 1, + (rx_queue & 0x1) << 4); + if (tx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + tx_queue >> 1, + ((tx_queue & 0x1) << 4) + 8); + q_vector->eims_value = BIT(msix_vector); + break; + default: + BUG(); + break; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + + /* configure q_vector to set itr on first interrupt */ + q_vector->set_itr = 1; +} + +/** + * igb_configure_msix - Configure MSI-X hardware + * @adapter: board private structure to initialize + * + * igb_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + **/ +static void igb_configure_msix(struct igb_adapter *adapter) +{ + u32 tmp; + int i, vector = 0; + struct e1000_hw *hw = &adapter->hw; + + adapter->eims_enable_mask = 0; + + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case e1000_82575: + tmp = rd32(E1000_CTRL_EXT); + /* enable MSI-X PBA support*/ + tmp |= E1000_CTRL_EXT_PBA_CLR; + + /* Auto-Mask interrupts upon ICR read. */ + tmp |= E1000_CTRL_EXT_EIAME; + tmp |= E1000_CTRL_EXT_IRCA; + + wr32(E1000_CTRL_EXT, tmp); + + /* enable msix_other interrupt */ + array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); + adapter->eims_other = E1000_EIMS_OTHER; + + break; + + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. 
+ */ + wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | + E1000_GPIE_PBA | E1000_GPIE_EIAME | + E1000_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = BIT(vector); + tmp = (vector++ | E1000_IVAR_VALID) << 8; + + wr32(E1000_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) + igb_assign_vector(adapter->q_vector[i], vector++); + + wrfl(); +} + +/** + * igb_request_msix - Initialize MSI-X interrupts + * @adapter: board private structure to initialize + * + * igb_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. + **/ +static int igb_request_msix(struct igb_adapter *adapter) +{ + unsigned int num_q_vectors = adapter->num_q_vectors; + struct net_device *netdev = adapter->netdev; + int i, err = 0, vector = 0, free_vector = 0; + + err = request_irq(adapter->msix_entries[vector].vector, + igb_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + + vector++; + + q_vector->itr_register = adapter->io_addr + E1000_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else if (q_vector->tx.ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx.ring->queue_index); + else if (q_vector->rx.ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else + sprintf(q_vector->name, "%s-unused", netdev->name); + + err = request_irq(adapter->msix_entries[vector].vector, + igb_msix_ring, 0, q_vector->name, + q_vector); + if (err) + goto err_free; + } + + igb_configure_msix(adapter); + return 0; + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: + return err; +} + +/** + * igb_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + **/ +static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + adapter->q_vector[v_idx] = NULL; + + /* igb_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + if (q_vector) + kfree_rcu(q_vector, rcu); +} + +/** + * igb_reset_q_vector - Reset config for interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be reset + * + * If NAPI is enabled it will delete any references to the + * NAPI struct. This is preparation for igb_free_q_vector. + **/ +static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + /* Coming from igb_set_interrupt_capability, the vectors are not yet + * allocated. So, q_vector is NULL so we should stop here. 
+ */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; + + netif_napi_del(&q_vector->napi); + +} + +static void igb_reset_interrupt_capability(struct igb_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) + pci_disable_msix(adapter->pdev); + else if (adapter->flags & IGB_FLAG_HAS_MSI) + pci_disable_msi(adapter->pdev); + + while (v_idx--) + igb_reset_q_vector(adapter, v_idx); +} + +/** + * igb_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void igb_free_q_vectors(struct igb_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) { + igb_reset_q_vector(adapter, v_idx); + igb_free_q_vector(adapter, v_idx); + } +} + +/** + * igb_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: board private structure to initialize + * + * This function resets the device so that it has 0 Rx queues, Tx queues, and + * MSI-X interrupts allocated. + */ +static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) +{ + igb_free_q_vectors(adapter); + igb_reset_interrupt_capability(adapter); +} + +/** + * igb_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: board private structure to initialize + * @msix: boolean value of MSIX capability + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) +{ + int err; + int numvecs, i; + + if (!msix) + goto msi_only; + adapter->flags |= IGB_FLAG_HAS_MSIX; + + /* Number of supported queues. 
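+ *
+ * As a rough illustration (the figures are assumed, not read from any
+ * particular adapter): with rss_queues = 4, no VFs and queue pairing
+ * disabled (IGB_FLAG_QUEUE_PAIRS cleared), the code below ends up with
+ * num_rx_queues = num_tx_queues = 4, num_q_vectors = 8, and requests
+ * numvecs = 9 MSI-X vectors (one extra for link status).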
*/ + adapter->num_rx_queues = adapter->rss_queues; + if (adapter->vfs_allocated_count) + adapter->num_tx_queues = 1; + else + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every Rx queue */ + numvecs = adapter->num_rx_queues; + + /* if Tx handler is separate add 1 for every Tx queue */ + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) + return; + + igb_reset_interrupt_capability(adapter); + + /* If we can't do MSI-X, try MSI */ +msi_only: + adapter->flags &= ~IGB_FLAG_HAS_MSIX; +#ifdef CONFIG_PCI_IOV + /* disable SR-IOV for non MSI-X configurations */ + if (adapter->vf_data) { + struct e1000_hw *hw = &adapter->hw; + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); + msleep(500); + + kfree(adapter->vf_mac_list); + adapter->vf_mac_list = NULL; + kfree(adapter->vf_data); + adapter->vf_data = NULL; + wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + wrfl(); + msleep(100); + dev_info(&adapter->pdev->dev, "IOV Disabled\n"); + } +#endif + adapter->vfs_allocated_count = 0; + adapter->rss_queues = 1; + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGB_FLAG_HAS_MSI; +} + +static void igb_add_ring(struct igb_ring *ring, + struct igb_ring_container *head) +{ + head->ring = ring; + head->count++; +} + +/** + * igb_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
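+ *
+ * For illustration: the rings live inline behind the q_vector, allocated
+ * via struct_size(q_vector, ring, ring_count). With a paired vector
+ * (txr_count = 1, rxr_count = 1), the Tx ring ends up at
+ * q_vector->ring[0] and the Rx ring at q_vector->ring[1], because the Tx
+ * ring is initialized first and the ring pointer is then advanced.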
+ **/ +static int igb_alloc_q_vector(struct igb_adapter *adapter, + int v_count, int v_idx, + int txr_count, int txr_idx, + int rxr_count, int rxr_idx) +{ + struct igb_q_vector *q_vector; + struct igb_ring *ring; + int ring_count; + size_t size; + + /* igb only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + size = struct_size(q_vector, ring, ring_count); + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; + if (!q_vector) { + q_vector = kzalloc(size, GFP_KERNEL); + } else if (size > ksize(q_vector)) { + kfree_rcu(q_vector, rcu); + q_vector = kzalloc(size, GFP_KERNEL); + } else { + memset(q_vector, 0, size); + } + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + igb_poll, 64); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ + q_vector->itr_register = adapter->io_addr + E1000_EITR(0); + q_vector->itr_val = IGB_START_ITR; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* intialize ITR */ + if (rxr_count) { + /* rx or rx/tx vector */ + if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) + q_vector->itr_val = adapter->rx_itr_setting; + } else { + /* tx only vector */ + if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) + q_vector->itr_val = adapter->tx_itr_setting; + } + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + igb_add_ring(ring, &q_vector->tx); + + /* For 82575, context index must be unique per ring. */ + if (adapter->hw.mac.type == e1000_82575) + set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + ring->cbs_enable = false; + ring->idleslope = 0; + ring->sendslope = 0; + ring->hicredit = 0; + ring->locredit = 0; + + u64_stats_init(&ring->tx_syncp); + u64_stats_init(&ring->tx_syncp2); + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + igb_add_ring(ring, &q_vector->rx); + + /* set flag indicating ring supports SCTP checksum offload */ + if (adapter->hw.mac.type >= e1000_82576) + set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); + + /* On i350, i354, i210, and i211, loopback VLAN packets + * have the tag byte-swapped. + */ + if (adapter->hw.mac.type >= e1000_i350) + set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + u64_stats_init(&ring->rx_syncp); + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; +} + + +/** + * igb_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
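+ *
+ * As a worked example (queue counts assumed): with 4 Rx and 4 Tx queues
+ * and num_q_vectors = 4 (queue pairing enabled), the Rx-only loop is
+ * skipped because 4 < 4 + 4, and every pass of the second loop yields
+ * rqpv = tqpv = 1, so each of the four vectors gets exactly one Tx and
+ * one Rx ring.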
+ **/ +static int igb_alloc_q_vectors(struct igb_adapter *adapter) +{ + int q_vectors = adapter->num_q_vectors; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igb_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * @adapter: board private structure to initialize + * @msix: boolean value of MSIX capability + * + * This function initializes the interrupts and allocates all of the queues. + **/ +static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) +{ + struct pci_dev *pdev = adapter->pdev; + int err; + + igb_set_interrupt_capability(adapter, msix); + + err = igb_alloc_q_vectors(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + igb_cache_ring_register(adapter); + + return 0; + +err_alloc_q_vectors: + igb_reset_interrupt_capability(adapter); + return err; +} + +/** + * igb_request_irq - initialize interrupts + * @adapter: board private structure to initialize + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
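+ *
+ * The fallback order in the body below is: MSI-X (one vector per
+ * q_vector plus one for "other" causes), then single-vector MSI, then a
+ * legacy shared INTx interrupt. When MSI-X cannot be used, the interrupt
+ * scheme is re-initialized for a single queue pair before falling back.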
+ **/ +static int igb_request_irq(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + err = igb_request_msix(adapter); + if (!err) + goto request_done; + /* fall back to MSI */ + igb_free_all_tx_resources(adapter); + igb_free_all_rx_resources(adapter); + + igb_clear_interrupt_scheme(adapter); + err = igb_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; + + igb_setup_all_tx_resources(adapter); + igb_setup_all_rx_resources(adapter); + igb_configure(adapter); + } + + igb_assign_vector(adapter->q_vector[0], 0); + + if (adapter->flags & IGB_FLAG_HAS_MSI) { + err = request_irq(pdev->irq, igb_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; + + /* fall back to legacy interrupts */ + igb_reset_interrupt_capability(adapter); + adapter->flags &= ~IGB_FLAG_HAS_MSI; + } + + err = request_irq(pdev->irq, igb_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + dev_err(&pdev->dev, "Error %d getting interrupt\n", + err); + +request_done: + return err; +} + +static void igb_free_irq(struct igb_adapter *adapter) +{ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) + free_irq(adapter->msix_entries[vector++].vector, + adapter->q_vector[i]); + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * igb_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void igb_irq_disable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* we need to be careful when disabling interrupts. The VFs are also + * mapped into these registers and so clearing the bits can cause + * issues on the VF drivers so we only need to clear what we set + */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + u32 regval = rd32(E1000_EIAM); + + wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); + wr32(E1000_EIMC, adapter->eims_enable_mask); + regval = rd32(E1000_EIAC); + wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); + } + + wr32(E1000_IAM, 0); + wr32(E1000_IMC, ~0); + wrfl(); + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + int i; + + for (i = 0; i < adapter->num_q_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * igb_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void igb_irq_enable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA; + u32 regval = rd32(E1000_EIAC); + + wr32(E1000_EIAC, regval | adapter->eims_enable_mask); + regval = rd32(E1000_EIAM); + wr32(E1000_EIAM, regval | adapter->eims_enable_mask); + wr32(E1000_EIMS, adapter->eims_enable_mask); + if (adapter->vfs_allocated_count) { + wr32(E1000_MBVFIMR, 0xFF); + ims |= E1000_IMS_VMMB; + } + wr32(E1000_IMS, ims); + } else { + wr32(E1000_IMS, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + wr32(E1000_IAM, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + } +} + +static void igb_update_mng_vlan(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 pf_id = adapter->vfs_allocated_count; + u16 vid = adapter->hw.mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (hw->mng_cookie.status & 
E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + /* add VID to filter table */ + igb_vfta_set(hw, vid, pf_id, true, true); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; + } + + if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) { + /* remove VID from filter table */ + igb_vfta_set(hw, vid, pf_id, false, true); + } +} + +/** + * igb_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + **/ +static void igb_release_hw_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware take over control of h/w */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); +} + +/** + * igb_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. + **/ +static void igb_get_hw_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); +} + +static void enable_fqtss(struct igb_adapter *adapter, bool enable) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + + WARN_ON(hw->mac.type != e1000_i210); + + if (enable) + adapter->flags |= IGB_FLAG_FQTSS; + else + adapter->flags &= ~IGB_FLAG_FQTSS; + + if (netif_running(netdev)) + schedule_work(&adapter->reset_task); +} + +static bool is_fqtss_enabled(struct igb_adapter *adapter) +{ + return (adapter->flags & IGB_FLAG_FQTSS) ? true : false; +} + +static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue, + enum tx_queue_prio prio) +{ + u32 val; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 4); + + val = rd32(E1000_I210_TXDCTL(queue)); + + if (prio == TX_QUEUE_PRIO_HIGH) + val |= E1000_TXDCTL_PRIORITY; + else + val &= ~E1000_TXDCTL_PRIORITY; + + wr32(E1000_I210_TXDCTL(queue), val); +} + +static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode) +{ + u32 val; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 1); + + val = rd32(E1000_I210_TQAVCC(queue)); + + if (mode == QUEUE_MODE_STREAM_RESERVATION) + val |= E1000_TQAVCC_QUEUEMODE; + else + val &= ~E1000_TQAVCC_QUEUEMODE; + + wr32(E1000_I210_TQAVCC(queue), val); +} + +static bool is_any_cbs_enabled(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]->cbs_enable) + return true; + } + + return false; +} + +static bool is_any_txtime_enabled(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]->launchtime_enable) + return true; + } + + return false; +} + +/** + * igb_config_tx_modes - Configure "Qav Tx mode" features on igb + * @adapter: pointer to adapter struct + * @queue: queue number + * + * Configure CBS and Launchtime for a given hardware queue. 
+ * Parameters are retrieved from the correct Tx ring, so + * igb_save_cbs_params() and igb_save_txtime_params() should be used + * for setting those correctly prior to this function being called. + **/ +static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct igb_ring *ring; + u32 tqavcc, tqavctrl; + u16 value; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 1); + ring = adapter->tx_ring[queue]; + + /* If any of the Qav features is enabled, configure queues as SR and + * with HIGH PRIO. If none is, then configure them with LOW PRIO and + * as SP. + */ + if (ring->cbs_enable || ring->launchtime_enable) { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); + set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); + } else { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW); + set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY); + } + + /* If CBS is enabled, set DataTranARB and config its parameters. */ + if (ring->cbs_enable || queue == 0) { + /* i210 does not allow the queue 0 to be in the Strict + * Priority mode while the Qav mode is enabled, so, + * instead of disabling strict priority mode, we give + * queue 0 the maximum of credits possible. + * + * See section 8.12.19 of the i210 datasheet, "Note: + * Queue0 QueueMode must be set to 1b when + * TransmitMode is set to Qav." + */ + if (queue == 0 && !ring->cbs_enable) { + /* max "linkspeed" idleslope in kbps */ + ring->idleslope = 1000000; + ring->hicredit = ETH_FRAME_LEN; + } + + /* Always set data transfer arbitration to credit-based + * shaper algorithm on TQAVCTRL if CBS is enabled for any of + * the queues. + */ + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl |= E1000_TQAVCTRL_DATATRANARB; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + + /* According to i210 datasheet section 7.2.7.7, we should set + * the 'idleSlope' field from TQAVCC register following the + * equation: + * + * For 100 Mbps link speed: + * + * value = BW * 0x7735 * 0.2 (E1) + * + * For 1000Mbps link speed: + * + * value = BW * 0x7735 * 2 (E2) + * + * E1 and E2 can be merged into one equation as shown below. + * Note that 'link-speed' is in Mbps. + * + * value = BW * 0x7735 * 2 * link-speed + * -------------- (E3) + * 1000 + * + * 'BW' is the percentage bandwidth out of full link speed + * which can be found with the following equation. Note that + * idleSlope here is the parameter from this function which + * is in kbps. + * + * BW = idleSlope + * ----------------- (E4) + * link-speed * 1000 + * + * That said, we can come up with a generic equation to + * calculate the value we should set it TQAVCC register by + * replacing 'BW' in E3 by E4. The resulting equation is: + * + * value = idleSlope * 0x7735 * 2 * link-speed + * ----------------- -------------- (E5) + * link-speed * 1000 1000 + * + * 'link-speed' is present in both sides of the fraction so + * it is canceled out. The final equation is the following: + * + * value = idleSlope * 61034 + * ----------------- (E6) + * 1000000 + * + * NOTE: For i210, given the above, we can see that idleslope + * is represented in 16.38431 kbps units by the value at + * the TQAVCC register (1Gbps / 61034), which reduces + * the granularity for idleslope increments. + * For instance, if you want to configure a 2576kbps + * idleslope, the value to be written on the register + * would have to be 157.23. 
If rounded down, you end + * up with less bandwidth available than originally + * required (~2572 kbps). If rounded up, you end up + * with a higher bandwidth (~2589 kbps). Below the + * approach we take is to always round up the + * calculated value, so the resulting bandwidth might + * be slightly higher for some configurations. + */ + value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000); + + tqavcc = rd32(E1000_I210_TQAVCC(queue)); + tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; + tqavcc |= value; + wr32(E1000_I210_TQAVCC(queue), tqavcc); + + wr32(E1000_I210_TQAVHC(queue), + 0x80000000 + ring->hicredit * 0x7735); + } else { + + /* Set idleSlope to zero. */ + tqavcc = rd32(E1000_I210_TQAVCC(queue)); + tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; + wr32(E1000_I210_TQAVCC(queue), tqavcc); + + /* Set hiCredit to zero. */ + wr32(E1000_I210_TQAVHC(queue), 0); + + /* If CBS is not enabled for any queues anymore, then return to + * the default state of Data Transmission Arbitration on + * TQAVCTRL. + */ + if (!is_any_cbs_enabled(adapter)) { + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } + } + + /* If LaunchTime is enabled, set DataTranTIM. */ + if (ring->launchtime_enable) { + /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled + * for any of the SR queues, and configure fetchtime delta. + * XXX NOTE: + * - LaunchTime will be enabled for all SR queues. + * - A fixed offset can be added relative to the launch + * time of all packets if configured at reg LAUNCH_OS0. + * We are keeping it as 0 for now (default value). + */ + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl |= E1000_TQAVCTRL_DATATRANTIM | + E1000_TQAVCTRL_FETCHTIME_DELTA; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } else { + /* If Launchtime is not enabled for any SR queues anymore, + * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta, + * effectively disabling Launchtime. + */ + if (!is_any_txtime_enabled(adapter)) { + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM; + tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } + } + + /* XXX: In i210 controller the sendSlope and loCredit parameters from + * CBS are not configurable by software so we don't do any 'controller + * configuration' in respect to these parameters. + */ + + netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n", + ring->cbs_enable ? "enabled" : "disabled", + ring->launchtime_enable ? 
"enabled" : "disabled", + queue, + ring->idleslope, ring->sendslope, + ring->hicredit, ring->locredit); +} + +static int igb_save_txtime_params(struct igb_adapter *adapter, int queue, + bool enable) +{ + struct igb_ring *ring; + + if (queue < 0 || queue > adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + ring->launchtime_enable = enable; + + return 0; +} + +static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + struct igb_ring *ring; + + if (queue < 0 || queue > adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +/** + * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable + * @adapter: pointer to adapter struct + * + * Configure TQAVCTRL register switching the controller's Tx mode + * if FQTSS mode is enabled or disabled. Additionally, will issue + * a call to igb_config_tx_modes() per queue so any previously saved + * Tx parameters are applied. + **/ +static void igb_setup_tx_mode(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 val; + + /* Only i210 controller supports changing the transmission mode. */ + if (hw->mac.type != e1000_i210) + return; + + if (is_fqtss_enabled(adapter)) { + int i, max_queue; + + /* Configure TQAVCTRL register: set transmit mode to 'Qav', + * set data fetch arbitration to 'round robin', set SP_WAIT_SR + * so SP queues wait for SR ones. + */ + val = rd32(E1000_I210_TQAVCTRL); + val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR; + val &= ~E1000_TQAVCTRL_DATAFETCHARB; + wr32(E1000_I210_TQAVCTRL, val); + + /* Configure Tx and Rx packet buffers sizes as described in + * i210 datasheet section 7.2.7.7. + */ + val = rd32(E1000_TXPBS); + val &= ~I210_TXPBSIZE_MASK; + val |= I210_TXPBSIZE_PB0_6KB | I210_TXPBSIZE_PB1_6KB | + I210_TXPBSIZE_PB2_6KB | I210_TXPBSIZE_PB3_6KB; + wr32(E1000_TXPBS, val); + + val = rd32(E1000_RXPBS); + val &= ~I210_RXPBSIZE_MASK; + val |= I210_RXPBSIZE_PB_30KB; + wr32(E1000_RXPBS, val); + + /* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ + * register should not exceed the buffer size programmed in + * TXPBS. The smallest buffer size programmed in TXPBS is 4kB + * so according to the datasheet we should set MAX_TPKT_SIZE to + * 4kB / 64. + * + * However, when we do so, no frame from queue 2 and 3 are + * transmitted. It seems the MAX_TPKT_SIZE should not be great + * or _equal_ to the buffer size programmed in TXPBS. For this + * reason, we set set MAX_ TPKT_SIZE to (4kB - 1) / 64. + */ + val = (4096 - 1) / 64; + wr32(E1000_I210_DTXMXPKTSZ, val); + + /* Since FQTSS mode is enabled, apply any CBS configuration + * previously set. If no previous CBS configuration has been + * done, then the initial configuration is applied, which means + * CBS is disabled. + */ + max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ? 
+ adapter->num_tx_queues : I210_SR_QUEUES_NUM; + + for (i = 0; i < max_queue; i++) { + igb_config_tx_modes(adapter, i); + } + } else { + wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); + wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); + wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT); + + val = rd32(E1000_I210_TQAVCTRL); + /* According to Section 8.12.21, the other flags we've set when + * enabling FQTSS are not relevant when disabling FQTSS so we + * don't set they here. + */ + val &= ~E1000_TQAVCTRL_XMIT_MODE; + wr32(E1000_I210_TQAVCTRL, val); + } + + netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ? + "enabled" : "disabled"); +} + +/** + * igb_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void igb_configure(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + igb_get_hw_control(adapter); + igb_set_rx_mode(netdev); + igb_setup_tx_mode(adapter); + + igb_restore_vlan(adapter); + + igb_setup_tctl(adapter); + igb_setup_mrqc(adapter); + igb_setup_rctl(adapter); + + igb_nfc_filter_restore(adapter); + igb_configure_tx(adapter); + igb_configure_rx(adapter); + + igb_rx_fifo_flush_82575(&adapter->hw); + + /* call igb_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); + } +} + +/** + * igb_power_up_link - Power up the phy/serdes link + * @adapter: address of board private structure + **/ +void igb_power_up_link(struct igb_adapter *adapter) +{ + igb_reset_phy(&adapter->hw); + + if (adapter->hw.phy.media_type == e1000_media_type_copper) + igb_power_up_phy_copper(&adapter->hw); + else + igb_power_up_serdes_link_82575(&adapter->hw); + + igb_setup_link(&adapter->hw); +} + +/** + * igb_power_down_link - Power down the phy/serdes link + * @adapter: address of board private structure + */ +static void igb_power_down_link(struct igb_adapter *adapter) +{ + if (adapter->hw.phy.media_type == e1000_media_type_copper) + igb_power_down_phy_copper_82575(&adapter->hw); + else + igb_shutdown_serdes_link_82575(&adapter->hw); +} + +/** + * igb_check_swap_media - Detect and switch function for Media Auto Sense + * @adapter: address of the board private structure + **/ +static void igb_check_swap_media(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext, connsw; + bool swap_now = false; + + ctrl_ext = rd32(E1000_CTRL_EXT); + connsw = rd32(E1000_CONNSW); + + /* need to live swap if current media is copper and we have fiber/serdes + * to go to. 
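+ *
+ * In the other direction, when running on fiber/serdes with auto-sense
+ * enabled, the copper signal is given several passes to appear:
+ * adapter->copper_tries is incremented on successive calls, and only
+ * once it reaches four (and the PHY signal is detected) does the driver
+ * swap back to copper.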
+ */ + + if ((hw->phy.media_type == e1000_media_type_copper) && + (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) { + swap_now = true; + } else if ((hw->phy.media_type != e1000_media_type_copper) && + !(connsw & E1000_CONNSW_SERDESD)) { + /* copper signal takes time to appear */ + if (adapter->copper_tries < 4) { + adapter->copper_tries++; + connsw |= E1000_CONNSW_AUTOSENSE_CONF; + wr32(E1000_CONNSW, connsw); + return; + } else { + adapter->copper_tries = 0; + if ((connsw & E1000_CONNSW_PHYSD) && + (!(connsw & E1000_CONNSW_PHY_PDN))) { + swap_now = true; + connsw &= ~E1000_CONNSW_AUTOSENSE_CONF; + wr32(E1000_CONNSW, connsw); + } + } + } + + if (!swap_now) + return; + + switch (hw->phy.media_type) { + case e1000_media_type_copper: + netdev_info(adapter->netdev, + "MAS: changing media to fiber/serdes\n"); + ctrl_ext |= + E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + adapter->copper_tries = 0; + break; + case e1000_media_type_internal_serdes: + case e1000_media_type_fiber: + netdev_info(adapter->netdev, + "MAS: changing media to copper\n"); + ctrl_ext &= + ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + break; + default: + /* shouldn't get here during regular operation */ + netdev_err(adapter->netdev, + "AMS: Invalid media type found, returning\n"); + break; + } + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +/** + * igb_up - Open the interface and prepare it to handle traffic + * @adapter: board private structure + **/ +int igb_up(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + /* hardware has been reset, we need to reload some things */ + igb_configure(adapter); + + clear_bit(__IGB_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&(adapter->q_vector[i]->napi)); + + if (adapter->flags & IGB_FLAG_HAS_MSIX) + igb_configure_msix(adapter); + else + igb_assign_vector(adapter->q_vector[0], 0); + + /* Clear any pending interrupts. */ + rd32(E1000_TSICR); + rd32(E1000_ICR); + igb_irq_enable(adapter); + + /* notify VFs that reset has been completed */ + if (adapter->vfs_allocated_count) { + u32 reg_data = rd32(E1000_CTRL_EXT); + + reg_data |= E1000_CTRL_EXT_PFRSTD; + wr32(E1000_CTRL_EXT, reg_data); + } + + netif_tx_start_all_queues(adapter->netdev); + + /* start the watchdog. 
*/ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + + if ((adapter->flags & IGB_FLAG_EEE) && + (!hw->dev_spec._82575.eee_disable)) + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; + + return 0; +} + +void igb_down(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tctl, rctl; + int i; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer + */ + set_bit(__IGB_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rctl = rd32(E1000_RCTL); + wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + igb_nfc_filter_exit(adapter); + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + /* disable transmits in the hardware */ + tctl = rd32(E1000_TCTL); + tctl &= ~E1000_TCTL_EN; + wr32(E1000_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 11000); + + igb_irq_disable(adapter); + + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + + for (i = 0; i < adapter->num_q_vectors; i++) { + if (adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } + } + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + /* record the stats before reset*/ + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + igb_reset(adapter); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; + + igb_clean_all_tx_rings(adapter); + igb_clean_all_rx_rings(adapter); +#ifdef CONFIG_IGB_DCA + + /* since we reset the hardware DCA settings were cleared */ + igb_setup_dca(adapter); +#endif +} + +void igb_reinit_locked(struct igb_adapter *adapter) +{ + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + igb_down(adapter); + igb_up(adapter); + clear_bit(__IGB_RESETTING, &adapter->state); +} + +/** igb_enable_mas - Media Autosense re-enable after swap + * + * @adapter: adapter struct + **/ +static void igb_enable_mas(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 connsw = rd32(E1000_CONNSW); + + /* configure for SerDes media detect */ + if ((hw->phy.media_type == e1000_media_type_copper) && + (!(connsw & E1000_CONNSW_SERDESD))) { + connsw |= E1000_CONNSW_ENRGSRC; + connsw |= E1000_CONNSW_AUTOSENSE_EN; + wr32(E1000_CONNSW, connsw); + wrfl(); + } +} + +void igb_reset(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_fc_info *fc = &hw->fc; + u32 pba, hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. 
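+ *
+ * The RXPBS/PBA figures below are packet-buffer sizes expressed in KB
+ * (the high-water-mark math further down converts with pba << 10), so
+ * e.g. the E1000_PBA_34K default corresponds to a 34 KB Rx packet
+ * buffer.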
+ */ + switch (mac->type) { + case e1000_i350: + case e1000_i354: + case e1000_82580: + pba = rd32(E1000_RXPBS); + pba = igb_rxpbs_adjust_82580(pba); + break; + case e1000_82576: + pba = rd32(E1000_RXPBS); + pba &= E1000_RXPBS_SIZE_MASK_82576; + break; + case e1000_82575: + case e1000_i210: + case e1000_i211: + default: + pba = E1000_PBA_34K; + break; + } + + if (mac->type == e1000_82575) { + u32 min_rx_space, min_tx_space, needed_tx_space; + + /* write Rx PBA so that hardware can report correct Tx PBA */ + wr32(E1000_PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024); + + /* The Tx FIFO also stores 16 bytes of information about the Tx + * but don't include Ethernet FCS because hardware appends it. + * We only need to round down to the nearest 512 byte block + * count since the value we care about is 2 frames, not 1. + */ + min_tx_space = adapter->max_frame_size; + min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN; + min_tx_space = DIV_ROUND_UP(min_tx_space, 512); + + /* upper 16 bits has Tx packet buffer allocation size in KB */ + needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16); + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation. + */ + if (needed_tx_space < pba) { + pba -= needed_tx_space; + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + + /* adjust PBA for jumbo frames */ + wr32(E1000_PBA, pba); + } + + /* flow control settings + * The high water mark must be low enough to fit one full frame + * after transmitting the pause frame. As such we must have enough + * space to allow for us to complete our current transmit and then + * receive the frame that is in progress from the link partner. 
+ * Set it to: + * - the full Rx FIFO size minus one full Tx plus one full Rx frame + */ + hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); + + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + fc->pause_time = 0xFFFF; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + /* disable receive for all VFs and wait one second */ + if (adapter->vfs_allocated_count) { + int i; + + for (i = 0 ; i < adapter->vfs_allocated_count; i++) + adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; + + /* ping all the active vfs to let them know we are going down */ + igb_ping_all_vfs(adapter); + + /* disable transmits and receives */ + wr32(E1000_VFRE, 0); + wr32(E1000_VFTE, 0); + } + + /* Allow time for pending master requests to run */ + hw->mac.ops.reset_hw(hw); + wr32(E1000_WUC, 0); + + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + /* need to resetup here after media swap */ + adapter->ei.get_invariants(hw); + adapter->flags &= ~IGB_FLAG_MEDIA_RESET; + } + if ((mac->type == e1000_82575 || mac->type == e1000_i350) && + (adapter->flags & IGB_FLAG_MAS_ENABLE)) { + igb_enable_mas(adapter); + } + if (hw->mac.ops.init_hw(hw)) + dev_err(&pdev->dev, "Hardware Error\n"); + + /* RAR registers were cleared during init_hw, clear mac table */ + igb_flush_mac_table(adapter); + __dev_uc_unsync(adapter->netdev, NULL); + + /* Recover default RAR entry */ + igb_set_default_mac_filter(adapter); + + /* Flow control settings reset on hardware reset, so guarantee flow + * control is off when forcing speed. + */ + if (!hw->mac.autoneg) + igb_force_mac_fc(hw); + + igb_init_dmac(adapter, pba); +#ifdef CONFIG_IGB_HWMON + /* Re-initialize the thermal sensor on i350 devices. */ + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (mac->type == e1000_i350 && hw->bus.func == 0) { + /* If present, re-initialize the external thermal sensor + * interface. + */ + if (adapter->ets) + mac->ops.init_thermal_sensor_thresh(hw); + } + } +#endif + /* Re-establish EEE setting */ + if (hw->phy.media_type == e1000_media_type_copper) { + switch (mac->type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + igb_set_eee_i350(hw, true, true); + break; + case e1000_i354: + igb_set_eee_i354(hw, true, true); + break; + default: + break; + } + } + if (!netif_running(adapter->netdev)) + igb_power_down_link(adapter); + + igb_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); + + /* Re-enable PTP, where applicable. */ + if (adapter->ptp_flags & IGB_PTP_ENABLED) + igb_ptp_reset(adapter); + + igb_get_phy_info(hw); +} + +static netdev_features_t igb_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. 
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int igb_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct igb_adapter *adapter = netdev_priv(netdev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + igb_vlan_mode(netdev, features); + + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + if (!(features & NETIF_F_NTUPLE)) { + struct hlist_node *node2; + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + hlist_for_each_entry_safe(rule, node2, + &adapter->nfc_filter_list, nfc_node) { + igb_erase_filter(adapter, rule); + hlist_del(&rule->nfc_node); + kfree(rule); + } + spin_unlock(&adapter->nfc_lock); + adapter->nfc_filter_count = 0; + } + + netdev->features = features; + + if (netif_running(netdev)) + igb_reinit_locked(adapter); + else + igb_reset(adapter); + + return 1; +} + +static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + u16 flags, + struct netlink_ext_ack *extack) +{ + /* guarantee we can provide a unique filter for the unicast address */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + struct igb_adapter *adapter = netdev_priv(dev); + int vfn = adapter->vfs_allocated_count; + + if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn)) + return -ENOMEM; + } + + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +} + +#define IGB_MAX_MAC_HDR_LEN 127 +#define IGB_MAX_NETWORK_HDR_LEN 511 + +static netdev_features_t +igb_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_GSO_UDP_L4 | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_GSO_UDP_L4 | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + +static void igb_offload_apply(struct igb_adapter *adapter, s32 queue) +{ + if (!is_fqtss_enabled(adapter)) { + enable_fqtss(adapter, true); + return; + } + + igb_config_tx_modes(adapter, queue); + + if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter)) + enable_fqtss(adapter, false); +} + +static int igb_offload_cbs(struct igb_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct e1000_hw *hw = &adapter->hw; + int err; + + /* CBS offloading is only supported by i210 controller. */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + /* CBS offloading is only supported by queue 0 and queue 1. 
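+ *
+ * As a purely illustrative example of the resulting programming: a
+ * tc-cbs request with idleslope = 2576 kbps on one of these queues is
+ * saved on the ring and, in igb_config_tx_modes() above, translated to
+ * DIV_ROUND_UP(2576 * 61034, 1000000) = 158 in the idleSlope field of
+ * TQAVCC, i.e. roughly 2589 kbps of reserved bandwidth.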
*/ + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + igb_offload_apply(adapter, qopt->queue); + + return 0; +} + +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +#define VLAN_PRIO_FULL_MASK (0x07) + +static int igb_parse_cls_flower(struct igb_adapter *adapter, + struct flow_cls_offload *f, + int traffic_class, + struct igb_nfc_filter *input) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; + struct netlink_ext_ack *extack = f->common.extack; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN))) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported"); + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + if (!is_zero_ether_addr(match.mask->dst)) { + if (!is_broadcast_ether_addr(match.mask->dst)) { + NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address"); + return -EINVAL; + } + + input->filter.match_flags |= + IGB_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(input->filter.dst_addr, match.key->dst); + } + + if (!is_zero_ether_addr(match.mask->src)) { + if (!is_broadcast_ether_addr(match.mask->src)) { + NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address"); + return -EINVAL; + } + + input->filter.match_flags |= + IGB_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(input->filter.src_addr, match.key->src); + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + if (match.mask->n_proto) { + if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) { + NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter"); + return -EINVAL; + } + + input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE; + input->filter.etype = match.key->n_proto; + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_priority) { + if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) { + NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); + return -EINVAL; + } + + input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; + input->filter.vlan_tci = + (__force __be16)match.key->vlan_priority; + } + } + + input->action = traffic_class; + input->cookie = f->cookie; + + return 0; +} + +static int igb_configure_clsflower(struct igb_adapter *adapter, + struct flow_cls_offload *cls_flower) +{ + struct netlink_ext_ack *extack = cls_flower->common.extack; + struct igb_nfc_filter *filter, *f; + int err, tc; + + tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); + if (tc < 0) { + NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class"); + return -EINVAL; + } + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + err = igb_parse_cls_flower(adapter, cls_flower, tc, filter); + if (err < 0) + goto err_parse; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) { + if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { + 
err = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, + "This filter is already set in ethtool"); + goto err_locked; + } + } + + hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) { + if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { + err = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, + "This filter is already set in cls_flower"); + goto err_locked; + } + } + + err = igb_add_filter(adapter, filter); + if (err < 0) { + NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter"); + goto err_locked; + } + + hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list); + + spin_unlock(&adapter->nfc_lock); + + return 0; + +err_locked: + spin_unlock(&adapter->nfc_lock); + +err_parse: + kfree(filter); + + return err; +} + +static int igb_delete_clsflower(struct igb_adapter *adapter, + struct flow_cls_offload *cls_flower) +{ + struct igb_nfc_filter *filter; + int err; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node) + if (filter->cookie == cls_flower->cookie) + break; + + if (!filter) { + err = -ENOENT; + goto out; + } + + err = igb_erase_filter(adapter, filter); + if (err < 0) + goto out; + + hlist_del(&filter->nfc_node); + kfree(filter); + +out: + spin_unlock(&adapter->nfc_lock); + + return err; +} + +static int igb_setup_tc_cls_flower(struct igb_adapter *adapter, + struct flow_cls_offload *cls_flower) +{ + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return igb_configure_clsflower(adapter, cls_flower); + case FLOW_CLS_DESTROY: + return igb_delete_clsflower(adapter, cls_flower); + case FLOW_CLS_STATS: + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } +} + +static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct igb_adapter *adapter = cb_priv; + + if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return igb_setup_tc_cls_flower(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igb_offload_txtime(struct igb_adapter *adapter, + struct tc_etf_qopt_offload *qopt) +{ + struct e1000_hw *hw = &adapter->hw; + int err; + + /* Launchtime offloading is only supported by i210 controller. */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + /* Launchtime offloading is only supported by queues 0 and 1. 
*/ + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable); + if (err) + return err; + + igb_offload_apply(adapter, qopt->queue); + + return 0; +} + +static LIST_HEAD(igb_block_cb_list); + +static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct igb_adapter *adapter = netdev_priv(dev); + + switch (type) { + case TC_SETUP_QDISC_CBS: + return igb_offload_cbs(adapter, type_data); + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple(type_data, + &igb_block_cb_list, + igb_setup_tc_block_cb, + adapter, adapter, true); + + case TC_SETUP_QDISC_ETF: + return igb_offload_txtime(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf) +{ + int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD; + struct igb_adapter *adapter = netdev_priv(dev); + struct bpf_prog *prog = bpf->prog, *old_prog; + bool running = netif_running(dev); + bool need_reset; + + /* verify igb ring attributes are sufficient for XDP */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + + if (frame_size > igb_rx_bufsz(ring)) { + NL_SET_ERR_MSG_MOD(bpf->extack, + "The RX buffer size is too small for the frame size"); + netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n", + igb_rx_bufsz(ring), frame_size); + return -EINVAL; + } + } + + old_prog = xchg(&adapter->xdp_prog, prog); + need_reset = (!!prog != !!old_prog); + + /* device is up and bpf is added/removed, must setup the RX queues */ + if (need_reset && running) { + igb_close(dev); + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + (void)xchg(&adapter->rx_ring[i]->xdp_prog, + adapter->xdp_prog); + } + + if (old_prog) + bpf_prog_put(old_prog); + + /* bpf is just replaced, RXQ and MTU are already setup */ + if (!need_reset) + return 0; + + if (running) + igb_open(dev); + + return 0; +} + +static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return igb_xdp_setup(dev, xdp); + default: + return -EINVAL; + } +} + +static void igb_xdp_ring_update_tail(struct igb_ring *ring) +{ + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter) +{ + unsigned int r_idx = smp_processor_id(); + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct igb_ring *tx_ring; + struct netdev_queue *nq; + u32 ret; + + if (unlikely(!xdpf)) + return IGB_XDP_CONSUMED; + + /* During program transitions its possible adapter->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + tx_ring = adapter->xdp_prog ? 
igb_xdp_tx_queue_mapping(adapter) : NULL; + if (unlikely(!tx_ring)) + return IGB_XDP_CONSUMED; + + nq = txring_txq(tx_ring); + __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + nq->trans_start = jiffies; + ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); + __netif_tx_unlock(nq); + + return ret; +} + +static int igb_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct igb_adapter *adapter = netdev_priv(dev); + int cpu = smp_processor_id(); + struct igb_ring *tx_ring; + struct netdev_queue *nq; + int nxmit = 0; + int i; + + if (unlikely(test_bit(__IGB_DOWN, &adapter->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + /* During program transitions its possible adapter->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; + if (unlikely(!tx_ring)) + return -ENXIO; + + nq = txring_txq(tx_ring); + __netif_tx_lock(nq, cpu); + + /* Avoid transmit queue timeout since we share it with the slow path */ + nq->trans_start = jiffies; + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; + + err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); + if (err != IGB_XDP_TX) + break; + nxmit++; + } + + __netif_tx_unlock(nq); + + if (unlikely(flags & XDP_XMIT_FLUSH)) + igb_xdp_ring_update_tail(tx_ring); + + return nxmit; +} + +static const struct net_device_ops igb_netdev_ops = { + .ndo_open = igb_open, + .ndo_stop = igb_close, + .ndo_start_xmit = igb_xmit_frame, + .ndo_get_stats64 = igb_get_stats64, + .ndo_set_rx_mode = igb_set_rx_mode, + .ndo_set_mac_address = igb_set_mac, + .ndo_change_mtu = igb_change_mtu, + .ndo_do_ioctl = igb_ioctl, + .ndo_tx_timeout = igb_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, + .ndo_set_vf_mac = igb_ndo_set_vf_mac, + .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, + .ndo_set_vf_rate = igb_ndo_set_vf_bw, + .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, + .ndo_set_vf_trust = igb_ndo_set_vf_trust, + .ndo_get_vf_config = igb_ndo_get_vf_config, + .ndo_fix_features = igb_fix_features, + .ndo_set_features = igb_set_features, + .ndo_fdb_add = igb_ndo_fdb_add, + .ndo_features_check = igb_features_check, + .ndo_setup_tc = igb_setup_tc, + .ndo_bpf = igb_xdp, + .ndo_xdp_xmit = igb_xdp_xmit, +}; + +/** + * igb_set_fw_version - Configure version string for ethtool + * @adapter: adapter struct + **/ +void igb_set_fw_version(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_fw_version fw; + + igb_get_fw_version(hw, &fw); + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (!(igb_get_flash_presence_i210(hw))) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, + fw.invm_img_type); + break; + } + fallthrough; + default: + /* if option is rom valid, display its version too */ + if (fw.or_valid) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x, %d.%d.%d", + fw.eep_major, fw.eep_minor, fw.etrack_id, + fw.or_major, fw.or_build, fw.or_patch); + /* no option rom */ + } else if (fw.etrack_id != 0X0000) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, fw.etrack_id); + } else { + snprintf(adapter->fw_version, + 
sizeof(adapter->fw_version), + "%d.%d.%d", + fw.eep_major, fw.eep_minor, fw.eep_build); + } + break; + } +} + +/** + * igb_init_mas - init Media Autosense feature if enabled in the NVM + * + * @adapter: adapter struct + **/ +static void igb_init_mas(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 eeprom_data; + + hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data); + switch (hw->bus.func) { + case E1000_FUNC_0: + if (eeprom_data & IGB_MAS_ENABLE_0) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_1: + if (eeprom_data & IGB_MAS_ENABLE_1) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_2: + if (eeprom_data & IGB_MAS_ENABLE_2) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_3: + if (eeprom_data & IGB_MAS_ENABLE_3) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + default: + /* Shouldn't get here */ + netdev_err(adapter->netdev, + "MAS: Invalid port configuration, returning\n"); + break; + } +} + +/** + * igb_init_i2c - Init I2C interface + * @adapter: pointer to adapter structure + **/ +static s32 igb_init_i2c(struct igb_adapter *adapter) +{ + s32 status = 0; + + /* I2C interface supported on i350 devices */ + if (adapter->hw.mac.type != e1000_i350) + return 0; + + /* Initialize the i2c bus which is controlled by the registers. + * This bus will use the i2c_algo_bit structure that implements + * the protocol through toggling of the 4 bits in the register. + */ + adapter->i2c_adap.owner = THIS_MODULE; + adapter->i2c_algo = igb_i2c_algo; + adapter->i2c_algo.data = adapter; + adapter->i2c_adap.algo_data = &adapter->i2c_algo; + adapter->i2c_adap.dev.parent = &adapter->pdev->dev; + strlcpy(adapter->i2c_adap.name, "igb BB", + sizeof(adapter->i2c_adap.name)); + status = i2c_bit_add_bus(&adapter->i2c_adap); + return status; +} + +/** + * igb_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igb_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igb_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct igb_adapter *adapter; + struct e1000_hw *hw; + u16 eeprom_data = 0; + s32 ret_val; + static int global_quad_port_a; /* global quad port a indication */ + const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; + int err, pci_using_dac; + u8 part_str[E1000_PBANUM_LENGTH]; + + /* Catch broken hardware that put the wrong VF device ID in + * the PCIe SR-IOV capability. 
+ */ + if (pdev->is_virtfn) { + WARN(1, KERN_ERR "%s (%x:%x) should not be a VF!\n", + pci_name(pdev), pdev->vendor, pdev->device); + return -EINVAL; + } + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + + err = pci_request_mem_regions(pdev, igb_driver_name); + if (err) + goto err_pci_reg; + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + pci_save_state(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), + IGB_MAX_TX_QUEUES); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + err = -EIO; + adapter->io_addr = pci_iomap(pdev, 0, 0); + if (!adapter->io_addr) + goto err_ioremap; + /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; + + netdev->netdev_ops = &igb_netdev_ops; + igb_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Copy the default MAC, PHY and NVM function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); + /* Initialize skew-specific constants */ + err = ei->get_invariants(hw); + if (err) + goto err_sw_init; + + /* setup the private structure */ + err = igb_sw_init(adapter); + if (err) + goto err_sw_init; + + igb_get_bus_info_pcie(hw); + + hw->phy.autoneg_wait_to_complete = false; + + /* Copper options */ + if (hw->phy.media_type == e1000_media_type_copper) { + hw->phy.mdix = AUTO_ALL_MODES; + hw->phy.disable_polarity_correction = false; + hw->phy.ms_type = e1000_ms_hw_default; + } + + if (igb_check_reset_block(hw)) + dev_info(&pdev->dev, + "PHY reset is blocked due to SOL/IDER session.\n"); + + /* features is initialized to 0 in allocation, it might have bits + * set by igb_sw_init so we should use an or instead of an + * assignment. 
+ */ + netdev->features |= NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM; + + if (hw->mac.type >= e1000_82576) + netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4; + + if (hw->mac.type >= e1000_i350) + netdev->features |= NETIF_F_HW_TC; + +#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_RXALL; + + if (hw->mac.type >= e1000_i350) + netdev->hw_features |= NETIF_F_NTUPLE; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; + + /* set this bit last since it cannot be part of vlan_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* MTU range: 68 - 9216 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; + + adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ + hw->mac.ops.reset_hw(hw); + + /* make sure the NVM is good , i211/i210 parts can have special NVM + * that doesn't contain a checksum + */ + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (igb_get_flash_presence_i210(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, + "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + break; + default: + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + break; + } + + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + } + + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + igb_set_default_mac_filter(adapter); + + /* get firmware version for ethtool -i */ + igb_set_fw_version(adapter); + + /* configure RXPBSIZE and TXPBSIZE */ + if (hw->mac.type == e1000_i210) { + wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); + wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); + } + + timer_setup(&adapter->watchdog_timer, igb_watchdog, 0); + timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0); + + INIT_WORK(&adapter->reset_task, igb_reset_task); + INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = 0x2f; + + hw->fc.requested_mode = e1000_fc_default; + hw->fc.current_mode = e1000_fc_default; + + igb_validate_mdi_setting(hw); + + /* By default, support wake on port A */ + if (hw->bus.func == 0) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + + /* Check the NVM for wake support on 
non-port A ports */ + if (hw->mac.type >= e1000_82580) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &eeprom_data); + else if (hw->bus.func == 1) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + + if (eeprom_data & IGB_EEPROM_APME) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + + /* now that we have the eeprom settings, apply the special cases where + * the eeprom may be wrong or the board simply won't support wake on + * lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82575GB_QUAD_COPPER: + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + break; + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + break; + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + else + adapter->flags |= IGB_FLAG_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + default: + /* If the device can't wake, don't set software support */ + if (!device_can_wakeup(&adapter->pdev->dev)) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + } + + /* initialize the wol settings based on the eeprom settings */ + if (adapter->flags & IGB_FLAG_WOL_SUPPORTED) + adapter->wol |= E1000_WUFC_MAG; + + /* Some vendors want WoL disabled by default, but still supported */ + if ((hw->mac.type == e1000_i350) && + (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + + /* Some vendors want the ability to Use the EEPROM setting as + * enable/disable only, and not for capability + */ + if (((hw->mac.type == e1000_i350) || + (hw->mac.type == e1000_i354)) && + (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + if (hw->mac.type == e1000_i350) { + if (((pdev->subsystem_device == 0x5001) || + (pdev->subsystem_device == 0x5002)) && + (hw->bus.func == 0)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + if (pdev->subsystem_device == 0x1F52) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + } + + device_set_wakeup_enable(&adapter->pdev->dev, + adapter->flags & IGB_FLAG_WOL_SUPPORTED); + + /* reset the hardware with the new settings */ + igb_reset(adapter); + + /* Init the I2C interface */ + err = igb_init_i2c(adapter); + if (err) { + dev_err(&pdev->dev, "failed to init i2c interface\n"); + goto err_eeprom; + } + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igb_get_hw_control(adapter); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + +#ifdef CONFIG_IGB_DCA + if (dca_add_requester(&pdev->dev) == 0) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; + dev_info(&pdev->dev, "DCA enabled\n"); + igb_setup_dca(adapter); + } + +#endif +#ifdef CONFIG_IGB_HWMON + /* Initialize the thermal sensor on i350 devices. 
*/ + if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { + u16 ets_word; + + /* Read the NVM to determine if this i350 device supports an + * external thermal sensor. + */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); + if (ets_word != 0x0000 && ets_word != 0xFFFF) + adapter->ets = true; + else + adapter->ets = false; + if (igb_sysfs_init(adapter)) + dev_err(&pdev->dev, + "failed to allocate sysfs resources\n"); + } else { + adapter->ets = false; + } +#endif + /* Check if Media Autosense is enabled */ + adapter->ei = *ei; + if (hw->dev_spec._82575.mas_capable) + igb_init_mas(adapter); + + /* do hw tstamp init after resetting */ + igb_ptp_init(adapter); + + dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); + /* print bus type/speed/width info, not applicable to i354 */ + if (hw->mac.type != e1000_i354) { + dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", + netdev->name, + ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : + (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : + "unknown"), + ((hw->bus.width == e1000_bus_width_pcie_x4) ? + "Width x4" : + (hw->bus.width == e1000_bus_width_pcie_x2) ? + "Width x2" : + (hw->bus.width == e1000_bus_width_pcie_x1) ? + "Width x1" : "unknown"), netdev->dev_addr); + } + + if ((hw->mac.type == e1000_82576 && + rd32(E1000_EECD) & E1000_EECD_PRES) || + (hw->mac.type >= e1000_i210 || + igb_get_flash_presence_i210(hw))) { + ret_val = igb_read_part_string(hw, part_str, + E1000_PBANUM_LENGTH); + } else { + ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND; + } + + if (ret_val) + strcpy(part_str, "Unknown"); + dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); + dev_info(&pdev->dev, + "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", + (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : + (adapter->flags & IGB_FLAG_HAS_MSI) ? 
"MSI" : "legacy", + adapter->num_rx_queues, adapter->num_tx_queues); + if (hw->phy.media_type == e1000_media_type_copper) { + switch (hw->mac.type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + /* Enable EEE for internal copper PHY devices */ + err = igb_set_eee_i350(hw, true, true); + if ((!err) && + (!hw->dev_spec._82575.eee_disable)) { + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + adapter->flags |= IGB_FLAG_EEE; + } + break; + case e1000_i354: + if ((rd32(E1000_CTRL_EXT) & + E1000_CTRL_EXT_LINK_MODE_SGMII)) { + err = igb_set_eee_i354(hw, true, true); + if ((!err) && + (!hw->dev_spec._82575.eee_disable)) { + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + adapter->flags |= IGB_FLAG_EEE; + } + } + break; + default: + break; + } + } + + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + + pm_runtime_put_noidle(&pdev->dev); + return 0; + +err_register: + igb_release_hw_control(adapter); + memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); +err_eeprom: + if (!igb_check_reset_block(hw)) + igb_reset_phy(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); +err_sw_init: + kfree(adapter->mac_table); + kfree(adapter->shadow_vfta); + igb_clear_interrupt_scheme(adapter); +#ifdef CONFIG_PCI_IOV + igb_disable_sriov(pdev); +#endif + pci_iounmap(pdev, adapter->io_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +#ifdef CONFIG_PCI_IOV +static int igb_disable_sriov(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* reclaim resources allocated to VFs */ + if (adapter->vf_data) { + /* disable iov and allow time for transactions to clear */ + if (pci_vfs_assigned(pdev)) { + dev_warn(&pdev->dev, + "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n"); + return -EPERM; + } else { + pci_disable_sriov(pdev); + msleep(500); + } + + kfree(adapter->vf_mac_list); + adapter->vf_mac_list = NULL; + kfree(adapter->vf_data); + adapter->vf_data = NULL; + adapter->vfs_allocated_count = 0; + wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + wrfl(); + msleep(100); + dev_info(&pdev->dev, "IOV Disabled\n"); + + /* Re-enable DMA Coalescing flag since IOV is turned off */ + adapter->flags |= IGB_FLAG_DMAC; + } + + return 0; +} + +static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + int old_vfs = pci_num_vf(pdev); + struct vf_mac_filter *mac_list; + int err = 0; + int num_vf_mac_filters, i; + + if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) { + err = -EPERM; + goto out; + } + if (!num_vfs) + goto out; + + if (old_vfs) { + dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n", + old_vfs, max_vfs); + adapter->vfs_allocated_count = old_vfs; + } else + adapter->vfs_allocated_count = num_vfs; + + adapter->vf_data = kcalloc(adapter->vfs_allocated_count, + sizeof(struct vf_data_storage), GFP_KERNEL); + + /* if allocation failed then we do not support SR-IOV */ + if (!adapter->vf_data) { + adapter->vfs_allocated_count = 0; + err = -ENOMEM; + goto out; + } + + /* Due to the limited number of RAR entries calculate potential + * number of MAC filters available for the VFs. 
Reserve entries + * for PF default MAC, PF MAC filters and at least one RAR entry + * for each VF for VF MAC. + */ + num_vf_mac_filters = adapter->hw.mac.rar_entry_count - + (1 + IGB_PF_MAC_FILTERS_RESERVED + + adapter->vfs_allocated_count); + + adapter->vf_mac_list = kcalloc(num_vf_mac_filters, + sizeof(struct vf_mac_filter), + GFP_KERNEL); + + mac_list = adapter->vf_mac_list; + INIT_LIST_HEAD(&adapter->vf_macs.l); + + if (adapter->vf_mac_list) { + /* Initialize list of VF MAC filters */ + for (i = 0; i < num_vf_mac_filters; i++) { + mac_list->vf = -1; + mac_list->free = true; + list_add(&mac_list->l, &adapter->vf_macs.l); + mac_list++; + } + } else { + /* If we could not allocate memory for the VF MAC filters + * we can continue without this feature but warn user. + */ + dev_err(&pdev->dev, + "Unable to allocate memory for VF MAC filter list\n"); + } + + /* only call pci_enable_sriov() if no VFs are allocated already */ + if (!old_vfs) { + err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); + if (err) + goto err_out; + } + dev_info(&pdev->dev, "%d VFs allocated\n", + adapter->vfs_allocated_count); + for (i = 0; i < adapter->vfs_allocated_count; i++) + igb_vf_configure(adapter, i); + + /* DMA Coalescing is not supported in IOV mode. */ + adapter->flags &= ~IGB_FLAG_DMAC; + goto out; + +err_out: + kfree(adapter->vf_mac_list); + adapter->vf_mac_list = NULL; + kfree(adapter->vf_data); + adapter->vf_data = NULL; + adapter->vfs_allocated_count = 0; +out: + return err; +} + +#endif +/** + * igb_remove_i2c - Cleanup I2C interface + * @adapter: pointer to adapter structure + **/ +static void igb_remove_i2c(struct igb_adapter *adapter) +{ + /* free the adapter bus structure */ + i2c_del_adapter(&adapter->i2c_adap); +} + +/** + * igb_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igb_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void igb_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pm_runtime_get_noresume(&pdev->dev); +#ifdef CONFIG_IGB_HWMON + igb_sysfs_exit(adapter); +#endif + igb_remove_i2c(adapter); + igb_ptp_stop(adapter); + /* The watchdog timer may be rescheduled, so explicitly + * disable watchdog from being rescheduled. + */ + set_bit(__IGB_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + +#ifdef CONFIG_IGB_DCA + if (adapter->flags & IGB_FLAG_DCA_ENABLED) { + dev_info(&pdev->dev, "DCA disabled\n"); + dca_remove_requester(&pdev->dev); + adapter->flags &= ~IGB_FLAG_DCA_ENABLED; + wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); + } +#endif + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. 
+ */ + igb_release_hw_control(adapter); + +#ifdef CONFIG_PCI_IOV + igb_disable_sriov(pdev); +#endif + + unregister_netdev(netdev); + + igb_clear_interrupt_scheme(adapter); + + pci_iounmap(pdev, adapter->io_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_mem_regions(pdev); + + kfree(adapter->mac_table); + kfree(adapter->shadow_vfta); + free_netdev(netdev); + + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +/** + * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space + * @adapter: board private structure to initialize + * + * This function initializes the vf specific data storage and then attempts to + * allocate the VFs. The reason for ordering it this way is because it is much + * mor expensive time wise to disable SR-IOV than it is to allocate and free + * the memory for the VFs. + **/ +static void igb_probe_vfs(struct igb_adapter *adapter) +{ +#ifdef CONFIG_PCI_IOV + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + + /* Virtualization features not supported on i210 family. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) + return; + + /* Of the below we really only want the effect of getting + * IGB_FLAG_HAS_MSIX set (if available), without which + * igb_enable_sriov() has no effect. + */ + igb_set_interrupt_capability(adapter, true); + igb_reset_interrupt_capability(adapter); + + pci_sriov_set_totalvfs(pdev, 7); + igb_enable_sriov(pdev, max_vfs); + +#endif /* CONFIG_PCI_IOV */ +} + +unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned int max_rss_queues; + + /* Determine the maximum number of RSS queues supported. */ + switch (hw->mac.type) { + case e1000_i211: + max_rss_queues = IGB_MAX_RX_QUEUES_I211; + break; + case e1000_82575: + case e1000_i210: + max_rss_queues = IGB_MAX_RX_QUEUES_82575; + break; + case e1000_i350: + /* I350 cannot do RSS and SR-IOV at the same time */ + if (!!adapter->vfs_allocated_count) { + max_rss_queues = 1; + break; + } + fallthrough; + case e1000_82576: + if (!!adapter->vfs_allocated_count) { + max_rss_queues = 2; + break; + } + fallthrough; + case e1000_82580: + case e1000_i354: + default: + max_rss_queues = IGB_MAX_RX_QUEUES; + break; + } + + return max_rss_queues; +} + +static void igb_init_queue_configuration(struct igb_adapter *adapter) +{ + u32 max_rss_queues; + + max_rss_queues = igb_get_max_rss_queues(adapter); + adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + + igb_set_flag_queue_pairs(adapter, max_rss_queues); +} + +void igb_set_flag_queue_pairs(struct igb_adapter *adapter, + const u32 max_rss_queues) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Determine if we need to pair queues. */ + switch (hw->mac.type) { + case e1000_82575: + case e1000_i211: + /* Device supports enough interrupts without queue pairing. */ + break; + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + default: + /* If rss_queues > half of max_rss_queues, pair the queues in + * order to conserve interrupts due to limited supply. + */ + if (adapter->rss_queues > (max_rss_queues / 2)) + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + else + adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS; + break; + } +} + +/** + * igb_sw_init - Initialize general software structures (struct igb_adapter) + * @adapter: board private structure to initialize + * + * igb_sw_init initializes the Adapter private data structure. 
+ * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int igb_sw_init(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGB_DEFAULT_TXD; + adapter->rx_ring_count = IGB_DEFAULT_RXD; + + /* set default ITR values */ + adapter->rx_itr_setting = IGB_DEFAULT_ITR; + adapter->tx_itr_setting = IGB_DEFAULT_ITR; + + /* set default work limits */ + adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; + + adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + spin_lock_init(&adapter->nfc_lock); + spin_lock_init(&adapter->stats64_lock); +#ifdef CONFIG_PCI_IOV + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + if (max_vfs > 7) { + dev_warn(&pdev->dev, + "Maximum of 7 VFs per PF, using max\n"); + max_vfs = adapter->vfs_allocated_count = 7; + } else + adapter->vfs_allocated_count = max_vfs; + if (adapter->vfs_allocated_count) + dev_warn(&pdev->dev, + "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n"); + break; + default: + break; + } +#endif /* CONFIG_PCI_IOV */ + + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ + adapter->flags |= IGB_FLAG_HAS_MSIX; + + adapter->mac_table = kcalloc(hw->mac.rar_entry_count, + sizeof(struct igb_mac_addr), + GFP_KERNEL); + if (!adapter->mac_table) + return -ENOMEM; + + igb_probe_vfs(adapter); + + igb_init_queue_configuration(adapter); + + /* Setup and initialize a copy of the hw vlan table array */ + adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), + GFP_KERNEL); + if (!adapter->shadow_vfta) + return -ENOMEM; + + /* This call may decrease the number of queues */ + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igb_irq_disable(adapter); + + if (hw->mac.type >= e1000_i350) + adapter->flags &= ~IGB_FLAG_DMAC; + + set_bit(__IGB_DOWN, &adapter->state); + return 0; +} + +/** + * __igb_open - Called when a network interface is made active + * @netdev: network interface device structure + * @resuming: indicates whether we are in a resume call + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
+ **/ +static int __igb_open(struct net_device *netdev, bool resuming) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + int i; + + /* disallow open during test */ + if (test_bit(__IGB_TESTING, &adapter->state)) { + WARN_ON(resuming); + return -EBUSY; + } + + if (!resuming) + pm_runtime_get_sync(&pdev->dev); + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = igb_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igb_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + igb_power_up_link(adapter); + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + igb_configure(adapter); + + err = igb_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(adapter->netdev, + adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(adapter->netdev, + adapter->num_rx_queues); + if (err) + goto err_set_queues; + + /* From here on the code is the same as igb_up() */ + clear_bit(__IGB_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&(adapter->q_vector[i]->napi)); + + /* Clear any pending interrupts. */ + rd32(E1000_TSICR); + rd32(E1000_ICR); + + igb_irq_enable(adapter); + + /* notify VFs that reset has been completed */ + if (adapter->vfs_allocated_count) { + u32 reg_data = rd32(E1000_CTRL_EXT); + + reg_data |= E1000_CTRL_EXT_PFRSTD; + wr32(E1000_CTRL_EXT, reg_data); + } + + netif_tx_start_all_queues(netdev); + + if (!resuming) + pm_runtime_put(&pdev->dev); + + /* start the watchdog. */ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + + return 0; + +err_set_queues: + igb_free_irq(adapter); +err_req_irq: + igb_release_hw_control(adapter); + igb_power_down_link(adapter); + igb_free_all_rx_resources(adapter); +err_setup_rx: + igb_free_all_tx_resources(adapter); +err_setup_tx: + igb_reset(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); + + return err; +} + +int igb_open(struct net_device *netdev) +{ + return __igb_open(netdev, false); +} + +/** + * __igb_close - Disables a network interface + * @netdev: network interface device structure + * @suspending: indicates we are in a suspend call + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ **/ +static int __igb_close(struct net_device *netdev, bool suspending) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); + + if (!suspending) + pm_runtime_get_sync(&pdev->dev); + + igb_down(adapter); + igb_free_irq(adapter); + + igb_free_all_tx_resources(adapter); + igb_free_all_rx_resources(adapter); + + if (!suspending) + pm_runtime_put_sync(&pdev->dev); + return 0; +} + +int igb_close(struct net_device *netdev) +{ + if (netif_device_present(netdev) || netdev->dismantle) + return __igb_close(netdev, false); + return 0; +} + +/** + * igb_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int igb_setup_tx_resources(struct igb_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int size; + + size = sizeof(struct igb_tx_buffer) * tx_ring->count; + + tx_ring->tx_buffer_info = vmalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igb_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int igb_setup_all_tx_resources(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igb_setup_tx_resources(adapter->tx_ring[i]); + if (err) { + dev_err(&pdev->dev, + "Allocation for Tx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_tx_resources(adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * igb_setup_tctl - configure the transmit control registers + * @adapter: Board private structure + **/ +void igb_setup_tctl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tctl; + + /* disable queue 0 which is enabled by default on 82575 and 82576 */ + wr32(E1000_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ + tctl = rd32(E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + igb_config_collision_dist(hw); + + /* Enable transmits */ + tctl |= E1000_TCTL_EN; + + wr32(E1000_TCTL, tctl); +} + +/** + * igb_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. 
+ **/ +void igb_configure_tx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + u32 txdctl = 0; + u64 tdba = ring->dma; + int reg_idx = ring->reg_idx; + + wr32(E1000_TDLEN(reg_idx), + ring->count * sizeof(union e1000_adv_tx_desc)); + wr32(E1000_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(E1000_TDBAH(reg_idx), tdba >> 32); + + ring->tail = adapter->io_addr + E1000_TDT(reg_idx); + wr32(E1000_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGB_TX_PTHRESH; + txdctl |= IGB_TX_HTHRESH << 8; + txdctl |= IGB_TX_WTHRESH << 16; + + /* reinitialize tx_buffer_info */ + memset(ring->tx_buffer_info, 0, + sizeof(struct igb_tx_buffer) * ring->count); + + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + wr32(E1000_TXDCTL(reg_idx), txdctl); +} + +/** + * igb_configure_tx - Configure transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void igb_configure_tx(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + /* disable the queues */ + for (i = 0; i < adapter->num_tx_queues; i++) + wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0); + + wrfl(); + usleep_range(10000, 20000); + + for (i = 0; i < adapter->num_tx_queues; i++) + igb_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +/** + * igb_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: Rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int igb_setup_rx_resources(struct igb_ring *rx_ring) +{ + struct igb_adapter *adapter = netdev_priv(rx_ring->netdev); + struct device *dev = rx_ring->dev; + int size; + + size = sizeof(struct igb_rx_buffer) * rx_ring->count; + + rx_ring->rx_buffer_info = vmalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + rx_ring->xdp_prog = adapter->xdp_prog; + + /* XDP RX-queue info */ + if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, + rx_ring->queue_index, 0) < 0) + goto err; + + return 0; + +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igb_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int igb_setup_all_rx_resources(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igb_setup_rx_resources(adapter->rx_ring[i]); + if (err) { + dev_err(&pdev->dev, + "Allocation for Rx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_rx_resources(adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * igb_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + **/ +static void igb_setup_mrqc(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 mrqc, rxcsum; + u32 j, num_rx_queues; + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (j = 0; j 
< 10; j++) + wr32(E1000_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + switch (hw->mac.type) { + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + num_rx_queues = 2; + break; + default: + break; + } + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGB_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGB_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igb_write_rss_indir_tbl(adapter); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(E1000_RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + if (adapter->hw.mac.type >= e1000_82576) + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= E1000_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(E1000_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + + /* If VMDq is enabled then we set the appropriate mode for that, else + * we default to RSS so that an RSS hash is calculated per packet even + * if we are only using one queue + */ + if (adapter->vfs_allocated_count) { + if (hw->mac.type > e1000_82575) { + /* Set the default pool for the PF's first queue */ + u32 vtctl = rd32(E1000_VT_CTL); + + vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | + E1000_VT_CTL_DISABLE_DEF_POOL); + vtctl |= adapter->vfs_allocated_count << + E1000_VT_CTL_DEFAULT_POOL_SHIFT; + wr32(E1000_VT_CTL, vtctl); + } + if (adapter->rss_queues > 1) + mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ; + else + mrqc |= E1000_MRQC_ENABLE_VMDQ; + } else { + mrqc |= E1000_MRQC_ENABLE_RSS_MQ; + } + igb_vmm_control(adapter); + + wr32(E1000_MRQC, mrqc); +} + +/** + * igb_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +void igb_setup_rctl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = rd32(E1000_RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* enable stripping of CRC. It's unlikely this will break BMC + * redirection as it did with e1000. Newer features require + * that the HW strips the CRC. + */ + rctl |= E1000_RCTL_SECRC; + + /* disable store bad packets and clear size bits. */ + rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); + + /* enable LPE to allow for reception of jumbo frames */ + rctl |= E1000_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ + wr32(E1000_RXDCTL(0), 0); + + /* Attention!!! For SR-IOV PF driver operations you must enable + * queue drop for all VF and PF queues to prevent head of line blocking + * if an un-trusted VF does not provide descriptors to hardware. + */ + if (adapter->vfs_allocated_count) { + /* set all queue drop enable bits */ + wr32(E1000_QDE, ALL_QUEUES); + } + + /* This is useful for sniffing bad packets. 
*/ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + wr32(E1000_RCTL, rctl); +} + +static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, + int vfn) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + + if (size > MAX_JUMBO_FRAME_SIZE) + size = MAX_JUMBO_FRAME_SIZE; + + vmolr = rd32(E1000_VMOLR(vfn)); + vmolr &= ~E1000_VMOLR_RLPML_MASK; + vmolr |= size | E1000_VMOLR_LPE; + wr32(E1000_VMOLR(vfn), vmolr); + + return 0; +} + +static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter, + int vfn, bool enable) +{ + struct e1000_hw *hw = &adapter->hw; + u32 val, reg; + + if (hw->mac.type < e1000_82576) + return; + + if (hw->mac.type == e1000_i350) + reg = E1000_DVMOLR(vfn); + else + reg = E1000_VMOLR(vfn); + + val = rd32(reg); + if (enable) + val |= E1000_VMOLR_STRVLAN; + else + val &= ~(E1000_VMOLR_STRVLAN); + wr32(reg, val); +} + +static inline void igb_set_vmolr(struct igb_adapter *adapter, + int vfn, bool aupe) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + + /* This register exists only on 82576 and newer so if we are older then + * we should exit and do nothing + */ + if (hw->mac.type < e1000_82576) + return; + + vmolr = rd32(E1000_VMOLR(vfn)); + if (aupe) + vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ + else + vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ + + /* clear all bits that might not be set */ + vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); + + if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) + vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ + /* for VMDq only allow the VFs and pool 0 to accept broadcast and + * multicast packets + */ + if (vfn <= adapter->vfs_allocated_count) + vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ + + wr32(E1000_VMOLR(vfn), vmolr); +} + +/** + * igb_setup_srrctl - configure the split and replication receive control + * registers + * @adapter: Board private structure + * @ring: receive ring to be configured + **/ +void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + int reg_idx = ring->reg_idx; + u32 srrctl = 0; + + srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + if (ring_uses_large_buffer(ring)) + srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + if (hw->mac.type >= e1000_82580) + srrctl |= E1000_SRRCTL_TIMESTAMP; + /* Only set Drop Enable if VFs allocated, or we are supporting multiple + * queues and rx flow control is disabled + */ + if (adapter->vfs_allocated_count || + (!(hw->fc.current_mode & e1000_fc_rx_pause) && + adapter->num_rx_queues > 1)) + srrctl |= E1000_SRRCTL_DROP_EN; + + wr32(E1000_SRRCTL(reg_idx), srrctl); +} + +/** + * igb_configure_rx_ring - Configure a receive ring after Reset + * @adapter: board private structure + * @ring: receive ring to be configured + * + * Configure the Rx unit of the MAC after a reset. 
+ **/ +void igb_configure_rx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + union e1000_adv_rx_desc *rx_desc; + u64 rdba = ring->dma; + int reg_idx = ring->reg_idx; + u32 rxdctl = 0; + + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL)); + + /* disable the queue */ + wr32(E1000_RXDCTL(reg_idx), 0); + + /* Set DMA base address registers */ + wr32(E1000_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(E1000_RDBAH(reg_idx), rdba >> 32); + wr32(E1000_RDLEN(reg_idx), + ring->count * sizeof(union e1000_adv_rx_desc)); + + /* initialize head and tail */ + ring->tail = adapter->io_addr + E1000_RDT(reg_idx); + wr32(E1000_RDH(reg_idx), 0); + writel(0, ring->tail); + + /* set descriptor configuration */ + igb_setup_srrctl(adapter, ring); + + /* set filtering for VMDQ pools */ + igb_set_vmolr(adapter, reg_idx & 0x7, true); + + rxdctl |= IGB_RX_PTHRESH; + rxdctl |= IGB_RX_HTHRESH << 8; + rxdctl |= IGB_RX_WTHRESH << 16; + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct igb_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IGB_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* enable receive descriptor fetching */ + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + wr32(E1000_RXDCTL(reg_idx), rxdctl); +} + +static void igb_set_rx_buffer_len(struct igb_adapter *adapter, + struct igb_ring *rx_ring) +{ + /* set build_skb and buffer size flags */ + clear_ring_build_skb_enabled(rx_ring); + clear_ring_uses_large_buffer(rx_ring); + + if (adapter->flags & IGB_FLAG_RX_LEGACY) + return; + + set_ring_build_skb_enabled(rx_ring); + +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) + return; + + set_ring_uses_large_buffer(rx_ring); +#endif +} + +/** + * igb_configure_rx - Configure receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ **/ +static void igb_configure_rx(struct igb_adapter *adapter) +{ + int i; + + /* set the correct pool for the PF default MAC address in entry 0 */ + igb_set_default_mac_filter(adapter); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *rx_ring = adapter->rx_ring[i]; + + igb_set_rx_buffer_len(adapter, rx_ring); + igb_configure_rx_ring(adapter, rx_ring); + } +} + +/** + * igb_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void igb_free_tx_resources(struct igb_ring *tx_ring) +{ + igb_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igb_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void igb_free_all_tx_resources(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igb_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * igb_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void igb_clean_tx_ring(struct igb_ring *tx_ring) +{ + u16 i = tx_ring->next_to_clean; + struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + + while (i != tx_ring->next_to_use) { + union e1000_adv_tx_desc *eop_desc, *tx_desc; + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IGB_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + + tx_buffer->next_to_watch = NULL; + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * igb_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void igb_clean_all_tx_rings(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igb_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * igb_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void igb_free_rx_resources(struct igb_ring *rx_ring) +{ + igb_clean_rx_ring(rx_ring); + + rx_ring->xdp_prog = NULL; + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + vfree(rx_ring->rx_buffer_info); + 
rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * igb_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void igb_free_all_rx_resources(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igb_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * igb_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void igb_clean_rx_ring(struct igb_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, + buffer_info->dma, + buffer_info->page_offset, + igb_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, + buffer_info->dma, + igb_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGB_RX_DMA_ATTR); + __page_frag_cache_drain(buffer_info->page, + buffer_info->pagecnt_bias); + + i++; + if (i == rx_ring->count) + i = 0; + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * igb_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void igb_clean_all_rx_rings(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igb_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * igb_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int igb_set_mac(struct net_device *netdev, void *p) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + /* set the correct pool for the new PF MAC address in entry 0 */ + igb_set_default_mac_filter(adapter); + + return 0; +} + +/** + * igb_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int igb_write_mc_addr_list(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + igb_update_mc_addr_list(hw, NULL, 0); + igb_restore_vf_multicasts(adapter); + return 0; + } + + mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* The shared function expects a packed array of only addresses. 
*/ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + igb_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +static int igb_vlan_promisc_enable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 i, pf_id; + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + case e1000_i350: + /* VLAN filtering needed for VLAN prio filter */ + if (adapter->netdev->features & NETIF_F_NTUPLE) + break; + fallthrough; + case e1000_82576: + case e1000_82580: + case e1000_i354: + /* VLAN filtering needed for pool filtering */ + if (adapter->vfs_allocated_count) + break; + fallthrough; + default: + return 1; + } + + /* We are already in VLAN promisc, nothing to do */ + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + return 0; + + if (!adapter->vfs_allocated_count) + goto set_vfta; + + /* Add PF to all active pools */ + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + + for (i = E1000_VLVF_ARRAY_SIZE; --i;) { + u32 vlvf = rd32(E1000_VLVF(i)); + + vlvf |= BIT(pf_id); + wr32(E1000_VLVF(i), vlvf); + } + +set_vfta: + /* Set all bits in the VLAN filter table array */ + for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;) + hw->mac.ops.write_vfta(hw, i, ~0U); + + /* Set flag so we don't redo unnecessary work */ + adapter->flags |= IGB_FLAG_VLAN_PROMISC; + + return 0; +} + +#define VFTA_BLOCK_SIZE 8 +static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; + u32 vid_start = vfta_offset * 32; + u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); + u32 i, vid, word, bits, pf_id; + + /* guarantee that we don't scrub out management VLAN */ + vid = adapter->mng_vlan_id; + if (vid >= vid_start && vid < vid_end) + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); + + if (!adapter->vfs_allocated_count) + goto set_vfta; + + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + + for (i = E1000_VLVF_ARRAY_SIZE; --i;) { + u32 vlvf = rd32(E1000_VLVF(i)); + + /* pull VLAN ID from VLVF */ + vid = vlvf & VLAN_VID_MASK; + + /* only concern ourselves with a certain range */ + if (vid < vid_start || vid >= vid_end) + continue; + + if (vlvf & E1000_VLVF_VLANID_ENABLE) { + /* record VLAN ID in VFTA */ + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); + + /* if PF is part of this then continue */ + if (test_bit(vid, adapter->active_vlans)) + continue; + } + + /* remove PF from the pool */ + bits = ~BIT(pf_id); + bits &= rd32(E1000_VLVF(i)); + wr32(E1000_VLVF(i), bits); + } + +set_vfta: + /* extract values from active_vlans and write back to VFTA */ + for (i = VFTA_BLOCK_SIZE; i--;) { + vid = (vfta_offset + i) * 32; + word = vid / BITS_PER_LONG; + bits = vid % BITS_PER_LONG; + + vfta[i] |= adapter->active_vlans[word] >> bits; + + hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]); + } +} + +static void igb_vlan_promisc_disable(struct igb_adapter *adapter) +{ + u32 i; + + /* We are not in VLAN promisc, nothing to do */ + if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; + + for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE) + igb_scrub_vfta(adapter, i); +} + +/** + * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address 
lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void igb_set_rx_mode(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned int vfn = adapter->vfs_allocated_count; + u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE; + int count; + + /* Check for Promiscuous and All Multicast modes */ + if (netdev->flags & IFF_PROMISC) { + rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + + /* enable use of UTA filter to force packets to default pool */ + if (hw->mac.type == e1000_82576) + vmolr |= E1000_VMOLR_ROPE; + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = igb_write_mc_addr_list(netdev); + if (count < 0) { + rctl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else if (count) { + vmolr |= E1000_VMOLR_ROMPE; + } + } + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) { + rctl |= E1000_RCTL_UPE; + vmolr |= E1000_VMOLR_ROPE; + } + + /* enable VLAN filtering by default */ + rctl |= E1000_RCTL_VFE; + + /* disable VLAN filtering for modes that require it */ + if ((netdev->flags & IFF_PROMISC) || + (netdev->features & NETIF_F_RXALL)) { + /* if we fail to set all rules then just clear VFE */ + if (igb_vlan_promisc_enable(adapter)) + rctl &= ~E1000_RCTL_VFE; + } else { + igb_vlan_promisc_disable(adapter); + } + + /* update state of unicast, multicast, and VLAN filtering modes */ + rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE | + E1000_RCTL_VFE); + wr32(E1000_RCTL, rctl); + +#if (PAGE_SIZE < 8192) + if (!adapter->vfs_allocated_count) { + if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) + rlpml = IGB_MAX_FRAME_BUILD_SKB; + } +#endif + wr32(E1000_RLPML, rlpml); + + /* In order to support SR-IOV and eventually VMDq it is necessary to set + * the VMOLR to enable the appropriate modes. 
Without this workaround + * we will have issues with VLAN tag stripping not being done for frames + * that are only arriving because we are the default pool + */ + if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) + return; + + /* set UTA to appropriate mode */ + igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE)); + + vmolr |= rd32(E1000_VMOLR(vfn)) & + ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); + + /* enable Rx jumbo frames, restrict as needed to support build_skb */ + vmolr &= ~E1000_VMOLR_RLPML_MASK; +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) + vmolr |= IGB_MAX_FRAME_BUILD_SKB; + else +#endif + vmolr |= MAX_JUMBO_FRAME_SIZE; + vmolr |= E1000_VMOLR_LPE; + + wr32(E1000_VMOLR(vfn), vmolr); + + igb_restore_vf_multicasts(adapter); +} + +static void igb_check_wvbr(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 wvbr = 0; + + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + wvbr = rd32(E1000_WVBR); + if (!wvbr) + return; + break; + default: + break; + } + + adapter->wvbr |= wvbr; +} + +#define IGB_STAGGERED_QUEUE_OFFSET 8 + +static void igb_spoof_check(struct igb_adapter *adapter) +{ + int j; + + if (!adapter->wvbr) + return; + + for (j = 0; j < adapter->vfs_allocated_count; j++) { + if (adapter->wvbr & BIT(j) || + adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) { + dev_warn(&adapter->pdev->dev, + "Spoof event(s) detected on VF %d\n", j); + adapter->wvbr &= + ~(BIT(j) | + BIT(j + IGB_STAGGERED_QUEUE_OFFSET)); + } + } +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void igb_update_phy_info(struct timer_list *t) +{ + struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer); + igb_get_phy_info(&adapter->hw); +} + +/** + * igb_has_link - check shared code for link and determine up/down + * @adapter: pointer to driver private info + **/ +bool igb_has_link(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay + * false until the e1000_check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + if (!hw->mac.get_link_status) + return true; + fallthrough; + case e1000_media_type_internal_serdes: + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + break; + default: + case e1000_media_type_unknown: + break; + } + + if (((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) && + (hw->phy.id == I210_I_PHY_ID)) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + + return link_active; +} + +static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) +{ + bool ret = false; + u32 ctrl_ext, thstat; + + /* check for thermal sensor event on i350 copper only */ + if (hw->mac.type == e1000_i350) { + thstat = rd32(E1000_THSTAT); + ctrl_ext = rd32(E1000_CTRL_EXT); + + if ((hw->phy.media_type == e1000_media_type_copper) && + !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) + ret = !!(thstat & event); + } + + return ret; +} + +/** + * igb_check_lvmmc - check for malformed packets received + * and indicated in LVMMC register + * @adapter: pointer to adapter + **/ +static void igb_check_lvmmc(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 lvmmc; + + lvmmc = rd32(E1000_LVMMC); + if (lvmmc) { + if (unlikely(net_ratelimit())) { + netdev_warn(adapter->netdev, + "malformed Tx packet detected and dropped, LVMMC:0x%08x\n", + lvmmc); + } + } +} + +/** + * igb_watchdog - Timer Call-back + * @t: pointer to timer_list containing our private info pointer + **/ +static void igb_watchdog(struct timer_list *t) +{ + struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer); + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igb_watchdog_task(struct work_struct *work) +{ + struct igb_adapter *adapter = container_of(work, + struct igb_adapter, + watchdog_task); + struct e1000_hw *hw = &adapter->hw; + struct e1000_phy_info *phy = &hw->phy; + struct net_device *netdev = adapter->netdev; + u32 link; + int i; + u32 connsw; + u16 phy_data, retry_count = 20; + + link = igb_has_link(adapter); + + if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + + /* Force link down if we have fiber to swap to */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + if (hw->phy.media_type == e1000_media_type_copper) { + connsw = rd32(E1000_CONNSW); + if (!(connsw & E1000_CONNSW_AUTOSENSE_EN)) + link = 0; + } + } + if (link) { + /* Perform a reset if the media type changed. */ + if (hw->dev_spec._82575.media_changed) { + hw->dev_spec._82575.media_changed = false; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + igb_reset(adapter); + } + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = rd32(E1000_CTRL); + /* Links status message must follow this format */ + netdev_info(netdev, + "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? 
+ "Full" : "Half", + (ctrl & E1000_CTRL_TFCE) && + (ctrl & E1000_CTRL_RFCE) ? "RX/TX" : + (ctrl & E1000_CTRL_RFCE) ? "RX" : + (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); + + /* disable EEE if enabled */ + if ((adapter->flags & IGB_FLAG_EEE) && + (adapter->link_duplex == HALF_DUPLEX)) { + dev_info(&adapter->pdev->dev, + "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n"); + adapter->hw.dev_spec._82575.eee_disable = true; + adapter->flags &= ~IGB_FLAG_EEE; + } + + /* check if SmartSpeed worked */ + igb_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); + + /* check for thermal sensor event */ + if (igb_thermal_sensor_event(hw, + E1000_THSTAT_LINK_THROTTLE)) + netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n"); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + /* maybe add some timeout factor ? */ + break; + } + + if (adapter->link_speed != SPEED_1000) + goto no_wait; + + /* wait for Remote receiver status OK */ +retry_read_status: + if (!igb_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data)) { + if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && + retry_count) { + msleep(100); + retry_count--; + goto retry_read_status; + } else if (!retry_count) { + dev_err(&adapter->pdev->dev, "exceed max 2 second\n"); + } + } else { + dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n"); + } +no_wait: + netif_carrier_on(netdev); + + igb_ping_all_vfs(adapter); + igb_check_vf_rate_limit(adapter); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* check for thermal sensor event */ + if (igb_thermal_sensor_event(hw, + E1000_THSTAT_PWR_DOWN)) { + netdev_err(netdev, "The network adapter was stopped because it overheated\n"); + } + + /* Links status message must follow this format */ + netdev_info(netdev, "igb: %s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); + + igb_ping_all_vfs(adapter); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + /* link is down, time to check for alternate media */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + igb_check_swap_media(adapter); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + pm_schedule_suspend(netdev->dev.parent, + MSEC_PER_SEC * 5); + + /* also check for alternate media here */ + } else if (!netif_carrier_ok(netdev) && + (adapter->flags & IGB_FLAG_MAS_ENABLE)) { + igb_check_swap_media(adapter); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + } + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *tx_ring = adapter->tx_ring[i]; + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset 
controller to flush Tx.
+			 * (Do the reset outside of interrupt context).
+			 */
+			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
+				adapter->tx_timeout_count++;
+				schedule_work(&adapter->reset_task);
+				/* return immediately since reset is imminent */
+				return;
+			}
+		}
+
+		/* Force detection of hung controller every watchdog period */
+		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+	}
+
+	/* Cause software interrupt to ensure Rx ring is cleaned */
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+		u32 eics = 0;
+
+		for (i = 0; i < adapter->num_q_vectors; i++)
+			eics |= adapter->q_vector[i]->eims_value;
+		wr32(E1000_EICS, eics);
+	} else {
+		wr32(E1000_ICS, E1000_ICS_RXDMT0);
+	}
+
+	igb_spoof_check(adapter);
+	igb_ptp_rx_hang(adapter);
+	igb_ptp_tx_hang(adapter);
+
+	/* Check LVMMC register on i350/i354 only */
+	if ((adapter->hw.mac.type == e1000_i350) ||
+	    (adapter->hw.mac.type == e1000_i354))
+		igb_check_lvmmc(adapter);
+
+	/* Reset the timer */
+	if (!test_bit(__IGB_DOWN, &adapter->state)) {
+		if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
+			mod_timer(&adapter->watchdog_timer,
+				  round_jiffies(jiffies + HZ));
+		else
+			mod_timer(&adapter->watchdog_timer,
+				  round_jiffies(jiffies + 2 * HZ));
+	}
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+/**
+ * igb_update_ring_itr - update the dynamic ITR value based on packet size
+ * @q_vector: pointer to q_vector
+ *
+ * Stores a new ITR value based strictly on packet size. This
+ * algorithm is less sophisticated than that used in igb_update_itr,
+ * due to the difficulty of synchronizing statistics across multiple
+ * receive rings. The divisors and thresholds used by this function
+ * were determined based on theoretical maximum wire speed and testing
+ * data, in order to minimize response time while increasing bulk
+ * throughput.
+ * This functionality is controlled by ethtool's coalescing settings.
+ * NOTE: This function is called only when operating in a multiqueue
+ * receive environment.
+ **/
+static void igb_update_ring_itr(struct igb_q_vector *q_vector)
+{
+	int new_val = q_vector->itr_val;
+	int avg_wire_size = 0;
+	struct igb_adapter *adapter = q_vector->adapter;
+	unsigned int packets;
+
+	/* For non-gigabit speeds, just fix the interrupt rate at 4000
+	 * ints/sec - ITR timer value of 120 ticks.
+ */ + if (adapter->link_speed != SPEED_1000) { + new_val = IGB_4K_ITR; + goto set_itr_val; + } + + packets = q_vector->rx.total_packets; + if (packets) + avg_wire_size = q_vector->rx.total_bytes / packets; + + packets = q_vector->tx.total_packets; + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + q_vector->tx.total_bytes / packets); + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); + + /* Give a little boost to mid-size frames */ + if ((avg_wire_size > 300) && (avg_wire_size < 1200)) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (new_val < IGB_20K_ITR && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + new_val = IGB_20K_ITR; + +set_itr_val: + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; + } +clear_counts: + q_vector->rx.total_bytes = 0; + q_vector->rx.total_packets = 0; + q_vector->tx.total_bytes = 0; + q_vector->tx.total_packets = 0; +} + +/** + * igb_update_itr - update the dynamic ITR value based on statistics + * @q_vector: pointer to q_vector + * @ring_container: ring info to update the itr for + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * This functionality is controlled by ethtool's coalescing settings. + * NOTE: These calculations are only valid when operating in a single- + * queue environment. 
+ **/ +static void igb_update_itr(struct igb_q_vector *q_vector, + struct igb_ring_container *ring_container) +{ + unsigned int packets = ring_container->total_packets; + unsigned int bytes = ring_container->total_bytes; + u8 itrval = ring_container->itr; + + /* no packets, exit with status unchanged */ + if (packets == 0) + return; + + switch (itrval) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes/packets > 8000) + itrval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + itrval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes/packets > 8000) + itrval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + itrval = bulk_latency; + else if ((packets > 35)) + itrval = lowest_latency; + } else if (bytes/packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + itrval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + itrval = low_latency; + } else if (bytes < 1500) { + itrval = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itrval; +} + +static void igb_set_itr(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + u32 new_itr = q_vector->itr_val; + u8 current_itr = 0; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (adapter->link_speed != SPEED_1000) { + current_itr = 0; + new_itr = IGB_4K_ITR; + goto set_itr_now; + } + + igb_update_itr(q_vector, &q_vector->tx); + igb_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (current_itr == lowest_latency && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + current_itr = low_latency; + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IGB_70K_ITR; /* 70,000 ints/sec */ + break; + case low_latency: + new_itr = IGB_20K_ITR; /* 20,000 ints/sec */ + break; + case bulk_latency: + new_itr = IGB_4K_ITR; /* 4,000 ints/sec */ + break; + default: + break; + } + +set_itr_now: + if (new_itr != q_vector->itr_val) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR + * value at the beginning of the next interrupt so the timing + * ends up being correct. + */ + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; + } +} + +static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct e1000_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + struct timespec64 ts; + + context_desc = IGB_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT; + + /* For 82575, context index must be unique per ring. */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + mss_l4len_idx |= tx_ring->reg_idx << 4; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + /* We assume there is always a valid tx time available. Invalid times + * should have been handled by the upper layers. + */ + if (tx_ring->launchtime_enable) { + ts = ktime_to_timespec64(first->skb->tstamp); + skb_txtime_consumed(first->skb); + context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); + } else { + context_desc->seqnum_seed = 0; + } +} + +static int igb_tso(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? + E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP; + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); + type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first->tx_flags |= IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_CSUM | + IGB_TX_FLAGS_IPV4; + } else { + ip.v6->payload_len = 0; + first->tx_flags |= IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_CSUM; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) { + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + } else { + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* MSS L4LEN IDX */ + mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; + + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; + + igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, + type_tucmd, mss_l4len_idx); + + return 1; +} + +static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; 
+ u32 vlan_macip_lens = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) && + !tx_ring->launchtime_enable) + return; + goto no_csum; + } + + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; + fallthrough; + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (skb_csum_is_sctp(skb)) { + type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; + break; + } + fallthrough; + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= IGB_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: + vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; + + igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); +} + +#define IGB_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? \ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = E1000_ADVTXD_DTYP_DATA | + E1000_ADVTXD_DCMD_DEXT | + E1000_ADVTXD_DCMD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN, + (E1000_ADVTXD_DCMD_VLE)); + + /* set segmentation bits for TSO */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO, + (E1000_ADVTXD_DCMD_TSE)); + + /* set timestamp bit if present */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP, + (E1000_ADVTXD_MAC_TSTAMP)); + + /* insert frame checksum */ + cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS); + + return cmd_type; +} + +static void igb_tx_olinfo_status(struct igb_ring *tx_ring, + union e1000_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT; + + /* 82575 requires a unique index per ring */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + olinfo_status |= tx_ring->reg_idx << 4; + + /* insert L4 checksum */ + olinfo_status |= IGB_SET_FLAG(tx_flags, + IGB_TX_FLAGS_CSUM, + (E1000_TXD_POPTS_TXSM << 8)); + + /* insert IPv4 checksum */ + olinfo_status |= IGB_SET_FLAG(tx_flags, + IGB_TX_FLAGS_IPV4, + (E1000_TXD_POPTS_IXSM << 8)); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + struct net_device *netdev = tx_ring->netdev; + + netif_stop_subqueue(netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (igb_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! 
*/ + netif_wake_subqueue(netdev, tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + if (igb_desc_unused(tx_ring) >= size) + return 0; + return __igb_maybe_stop_tx(tx_ring, size); +} + +static int igb_tx_map(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct igb_tx_buffer *tx_buffer; + union e1000_adv_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u32 cmd_type = igb_tx_cmd_type(skb, tx_flags); + u16 i = tx_ring->next_to_use; + + tx_desc = IGB_TX_DESC(tx_ring, i); + + igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > IGB_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGB_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IGB_MAX_DATA_PER_TXD; + size -= IGB_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGB_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, + size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IGB_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + dma_wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. 
*/ + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); + } + return 0; + +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; + + /* clear dma mappings for failed tx_buffer_info map */ + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + if (i-- == 0) + i += tx_ring->count; + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + + tx_ring->next_to_use = i; + + return -1; +} + +int igb_xmit_xdp_ring(struct igb_adapter *adapter, + struct igb_ring *tx_ring, + struct xdp_frame *xdpf) +{ + union e1000_adv_tx_desc *tx_desc; + u32 len, cmd_type, olinfo_status; + struct igb_tx_buffer *tx_buffer; + dma_addr_t dma; + u16 i; + + len = xdpf->len; + + if (unlikely(!igb_desc_unused(tx_ring))) + return IGB_XDP_CONSUMED; + + dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + return IGB_XDP_CONSUMED; + + /* record the location of the first descriptor for this packet */ + tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + tx_buffer->bytecount = len; + tx_buffer->gso_segs = 1; + tx_buffer->protocol = 0; + + i = tx_ring->next_to_use; + tx_desc = IGB_TX_DESC(tx_ring, i); + + dma_unmap_len_set(tx_buffer, len, len); + dma_unmap_addr_set(tx_buffer, dma, dma); + tx_buffer->type = IGB_TYPE_XDP; + tx_buffer->xdpf = xdpf; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + /* put descriptor type bits */ + cmd_type = E1000_ADVTXD_DTYP_DATA | + E1000_ADVTXD_DCMD_DEXT | + E1000_ADVTXD_DCMD_IFCS; + cmd_type |= len | IGB_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT; + /* 82575 requires a unique index per ring */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + olinfo_status |= tx_ring->reg_idx << 4; + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + + netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount); + + /* set the timestamp */ + tx_buffer->time_stamp = jiffies; + + /* Avoid any potential race with xdp_xmit and cleanup */ + smp_wmb(); + + /* set next_to_watch value indicating a packet is present */ + i++; + if (i == tx_ring->count) + i = 0; + + tx_buffer->next_to_watch = tx_desc; + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. 
*/ + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) + writel(i, tx_ring->tail); + + return IGB_XDP_TX; +} + +netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, + struct igb_ring *tx_ring) +{ + struct igb_tx_buffer *first; + int tso; + u32 tx_flags = 0; + unsigned short f; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + u8 hdr_len = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); + + if (igb_maybe_stop_tx(tx_ring, count + 3)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->type = IGB_TYPE_SKB; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); + + if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && + !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGB_TX_FLAGS_TSTAMP; + + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + if (adapter->hw.mac.type == e1000_82576) + schedule_work(&adapter->ptp_tx_work); + } else { + adapter->tx_hwtstamp_skipped++; + } + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= IGB_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + tso = igb_tso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + igb_tx_csum(tx_ring, first); + + if (igb_tx_map(tx_ring, first, hdr_len)) + goto cleanup_tx_tstamp; + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; +cleanup_tx_tstamp: + if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) { + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); + + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + if (adapter->hw.mac.type == e1000_82576) + cancel_work_sync(&adapter->ptp_tx_work); + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + } + + return NETDEV_TX_OK; +} + +static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, + struct sk_buff *skb) +{ + unsigned int r_idx = skb->queue_mapping; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + /* The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size requirement. 
+ */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); +} + +/** + * igb_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: number of the Tx queue that hung (unused) + **/ +static void igb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + + if (hw->mac.type >= e1000_82580) + hw->dev_spec._82575.global_device_reset = true; + + schedule_work(&adapter->reset_task); + wr32(E1000_EICS, + (adapter->eims_enable_mask & ~adapter->eims_other)); +} + +static void igb_reset_task(struct work_struct *work) +{ + struct igb_adapter *adapter; + adapter = container_of(work, struct igb_adapter, reset_task); + + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGB_DOWN, &adapter->state) || + test_bit(__IGB_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + + igb_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); + igb_reinit_locked(adapter); + rtnl_unlock(); +} + +/** + * igb_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + **/ +static void igb_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); +} + +/** + * igb_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int igb_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD; + + if (adapter->xdp_prog) { + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + + if (max_frame > igb_rx_bufsz(ring)) { + netdev_warn(adapter->netdev, + "Requested MTU size is not supported with XDP. Max frame size is %d\n", + max_frame); + return -EINVAL; + } + } + } + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + /* igb_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + + if (netif_running(netdev)) + igb_down(adapter); + + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igb_up(adapter); + else + igb_reset(adapter); + + clear_bit(__IGB_RESETTING, &adapter->state); + + return 0; +} + +/** + * igb_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void igb_update_stats(struct igb_adapter *adapter) +{ + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + u32 reg, mpc; + int i; + u64 bytes, packets; + unsigned int start; + u64 _bytes, _packets; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + bytes = 0; + packets = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(E1000_RQDPC(i)); + if (hw->mac.type >= e1000_i210) + wr32(E1000_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *ring = adapter->tx_ring[i]; + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(E1000_CRCERRS); + adapter->stats.gprc += rd32(E1000_GPRC); + adapter->stats.gorc += rd32(E1000_GORCL); + rd32(E1000_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(E1000_BPRC); + adapter->stats.mprc += rd32(E1000_MPRC); + adapter->stats.roc += rd32(E1000_ROC); + + adapter->stats.prc64 += rd32(E1000_PRC64); + adapter->stats.prc127 += rd32(E1000_PRC127); + adapter->stats.prc255 += rd32(E1000_PRC255); + adapter->stats.prc511 += rd32(E1000_PRC511); + adapter->stats.prc1023 += rd32(E1000_PRC1023); + adapter->stats.prc1522 += rd32(E1000_PRC1522); + adapter->stats.symerrs += rd32(E1000_SYMERRS); + adapter->stats.sec += rd32(E1000_SEC); + + mpc = rd32(E1000_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(E1000_SCC); + adapter->stats.ecol += rd32(E1000_ECOL); + adapter->stats.mcc += rd32(E1000_MCC); + adapter->stats.latecol += rd32(E1000_LATECOL); + adapter->stats.dc += rd32(E1000_DC); + adapter->stats.rlec += rd32(E1000_RLEC); + adapter->stats.xonrxc += rd32(E1000_XONRXC); + adapter->stats.xontxc += rd32(E1000_XONTXC); + adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); + adapter->stats.xofftxc += rd32(E1000_XOFFTXC); + adapter->stats.fcruc += rd32(E1000_FCRUC); + adapter->stats.gptc += rd32(E1000_GPTC); + adapter->stats.gotc += rd32(E1000_GOTCL); + rd32(E1000_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(E1000_RNBC); + adapter->stats.ruc += rd32(E1000_RUC); + adapter->stats.rfc += rd32(E1000_RFC); + adapter->stats.rjc += rd32(E1000_RJC); + adapter->stats.tor += rd32(E1000_TORH); + adapter->stats.tot += rd32(E1000_TOTH); + adapter->stats.tpr += rd32(E1000_TPR); + + adapter->stats.ptc64 += rd32(E1000_PTC64); + adapter->stats.ptc127 += rd32(E1000_PTC127); + adapter->stats.ptc255 += rd32(E1000_PTC255); + adapter->stats.ptc511 += rd32(E1000_PTC511); + adapter->stats.ptc1023 += rd32(E1000_PTC1023); + adapter->stats.ptc1522 += rd32(E1000_PTC1522); + + adapter->stats.mptc += rd32(E1000_MPTC); + adapter->stats.bptc += rd32(E1000_BPTC); + + adapter->stats.tpt += rd32(E1000_TPT); + adapter->stats.colc += rd32(E1000_COLC); + + adapter->stats.algnerrc += rd32(E1000_ALGNERRC); + /* read internal phy specific stats */ + reg = rd32(E1000_CTRL_EXT); + if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { + 
adapter->stats.rxerrc += rd32(E1000_RXERRC); + + /* this stat has invalid values on i210/i211 */ + if ((hw->mac.type != e1000_i210) && + (hw->mac.type != e1000_i211)) + adapter->stats.tncrs += rd32(E1000_TNCRS); + } + + adapter->stats.tsctc += rd32(E1000_TSCTC); + adapter->stats.tsctfc += rd32(E1000_TSCTFC); + + adapter->stats.iac += rd32(E1000_IAC); + adapter->stats.icrxoc += rd32(E1000_ICRXOC); + adapter->stats.icrxptc += rd32(E1000_ICRXPTC); + adapter->stats.icrxatc += rd32(E1000_ICRXATC); + adapter->stats.ictxptc += rd32(E1000_ICTXPTC); + adapter->stats.ictxatc += rd32(E1000_ICTXATC); + adapter->stats.ictxqec += rd32(E1000_ICTXQEC); + adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); + adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += rd32(E1000_MGTPTC); + adapter->stats.mgprc += rd32(E1000_MGTPRC); + adapter->stats.mgpdc += rd32(E1000_MGTPDC); + + /* OS2BMC Stats */ + reg = rd32(E1000_MANC); + if (reg & E1000_MANC_EN_BMC2OS) { + adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); + adapter->stats.o2bspc += rd32(E1000_O2BSPC); + adapter->stats.b2ospc += rd32(E1000_B2OSPC); + adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); + } +} + +static void igb_tsync_interrupt(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct ptp_clock_event event; + struct timespec64 ts; + u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR); + + if (tsicr & TSINTR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); + ack |= TSINTR_SYS_WRAP; + } + + if (tsicr & E1000_TSICR_TXTS) { + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + ack |= E1000_TSICR_TXTS; + } + + if (tsicr & TSINTR_TT0) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[0].start, + adapter->perout[0].period); + /* u32 conversion of tv_sec is safe until y2106 */ + wr32(E1000_TRGTTIML0, ts.tv_nsec); + wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(E1000_TSAUXC); + tsauxc |= TSAUXC_EN_TT0; + wr32(E1000_TSAUXC, tsauxc); + adapter->perout[0].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= TSINTR_TT0; + } + + if (tsicr & TSINTR_TT1) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[1].start, + adapter->perout[1].period); + wr32(E1000_TRGTTIML1, ts.tv_nsec); + wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec); + tsauxc = rd32(E1000_TSAUXC); + tsauxc |= TSAUXC_EN_TT1; + wr32(E1000_TSAUXC, tsauxc); + adapter->perout[1].start = ts; + spin_unlock(&adapter->tmreg_lock); 
+ ack |= TSINTR_TT1; + } + + if (tsicr & TSINTR_AUTT0) { + nsec = rd32(E1000_AUXSTMPL0); + sec = rd32(E1000_AUXSTMPH0); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = sec * 1000000000ULL + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= TSINTR_AUTT0; + } + + if (tsicr & TSINTR_AUTT1) { + nsec = rd32(E1000_AUXSTMPL1); + sec = rd32(E1000_AUXSTMPH1); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = sec * 1000000000ULL + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= TSINTR_AUTT1; + } + + /* acknowledge the interrupts */ + wr32(E1000_TSICR, ack); +} + +static irqreturn_t igb_msix_other(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct e1000_hw *hw = &adapter->hw; + u32 icr = rd32(E1000_ICR); + /* reading ICR causes bit 31 of EICR to be cleared */ + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + /* The DMA Out of Sync is also indication of a spoof event + * in IOV mode. Check the Wrong VM Behavior register to + * see if it is really a spoof event. + */ + igb_check_wvbr(adapter); + } + + /* Check for a mailbox event */ + if (icr & E1000_ICR_VMMB) + igb_msg_task(adapter); + + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & E1000_ICR_TS) + igb_tsync_interrupt(adapter); + + wr32(E1000_EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static void igb_write_itr(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + u32 itr_val = q_vector->itr_val & 0x7FFC; + + if (!q_vector->set_itr) + return; + + if (!itr_val) + itr_val = 0x4; + + if (adapter->hw.mac.type == e1000_82575) + itr_val |= itr_val << 16; + else + itr_val |= E1000_EITR_CNT_IGNR; + + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; +} + +static irqreturn_t igb_msix_ring(int irq, void *data) +{ + struct igb_q_vector *q_vector = data; + + /* Write the ITR value calculated from the previous interrupt. */ + igb_write_itr(q_vector); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_IGB_DCA +static void igb_update_tx_dca(struct igb_adapter *adapter, + struct igb_ring *tx_ring, + int cpu) +{ + struct e1000_hw *hw = &adapter->hw; + u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); + + if (hw->mac.type != e1000_82575) + txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; + + /* We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ + txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN | + E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_DESC_DCA_EN; + + wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); +} + +static void igb_update_rx_dca(struct igb_adapter *adapter, + struct igb_ring *rx_ring, + int cpu) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); + + if (hw->mac.type != e1000_82575) + rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; + + /* We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. 
+ */ + rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN | + E1000_DCA_RXCTRL_DESC_DCA_EN; + + wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); +} + +static void igb_update_dca(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + int cpu = get_cpu(); + + if (q_vector->cpu == cpu) + goto out_no_update; + + if (q_vector->tx.ring) + igb_update_tx_dca(adapter, q_vector->tx.ring, cpu); + + if (q_vector->rx.ring) + igb_update_rx_dca(adapter, q_vector->rx.ring, cpu); + + q_vector->cpu = cpu; +out_no_update: + put_cpu(); +} + +static void igb_setup_dca(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + if (!(adapter->flags & IGB_FLAG_DCA_ENABLED)) + return; + + /* Always use CB2 mode, difference is masked in the CB driver. */ + wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); + + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->cpu = -1; + igb_update_dca(adapter->q_vector[i]); + } +} + +static int __igb_notify_dca(struct device *dev, void *data) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + unsigned long event = *(unsigned long *)data; + + switch (event) { + case DCA_PROVIDER_ADD: + /* if already enabled, don't do it again */ + if (adapter->flags & IGB_FLAG_DCA_ENABLED) + break; + if (dca_add_requester(dev) == 0) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; + dev_info(&pdev->dev, "DCA enabled\n"); + igb_setup_dca(adapter); + break; + } + fallthrough; /* since DCA is disabled. */ + case DCA_PROVIDER_REMOVE: + if (adapter->flags & IGB_FLAG_DCA_ENABLED) { + /* without this a class_device is left + * hanging around in the sysfs model + */ + dca_remove_requester(dev); + dev_info(&pdev->dev, "DCA disabled\n"); + adapter->flags &= ~IGB_FLAG_DCA_ENABLED; + wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); + } + break; + } + + return 0; +} + +static int igb_notify_dca(struct notifier_block *nb, unsigned long event, + void *p) +{ + int ret_val; + + ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, + __igb_notify_dca); + + return ret_val ? 
NOTIFY_BAD : NOTIFY_DONE; +} +#endif /* CONFIG_IGB_DCA */ + +#ifdef CONFIG_PCI_IOV +static int igb_vf_configure(struct igb_adapter *adapter, int vf) +{ + unsigned char mac_addr[ETH_ALEN]; + + eth_zero_addr(mac_addr); + igb_set_vf_mac(adapter, vf, mac_addr); + + /* By default spoof check is enabled for all VFs */ + adapter->vf_data[vf].spoofchk_enabled = true; + + /* By default VFs are not trusted */ + adapter->vf_data[vf].trusted = false; + + return 0; +} + +#endif +static void igb_ping_all_vfs(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ping; + int i; + + for (i = 0 ; i < adapter->vfs_allocated_count; i++) { + ping = E1000_PF_CONTROL_MSG; + if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) + ping |= E1000_VT_MSGTYPE_CTS; + igb_write_mbx(hw, &ping, 1, i); + } +} + +static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr = rd32(E1000_VMOLR(vf)); + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + + vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | + IGB_VF_FLAG_MULTI_PROMISC); + vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); + + if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { + vmolr |= E1000_VMOLR_MPME; + vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; + *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; + } else { + /* if we have hashes and we are clearing a multicast promisc + * flag we need to write the hashes to the MTA as this step + * was previously skipped + */ + if (vf_data->num_vf_mc_hashes > 30) { + vmolr |= E1000_VMOLR_MPME; + } else if (vf_data->num_vf_mc_hashes) { + int j; + + vmolr |= E1000_VMOLR_ROMPE; + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) + igb_mta_set(hw, vf_data->vf_mc_hashes[j]); + } + } + + wr32(E1000_VMOLR(vf), vmolr); + + /* there are flags left unprocessed, likely not supported */ + if (*msgbuf & E1000_VT_MSGINFO_MASK) + return -EINVAL; + + return 0; +} + +static int igb_set_vf_multicasts(struct igb_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + int i; + + /* salt away the number of multicast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vf_data->num_vf_mc_hashes = n; + + /* only up to 30 hash values supported */ + if (n > 30) + n = 30; + + /* store the hashes for later use */ + for (i = 0; i < n; i++) + vf_data->vf_mc_hashes[i] = hash_list[i]; + + /* Flush and reset the mta with the new values */ + igb_set_rx_mode(adapter->netdev); + + return 0; +} + +static void igb_restore_vf_multicasts(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data; + int i, j; + + for (i = 0; i < adapter->vfs_allocated_count; i++) { + u32 vmolr = rd32(E1000_VMOLR(i)); + + vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); + + vf_data = &adapter->vf_data[i]; + + if ((vf_data->num_vf_mc_hashes > 30) || + (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) { + vmolr |= E1000_VMOLR_MPME; + } else if (vf_data->num_vf_mc_hashes) { + vmolr |= E1000_VMOLR_ROMPE; + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) + igb_mta_set(hw, vf_data->vf_mc_hashes[j]); + } + wr32(E1000_VMOLR(i), vmolr); + } +} + +static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pool_mask, vlvf_mask, i; + + /* create mask for VF and other pools */ + pool_mask = 
E1000_VLVF_POOLSEL_MASK; + vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf); + + /* drop PF from pool bits */ + pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT + + adapter->vfs_allocated_count); + + /* Find the vlan filter for this id */ + for (i = E1000_VLVF_ARRAY_SIZE; i--;) { + u32 vlvf = rd32(E1000_VLVF(i)); + u32 vfta_mask, vid, vfta; + + /* remove the vf from the pool */ + if (!(vlvf & vlvf_mask)) + continue; + + /* clear out bit from VLVF */ + vlvf ^= vlvf_mask; + + /* if other pools are present, just remove ourselves */ + if (vlvf & pool_mask) + goto update_vlvfb; + + /* if PF is present, leave VFTA */ + if (vlvf & E1000_VLVF_POOLSEL_MASK) + goto update_vlvf; + + vid = vlvf & E1000_VLVF_VLANID_MASK; + vfta_mask = BIT(vid % 32); + + /* clear bit from VFTA */ + vfta = adapter->shadow_vfta[vid / 32]; + if (vfta & vfta_mask) + hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask); +update_vlvf: + /* clear pool selection enable */ + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + vlvf &= E1000_VLVF_POOLSEL_MASK; + else + vlvf = 0; +update_vlvfb: + /* clear pool bits */ + wr32(E1000_VLVF(i), vlvf); + } +} + +static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan) +{ + u32 vlvf; + int idx; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the VLAN id in the VLVF entries */ + for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) { + vlvf = rd32(E1000_VLVF(idx)); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + return idx; +} + +static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) +{ + struct e1000_hw *hw = &adapter->hw; + u32 bits, pf_id; + int idx; + + idx = igb_find_vlvf_entry(hw, vid); + if (!idx) + return; + + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK; + bits &= rd32(E1000_VLVF(idx)); + + /* Disable the filter so this falls into the default pool. */ + if (!bits) { + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + wr32(E1000_VLVF(idx), BIT(pf_id)); + else + wr32(E1000_VLVF(idx), 0); + } +} + +static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid, + bool add, u32 vf) +{ + int pf_id = adapter->vfs_allocated_count; + struct e1000_hw *hw = &adapter->hw; + int err; + + /* If VLAN overlaps with one the PF is currently monitoring make + * sure that we are able to allocate a VLVF entry. This may be + * redundant but it guarantees PF will maintain visibility to + * the VLAN. + */ + if (add && test_bit(vid, adapter->active_vlans)) { + err = igb_vfta_set(hw, vid, pf_id, true, false); + if (err) + return err; + } + + err = igb_vfta_set(hw, vid, vf, add, false); + + if (add && !err) + return err; + + /* If we failed to add the VF VLAN or we are removing the VF VLAN + * we may need to drop the PF pool bit in order to allow us to free + * up the VLVF resources. 
+ */ + if (test_bit(vid, adapter->active_vlans) || + (adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_update_pf_vlvf(adapter, vid); + + return err; +} + +static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + + if (vid) + wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); + else + wr32(E1000_VMVIR(vf), 0); +} + +static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf, + u16 vlan, u8 qos) +{ + int err; + + err = igb_set_vf_vlan(adapter, vlan, true, vf); + if (err) + return err; + + igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vlan); + + /* revoke access to previous VLAN */ + if (vlan != adapter->vf_data[vf].pf_vlan) + igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); + + adapter->vf_data[vf].pf_vlan = vlan; + adapter->vf_data[vf].pf_qos = qos; + igb_set_vf_vlan_strip(adapter, vf, true); + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + + return err; +} + +static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf) +{ + /* Restore tagless access via VLAN 0 */ + igb_set_vf_vlan(adapter, 0, true, vf); + + igb_set_vmvir(adapter, 0, vf); + igb_set_vmolr(adapter, vf, true); + + /* Remove any PF assigned VLAN */ + if (adapter->vf_data[vf].pf_vlan) + igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); + + adapter->vf_data[vf].pf_vlan = 0; + adapter->vf_data[vf].pf_qos = 0; + igb_set_vf_vlan_strip(adapter, vf, false); + + return 0; +} + +static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf, + u16 vlan, u8 qos, __be16 vlan_proto) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) + return -EINVAL; + + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + return (vlan || qos) ? 
igb_enable_port_vlan(adapter, vf, vlan, qos) : + igb_disable_port_vlan(adapter, vf); +} + +static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +{ + int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); + int ret; + + if (adapter->vf_data[vf].pf_vlan) + return -1; + + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + ret = igb_set_vf_vlan(adapter, vid, !!add, vf); + if (!ret) + igb_set_vf_vlan_strip(adapter, vf, !!vid); + return ret; +} + +static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) +{ + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + + /* clear flags - except flag that indicates PF has set the MAC */ + vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC; + vf_data->last_nack = jiffies; + + /* reset vlans for device */ + igb_clear_vf_vfta(adapter, vf); + igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf); + igb_set_vmvir(adapter, vf_data->pf_vlan | + (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vf_data->pf_vlan); + igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan)); + + /* reset multicast table array for vf */ + adapter->vf_data[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + igb_set_rx_mode(adapter->netdev); +} + +static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) +{ + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; + + /* clear mac address as we were hotplug removed/added */ + if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) + eth_zero_addr(vf_mac); + + /* process remaining reset events */ + igb_vf_reset(adapter, vf); +} + +static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; + u32 reg, msgbuf[3]; + u8 *addr = (u8 *)(&msgbuf[1]); + + /* process all the same items cleared in a function level reset */ + igb_vf_reset(adapter, vf); + + /* set vf mac address */ + igb_set_vf_mac(adapter, vf, vf_mac); + + /* enable transmit and receive for vf */ + reg = rd32(E1000_VFTE); + wr32(E1000_VFTE, reg | BIT(vf)); + reg = rd32(E1000_VFRE); + wr32(E1000_VFRE, reg | BIT(vf)); + + adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; + + /* reply to reset with ack and vf mac address */ + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK; + } + igb_write_mbx(hw, msgbuf, 3, vf); +} + +static void igb_flush_mac_table(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < hw->mac.rar_entry_count; i++) { + adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; + eth_zero_addr(adapter->mac_table[i].addr); + adapter->mac_table[i].queue = 0; + igb_rar_set_index(adapter, i); + } +} + +static int igb_available_rars(struct igb_adapter *adapter, u8 queue) +{ + struct e1000_hw *hw = &adapter->hw; + /* do not count rar entries reserved for VFs MAC addresses */ + int rar_entries = hw->mac.rar_entry_count - + adapter->vfs_allocated_count; + int i, count = 0; + + for (i = 0; i < rar_entries; i++) { + /* do not count default entries */ + if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) + continue; + + /* do not count "in use" entries for different queues */ + if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) && + (adapter->mac_table[i].queue != 
queue)) + continue; + + count++; + } + + return count; +} + +/* Set default MAC address for the PF in the first RAR entry */ +static void igb_set_default_mac_filter(struct igb_adapter *adapter) +{ + struct igb_mac_addr *mac_table = &adapter->mac_table[0]; + + ether_addr_copy(mac_table->addr, adapter->hw.mac.addr); + mac_table->queue = adapter->vfs_allocated_count; + mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; + + igb_rar_set_index(adapter, 0); +} + +/* If the filter to be added and an already existing filter express + * the same address and address type, it should be possible to only + * override the other configurations, for example the queue to steer + * traffic. + */ +static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry, + const u8 *addr, const u8 flags) +{ + if (!(entry->state & IGB_MAC_STATE_IN_USE)) + return true; + + if ((entry->state & IGB_MAC_STATE_SRC_ADDR) != + (flags & IGB_MAC_STATE_SRC_ADDR)) + return false; + + if (!ether_addr_equal(addr, entry->addr)) + return false; + + return true; +} + +/* Add a MAC filter for 'addr' directing matching traffic to 'queue', + * 'flags' is used to indicate what kind of match is made, match is by + * default for the destination address, if matching by source address + * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used. + */ +static int igb_add_mac_filter_flags(struct igb_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) +{ + struct e1000_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count - + adapter->vfs_allocated_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for the first empty entry in the MAC table. + * Do not touch entries at the end of the table reserved for the VF MAC + * addresses. + */ + for (i = 0; i < rar_entries; i++) { + if (!igb_mac_entry_can_be_used(&adapter->mac_table[i], + addr, flags)) + continue; + + ether_addr_copy(adapter->mac_table[i].addr, addr); + adapter->mac_table[i].queue = queue; + adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags; + + igb_rar_set_index(adapter, i); + return i; + } + + return -ENOSPC; +} + +static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, + const u8 queue) +{ + return igb_add_mac_filter_flags(adapter, addr, queue, 0); +} + +/* Remove a MAC filter for 'addr' directing matching traffic to + * 'queue', 'flags' is used to indicate what kind of match need to be + * removed, match is by default for the destination address, if + * matching by source address is to be removed the flag + * IGB_MAC_STATE_SRC_ADDR can be used. + */ +static int igb_del_mac_filter_flags(struct igb_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) +{ + struct e1000_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count - + adapter->vfs_allocated_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for matching entry in the MAC table based on given address + * and queue. Do not touch entries at the end of the table reserved + * for the VF MAC addresses. 
+ */ + for (i = 0; i < rar_entries; i++) { + if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)) + continue; + if ((adapter->mac_table[i].state & flags) != flags) + continue; + if (adapter->mac_table[i].queue != queue) + continue; + if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) + continue; + + /* When a filter for the default address is "deleted", + * we return it to its initial configuration + */ + if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) { + adapter->mac_table[i].state = + IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; + adapter->mac_table[i].queue = + adapter->vfs_allocated_count; + } else { + adapter->mac_table[i].state = 0; + adapter->mac_table[i].queue = 0; + eth_zero_addr(adapter->mac_table[i].addr); + } + + igb_rar_set_index(adapter, i); + return 0; + } + + return -ENOENT; +} + +static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, + const u8 queue) +{ + return igb_del_mac_filter_flags(adapter, addr, queue, 0); +} + +int igb_add_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + struct e1000_hw *hw = &adapter->hw; + + /* In theory, this should be supported on 82575 as well, but + * that part wasn't easily accessible during development. + */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + return igb_add_mac_filter_flags(adapter, addr, queue, + IGB_MAC_STATE_QUEUE_STEERING | flags); +} + +int igb_del_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + return igb_del_mac_filter_flags(adapter, addr, queue, + IGB_MAC_STATE_QUEUE_STEERING | flags); +} + +static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int ret; + + ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count); + + return min_t(int, ret, 0); +} + +static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count); + + return 0; +} + +static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, + const u32 info, const u8 *addr) +{ + struct pci_dev *pdev = adapter->pdev; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + struct list_head *pos; + struct vf_mac_filter *entry = NULL; + int ret = 0; + + switch (info) { + case E1000_VF_MAC_FILTER_CLR: + /* remove all unicast MAC filters related to the current VF */ + list_for_each(pos, &adapter->vf_macs.l) { + entry = list_entry(pos, struct vf_mac_filter, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + igb_del_mac_filter(adapter, entry->vf_mac, vf); + } + } + break; + case E1000_VF_MAC_FILTER_ADD: + if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && + !vf_data->trusted) { + dev_warn(&pdev->dev, + "VF %d requested MAC filter but is administratively denied\n", + vf); + return -EINVAL; + } + if (!is_valid_ether_addr(addr)) { + dev_warn(&pdev->dev, + "VF %d attempted to set invalid MAC filter\n", + vf); + return -EINVAL; + } + + /* try to find empty slot in the list */ + list_for_each(pos, &adapter->vf_macs.l) { + entry = list_entry(pos, struct vf_mac_filter, l); + if (entry->free) + break; + } + + if (entry && entry->free) { + entry->free = false; + entry->vf = vf; + ether_addr_copy(entry->vf_mac, addr); + + ret = igb_add_mac_filter(adapter, addr, vf); + ret = min_t(int, ret, 0); + } else { + ret = -ENOSPC; + } + + if (ret == -ENOSPC) + dev_warn(&pdev->dev, + "VF %d 
has requested MAC filter but there is no space for it\n", + vf); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) +{ + struct pci_dev *pdev = adapter->pdev; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + u32 info = msg[0] & E1000_VT_MSGINFO_MASK; + + /* The VF MAC Address is stored in a packed array of bytes + * starting at the second 32 bit word of the msg array + */ + unsigned char *addr = (unsigned char *)&msg[1]; + int ret = 0; + + if (!info) { + if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && + !vf_data->trusted) { + dev_warn(&pdev->dev, + "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", + vf); + return -EINVAL; + } + + if (!is_valid_ether_addr(addr)) { + dev_warn(&pdev->dev, + "VF %d attempted to set invalid MAC\n", + vf); + return -EINVAL; + } + + ret = igb_set_vf_mac(adapter, vf, addr); + } else { + ret = igb_set_vf_mac_filter(adapter, vf, info, addr); + } + + return ret; +} + +static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + u32 msg = E1000_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!(vf_data->flags & IGB_VF_FLAG_CTS) && + time_after(jiffies, vf_data->last_nack + (2 * HZ))) { + igb_write_mbx(hw, &msg, 1, vf); + vf_data->last_nack = jiffies; + } +} + +static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) +{ + struct pci_dev *pdev = adapter->pdev; + u32 msgbuf[E1000_VFMAILBOX_SIZE]; + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + s32 retval; + + retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false); + + if (retval) { + /* if receive failed revoke VF CTS stats and restart init */ + dev_err(&pdev->dev, "Error receiving message from VF\n"); + vf_data->flags &= ~IGB_VF_FLAG_CTS; + if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) + goto unlock; + goto out; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) + goto unlock; + + /* until the vf completes a reset it should not be + * allowed to start any configuration. 
+ */ + if (msgbuf[0] == E1000_VF_RESET) { + /* unlocks mailbox */ + igb_vf_reset_msg(adapter, vf); + return; + } + + if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { + if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) + goto unlock; + retval = -1; + goto out; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case E1000_VF_SET_MAC_ADDR: + retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); + break; + case E1000_VF_SET_PROMISC: + retval = igb_set_vf_promisc(adapter, msgbuf, vf); + break; + case E1000_VF_SET_MULTICAST: + retval = igb_set_vf_multicasts(adapter, msgbuf, vf); + break; + case E1000_VF_SET_LPE: + retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); + break; + case E1000_VF_SET_VLAN: + retval = -1; + if (vf_data->pf_vlan) + dev_warn(&pdev->dev, + "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", + vf); + else + retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + default: + dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); + retval = -1; + break; + } + + msgbuf[0] |= E1000_VT_MSGTYPE_CTS; +out: + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= E1000_VT_MSGTYPE_NACK; + else + msgbuf[0] |= E1000_VT_MSGTYPE_ACK; + + /* unlocks mailbox */ + igb_write_mbx(hw, msgbuf, 1, vf); + return; + +unlock: + igb_unlock_mbx(hw, vf); +} + +static void igb_msg_task(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vf; + + for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { + /* process any reset requests */ + if (!igb_check_for_rst(hw, vf)) + igb_vf_reset_event(adapter, vf); + + /* process any messages pending */ + if (!igb_check_for_msg(hw, vf)) + igb_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!igb_check_for_ack(hw, vf)) + igb_rcv_ack_from_vf(adapter, vf); + } +} + +/** + * igb_set_uta - Set unicast filter table address + * @adapter: board private structure + * @set: boolean indicating if we are setting or clearing bits + * + * The unicast table address is a register array of 32-bit registers. + * The table is meant to be used in a way similar to how the MTA is used + * however due to certain limitations in the hardware it is necessary to + * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous + * enable bit to allow vlan tag stripping when promiscuous mode is enabled + **/ +static void igb_set_uta(struct igb_adapter *adapter, bool set) +{ + struct e1000_hw *hw = &adapter->hw; + u32 uta = set ? 
~0 : 0; + int i; + + /* we only need to do this if VMDq is enabled */ + if (!adapter->vfs_allocated_count) + return; + + for (i = hw->mac.uta_reg_count; i--;) + array_wr32(E1000_UTA, i, uta); +} + +/** + * igb_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t igb_intr_msi(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct igb_q_vector *q_vector = adapter->q_vector[0]; + struct e1000_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ + u32 icr = rd32(E1000_ICR); + + igb_write_itr(q_vector); + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & E1000_ICR_TS) + igb_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igb_intr - Legacy Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t igb_intr(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct igb_q_vector *q_vector = adapter->q_vector[0]; + struct e1000_hw *hw = &adapter->hw; + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No + * need for the IMC write + */ + u32 icr = rd32(E1000_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & E1000_ICR_INT_ASSERTED)) + return IRQ_NONE; + + igb_write_itr(q_vector); + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & E1000_ICR_TS) + igb_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void igb_ring_irq_enable(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { + if ((adapter->num_q_vectors == 1) && !adapter->vf_data) + igb_set_itr(q_vector); + else + igb_update_ring_itr(q_vector); + } + + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (adapter->flags & IGB_FLAG_HAS_MSIX) + wr32(E1000_EIMS, q_vector->eims_value); + else + igb_irq_enable(adapter); + } +} + +/** + * igb_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + **/ +static int igb_poll(struct napi_struct *napi, int budget) +{ + struct igb_q_vector *q_vector = container_of(napi, + struct igb_q_vector, + napi); + bool clean_complete = true; + int work_done = 0; + +#ifdef CONFIG_IGB_DCA + if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) + igb_update_dca(q_vector); +#endif + if (q_vector->tx.ring) + clean_complete = igb_clean_tx_irq(q_vector, budget); + + if (q_vector->rx.ring) { + int cleaned = igb_clean_rx_irq(q_vector, budget); + + work_done += 
cleaned; + if (cleaned >= budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + igb_ring_irq_enable(q_vector); + + return min(work_done, budget - 1); +} + +/** + * igb_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * @napi_budget: Used to determine if we are in netpoll + * + * returns true if ring is completely cleaned + **/ +static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct igb_ring *tx_ring = q_vector->tx.ring; + struct igb_tx_buffer *tx_buffer; + union e1000_adv_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__IGB_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IGB_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + if (tx_buffer->type == IGB_TYPE_SKB) + napi_consume_skb(tx_buffer->skb, napi_budget); + else + xdp_return_frame(tx_buffer->xdpf); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + dma_unmap_len_set(tx_buffer, len, 0); + + /* clear last DMA location and unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.bytes += total_bytes; + tx_ring->tx_stats.packets += total_packets; + u64_stats_update_end(&tx_ring->tx_syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { + struct e1000_hw *hw = &adapter->hw; + 
+ /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + dev_err(tx_ring->dev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%p>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + rd32(E1000_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_buffer->time_stamp, + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + /* we are about to reset, no point in enabling stuff */ + return true; + } + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && + netif_carrier_ok(tx_ring->netdev) && + igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !(test_bit(__IGB_DOWN, &adapter->state))) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.restart_queue++; + u64_stats_update_end(&tx_ring->tx_syncp); + } + } + + return !!budget; +} + +/** + * igb_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void igb_reuse_rx_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *old_buff) +{ + struct igb_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls. + */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, + int rx_buf_pgcnt) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote and pfmemalloc pages */ + if (!dev_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) + return false; +#else +#define IGB_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048) + + if (rx_buffer->page_offset > IGB_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. 
+ */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * igb_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: size of buffer to be added + * + * This function will add the data contained in rx_buffer->page to the skb. + **/ +static void igb_add_rx_frag(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IGB_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + ktime_t timestamp) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + unsigned int size = xdp->data_end - xdp->data; + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data); + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); + if (unlikely(!skb)) + return NULL; + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IGB_RX_HDR_LEN) + headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (xdp->data + headlen) - page_address(rx_buffer->page), + size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + ktime_t timestamp) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data_meta); + + /* build an skb around the page buffer */ + skb = build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, xdp->data_end - xdp->data); + + if (metasize) + skb_metadata_set(skb, metasize); + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return skb; 
+} + +static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter, + struct igb_ring *rx_ring, + struct xdp_buff *xdp) +{ + int err, result = IGB_XDP_PASS; + struct bpf_prog *xdp_prog; + u32 act; + + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + + if (!xdp_prog) + goto xdp_out; + + prefetchw(xdp->data_hard_start); /* xdp_frame write */ + + act = bpf_prog_run_xdp(xdp_prog, xdp); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + result = igb_xdp_xmit_back(adapter, xdp); + if (result == IGB_XDP_CONSUMED) + goto out_failure; + break; + case XDP_REDIRECT: + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); + if (err) + goto out_failure; + result = IGB_XDP_REDIR; + break; + default: + bpf_warn_invalid_xdp_action(act); + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_DROP: + result = IGB_XDP_CONSUMED; + break; + } +xdp_out: + return ERR_PTR(-result); +} + +static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ +#else + truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IGB_SKB_PAD + size) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : + SKB_DATA_ALIGN(size); +#endif + return truesize; +} + +static void igb_rx_buffer_flip(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + unsigned int size) +{ + unsigned int truesize = igb_rx_frame_truesize(rx_ring, size); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static inline void igb_rx_checksum(struct igb_ring *ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set */ + if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM)) + return; + + /* Rx checksum disabled via ethtool */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (igb_test_staterr(rx_desc, + E1000_RXDEXT_STATERR_TCPE | + E1000_RXDEXT_STATERR_IPE)) { + /* work around errata with sctp packets where the TCPE aka + * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) + * packets, (aka let the stack check the crc32c) + */ + if (!((skb->len == 60) && + test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.csum_err++; + u64_stats_update_end(&ring->rx_syncp); + } + /* let the stack verify checksum errors */ + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | + E1000_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + dev_dbg(ring->dev, "cksum success: bits %08X\n", + le32_to_cpu(rx_desc->wb.upper.status_error)); +} + +static inline void igb_rx_hash(struct igb_ring *ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (ring->netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, + le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + PKT_HASH_TYPE_L3); +} + +/** + * igb_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. 
+ **/ +static bool igb_is_non_eop(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGB_RX_DESC(rx_ring, ntc)); + + if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))) + return false; + + return true; +} + +/** + * igb_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool igb_cleanup_headers(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + + if (unlikely((igb_test_staterr(rx_desc, + E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { + struct net_device *netdev = rx_ring->netdev; + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * igb_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +static void igb_process_skb_fields(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + + igb_rx_hash(rx_ring, rx_desc, skb); + + igb_rx_checksum(rx_ring, rx_desc, skb); + + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && + !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) + igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { + u16 vid; + + if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && + test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) + vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); + else + vid = le16_to_cpu(rx_desc->wb.upper.vlan); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static unsigned int igb_rx_offset(struct igb_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? 
IGB_SKB_PAD : 0;
+}
+
+static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
+					       const unsigned int size, int *rx_buf_pgcnt)
+{
+	struct igb_rx_buffer *rx_buffer;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	*rx_buf_pgcnt =
+#if (PAGE_SIZE < 8192)
+		page_count(rx_buffer->page);
+#else
+		0;
+#endif
+	prefetchw(rx_buffer->page);
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      size,
+				      DMA_FROM_DEVICE);
+
+	rx_buffer->pagecnt_bias--;
+
+	return rx_buffer;
+}
+
+static void igb_put_rx_buffer(struct igb_ring *rx_ring,
+			      struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
+{
+	if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
+		/* hand second half of page back to the ring */
+		igb_reuse_rx_page(rx_ring, rx_buffer);
+	} else {
+		/* We are not reusing the buffer so unmap it and free
+		 * any references we are holding to it
+		 */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
+				     IGB_RX_DMA_ATTR);
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+	}
+
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+}
+
+static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct igb_ring *rx_ring = q_vector->rx.ring;
+	struct sk_buff *skb = rx_ring->skb;
+	unsigned int total_bytes = 0, total_packets = 0;
+	u16 cleaned_count = igb_desc_unused(rx_ring);
+	unsigned int xdp_xmit = 0;
+	struct xdp_buff xdp;
+	u32 frame_sz = 0;
+	int rx_buf_pgcnt;
+
+	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+	frame_sz = igb_rx_frame_truesize(rx_ring, 0);
+#endif
+	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+
+	while (likely(total_packets < budget)) {
+		union e1000_adv_rx_desc *rx_desc;
+		struct igb_rx_buffer *rx_buffer;
+		ktime_t timestamp = 0;
+		int pkt_offset = 0;
+		unsigned int size;
+		void *pktbuf;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
+			igb_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
+		size = le16_to_cpu(rx_desc->wb.upper.length);
+		if (!size)
+			break;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		dma_rmb();
+
+		rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
+		pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
+
+		/* pull rx packet timestamp if available and valid */
+		if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+			int ts_hdr_len;
+
+			ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+							 pktbuf, &timestamp);
+
+			pkt_offset += ts_hdr_len;
+			size -= ts_hdr_len;
+		}
+
+		/* retrieve a buffer from the ring */
+		if (!skb) {
+			unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
+			unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
+
+			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+#if (PAGE_SIZE > 4096)
+			/* At larger PAGE_SIZE, frame_sz depend on len size */
+			xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
+#endif
+			skb = igb_run_xdp(adapter, rx_ring, &xdp);
+		}
+
+		if (IS_ERR(skb)) {
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
+				xdp_xmit |= xdp_res;
+				igb_rx_buffer_flip(rx_ring, rx_buffer, size);
+			} else
{ + rx_buffer->pagecnt_bias++; + } + total_packets++; + total_bytes += size; + } else if (skb) + igb_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = igb_build_skb(rx_ring, rx_buffer, &xdp, + timestamp); + else + skb = igb_construct_skb(rx_ring, rx_buffer, + &xdp, timestamp); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt); + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (igb_is_non_eop(rx_ring, rx_desc)) + continue; + + /* verify the packet layout is correct */ + if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + igb_process_skb_fields(rx_ring, rx_desc, skb); + + napi_gro_receive(&q_vector->napi, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + if (xdp_xmit & IGB_XDP_REDIR) + xdp_do_flush(); + + if (xdp_xmit & IGB_XDP_TX) { + struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter); + + igb_xdp_ring_update_tail(tx_ring); + } + + u64_stats_update_begin(&rx_ring->rx_syncp); + rx_ring->rx_stats.packets += total_packets; + rx_ring->rx_stats.bytes += total_bytes; + u64_stats_update_end(&rx_ring->rx_syncp); + q_vector->rx.total_packets += total_packets; + q_vector->rx.total_bytes += total_bytes; + + if (cleaned_count) + igb_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_packets; +} + +static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(igb_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + igb_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGB_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, igb_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = igb_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +/** + * igb_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: rx descriptor ring to allocate new receive buffers + * @cleaned_count: count of buffers to allocate + **/ +void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) +{ + union e1000_adv_rx_desc *rx_desc; + struct igb_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + u16 bufsz; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = IGB_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + bufsz = igb_rx_bufsz(rx_ring); + + do { + if (!igb_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); + + /* Refresh the desc even if 
buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IGB_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + writel(i, rx_ring->tail); + } +} + +/** + * igb_mii_ioctl - + * @netdev: pointer to netdev struct + * @ifr: interface structure + * @cmd: ioctl command to execute + **/ +static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + + if (adapter->hw.phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: + if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) + return -EIO; + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } + return 0; +} + +/** + * igb_ioctl - + * @netdev: pointer to netdev struct + * @ifr: interface structure + * @cmd: ioctl command to execute + **/ +static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return igb_mii_ioctl(netdev, ifr, cmd); + case SIOCGHWTSTAMP: + return igb_ptp_get_ts_config(netdev, ifr); + case SIOCSHWTSTAMP: + return igb_ptp_set_ts_config(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + +s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + if (pcie_capability_read_word(adapter->pdev, reg, value)) + return -E1000_ERR_CONFIG; + + return 0; +} + +s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + if (pcie_capability_write_word(adapter->pdev, reg, *value)) + return -E1000_ERR_CONFIG; + + return 0; +} + +static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl; + bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + + if (enable) { + /* enable VLAN tag insert/strip */ + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_VME; + wr32(E1000_CTRL, ctrl); + + /* Disable CFI check */ + rctl = rd32(E1000_RCTL); + rctl &= ~E1000_RCTL_CFIEN; + wr32(E1000_RCTL, rctl); + } else { + /* disable VLAN tag insert/strip */ + ctrl = rd32(E1000_CTRL); + ctrl &= ~E1000_CTRL_VME; + wr32(E1000_CTRL, ctrl); + } + + igb_set_vf_vlan_strip(adapter, 
adapter->vfs_allocated_count, enable); +} + +static int igb_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int pf_id = adapter->vfs_allocated_count; + + /* add the filter since PF can receive vlans w/o entry in vlvf */ + if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_vfta_set(hw, vid, pf_id, true, !!vid); + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int igb_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int pf_id = adapter->vfs_allocated_count; + struct e1000_hw *hw = &adapter->hw; + + /* remove VID from filter table */ + if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_vfta_set(hw, vid, pf_id, false, true); + + clear_bit(vid, adapter->active_vlans); + + return 0; +} + +static void igb_restore_vlan(struct igb_adapter *adapter) +{ + u16 vid = 1; + + igb_vlan_mode(adapter->netdev, adapter->netdev->features); + igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); + + for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) + igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NIC's only allow 1000 gbps Full duplex + * and 100Mbps Full duplex for 100baseFx sfp + */ + if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + case SPEED_10 + DUPLEX_FULL: + case SPEED_100 + DUPLEX_HALF: + goto err_inval; + default: + break; + } + } + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; + break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl, status; + u32 wufc = runtime ? 
E1000_WUFC_LNKC : adapter->wol; + bool wake; + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) + __igb_close(netdev, true); + + igb_ptp_suspend(adapter); + + igb_clear_interrupt_scheme(adapter); + rtnl_unlock(); + + status = rd32(E1000_STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + igb_setup_rctl(adapter); + igb_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { + rctl = rd32(E1000_RCTL); + rctl |= E1000_RCTL_MPE; + wr32(E1000_RCTL, rctl); + } + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_ADVD3WUC; + wr32(E1000_CTRL, ctrl); + + /* Allow time for pending master requests to run */ + igb_disable_pcie_master(hw); + + wr32(E1000_WUC, E1000_WUC_PME_EN); + wr32(E1000_WUFC, wufc); + } else { + wr32(E1000_WUC, 0); + wr32(E1000_WUFC, 0); + } + + wake = wufc || adapter->en_mng_pt; + if (!wake) + igb_power_down_link(adapter); + else + igb_power_up_link(adapter); + + if (enable_wake) + *enable_wake = wake; + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igb_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +static void igb_deliver_wake_packet(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sk_buff *skb; + u32 wupl; + + wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK; + + /* WUPM stores only the first 128 bytes of the wake packet. + * Read the packet only if we have the whole thing. + */ + if ((wupl == 0) || (wupl > E1000_WUPM_BYTES)) + return; + + skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES); + if (!skb) + return; + + skb_put(skb, wupl); + + /* Ensure reads are 32-bit aligned */ + wupl = roundup(wupl, 4); + + memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl); + + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); +} + +static int __maybe_unused igb_suspend(struct device *dev) +{ + return __igb_shutdown(to_pci_dev(dev), NULL, 0); +} + +static int __maybe_unused igb_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err, val; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!pci_device_is_present(pdev)) + return -ENODEV; + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, + "igb: Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igb_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igb_get_hw_control(adapter); + + val = rd32(E1000_WUS); + if (val & WAKE_PKT_WUS) + igb_deliver_wake_packet(netdev); + + wr32(E1000_WUS, ~0); + + rtnl_lock(); + if (!err && netif_running(netdev)) + err = __igb_open(netdev, true); + + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + + return err; +} + +static int __maybe_unused igb_runtime_idle(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + + if (!igb_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC * 5); + + return -EBUSY; +} + +static int __maybe_unused igb_runtime_suspend(struct device *dev) +{ + return __igb_shutdown(to_pci_dev(dev), NULL, 1); +} + +static int __maybe_unused igb_runtime_resume(struct device *dev) +{ + return igb_resume(dev); +} + +static void igb_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __igb_shutdown(pdev, &wake, 0); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_PCI_IOV +static int igb_sriov_reinit(struct pci_dev *dev) +{ + struct net_device *netdev = pci_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + rtnl_lock(); + + if (netif_running(netdev)) + igb_close(netdev); + else + igb_reset(adapter); + + igb_clear_interrupt_scheme(adapter); + + igb_init_queue_configuration(adapter); + + if (igb_init_interrupt_scheme(adapter, true)) { + rtnl_unlock(); + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + igb_open(netdev); + + rtnl_unlock(); + + return 0; +} + +static int igb_pci_disable_sriov(struct pci_dev *dev) +{ + int err = igb_disable_sriov(dev); + + if (!err) + err = igb_sriov_reinit(dev); + + return err; +} + +static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs) +{ + int err = igb_enable_sriov(dev, num_vfs); + + if (err) + goto out; + + err = igb_sriov_reinit(dev); + if (!err) + return num_vfs; + +out: + return err; +} + +#endif +static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + if (num_vfs == 0) + return igb_pci_disable_sriov(dev); + else + return igb_pci_enable_sriov(dev, num_vfs); +#endif + return 0; +} + +/** + * igb_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + **/ +static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igb_down(adapter); + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igb_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the igb_resume routine. 
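+ * (For orientation, a rough summary of what that means here: the body
+ * below re-enables the PCI device, restores its PCI state, disables
+ * D3 wake-up and resets the MAC before reporting recovery; bringing
+ * the interface itself back up is left to igb_io_resume().)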
+ **/
+static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ pci_ers_result_t result;
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ /* In case of a PCI error, the adapter loses its HW address
+ * so we should re-assign it here.
+ */
+ hw->hw_addr = adapter->io_addr;
+
+ igb_reset(adapter);
+ wr32(E1000_WUS, ~0);
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ return result;
+}
+
+/**
+ * igb_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the igb_resume routine.
+ */
+static void igb_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (igb_up(adapter)) {
+ dev_err(&pdev->dev, "igb_up failed after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+
+ /* let the f/w know that the h/w is now under the control of the
+ * driver.
+ */
+ igb_get_hw_control(adapter);
+}
+
+/**
+ * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
+ * @adapter: Pointer to adapter structure
+ * @index: Index of the RAR entry which needs to be synced with MAC table
+ **/
+static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rar_low, rar_high;
+ u8 *addr = adapter->mac_table[index].addr;
+
+ /* HW expects these to be in network order when they are plugged
+ * into the registers which are little endian. In order to guarantee
+ * that ordering we need to do an leXX_to_cpup here in order to be
+ * ready for the byteswap that occurs with writel
+ */
+ rar_low = le32_to_cpup((__le32 *)(addr));
+ rar_high = le16_to_cpup((__le16 *)(addr + 4));
+
+ /* Indicate to hardware the Address is Valid.
*/ + if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) { + if (is_valid_ether_addr(addr)) + rar_high |= E1000_RAH_AV; + + if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR) + rar_high |= E1000_RAH_ASEL_SRC_ADDR; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_i210: + if (adapter->mac_table[index].state & + IGB_MAC_STATE_QUEUE_STEERING) + rar_high |= E1000_RAH_QSEL_ENABLE; + + rar_high |= E1000_RAH_POOL_1 * + adapter->mac_table[index].queue; + break; + default: + rar_high |= E1000_RAH_POOL_1 << + adapter->mac_table[index].queue; + break; + } + } + + wr32(E1000_RAL(index), rar_low); + wrfl(); + wr32(E1000_RAH(index), rar_high); + wrfl(); +} + +static int igb_set_vf_mac(struct igb_adapter *adapter, + int vf, unsigned char *mac_addr) +{ + struct e1000_hw *hw = &adapter->hw; + /* VF MAC addresses start at end of receive addresses and moves + * towards the first, as a result a collision should not be possible + */ + int rar_entry = hw->mac.rar_entry_count - (vf + 1); + unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses; + + ether_addr_copy(vf_mac_addr, mac_addr); + ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr); + adapter->mac_table[rar_entry].queue = vf; + adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE; + igb_rar_set_index(adapter, rar_entry); + + return 0; +} + +static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + + /* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC + * flag and allows to overwrite the MAC via VF netdev. This + * is necessary to allow libvirt a way to restore the original + * MAC after unbinding vfio-pci and reloading igbvf after shutting + * down a VM. + */ + if (is_zero_ether_addr(mac)) { + adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, + "remove administratively set MAC on VF %d\n", + vf); + } else if (is_valid_ether_addr(mac)) { + adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", + mac, vf); + dev_info(&adapter->pdev->dev, + "Reload the VF driver to make this change effective."); + /* Generate additional warning if PF is down */ + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF MAC address has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + } else { + return -EINVAL; + } + return igb_set_vf_mac(adapter, vf, mac); +} + +static int igb_link_mbps(int internal_link_speed) +{ + switch (internal_link_speed) { + case SPEED_100: + return 100; + case SPEED_1000: + return 1000; + default: + return 0; + } +} + +static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, + int link_speed) +{ + int rf_dec, rf_int; + u32 bcnrc_val; + + if (tx_rate != 0) { + /* Calculate the rate factor values to set */ + rf_int = link_speed / tx_rate; + rf_dec = (link_speed - (rf_int * tx_rate)); + rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) / + tx_rate; + + bcnrc_val = E1000_RTTBCNRC_RS_ENA; + bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) & + E1000_RTTBCNRC_RF_INT_MASK); + bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); + } else { + bcnrc_val = 0; + } + + wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ + /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. 
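+ * (Illustrative numbers for the rate-factor computation above, assuming
+ * a 1000 Mbps link and tx_rate = 300 Mbps: rf_int = 1000 / 300 = 3 and
+ * rf_dec encodes the remaining 100/300 as a binary fraction, so the
+ * value programmed into RTTBCNRC corresponds to a factor of roughly
+ * 3.33 = link_speed / tx_rate.)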
MMW_SIZE=0x014 if 9728-byte jumbo is supported. + */ + wr32(E1000_RTTBCNRM, 0x14); + wr32(E1000_RTTBCNRC, bcnrc_val); +} + +static void igb_check_vf_rate_limit(struct igb_adapter *adapter) +{ + int actual_link_speed, i; + bool reset_rate = false; + + /* VF TX rate limit was not set or not supported */ + if ((adapter->vf_rate_link_speed == 0) || + (adapter->hw.mac.type != e1000_82576)) + return; + + actual_link_speed = igb_link_mbps(adapter->link_speed); + if (actual_link_speed != adapter->vf_rate_link_speed) { + reset_rate = true; + adapter->vf_rate_link_speed = 0; + dev_info(&adapter->pdev->dev, + "Link speed has been changed. VF Transmit rate is disabled\n"); + } + + for (i = 0; i < adapter->vfs_allocated_count; i++) { + if (reset_rate) + adapter->vf_data[i].tx_rate = 0; + + igb_set_vf_rate_limit(&adapter->hw, i, + adapter->vf_data[i].tx_rate, + actual_link_speed); + } +} + +static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, + int min_tx_rate, int max_tx_rate) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int actual_link_speed; + + if (hw->mac.type != e1000_82576) + return -EOPNOTSUPP; + + if (min_tx_rate) + return -EINVAL; + + actual_link_speed = igb_link_mbps(adapter->link_speed); + if ((vf >= adapter->vfs_allocated_count) || + (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || + (max_tx_rate < 0) || + (max_tx_rate > actual_link_speed)) + return -EINVAL; + + adapter->vf_rate_link_speed = actual_link_speed; + adapter->vf_data[vf].tx_rate = (u16)max_tx_rate; + igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed); + + return 0; +} + +static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, + bool setting) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 reg_val, reg_offset; + + if (!adapter->vfs_allocated_count) + return -EOPNOTSUPP; + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + + reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC; + reg_val = rd32(reg_offset); + if (setting) + reg_val |= (BIT(vf) | + BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); + else + reg_val &= ~(BIT(vf) | + BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); + wr32(reg_offset, reg_val); + + adapter->vf_data[vf].spoofchk_enabled = setting; + return 0; +} + +static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + if (adapter->vf_data[vf].trusted == setting) + return 0; + + adapter->vf_data[vf].trusted = setting; + + dev_info(&adapter->pdev->dev, "VF %u is %strusted\n", + vf, setting ? 
"" : "not "); + return 0; +} + +static int igb_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); + ivi->max_tx_rate = adapter->vf_data[vf].tx_rate; + ivi->min_tx_rate = 0; + ivi->vlan = adapter->vf_data[vf].pf_vlan; + ivi->qos = adapter->vf_data[vf].pf_qos; + ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; + ivi->trusted = adapter->vf_data[vf].trusted; + return 0; +} + +static void igb_vmm_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_i210: + case e1000_i211: + case e1000_i354: + default: + /* replication is not supported for 82575 */ + return; + case e1000_82576: + /* notify HW that the MAC is adding vlan tags */ + reg = rd32(E1000_DTXCTL); + reg |= E1000_DTXCTL_VLAN_ADDED; + wr32(E1000_DTXCTL, reg); + fallthrough; + case e1000_82580: + /* enable replication vlan tag stripping */ + reg = rd32(E1000_RPLOLR); + reg |= E1000_RPLOLR_STRVLAN; + wr32(E1000_RPLOLR, reg); + fallthrough; + case e1000_i350: + /* none of the above registers are supported by i350 */ + break; + } + + if (adapter->vfs_allocated_count) { + igb_vmdq_set_loopback_pf(hw, true); + igb_vmdq_set_replication_pf(hw, true); + igb_vmdq_set_anti_spoofing_pf(hw, true, + adapter->vfs_allocated_count); + } else { + igb_vmdq_set_loopback_pf(hw, false); + igb_vmdq_set_replication_pf(hw, false); + } +} + +static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) +{ + struct e1000_hw *hw = &adapter->hw; + u32 dmac_thr; + u16 hwm; + + if (hw->mac.type > e1000_82580) { + if (adapter->flags & IGB_FLAG_DMAC) { + u32 reg; + + /* force threshold to 0. */ + wr32(E1000_DMCTXTH, 0); + + /* DMA Coalescing high water mark needs to be greater + * than the Rx threshold. Set hwm to PBA - max frame + * size in 16B units, capping it at PBA - 6KB. + */ + hwm = 64 * (pba - 6); + reg = rd32(E1000_FCRTC); + reg &= ~E1000_FCRTC_RTH_COAL_MASK; + reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) + & E1000_FCRTC_RTH_COAL_MASK); + wr32(E1000_FCRTC, reg); + + /* Set the DMA Coalescing Rx threshold to PBA - 2 * max + * frame size, capping it at PBA - 10KB. 
+ */ + dmac_thr = pba - 10; + reg = rd32(E1000_DMACR); + reg &= ~E1000_DMACR_DMACTHR_MASK; + reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) + & E1000_DMACR_DMACTHR_MASK); + + /* transition to L0x or L1 if available..*/ + reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); + + /* watchdog timer= +-1000 usec in 32usec intervals */ + reg |= (1000 >> 5); + + /* Disable BMC-to-OS Watchdog Enable */ + if (hw->mac.type != e1000_i354) + reg &= ~E1000_DMACR_DC_BMC2OSW_EN; + + wr32(E1000_DMACR, reg); + + /* no lower threshold to disable + * coalescing(smart fifb)-UTRESH=0 + */ + wr32(E1000_DMCRTRH, 0); + + reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4); + + wr32(E1000_DMCTLX, reg); + + /* free space in tx packet buffer to wake from + * DMA coal + */ + wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - + (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); + + /* make low power state decision controlled + * by DMA coal + */ + reg = rd32(E1000_PCIEMISC); + reg &= ~E1000_PCIEMISC_LX_DECISION; + wr32(E1000_PCIEMISC, reg); + } /* endif adapter->dmac is not disabled */ + } else if (hw->mac.type == e1000_82580) { + u32 reg = rd32(E1000_PCIEMISC); + + wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION); + wr32(E1000_DMACR, 0); + } +} + +/** + * igb_read_i2c_byte - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: device address + * @data: value read + * + * Performs byte read operation over I2C interface at + * a specified device address. + **/ +s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); + struct i2c_client *this_client = adapter->i2c_client; + s32 status; + u16 swfw_mask = 0; + + if (!this_client) + return E1000_ERR_I2C; + + swfw_mask = E1000_SWFW_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return E1000_ERR_SWFW_SYNC; + + status = i2c_smbus_read_byte_data(this_client, byte_offset); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + if (status < 0) + return E1000_ERR_I2C; + else { + *data = status; + return 0; + } +} + +/** + * igb_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: value to write + * + * Performs byte write operation over I2C interface at + * a specified device address. 
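+ * As in igb_read_i2c_byte() above, the access is serialised via the
+ * SWFW PHY semaphore (E1000_SWFW_PHY0_SM) around the SMBus transfer.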
+ **/ +s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); + struct i2c_client *this_client = adapter->i2c_client; + s32 status; + u16 swfw_mask = E1000_SWFW_PHY0_SM; + + if (!this_client) + return E1000_ERR_I2C; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return E1000_ERR_SWFW_SYNC; + status = i2c_smbus_write_byte_data(this_client, byte_offset, data); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + if (status) + return E1000_ERR_I2C; + else + return 0; + +} + +int igb_reinit_queues(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (netif_running(netdev)) + igb_close(netdev); + + igb_reset_interrupt_capability(adapter); + + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + err = igb_open(netdev); + + return err; +} + +static void igb_nfc_filter_exit(struct igb_adapter *adapter) +{ + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igb_erase_filter(adapter, rule); + + hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node) + igb_erase_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); +} + +static void igb_nfc_filter_restore(struct igb_adapter *adapter) +{ + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igb_add_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); +} +/* igb_main.c */ diff --git a/devices/igb/igb_main-6.1-ethercat.c b/devices/igb/igb_main-6.1-ethercat.c new file mode 100644 index 00000000..13b860ce --- /dev/null +++ b/devices/igb/igb_main-6.1-ethercat.c @@ -0,0 +1,10293 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_IGB_DCA +#include +#endif +#include +#include "igb-6.1-ethercat.h" + +enum queue_mode { + QUEUE_MODE_STRICT_PRIORITY, + QUEUE_MODE_STREAM_RESERVATION, +}; + +enum tx_queue_prio { + TX_QUEUE_PRIO_HIGH, + TX_QUEUE_PRIO_LOW, +}; + +char igb_driver_name[] = "ec_igb"; +static const char igb_driver_string[] = + "Intel(R) Gigabit Ethernet Network Driver (EtherCAT enabled)"; +static const char igb_copyright[] = + "Copyright (c) 2007-2014 Intel Corporation."; + +static const struct e1000_info *igb_info_tbl[] = { + [board_82575] = &e1000_82575_info, +}; + +static const struct pci_device_id igb_pci_tbl[] = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, + /* required last entry */ + {0, } +}; + +// MODULE_DEVICE_TABLE(pci, igb_pci_tbl); + +static int igb_setup_all_tx_resources(struct igb_adapter *); +static int igb_setup_all_rx_resources(struct igb_adapter *); +static void 
igb_free_all_tx_resources(struct igb_adapter *); +static void igb_free_all_rx_resources(struct igb_adapter *); +static void igb_setup_mrqc(struct igb_adapter *); +static int igb_probe(struct pci_dev *, const struct pci_device_id *); +static void igb_remove(struct pci_dev *pdev); +static int igb_sw_init(struct igb_adapter *); +int igb_open(struct net_device *); +int igb_close(struct net_device *); +static void igb_configure(struct igb_adapter *); +static void igb_configure_tx(struct igb_adapter *); +static void igb_configure_rx(struct igb_adapter *); +static void igb_clean_all_tx_rings(struct igb_adapter *); +static void igb_clean_all_rx_rings(struct igb_adapter *); +static void igb_clean_tx_ring(struct igb_ring *); +static void igb_clean_rx_ring(struct igb_ring *); +static void igb_set_rx_mode(struct net_device *); +static void igb_update_phy_info(struct timer_list *); +static void igb_watchdog(struct timer_list *); +static void igb_watchdog_task(struct work_struct *); +static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); +static void igb_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); +static int igb_change_mtu(struct net_device *, int); +static int igb_set_mac(struct net_device *, void *); +static void igb_set_uta(struct igb_adapter *adapter, bool set); +static irqreturn_t igb_intr(int irq, void *); +static irqreturn_t igb_intr_msi(int irq, void *); +static irqreturn_t igb_msix_other(int irq, void *); +static irqreturn_t igb_msix_ring(int irq, void *); +#ifdef CONFIG_IGB_DCA +static void igb_update_dca(struct igb_q_vector *); +static void igb_setup_dca(struct igb_adapter *); +#endif /* CONFIG_IGB_DCA */ +static int igb_poll(struct napi_struct *, int); +static bool igb_clean_tx_irq(struct igb_q_vector *, int); +static int igb_clean_rx_irq(struct igb_q_vector *, int); +static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); +static void igb_tx_timeout(struct net_device *, unsigned int txqueue); +static void igb_reset_task(struct work_struct *); +static void igb_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16); +static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16); +static void igb_restore_vlan(struct igb_adapter *); +static void igb_rar_set_index(struct igb_adapter *, u32); +static void igb_ping_all_vfs(struct igb_adapter *); +static void igb_msg_task(struct igb_adapter *); +static void igb_vmm_control(struct igb_adapter *); +static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); +static void igb_flush_mac_table(struct igb_adapter *); +static int igb_available_rars(struct igb_adapter *, u8); +static void igb_set_default_mac_filter(struct igb_adapter *); +static int igb_uc_sync(struct net_device *, const unsigned char *); +static int igb_uc_unsync(struct net_device *, const unsigned char *); +static void igb_restore_vf_multicasts(struct igb_adapter *adapter); +static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); +static int igb_ndo_set_vf_vlan(struct net_device *netdev, + int vf, u16 vlan, u8 qos, __be16 vlan_proto); +static int igb_ndo_set_vf_bw(struct net_device *, int, int, int); +static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, + bool setting); +static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, + bool setting); +static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi); +static void igb_check_vf_rate_limit(struct 
igb_adapter *); +static void igb_nfc_filter_exit(struct igb_adapter *adapter); +static void igb_nfc_filter_restore(struct igb_adapter *adapter); + +#ifdef CONFIG_PCI_IOV +static int igb_vf_configure(struct igb_adapter *adapter, int vf); +static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); +static int igb_disable_sriov(struct pci_dev *dev); +static int igb_pci_disable_sriov(struct pci_dev *dev); +#endif + +static int igb_suspend(struct device *); +static int igb_resume(struct device *); +static int igb_runtime_suspend(struct device *dev); +static int igb_runtime_resume(struct device *dev); +static int igb_runtime_idle(struct device *dev); +static const struct dev_pm_ops igb_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) + SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, + igb_runtime_idle) +}; +static void igb_shutdown(struct pci_dev *); +static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +#ifdef CONFIG_IGB_DCA +static int igb_notify_dca(struct notifier_block *, unsigned long, void *); +static struct notifier_block dca_notifier = { + .notifier_call = igb_notify_dca, + .next = NULL, + .priority = 0 +}; +#endif +#ifdef CONFIG_PCI_IOV +static unsigned int max_vfs; +module_param(max_vfs, uint, 0); +MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function"); +#endif /* CONFIG_PCI_IOV */ + +static pci_ers_result_t igb_io_error_detected(struct pci_dev *, + pci_channel_state_t); +static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); +static void igb_io_resume(struct pci_dev *); + +static const struct pci_error_handlers igb_err_handler = { + .error_detected = igb_io_error_detected, + .slot_reset = igb_io_slot_reset, + .resume = igb_io_resume, +}; + +static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); + +static struct pci_driver igb_driver = { + .name = igb_driver_name, + .id_table = igb_pci_tbl, + .probe = igb_probe, + .remove = igb_remove, +#ifdef CONFIG_PM + .driver.pm = &igb_pm_ops, +#endif + .shutdown = igb_shutdown, + .sriov_configure = igb_pci_sriov_configure, + .err_handler = &igb_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); +MODULE_LICENSE("GPL v2"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +struct igb_reg_info { + u32 ofs; + char *name; +}; + +static const struct igb_reg_info igb_reg_info_tbl[] = { + + /* General Registers */ + {E1000_CTRL, "CTRL"}, + {E1000_STATUS, "STATUS"}, + {E1000_CTRL_EXT, "CTRL_EXT"}, + + /* Interrupt Registers */ + {E1000_ICR, "ICR"}, + + /* RX Registers */ + {E1000_RCTL, "RCTL"}, + {E1000_RDLEN(0), "RDLEN"}, + {E1000_RDH(0), "RDH"}, + {E1000_RDT(0), "RDT"}, + {E1000_RXDCTL(0), "RXDCTL"}, + {E1000_RDBAL(0), "RDBAL"}, + {E1000_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {E1000_TCTL, "TCTL"}, + {E1000_TDBAL(0), "TDBAL"}, + {E1000_TDBAH(0), "TDBAH"}, + {E1000_TDLEN(0), "TDLEN"}, + {E1000_TDH(0), "TDH"}, + {E1000_TDT(0), "TDT"}, + {E1000_TXDCTL(0), "TXDCTL"}, + {E1000_TDFH, "TDFH"}, + {E1000_TDFT, "TDFT"}, + {E1000_TDFHS, "TDFHS"}, + {E1000_TDFPC, "TDFPC"}, + + /* List Terminator */ + {} +}; + +/* igb_regdump - register printout routine */ +static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) +{ + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case E1000_RDLEN(0): + for (n = 0; 
n < 4; n++) + regs[n] = rd32(E1000_RDLEN(n)); + break; + case E1000_RDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDH(n)); + break; + case E1000_RDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDT(n)); + break; + case E1000_RXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RXDCTL(n)); + break; + case E1000_RDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDBAL(n)); + break; + case E1000_RDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDBAH(n)); + break; + case E1000_TDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDBAL(n)); + break; + case E1000_TDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDBAH(n)); + break; + case E1000_TDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDLEN(n)); + break; + case E1000_TDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDH(n)); + break; + case E1000_TDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDT(n)); + break; + case E1000_TXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TXDCTL(n)); + break; + default: + pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); + pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], + regs[2], regs[3]); +} + +/* igb_dump - Print registers, Tx-rings and Rx-rings */ +static void igb_dump(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct igb_reg_info *reginfo; + struct igb_ring *tx_ring; + union e1000_adv_tx_desc *tx_desc; + struct my_u0 { __le64 a; __le64 b; } *u0; + struct igb_ring *rx_ring; + union e1000_adv_rx_desc *rx_desc; + u32 staterr; + u16 i, n; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + pr_info("Device Name state trans_start\n"); + pr_info("%-15s %016lX %016lX\n", netdev->name, + netdev->state, dev_trans_start(netdev)); + } + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl; + reginfo->name; reginfo++) { + igb_regdump(hw, reginfo); + } + + /* Print TX Ring Summary */ + if (!netdev || !netif_running(netdev)) + goto exit; + + dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + for (n = 0; n < adapter->num_tx_queues; n++) { + struct igb_tx_buffer *buffer_info; + tx_ring = adapter->tx_ring[n]; + buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 15 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + 
pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + struct igb_tx_buffer *buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, i); + buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", + i, le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + dma_unmap_len(buffer_info, len), + true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info(" %5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + struct igb_rx_buffer *buffer_info; + buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IGB_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + next_desc); + } else { + 
pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)buffer_info->dma, + next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->page) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + page_address(buffer_info->page) + + buffer_info->page_offset, + igb_rx_bufsz(rx_ring), true); + } + } + } + } + +exit: + return; +} + +/** + * igb_get_i2c_data - Reads the I2C SDA data bit + * @data: opaque pointer to adapter struct + * + * Returns the I2C data bit value + **/ +static int igb_get_i2c_data(void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + return !!(i2cctl & E1000_I2C_DATA_IN); +} + +/** + * igb_set_i2c_data - Sets the I2C data bit + * @data: pointer to hardware structure + * @state: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + **/ +static void igb_set_i2c_data(void *data, int state) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + if (state) { + i2cctl |= E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N; + } else { + i2cctl &= ~E1000_I2C_DATA_OE_N; + i2cctl &= ~E1000_I2C_DATA_OUT; + } + + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); +} + +/** + * igb_set_i2c_clk - Sets the I2C SCL clock + * @data: pointer to hardware structure + * @state: state to set clock + * + * Sets the I2C clock line to state + **/ +static void igb_set_i2c_clk(void *data, int state) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + if (state) { + i2cctl |= E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N; + } else { + i2cctl &= ~E1000_I2C_CLK_OUT; + i2cctl &= ~E1000_I2C_CLK_OE_N; + } + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); +} + +/** + * igb_get_i2c_clk - Gets the I2C SCL clock state + * @data: pointer to hardware structure + * + * Gets the I2C clock state + **/ +static int igb_get_i2c_clk(void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + return !!(i2cctl & E1000_I2C_CLK_IN); +} + +static const struct i2c_algo_bit_data igb_i2c_algo = { + .setsda = igb_set_i2c_data, + .setscl = igb_set_i2c_clk, + .getsda = igb_get_i2c_data, + .getscl = igb_get_i2c_clk, + .udelay = 5, + .timeout = 20, +}; + +/** + * igb_get_hw_dev - return device + * @hw: pointer to hardware structure + * + * used by hardware layer to print debugging information + **/ +struct net_device *igb_get_hw_dev(struct e1000_hw *hw) +{ + struct igb_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * igb_init_module - Driver Registration Routine + * + * igb_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init igb_init_module(void) +{ + int ret; + + pr_info("%s\n", igb_driver_string); + pr_info("%s\n", igb_copyright); + +#ifdef CONFIG_IGB_DCA + dca_register_notify(&dca_notifier); +#endif + ret = pci_register_driver(&igb_driver); + return ret; +} + +module_init(igb_init_module); + +/** + * igb_exit_module - Driver Exit Cleanup Routine + * + * igb_exit_module is called just before the driver is removed + * from memory. 
+ **/ +static void __exit igb_exit_module(void) +{ +#ifdef CONFIG_IGB_DCA + dca_unregister_notify(&dca_notifier); +#endif + pci_unregister_driver(&igb_driver); +} + +module_exit(igb_exit_module); + +#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) +/** + * igb_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + **/ +static void igb_cache_ring_register(struct igb_adapter *adapter) +{ + int i = 0, j = 0; + u32 rbase_offset = adapter->vfs_allocated_count; + + switch (adapter->hw.mac.type) { + case e1000_82576: + /* The queues are allocated for virtualization such that VF 0 + * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc. + * In order to avoid collision we start at the first free queue + * and continue consuming queues in the same sequence + */ + if (adapter->vfs_allocated_count) { + for (; i < adapter->rss_queues; i++) + adapter->rx_ring[i]->reg_idx = rbase_offset + + Q_IDX_82576(i); + } + fallthrough; + case e1000_82575: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + default: + for (; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = rbase_offset + i; + for (; j < adapter->num_tx_queues; j++) + adapter->tx_ring[j]->reg_idx = rbase_offset + j; + break; + } +} + +u32 igb_rd32(struct e1000_hw *hw, u32 reg) +{ + struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); + u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); + u32 value = 0; + + if (E1000_REMOVED(hw_addr)) + return ~value; + + value = readl(&hw_addr[reg]); + + /* reads should not return all F's */ + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct net_device *netdev = igb->netdev; + hw->hw_addr = NULL; + netdev_err(netdev, "PCIe link lost\n"); + WARN(pci_device_is_present(igb->pdev), + "igb: Failed to read reg 0x%x!\n", reg); + } + + return value; +} + +/** + * igb_write_ivar - configure ivar for given MSI-X vector + * @hw: pointer to the HW structure + * @msix_vector: vector number we are allocating to a given ring + * @index: row index of IVAR register to write within IVAR table + * @offset: column offset of in IVAR, should be multiple of 8 + * + * This function is intended to handle the writing of the IVAR register + * for adapters 82576 and newer. The IVAR table consists of 2 columns, + * each containing an cause allocation for an Rx and Tx ring, and a + * variable number of rows depending on the number of queues supported. + **/ +static void igb_write_ivar(struct e1000_hw *hw, int msix_vector, + int index, int offset) +{ + u32 ivar = array_rd32(E1000_IVAR0, index); + + /* clear any bits that are currently set */ + ivar &= ~((u32)0xFF << offset); + + /* write vector and valid bit */ + ivar |= (msix_vector | E1000_IVAR_VALID) << offset; + + array_wr32(E1000_IVAR0, index, ivar); +} + +#define IGB_N0_QUEUE -1 +static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + int rx_queue = IGB_N0_QUEUE; + int tx_queue = IGB_N0_QUEUE; + u32 msixbm = 0; + + if (q_vector->rx.ring) + rx_queue = q_vector->rx.ring->reg_idx; + if (q_vector->tx.ring) + tx_queue = q_vector->tx.ring->reg_idx; + + switch (hw->mac.type) { + case e1000_82575: + /* The 82575 assigns vectors using a bitmask, which matches the + * bitmask for the EICR/EIMS/EIMC registers. 
To assign one + * or more queues to a vector, we write the appropriate bits + * into the MSIXBM register for that vector. + */ + if (rx_queue > IGB_N0_QUEUE) + msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; + if (tx_queue > IGB_N0_QUEUE) + msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; + if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0) + msixbm |= E1000_EIMS_OTHER; + array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); + q_vector->eims_value = msixbm; + break; + case e1000_82576: + /* 82576 uses a table that essentially consists of 2 columns + * with 8 rows. The ordering is column-major so we use the + * lower 3 bits as the row index, and the 4th bit as the + * column offset. + */ + if (rx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + rx_queue & 0x7, + (rx_queue & 0x8) << 1); + if (tx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + tx_queue & 0x7, + ((tx_queue & 0x8) << 1) + 8); + q_vector->eims_value = BIT(msix_vector); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + /* On 82580 and newer adapters the scheme is similar to 82576 + * however instead of ordering column-major we have things + * ordered row-major. So we traverse the table by using + * bit 0 as the column offset, and the remaining bits as the + * row index. + */ + if (rx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + rx_queue >> 1, + (rx_queue & 0x1) << 4); + if (tx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + tx_queue >> 1, + ((tx_queue & 0x1) << 4) + 8); + q_vector->eims_value = BIT(msix_vector); + break; + default: + BUG(); + break; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + + /* configure q_vector to set itr on first interrupt */ + q_vector->set_itr = 1; +} + +/** + * igb_configure_msix - Configure MSI-X hardware + * @adapter: board private structure to initialize + * + * igb_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + **/ +static void igb_configure_msix(struct igb_adapter *adapter) +{ + u32 tmp; + int i, vector = 0; + struct e1000_hw *hw = &adapter->hw; + + adapter->eims_enable_mask = 0; + + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case e1000_82575: + tmp = rd32(E1000_CTRL_EXT); + /* enable MSI-X PBA support*/ + tmp |= E1000_CTRL_EXT_PBA_CLR; + + /* Auto-Mask interrupts upon ICR read. */ + tmp |= E1000_CTRL_EXT_EIAME; + tmp |= E1000_CTRL_EXT_IRCA; + + wr32(E1000_CTRL_EXT, tmp); + + /* enable msix_other interrupt */ + array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); + adapter->eims_other = E1000_EIMS_OTHER; + + break; + + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. 
+ */ + wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | + E1000_GPIE_PBA | E1000_GPIE_EIAME | + E1000_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = BIT(vector); + tmp = (vector++ | E1000_IVAR_VALID) << 8; + + wr32(E1000_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) + igb_assign_vector(adapter->q_vector[i], vector++); + + wrfl(); +} + +/** + * igb_request_msix - Initialize MSI-X interrupts + * @adapter: board private structure to initialize + * + * igb_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. + **/ +static int igb_request_msix(struct igb_adapter *adapter) +{ + unsigned int num_q_vectors = adapter->num_q_vectors; + struct net_device *netdev = adapter->netdev; + int i, err = 0, vector = 0, free_vector = 0; + + if (adapter->ecdev) { + return 0; + } + + err = request_irq(adapter->msix_entries[vector].vector, + igb_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + + vector++; + + q_vector->itr_register = adapter->io_addr + E1000_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else if (q_vector->tx.ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx.ring->queue_index); + else if (q_vector->rx.ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else + sprintf(q_vector->name, "%s-unused", netdev->name); + + err = request_irq(adapter->msix_entries[vector].vector, + igb_msix_ring, 0, q_vector->name, + q_vector); + if (err) + goto err_free; + } + + igb_configure_msix(adapter); + return 0; + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: + return err; +} + +/** + * igb_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + **/ +static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + adapter->q_vector[v_idx] = NULL; + + /* igb_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + if (q_vector) + kfree_rcu(q_vector, rcu); +} + +/** + * igb_reset_q_vector - Reset config for interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be reset + * + * If NAPI is enabled it will delete any references to the + * NAPI struct. This is preparation for igb_free_q_vector. + **/ +static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + /* Coming from igb_set_interrupt_capability, the vectors are not yet + * allocated. 
So, q_vector is NULL so we should stop here. + */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; + + if (!adapter->ecdev) + netif_napi_del(&q_vector->napi); + +} + +static void igb_reset_interrupt_capability(struct igb_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) + pci_disable_msix(adapter->pdev); + else if (adapter->flags & IGB_FLAG_HAS_MSI) + pci_disable_msi(adapter->pdev); + + while (v_idx--) + igb_reset_q_vector(adapter, v_idx); +} + +/** + * igb_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void igb_free_q_vectors(struct igb_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) { + igb_reset_q_vector(adapter, v_idx); + igb_free_q_vector(adapter, v_idx); + } +} + +/** + * igb_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: board private structure to initialize + * + * This function resets the device so that it has 0 Rx queues, Tx queues, and + * MSI-X interrupts allocated. + */ +static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) +{ + igb_free_q_vectors(adapter); + igb_reset_interrupt_capability(adapter); +} + +/** + * igb_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: board private structure to initialize + * @msix: boolean value of MSIX capability + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) +{ + int err; + int numvecs, i; + + if (!msix) + goto msi_only; + adapter->flags |= IGB_FLAG_HAS_MSIX; + + /* Number of supported queues. 
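+ * One Rx queue is used per configured RSS queue; when SR-IOV VFs are
+ * allocated the PF keeps a single Tx queue, otherwise the Tx count
+ * mirrors the RSS queue count (see the assignments below).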
*/ + adapter->num_rx_queues = adapter->rss_queues; + if (adapter->vfs_allocated_count) + adapter->num_tx_queues = 1; + else + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every Rx queue */ + numvecs = adapter->num_rx_queues; + + /* if Tx handler is separate add 1 for every Tx queue */ + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) + return; + + igb_reset_interrupt_capability(adapter); + + /* If we can't do MSI-X, try MSI */ +msi_only: + adapter->flags &= ~IGB_FLAG_HAS_MSIX; +#ifdef CONFIG_PCI_IOV + /* disable SR-IOV for non MSI-X configurations */ + if (adapter->vf_data) { + struct e1000_hw *hw = &adapter->hw; + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); + msleep(500); + + kfree(adapter->vf_mac_list); + adapter->vf_mac_list = NULL; + kfree(adapter->vf_data); + adapter->vf_data = NULL; + wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + wrfl(); + msleep(100); + dev_info(&adapter->pdev->dev, "IOV Disabled\n"); + } +#endif + adapter->vfs_allocated_count = 0; + adapter->rss_queues = 1; + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGB_FLAG_HAS_MSI; +} + +static void igb_add_ring(struct igb_ring *ring, + struct igb_ring_container *head) +{ + head->ring = ring; + head->count++; +} + +/** + * igb_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
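+ * (Each q_vector carries at most one Tx and one Rx ring, so ring_count
+ * below is 0, 1 or 2 and the structure is sized with struct_size()
+ * accordingly.)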
+ **/ +static int igb_alloc_q_vector(struct igb_adapter *adapter, + int v_count, int v_idx, + int txr_count, int txr_idx, + int rxr_count, int rxr_idx) +{ + struct igb_q_vector *q_vector; + struct igb_ring *ring; + int ring_count; + size_t size; + + /* igb only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + size = struct_size(q_vector, ring, ring_count); + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; + if (!q_vector) { + q_vector = kzalloc(size, GFP_KERNEL); + } else if (size > ksize(q_vector)) { + struct igb_q_vector *new_q_vector; + + new_q_vector = kzalloc(size, GFP_KERNEL); + if (new_q_vector) + kfree_rcu(q_vector, rcu); + q_vector = new_q_vector; + } else { + memset(q_vector, 0, size); + } + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + if (!adapter->ecdev) { + netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll); + } + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ + q_vector->itr_register = adapter->io_addr + E1000_EITR(0); + q_vector->itr_val = IGB_START_ITR; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* intialize ITR */ + if (rxr_count) { + /* rx or rx/tx vector */ + if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) + q_vector->itr_val = adapter->rx_itr_setting; + } else { + /* tx only vector */ + if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) + q_vector->itr_val = adapter->tx_itr_setting; + } + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + igb_add_ring(ring, &q_vector->tx); + + /* For 82575, context index must be unique per ring. */ + if (adapter->hw.mac.type == e1000_82575) + set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + ring->cbs_enable = false; + ring->idleslope = 0; + ring->sendslope = 0; + ring->hicredit = 0; + ring->locredit = 0; + + u64_stats_init(&ring->tx_syncp); + u64_stats_init(&ring->tx_syncp2); + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + igb_add_ring(ring, &q_vector->rx); + + /* set flag indicating ring supports SCTP checksum offload */ + if (adapter->hw.mac.type >= e1000_82576) + set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); + + /* On i350, i354, i210, and i211, loopback VLAN packets + * have the tag byte-swapped. 
+ */ + if (adapter->hw.mac.type >= e1000_i350) + set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + u64_stats_init(&ring->rx_syncp); + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; +} + + +/** + * igb_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int igb_alloc_q_vectors(struct igb_adapter *adapter) +{ + int q_vectors = adapter->num_q_vectors; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igb_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * @adapter: board private structure to initialize + * @msix: boolean value of MSIX capability + * + * This function initializes the interrupts and allocates all of the queues. + **/ +static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) +{ + struct pci_dev *pdev = adapter->pdev; + int err; + + igb_set_interrupt_capability(adapter, msix); + + err = igb_alloc_q_vectors(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + igb_cache_ring_register(adapter); + + return 0; + +err_alloc_q_vectors: + igb_reset_interrupt_capability(adapter); + return err; +} + +/** + * igb_request_irq - initialize interrupts + * @adapter: board private structure to initialize + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
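When there are fewer vectors than rings, igb_alloc_q_vectors() spreads the remaining rings over the remaining vectors with DIV_ROUND_UP, which is what produces the usual one-Tx-plus-one-Rx pairing per vector. A small standalone sketch of that distribution (illustrative only; igb itself allows at most one Tx and one Rx ring per vector):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static void spread(int q_vectors, int rx, int tx)
    {
            int v = 0;

            if (q_vectors >= rx + tx)               /* enough vectors: Rx rings first, one each */
                    for (; rx; v++, rx--)
                            printf("vector %d: 1 Rx\n", v);

            for (; v < q_vectors; v++) {
                    int rqpv = DIV_ROUND_UP(rx, q_vectors - v);
                    int tqpv = DIV_ROUND_UP(tx, q_vectors - v);

                    printf("vector %d: %d Rx, %d Tx\n", v, rqpv, tqpv);
                    rx -= rqpv;
                    tx -= tqpv;
            }
    }

    int main(void)
    {
            spread(4, 4, 4);        /* paired: every vector handles 1 Rx + 1 Tx */
            spread(8, 4, 4);        /* unpaired: 4 Rx-only vectors, then 4 Tx-only */
            return 0;
    }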
+ **/ +static int igb_request_irq(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + err = igb_request_msix(adapter); + if (!err) + goto request_done; + /* fall back to MSI */ + igb_free_all_tx_resources(adapter); + igb_free_all_rx_resources(adapter); + + igb_clear_interrupt_scheme(adapter); + err = igb_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; + + igb_setup_all_tx_resources(adapter); + igb_setup_all_rx_resources(adapter); + igb_configure(adapter); + } + + igb_assign_vector(adapter->q_vector[0], 0); + + if (!adapter->ecdev && adapter->flags & IGB_FLAG_HAS_MSI) { + err = request_irq(pdev->irq, igb_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; + + /* fall back to legacy interrupts */ + igb_reset_interrupt_capability(adapter); + adapter->flags &= ~IGB_FLAG_HAS_MSI; + } + + if (!adapter->ecdev) { + err = request_irq(pdev->irq, igb_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + dev_err(&pdev->dev, "Error %d getting interrupt\n", + err); + } + +request_done: + return err; +} + +static void igb_free_irq(struct igb_adapter *adapter) +{ + if (adapter->ecdev) + return; + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) + free_irq(adapter->msix_entries[vector++].vector, + adapter->q_vector[i]); + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * igb_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void igb_irq_disable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* we need to be careful when disabling interrupts. 
The VFs are also + * mapped into these registers and so clearing the bits can cause + * issues on the VF drivers so we only need to clear what we set + */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + u32 regval = rd32(E1000_EIAM); + + wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); + wr32(E1000_EIMC, adapter->eims_enable_mask); + regval = rd32(E1000_EIAC); + wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); + } + + wr32(E1000_IAM, 0); + wr32(E1000_IMC, ~0); + wrfl(); + + if (adapter->ecdev) { + /* skip synchronizing IRQs */ + return; + } + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + int i; + + for (i = 0; i < adapter->num_q_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * igb_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void igb_irq_enable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->ecdev) + return; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA; + u32 regval = rd32(E1000_EIAC); + + wr32(E1000_EIAC, regval | adapter->eims_enable_mask); + regval = rd32(E1000_EIAM); + wr32(E1000_EIAM, regval | adapter->eims_enable_mask); + wr32(E1000_EIMS, adapter->eims_enable_mask); + if (adapter->vfs_allocated_count) { + wr32(E1000_MBVFIMR, 0xFF); + ims |= E1000_IMS_VMMB; + } + wr32(E1000_IMS, ims); + } else { + wr32(E1000_IMS, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + wr32(E1000_IAM, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + } +} + +static void igb_update_mng_vlan(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 pf_id = adapter->vfs_allocated_count; + u16 vid = adapter->hw.mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + /* add VID to filter table */ + igb_vfta_set(hw, vid, pf_id, true, true); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; + } + + if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) { + /* remove VID from filter table */ + igb_vfta_set(hw, vid, pf_id, false, true); + } +} + +/** + * igb_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + **/ +static void igb_release_hw_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware take over control of h/w */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); +} + +/** + * igb_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. 
+ **/ +static void igb_get_hw_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); +} + +static void enable_fqtss(struct igb_adapter *adapter, bool enable) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + + WARN_ON(hw->mac.type != e1000_i210); + + if (enable) + adapter->flags |= IGB_FLAG_FQTSS; + else + adapter->flags &= ~IGB_FLAG_FQTSS; + + if (netif_running(netdev)) + schedule_work(&adapter->reset_task); +} + +static bool is_fqtss_enabled(struct igb_adapter *adapter) +{ + return (adapter->flags & IGB_FLAG_FQTSS) ? true : false; +} + +static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue, + enum tx_queue_prio prio) +{ + u32 val; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 4); + + val = rd32(E1000_I210_TXDCTL(queue)); + + if (prio == TX_QUEUE_PRIO_HIGH) + val |= E1000_TXDCTL_PRIORITY; + else + val &= ~E1000_TXDCTL_PRIORITY; + + wr32(E1000_I210_TXDCTL(queue), val); +} + +static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode) +{ + u32 val; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 1); + + val = rd32(E1000_I210_TQAVCC(queue)); + + if (mode == QUEUE_MODE_STREAM_RESERVATION) + val |= E1000_TQAVCC_QUEUEMODE; + else + val &= ~E1000_TQAVCC_QUEUEMODE; + + wr32(E1000_I210_TQAVCC(queue), val); +} + +static bool is_any_cbs_enabled(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]->cbs_enable) + return true; + } + + return false; +} + +static bool is_any_txtime_enabled(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]->launchtime_enable) + return true; + } + + return false; +} + +/** + * igb_config_tx_modes - Configure "Qav Tx mode" features on igb + * @adapter: pointer to adapter struct + * @queue: queue number + * + * Configure CBS and Launchtime for a given hardware queue. + * Parameters are retrieved from the correct Tx ring, so + * igb_save_cbs_params() and igb_save_txtime_params() should be used + * for setting those correctly prior to this function being called. + **/ +static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct igb_ring *ring; + u32 tqavcc, tqavctrl; + u16 value; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 1); + ring = adapter->tx_ring[queue]; + + /* If any of the Qav features is enabled, configure queues as SR and + * with HIGH PRIO. If none is, then configure them with LOW PRIO and + * as SP. + */ + if (ring->cbs_enable || ring->launchtime_enable) { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); + set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); + } else { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW); + set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY); + } + + /* If CBS is enabled, set DataTranARB and config its parameters. */ + if (ring->cbs_enable || queue == 0) { + /* i210 does not allow the queue 0 to be in the Strict + * Priority mode while the Qav mode is enabled, so, + * instead of disabling strict priority mode, we give + * queue 0 the maximum of credits possible. 
+ * + * See section 8.12.19 of the i210 datasheet, "Note: + * Queue0 QueueMode must be set to 1b when + * TransmitMode is set to Qav." + */ + if (queue == 0 && !ring->cbs_enable) { + /* max "linkspeed" idleslope in kbps */ + ring->idleslope = 1000000; + ring->hicredit = ETH_FRAME_LEN; + } + + /* Always set data transfer arbitration to credit-based + * shaper algorithm on TQAVCTRL if CBS is enabled for any of + * the queues. + */ + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl |= E1000_TQAVCTRL_DATATRANARB; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + + /* According to i210 datasheet section 7.2.7.7, we should set + * the 'idleSlope' field from TQAVCC register following the + * equation: + * + * For 100 Mbps link speed: + * + * value = BW * 0x7735 * 0.2 (E1) + * + * For 1000Mbps link speed: + * + * value = BW * 0x7735 * 2 (E2) + * + * E1 and E2 can be merged into one equation as shown below. + * Note that 'link-speed' is in Mbps. + * + * value = BW * 0x7735 * 2 * link-speed + * -------------- (E3) + * 1000 + * + * 'BW' is the percentage bandwidth out of full link speed + * which can be found with the following equation. Note that + * idleSlope here is the parameter from this function which + * is in kbps. + * + * BW = idleSlope + * ----------------- (E4) + * link-speed * 1000 + * + * That said, we can come up with a generic equation to + * calculate the value we should set it TQAVCC register by + * replacing 'BW' in E3 by E4. The resulting equation is: + * + * value = idleSlope * 0x7735 * 2 * link-speed + * ----------------- -------------- (E5) + * link-speed * 1000 1000 + * + * 'link-speed' is present in both sides of the fraction so + * it is canceled out. The final equation is the following: + * + * value = idleSlope * 61034 + * ----------------- (E6) + * 1000000 + * + * NOTE: For i210, given the above, we can see that idleslope + * is represented in 16.38431 kbps units by the value at + * the TQAVCC register (1Gbps / 61034), which reduces + * the granularity for idleslope increments. + * For instance, if you want to configure a 2576kbps + * idleslope, the value to be written on the register + * would have to be 157.23. If rounded down, you end + * up with less bandwidth available than originally + * required (~2572 kbps). If rounded up, you end up + * with a higher bandwidth (~2589 kbps). Below the + * approach we take is to always round up the + * calculated value, so the resulting bandwidth might + * be slightly higher for some configurations. + */ + value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000); + + tqavcc = rd32(E1000_I210_TQAVCC(queue)); + tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; + tqavcc |= value; + wr32(E1000_I210_TQAVCC(queue), tqavcc); + + wr32(E1000_I210_TQAVHC(queue), + 0x80000000 + ring->hicredit * 0x7735); + } else { + + /* Set idleSlope to zero. */ + tqavcc = rd32(E1000_I210_TQAVCC(queue)); + tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; + wr32(E1000_I210_TQAVCC(queue), tqavcc); + + /* Set hiCredit to zero. */ + wr32(E1000_I210_TQAVHC(queue), 0); + + /* If CBS is not enabled for any queues anymore, then return to + * the default state of Data Transmission Arbitration on + * TQAVCTRL. + */ + if (!is_any_cbs_enabled(adapter)) { + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } + } + + /* If LaunchTime is enabled, set DataTranTIM. */ + if (ring->launchtime_enable) { + /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled + * for any of the SR queues, and configure fetchtime delta. 
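A worked instance of equation E6 above, using the 2576 kbps case mentioned in the comment (standalone and illustrative only):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long idleslope = 2576;    /* requested idleSlope in kbps */
            unsigned long long value;

            /* DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000) */
            value = (idleslope * 61034ULL + 1000000 - 1) / 1000000;        /* = 158 */

            /* Back-converted bandwidth: 158 * 1000000 / 61034 ~= 2588 kbps with
             * integer division, i.e. slightly above the 2576 kbps request,
             * because the register value is always rounded up. */
            printf("TQAVCC idleSlope field = %llu\n", value);
            printf("effective ~%llu kbps\n", value * 1000000ULL / 61034ULL);
            return 0;
    }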
+ * XXX NOTE: + * - LaunchTime will be enabled for all SR queues. + * - A fixed offset can be added relative to the launch + * time of all packets if configured at reg LAUNCH_OS0. + * We are keeping it as 0 for now (default value). + */ + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl |= E1000_TQAVCTRL_DATATRANTIM | + E1000_TQAVCTRL_FETCHTIME_DELTA; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } else { + /* If Launchtime is not enabled for any SR queues anymore, + * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta, + * effectively disabling Launchtime. + */ + if (!is_any_txtime_enabled(adapter)) { + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM; + tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } + } + + /* XXX: In i210 controller the sendSlope and loCredit parameters from + * CBS are not configurable by software so we don't do any 'controller + * configuration' in respect to these parameters. + */ + + netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n", + ring->cbs_enable ? "enabled" : "disabled", + ring->launchtime_enable ? "enabled" : "disabled", + queue, + ring->idleslope, ring->sendslope, + ring->hicredit, ring->locredit); +} + +static int igb_save_txtime_params(struct igb_adapter *adapter, int queue, + bool enable) +{ + struct igb_ring *ring; + + if (queue < 0 || queue > adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + ring->launchtime_enable = enable; + + return 0; +} + +static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + struct igb_ring *ring; + + if (queue < 0 || queue > adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +/** + * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable + * @adapter: pointer to adapter struct + * + * Configure TQAVCTRL register switching the controller's Tx mode + * if FQTSS mode is enabled or disabled. Additionally, will issue + * a call to igb_config_tx_modes() per queue so any previously saved + * Tx parameters are applied. + **/ +static void igb_setup_tx_mode(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 val; + + /* Only i210 controller supports changing the transmission mode. */ + if (hw->mac.type != e1000_i210) + return; + + if (is_fqtss_enabled(adapter)) { + int i, max_queue; + + /* Configure TQAVCTRL register: set transmit mode to 'Qav', + * set data fetch arbitration to 'round robin', set SP_WAIT_SR + * so SP queues wait for SR ones. + */ + val = rd32(E1000_I210_TQAVCTRL); + val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR; + val &= ~E1000_TQAVCTRL_DATAFETCHARB; + wr32(E1000_I210_TQAVCTRL, val); + + /* Configure Tx and Rx packet buffers sizes as described in + * i210 datasheet section 7.2.7.7. 
+ */ + val = rd32(E1000_TXPBS); + val &= ~I210_TXPBSIZE_MASK; + val |= I210_TXPBSIZE_PB0_6KB | I210_TXPBSIZE_PB1_6KB | + I210_TXPBSIZE_PB2_6KB | I210_TXPBSIZE_PB3_6KB; + wr32(E1000_TXPBS, val); + + val = rd32(E1000_RXPBS); + val &= ~I210_RXPBSIZE_MASK; + val |= I210_RXPBSIZE_PB_30KB; + wr32(E1000_RXPBS, val); + + /* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ + * register should not exceed the buffer size programmed in + * TXPBS. The smallest buffer size programmed in TXPBS is 4kB + * so according to the datasheet we should set MAX_TPKT_SIZE to + * 4kB / 64. + * + * However, when we do so, no frame from queue 2 and 3 are + * transmitted. It seems the MAX_TPKT_SIZE should not be great + * or _equal_ to the buffer size programmed in TXPBS. For this + * reason, we set MAX_ TPKT_SIZE to (4kB - 1) / 64. + */ + val = (4096 - 1) / 64; + wr32(E1000_I210_DTXMXPKTSZ, val); + + /* Since FQTSS mode is enabled, apply any CBS configuration + * previously set. If no previous CBS configuration has been + * done, then the initial configuration is applied, which means + * CBS is disabled. + */ + max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ? + adapter->num_tx_queues : I210_SR_QUEUES_NUM; + + for (i = 0; i < max_queue; i++) { + igb_config_tx_modes(adapter, i); + } + } else { + wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); + wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); + wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT); + + val = rd32(E1000_I210_TQAVCTRL); + /* According to Section 8.12.21, the other flags we've set when + * enabling FQTSS are not relevant when disabling FQTSS so we + * don't set they here. + */ + val &= ~E1000_TQAVCTRL_XMIT_MODE; + wr32(E1000_I210_TQAVCTRL, val); + } + + netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ? 
+ "enabled" : "disabled"); +} + +/** + * igb_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void igb_configure(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + igb_get_hw_control(adapter); + igb_set_rx_mode(netdev); + igb_setup_tx_mode(adapter); + + igb_restore_vlan(adapter); + + igb_setup_tctl(adapter); + igb_setup_mrqc(adapter); + igb_setup_rctl(adapter); + + igb_nfc_filter_restore(adapter); + igb_configure_tx(adapter); + igb_configure_rx(adapter); + + igb_rx_fifo_flush_82575(&adapter->hw); + + /* call igb_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); + } +} + +/** + * igb_power_up_link - Power up the phy/serdes link + * @adapter: address of board private structure + **/ +void igb_power_up_link(struct igb_adapter *adapter) +{ + igb_reset_phy(&adapter->hw); + + if (adapter->hw.phy.media_type == e1000_media_type_copper) + igb_power_up_phy_copper(&adapter->hw); + else + igb_power_up_serdes_link_82575(&adapter->hw); + + igb_setup_link(&adapter->hw); +} + +/** + * igb_power_down_link - Power down the phy/serdes link + * @adapter: address of board private structure + */ +static void igb_power_down_link(struct igb_adapter *adapter) +{ + if (adapter->hw.phy.media_type == e1000_media_type_copper) + igb_power_down_phy_copper_82575(&adapter->hw); + else + igb_shutdown_serdes_link_82575(&adapter->hw); +} + +/** + * igb_check_swap_media - Detect and switch function for Media Auto Sense + * @adapter: address of the board private structure + **/ +static void igb_check_swap_media(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext, connsw; + bool swap_now = false; + + ctrl_ext = rd32(E1000_CTRL_EXT); + connsw = rd32(E1000_CONNSW); + + /* need to live swap if current media is copper and we have fiber/serdes + * to go to. 
+ */ + + if ((hw->phy.media_type == e1000_media_type_copper) && + (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) { + swap_now = true; + } else if ((hw->phy.media_type != e1000_media_type_copper) && + !(connsw & E1000_CONNSW_SERDESD)) { + /* copper signal takes time to appear */ + if (adapter->copper_tries < 4) { + adapter->copper_tries++; + connsw |= E1000_CONNSW_AUTOSENSE_CONF; + wr32(E1000_CONNSW, connsw); + return; + } else { + adapter->copper_tries = 0; + if ((connsw & E1000_CONNSW_PHYSD) && + (!(connsw & E1000_CONNSW_PHY_PDN))) { + swap_now = true; + connsw &= ~E1000_CONNSW_AUTOSENSE_CONF; + wr32(E1000_CONNSW, connsw); + } + } + } + + if (!swap_now) + return; + + switch (hw->phy.media_type) { + case e1000_media_type_copper: + netdev_info(adapter->netdev, + "MAS: changing media to fiber/serdes\n"); + ctrl_ext |= + E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + adapter->copper_tries = 0; + break; + case e1000_media_type_internal_serdes: + case e1000_media_type_fiber: + netdev_info(adapter->netdev, + "MAS: changing media to copper\n"); + ctrl_ext &= + ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + break; + default: + /* shouldn't get here during regular operation */ + netdev_err(adapter->netdev, + "AMS: Invalid media type found, returning\n"); + break; + } + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +/** + * igb_up - Open the interface and prepare it to handle traffic + * @adapter: board private structure + **/ +int igb_up(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + /* hardware has been reset, we need to reload some things */ + igb_configure(adapter); + + clear_bit(__IGB_DOWN, &adapter->state); + + if (!adapter->ecdev) { + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&(adapter->q_vector[i]->napi)); + } + + if (adapter->flags & IGB_FLAG_HAS_MSIX) + igb_configure_msix(adapter); + else + igb_assign_vector(adapter->q_vector[0], 0); + + /* Clear any pending interrupts. */ + rd32(E1000_TSICR); + rd32(E1000_ICR); + igb_irq_enable(adapter); + + /* notify VFs that reset has been completed */ + if (adapter->vfs_allocated_count) { + u32 reg_data = rd32(E1000_CTRL_EXT); + + reg_data |= E1000_CTRL_EXT_PFRSTD; + wr32(E1000_CTRL_EXT, reg_data); + } + + if (!adapter->ecdev) { + netif_tx_start_all_queues(adapter->netdev); + + /* start the watchdog. 
*/ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + } + + if ((adapter->flags & IGB_FLAG_EEE) && + (!hw->dev_spec._82575.eee_disable)) + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; + + return 0; +} + +void igb_down(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tctl, rctl; + int i; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer + */ + set_bit(__IGB_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rctl = rd32(E1000_RCTL); + wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + igb_nfc_filter_exit(adapter); + + if (!adapter->ecdev) { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + + /* disable transmits in the hardware */ + tctl = rd32(E1000_TCTL); + tctl &= ~E1000_TCTL_EN; + wr32(E1000_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 11000); + + igb_irq_disable(adapter); + + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + + for (i = 0; i < adapter->num_q_vectors; i++) { + if (!adapter->ecdev && adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } + } + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + /* record the stats before reset*/ + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + igb_reset(adapter); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; + + igb_clean_all_tx_rings(adapter); + igb_clean_all_rx_rings(adapter); +#ifdef CONFIG_IGB_DCA + + /* since we reset the hardware DCA settings were cleared */ + igb_setup_dca(adapter); +#endif +} + +void igb_reinit_locked(struct igb_adapter *adapter) +{ + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + igb_down(adapter); + igb_up(adapter); + clear_bit(__IGB_RESETTING, &adapter->state); +} + +/** igb_enable_mas - Media Autosense re-enable after swap + * + * @adapter: adapter struct + **/ +static void igb_enable_mas(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 connsw = rd32(E1000_CONNSW); + + /* configure for SerDes media detect */ + if ((hw->phy.media_type == e1000_media_type_copper) && + (!(connsw & E1000_CONNSW_SERDESD))) { + connsw |= E1000_CONNSW_ENRGSRC; + connsw |= E1000_CONNSW_AUTOSENSE_EN; + wr32(E1000_CONNSW, connsw); + wrfl(); + } +} + +#ifdef CONFIG_IGB_HWMON +/** + * igb_set_i2c_bb - Init I2C interface + * @hw: pointer to hardware structure + **/ +static void igb_set_i2c_bb(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 i2cctl; + + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_I2C_ENA; + wr32(E1000_CTRL_EXT, ctrl_ext); + wrfl(); + + i2cctl = rd32(E1000_I2CPARAMS); + i2cctl |= E1000_I2CBB_EN + | E1000_I2C_CLK_OE_N + | E1000_I2C_DATA_OE_N; + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); +} +#endif + +void igb_reset(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_fc_info *fc = &hw->fc; + u32 pba, hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. 
+ */ + switch (mac->type) { + case e1000_i350: + case e1000_i354: + case e1000_82580: + pba = rd32(E1000_RXPBS); + pba = igb_rxpbs_adjust_82580(pba); + break; + case e1000_82576: + pba = rd32(E1000_RXPBS); + pba &= E1000_RXPBS_SIZE_MASK_82576; + break; + case e1000_82575: + case e1000_i210: + case e1000_i211: + default: + pba = E1000_PBA_34K; + break; + } + + if (mac->type == e1000_82575) { + u32 min_rx_space, min_tx_space, needed_tx_space; + + /* write Rx PBA so that hardware can report correct Tx PBA */ + wr32(E1000_PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024); + + /* The Tx FIFO also stores 16 bytes of information about the Tx + * but don't include Ethernet FCS because hardware appends it. + * We only need to round down to the nearest 512 byte block + * count since the value we care about is 2 frames, not 1. + */ + min_tx_space = adapter->max_frame_size; + min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN; + min_tx_space = DIV_ROUND_UP(min_tx_space, 512); + + /* upper 16 bits has Tx packet buffer allocation size in KB */ + needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16); + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation. + */ + if (needed_tx_space < pba) { + pba -= needed_tx_space; + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + + /* adjust PBA for jumbo frames */ + wr32(E1000_PBA, pba); + } + + /* flow control settings + * The high water mark must be low enough to fit one full frame + * after transmitting the pause frame. As such we must have enough + * space to allow for us to complete our current transmit and then + * receive the frame that is in progress from the link partner. 
+ * Set it to: + * - the full Rx FIFO size minus one full Tx plus one full Rx frame + */ + hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); + + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + fc->pause_time = 0xFFFF; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + /* disable receive for all VFs and wait one second */ + if (adapter->vfs_allocated_count) { + int i; + + for (i = 0 ; i < adapter->vfs_allocated_count; i++) + adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; + + /* ping all the active vfs to let them know we are going down */ + igb_ping_all_vfs(adapter); + + /* disable transmits and receives */ + wr32(E1000_VFRE, 0); + wr32(E1000_VFTE, 0); + } + + /* Allow time for pending master requests to run */ + hw->mac.ops.reset_hw(hw); + wr32(E1000_WUC, 0); + + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + /* need to resetup here after media swap */ + adapter->ei.get_invariants(hw); + adapter->flags &= ~IGB_FLAG_MEDIA_RESET; + } + if ((mac->type == e1000_82575 || mac->type == e1000_i350) && + (adapter->flags & IGB_FLAG_MAS_ENABLE)) { + igb_enable_mas(adapter); + } + if (hw->mac.ops.init_hw(hw)) + dev_err(&pdev->dev, "Hardware Error\n"); + + /* RAR registers were cleared during init_hw, clear mac table */ + igb_flush_mac_table(adapter); + __dev_uc_unsync(adapter->netdev, NULL); + + /* Recover default RAR entry */ + igb_set_default_mac_filter(adapter); + + /* Flow control settings reset on hardware reset, so guarantee flow + * control is off when forcing speed. + */ + if (!hw->mac.autoneg) + igb_force_mac_fc(hw); + + igb_init_dmac(adapter, pba); +#ifdef CONFIG_IGB_HWMON + /* Re-initialize the thermal sensor on i350 devices. */ + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (mac->type == e1000_i350 && hw->bus.func == 0) { + /* If present, re-initialize the external thermal sensor + * interface. + */ + if (adapter->ets) + igb_set_i2c_bb(hw); + mac->ops.init_thermal_sensor_thresh(hw); + } + } +#endif + /* Re-establish EEE setting */ + if (hw->phy.media_type == e1000_media_type_copper) { + switch (mac->type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + igb_set_eee_i350(hw, true, true); + break; + case e1000_i354: + igb_set_eee_i354(hw, true, true); + break; + default: + break; + } + } + if (!adapter->ecdev && !netif_running(adapter->netdev)) + igb_power_down_link(adapter); + + igb_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); + + /* Re-enable PTP, where applicable. */ + if (adapter->ptp_flags & IGB_PTP_ENABLED) + igb_ptp_reset(adapter); + + igb_get_phy_info(hw); +} + +static netdev_features_t igb_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. 
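As a worked example of the high/low water calculation in igb_reset(): with a 34 KB Rx packet buffer (E1000_PBA_34K) on a default-MTU port, and assuming a 1522-byte max frame and a 9728-byte MAX_JUMBO_FRAME_SIZE (values taken from mainline igb, not from this patch), the thresholds come out as follows (standalone sketch, illustrative only):

    #include <stdio.h>

    int main(void)
    {
            unsigned int pba = 34;                  /* Rx packet buffer in KB */
            unsigned int max_frame_size = 1522;     /* 1500-byte MTU plus L2 overhead */
            unsigned int max_jumbo = 9728;          /* assumed MAX_JUMBO_FRAME_SIZE */
            unsigned int hwm, high_water, low_water;

            hwm = (pba << 10) - (max_frame_size + max_jumbo);       /* 23566 bytes */
            high_water = hwm & 0xFFFFFFF0;                          /* 23552, 16-byte granularity */
            low_water = high_water - 16;                            /* 23536 */

            printf("high_water=%u low_water=%u\n", high_water, low_water);
            return 0;
    }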
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int igb_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct igb_adapter *adapter = netdev_priv(netdev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + igb_vlan_mode(netdev, features); + + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + if (!(features & NETIF_F_NTUPLE)) { + struct hlist_node *node2; + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + hlist_for_each_entry_safe(rule, node2, + &adapter->nfc_filter_list, nfc_node) { + igb_erase_filter(adapter, rule); + hlist_del(&rule->nfc_node); + kfree(rule); + } + spin_unlock(&adapter->nfc_lock); + adapter->nfc_filter_count = 0; + } + + netdev->features = features; + + if (netif_running(netdev)) + igb_reinit_locked(adapter); + else + igb_reset(adapter); + + return 1; +} + +static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + u16 flags, + struct netlink_ext_ack *extack) +{ + /* guarantee we can provide a unique filter for the unicast address */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + struct igb_adapter *adapter = netdev_priv(dev); + int vfn = adapter->vfs_allocated_count; + + if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn)) + return -ENOMEM; + } + + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +} + +#define IGB_MAX_MAC_HDR_LEN 127 +#define IGB_MAX_NETWORK_HDR_LEN 511 + +static netdev_features_t +igb_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_GSO_UDP_L4 | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_GSO_UDP_L4 | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + +static void igb_offload_apply(struct igb_adapter *adapter, s32 queue) +{ + if (!is_fqtss_enabled(adapter)) { + enable_fqtss(adapter, true); + return; + } + + igb_config_tx_modes(adapter, queue); + + if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter)) + enable_fqtss(adapter, false); +} + +static int igb_offload_cbs(struct igb_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct e1000_hw *hw = &adapter->hw; + int err; + + /* CBS offloading is only supported by i210 controller. */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + /* CBS offloading is only supported by queue 0 and queue 1. 
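For reference, one plausible parameter set the CBS qdisc could hand to igb_offload_cbs() when reserving roughly 20 Mbit/s on queue 0 of a 1 Gbit/s link. The numbers below follow the usual CBS arithmetic and are illustrative, not taken from this patch:

        struct tc_cbs_qopt_offload qopt = {
                .queue     = 0,
                .enable    = 1,
                .idleslope = 20000,     /* kbit/s reserved */
                .sendslope = -980000,   /* idleslope minus the 1 Gbit/s port rate */
                .hicredit  = 30,        /* ~max interfering frame * idleslope / port rate, bytes */
                .locredit  = -1470,     /* ~max frame * sendslope / port rate, bytes */
        };

igb_save_cbs_params() stores these on tx_ring[0], and igb_config_tx_modes() then programs the corresponding TQAVCC/TQAVHC values.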
*/ + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + igb_offload_apply(adapter, qopt->queue); + + return 0; +} + +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +#define VLAN_PRIO_FULL_MASK (0x07) + +static int igb_parse_cls_flower(struct igb_adapter *adapter, + struct flow_cls_offload *f, + int traffic_class, + struct igb_nfc_filter *input) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; + struct netlink_ext_ack *extack = f->common.extack; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN))) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported"); + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + if (!is_zero_ether_addr(match.mask->dst)) { + if (!is_broadcast_ether_addr(match.mask->dst)) { + NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address"); + return -EINVAL; + } + + input->filter.match_flags |= + IGB_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(input->filter.dst_addr, match.key->dst); + } + + if (!is_zero_ether_addr(match.mask->src)) { + if (!is_broadcast_ether_addr(match.mask->src)) { + NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address"); + return -EINVAL; + } + + input->filter.match_flags |= + IGB_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(input->filter.src_addr, match.key->src); + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + if (match.mask->n_proto) { + if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) { + NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter"); + return -EINVAL; + } + + input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE; + input->filter.etype = match.key->n_proto; + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_priority) { + if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) { + NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); + return -EINVAL; + } + + input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; + input->filter.vlan_tci = + (__force __be16)match.key->vlan_priority; + } + } + + input->action = traffic_class; + input->cookie = f->cookie; + + return 0; +} + +static int igb_configure_clsflower(struct igb_adapter *adapter, + struct flow_cls_offload *cls_flower) +{ + struct netlink_ext_ack *extack = cls_flower->common.extack; + struct igb_nfc_filter *filter, *f; + int err, tc; + + tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); + if (tc < 0) { + NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class"); + return -EINVAL; + } + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + err = igb_parse_cls_flower(adapter, cls_flower, tc, filter); + if (err < 0) + goto err_parse; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) { + if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { + 
err = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, + "This filter is already set in ethtool"); + goto err_locked; + } + } + + hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) { + if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { + err = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, + "This filter is already set in cls_flower"); + goto err_locked; + } + } + + err = igb_add_filter(adapter, filter); + if (err < 0) { + NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter"); + goto err_locked; + } + + hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list); + + spin_unlock(&adapter->nfc_lock); + + return 0; + +err_locked: + spin_unlock(&adapter->nfc_lock); + +err_parse: + kfree(filter); + + return err; +} + +static int igb_delete_clsflower(struct igb_adapter *adapter, + struct flow_cls_offload *cls_flower) +{ + struct igb_nfc_filter *filter; + int err; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node) + if (filter->cookie == cls_flower->cookie) + break; + + if (!filter) { + err = -ENOENT; + goto out; + } + + err = igb_erase_filter(adapter, filter); + if (err < 0) + goto out; + + hlist_del(&filter->nfc_node); + kfree(filter); + +out: + spin_unlock(&adapter->nfc_lock); + + return err; +} + +static int igb_setup_tc_cls_flower(struct igb_adapter *adapter, + struct flow_cls_offload *cls_flower) +{ + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return igb_configure_clsflower(adapter, cls_flower); + case FLOW_CLS_DESTROY: + return igb_delete_clsflower(adapter, cls_flower); + case FLOW_CLS_STATS: + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } +} + +static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct igb_adapter *adapter = cb_priv; + + if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return igb_setup_tc_cls_flower(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igb_offload_txtime(struct igb_adapter *adapter, + struct tc_etf_qopt_offload *qopt) +{ + struct e1000_hw *hw = &adapter->hw; + int err; + + /* Launchtime offloading is only supported by i210 controller. */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + /* Launchtime offloading is only supported by queues 0 and 1. 
*/ + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable); + if (err) + return err; + + igb_offload_apply(adapter, qopt->queue); + + return 0; +} + +static LIST_HEAD(igb_block_cb_list); + +static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct igb_adapter *adapter = netdev_priv(dev); + + switch (type) { + case TC_SETUP_QDISC_CBS: + return igb_offload_cbs(adapter, type_data); + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple(type_data, + &igb_block_cb_list, + igb_setup_tc_block_cb, + adapter, adapter, true); + + case TC_SETUP_QDISC_ETF: + return igb_offload_txtime(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf) +{ + int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD; + struct igb_adapter *adapter = netdev_priv(dev); + struct bpf_prog *prog = bpf->prog, *old_prog; + bool running = netif_running(dev); + bool need_reset; + + /* verify igb ring attributes are sufficient for XDP */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + + if (frame_size > igb_rx_bufsz(ring)) { + NL_SET_ERR_MSG_MOD(bpf->extack, + "The RX buffer size is too small for the frame size"); + netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n", + igb_rx_bufsz(ring), frame_size); + return -EINVAL; + } + } + + old_prog = xchg(&adapter->xdp_prog, prog); + need_reset = (!!prog != !!old_prog); + + /* device is up and bpf is added/removed, must setup the RX queues */ + if (need_reset && running) { + igb_close(dev); + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + (void)xchg(&adapter->rx_ring[i]->xdp_prog, + adapter->xdp_prog); + } + + if (old_prog) + bpf_prog_put(old_prog); + + /* bpf is just replaced, RXQ and MTU are already setup */ + if (!need_reset) + return 0; + + if (running) + igb_open(dev); + + return 0; +} + +static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct igb_adapter *adapter = netdev_priv(dev); + if (adapter->ecdev) + return -EBUSY; + + + switch (xdp->command) { + case XDP_SETUP_PROG: + return igb_xdp_setup(dev, xdp); + default: + return -EINVAL; + } +} + +static void igb_xdp_ring_update_tail(struct igb_ring *ring) +{ + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter) +{ + unsigned int r_idx = smp_processor_id(); + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct igb_ring *tx_ring; + struct netdev_queue *nq; + u32 ret; + + if (unlikely(!xdpf)) + return IGB_XDP_CONSUMED; + + /* During program transitions its possible adapter->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + tx_ring = adapter->xdp_prog ? 
igb_xdp_tx_queue_mapping(adapter) : NULL; + if (unlikely(!tx_ring)) + return IGB_XDP_CONSUMED; + + nq = txring_txq(tx_ring); + __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); + __netif_tx_unlock(nq); + + return ret; +} + +static int igb_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct igb_adapter *adapter = netdev_priv(dev); + int cpu = smp_processor_id(); + struct igb_ring *tx_ring; + struct netdev_queue *nq; + int nxmit = 0; + int i; + + if (adapter->ecdev) + return -EBUSY; + + if (unlikely(test_bit(__IGB_DOWN, &adapter->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + /* During program transitions its possible adapter->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; + if (unlikely(!tx_ring)) + return -ENXIO; + + nq = txring_txq(tx_ring); + __netif_tx_lock(nq, cpu); + + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; + + err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); + if (err != IGB_XDP_TX) + break; + nxmit++; + } + + __netif_tx_unlock(nq); + + if (unlikely(flags & XDP_XMIT_FLUSH)) + igb_xdp_ring_update_tail(tx_ring); + + return nxmit; +} + +static const struct net_device_ops igb_netdev_ops = { + .ndo_open = igb_open, + .ndo_stop = igb_close, + .ndo_start_xmit = igb_xmit_frame, + .ndo_get_stats64 = igb_get_stats64, + .ndo_set_rx_mode = igb_set_rx_mode, + .ndo_set_mac_address = igb_set_mac, + .ndo_change_mtu = igb_change_mtu, + .ndo_eth_ioctl = igb_ioctl, + .ndo_tx_timeout = igb_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, + .ndo_set_vf_mac = igb_ndo_set_vf_mac, + .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, + .ndo_set_vf_rate = igb_ndo_set_vf_bw, + .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, + .ndo_set_vf_trust = igb_ndo_set_vf_trust, + .ndo_get_vf_config = igb_ndo_get_vf_config, + .ndo_fix_features = igb_fix_features, + .ndo_set_features = igb_set_features, + .ndo_fdb_add = igb_ndo_fdb_add, + .ndo_features_check = igb_features_check, + .ndo_setup_tc = igb_setup_tc, + .ndo_bpf = igb_xdp, + .ndo_xdp_xmit = igb_xdp_xmit, +}; + +/** +* ec_poll - EtherCAT poll routine +* @netdev: net device structure +* +* This function can never fail. 
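When the port is claimed by the EtherCAT master, ec_poll() below replaces the NAPI/interrupt path: the master calls it cyclically, it refreshes the link state at most every two seconds, and it drains each queue vector's Tx and Rx rings with a fixed budget. The hookup itself is not shown in this excerpt; a rough sketch of the usual IgH device-interface pattern as it might appear later in igb_probe() (illustrative, names per the master's ecdev API):

        /* offer the netdev to the EtherCAT master; if accepted, the master
         * drives the device through ec_poll() instead of IRQs/NAPI */
        adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE);
        if (adapter->ecdev) {
                adapter->ec_watchdog_jiffies = jiffies;
                if (ecdev_open(adapter->ecdev)) {
                        ecdev_withdraw(adapter->ecdev);
                        adapter->ecdev = NULL;
                }
        }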
+* +**/ +void ec_poll(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + int budget = 64; + + if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) { + struct e1000_hw *hw = &adapter->hw; + bool link; + hw->mac.get_link_status = true; + link = igb_has_link(adapter); + ecdev_set_link(adapter->ecdev, link); + adapter->ec_watchdog_jiffies = jiffies; + } + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + if (q_vector->tx.ring) { + igb_clean_tx_irq(q_vector, budget); + } + + if (q_vector->rx.ring) { + igb_clean_rx_irq(q_vector, budget); + } + } +} + +/** + * igb_set_fw_version - Configure version string for ethtool + * @adapter: adapter struct + **/ +void igb_set_fw_version(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_fw_version fw; + + igb_get_fw_version(hw, &fw); + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (!(igb_get_flash_presence_i210(hw))) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, + fw.invm_img_type); + break; + } + fallthrough; + default: + /* if option is rom valid, display its version too */ + if (fw.or_valid) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x, %d.%d.%d", + fw.eep_major, fw.eep_minor, fw.etrack_id, + fw.or_major, fw.or_build, fw.or_patch); + /* no option rom */ + } else if (fw.etrack_id != 0X0000) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, fw.etrack_id); + } else { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d.%d", + fw.eep_major, fw.eep_minor, fw.eep_build); + } + break; + } +} + +/** + * igb_init_mas - init Media Autosense feature if enabled in the NVM + * + * @adapter: adapter struct + **/ +static void igb_init_mas(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 eeprom_data; + + hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data); + switch (hw->bus.func) { + case E1000_FUNC_0: + if (eeprom_data & IGB_MAS_ENABLE_0) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_1: + if (eeprom_data & IGB_MAS_ENABLE_1) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_2: + if (eeprom_data & IGB_MAS_ENABLE_2) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_3: + if (eeprom_data & IGB_MAS_ENABLE_3) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + default: + /* Shouldn't get here */ + netdev_err(adapter->netdev, + "MAS: Invalid port configuration, returning\n"); + break; + } +} + +/** + * igb_init_i2c - Init I2C interface + * @adapter: pointer to adapter structure + **/ +static s32 igb_init_i2c(struct igb_adapter *adapter) +{ + s32 status = 0; + + /* I2C interface supported on i350 devices */ + if (adapter->hw.mac.type != e1000_i350) + return 0; + + /* Initialize the i2c bus which is controlled by the registers. + * This bus will use the i2c_algo_bit structure that implements + * the protocol through toggling of the 4 bits in the register. 
+ */ + adapter->i2c_adap.owner = THIS_MODULE; + adapter->i2c_algo = igb_i2c_algo; + adapter->i2c_algo.data = adapter; + adapter->i2c_adap.algo_data = &adapter->i2c_algo; + adapter->i2c_adap.dev.parent = &adapter->pdev->dev; + strscpy(adapter->i2c_adap.name, "igb BB", + sizeof(adapter->i2c_adap.name)); + status = i2c_bit_add_bus(&adapter->i2c_adap); + return status; +} + +/** + * igb_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igb_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igb_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct igb_adapter *adapter; + struct e1000_hw *hw; + u16 eeprom_data = 0; + s32 ret_val; + static int global_quad_port_a; /* global quad port a indication */ + const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; + u8 part_str[E1000_PBANUM_LENGTH]; + int err; + + /* Catch broken hardware that put the wrong VF device ID in + * the PCIe SR-IOV capability. + */ + if (pdev->is_virtfn) { + WARN(1, KERN_ERR "%s (%x:%x) should not be a VF!\n", + pci_name(pdev), pdev->vendor, pdev->device); + return -EINVAL; + } + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + + err = pci_request_mem_regions(pdev, igb_driver_name); + if (err) + goto err_pci_reg; + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + pci_save_state(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), + IGB_MAX_TX_QUEUES); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + err = -EIO; + adapter->io_addr = pci_iomap(pdev, 0, 0); + if (!adapter->io_addr) + goto err_ioremap; + /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; + + netdev->netdev_ops = &igb_netdev_ops; + igb_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Copy the default MAC, PHY and NVM function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); + /* Initialize skew-specific constants */ + err = ei->get_invariants(hw); + if (err) + goto err_sw_init; + + /* setup the private structure */ + err = igb_sw_init(adapter); + if (err) + goto err_sw_init; + + igb_get_bus_info_pcie(hw); + + hw->phy.autoneg_wait_to_complete = false; + + /* Copper options */ + if (hw->phy.media_type == e1000_media_type_copper) { + hw->phy.mdix = AUTO_ALL_MODES; + 
hw->phy.disable_polarity_correction = false; + hw->phy.ms_type = e1000_ms_hw_default; + } + + if (igb_check_reset_block(hw)) + dev_info(&pdev->dev, + "PHY reset is blocked due to SOL/IDER session.\n"); + + /* features is initialized to 0 in allocation, it might have bits + * set by igb_sw_init so we should use an or instead of an + * assignment. + */ + netdev->features |= NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM; + + if (hw->mac.type >= e1000_82576) + netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4; + + if (hw->mac.type >= e1000_i350) + netdev->features |= NETIF_F_HW_TC; + +#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_RXALL; + + if (hw->mac.type >= e1000_i350) + netdev->hw_features |= NETIF_F_NTUPLE; + + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; + + /* set this bit last since it cannot be part of vlan_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* MTU range: 68 - 9216 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; + + adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ + hw->mac.ops.reset_hw(hw); + + /* make sure the NVM is good , i211/i210 parts can have special NVM + * that doesn't contain a checksum + */ + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (igb_get_flash_presence_i210(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, + "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + break; + default: + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + break; + } + + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + } + + eth_hw_addr_set(netdev, hw->mac.addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + igb_set_default_mac_filter(adapter); + + /* get firmware version for ethtool -i */ + igb_set_fw_version(adapter); + + /* configure RXPBSIZE and TXPBSIZE */ + if (hw->mac.type == e1000_i210) { + wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); + wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); + } + + timer_setup(&adapter->watchdog_timer, igb_watchdog, 0); + timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0); + + INIT_WORK(&adapter->reset_task, igb_reset_task); + INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; + hw->mac.autoneg = true; + 
hw->phy.autoneg_advertised = 0x2f; + + hw->fc.requested_mode = e1000_fc_default; + hw->fc.current_mode = e1000_fc_default; + + igb_validate_mdi_setting(hw); + + /* By default, support wake on port A */ + if (hw->bus.func == 0) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + + /* Check the NVM for wake support on non-port A ports */ + if (hw->mac.type >= e1000_82580) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &eeprom_data); + else if (hw->bus.func == 1) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + + if (eeprom_data & IGB_EEPROM_APME) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + + /* now that we have the eeprom settings, apply the special cases where + * the eeprom may be wrong or the board simply won't support wake on + * lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82575GB_QUAD_COPPER: + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + break; + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + break; + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + else + adapter->flags |= IGB_FLAG_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + default: + /* If the device can't wake, don't set software support */ + if (!device_can_wakeup(&adapter->pdev->dev)) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + } + + /* initialize the wol settings based on the eeprom settings */ + if (adapter->flags & IGB_FLAG_WOL_SUPPORTED) + adapter->wol |= E1000_WUFC_MAG; + + /* Some vendors want WoL disabled by default, but still supported */ + if ((hw->mac.type == e1000_i350) && + (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + + /* Some vendors want the ability to Use the EEPROM setting as + * enable/disable only, and not for capability + */ + if (((hw->mac.type == e1000_i350) || + (hw->mac.type == e1000_i354)) && + (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + if (hw->mac.type == e1000_i350) { + if (((pdev->subsystem_device == 0x5001) || + (pdev->subsystem_device == 0x5002)) && + (hw->bus.func == 0)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + if (pdev->subsystem_device == 0x1F52) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + } + + device_set_wakeup_enable(&adapter->pdev->dev, + adapter->flags & IGB_FLAG_WOL_SUPPORTED); + + /* reset the hardware with the new settings */ + igb_reset(adapter); + + /* Init the I2C interface */ + err = igb_init_i2c(adapter); + if (err) { + dev_err(&pdev->dev, "failed to init i2c interface\n"); + goto err_eeprom; + } + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igb_get_hw_control(adapter); + + + adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); + if (adapter->ecdev) { + err = ecdev_open(adapter->ecdev); + if (err) { + ecdev_withdraw(adapter->ecdev); + goto err_register; + } + adapter->ec_watchdog_jiffies = jiffies; + } else { + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + } + +#ifdef CONFIG_IGB_DCA + if (dca_add_requester(&pdev->dev) == 0) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; + dev_info(&pdev->dev, "DCA enabled\n"); + igb_setup_dca(adapter); + } + +#endif +#ifdef CONFIG_IGB_HWMON + /* Initialize the thermal sensor on i350 devices. */ + if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { + u16 ets_word; + + /* Read the NVM to determine if this i350 device supports an + * external thermal sensor. + */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); + if (ets_word != 0x0000 && ets_word != 0xFFFF) + adapter->ets = true; + else + adapter->ets = false; + /* Only enable I2C bit banging if an external thermal + * sensor is supported. + */ + if (adapter->ets) + igb_set_i2c_bb(hw); + hw->mac.ops.init_thermal_sensor_thresh(hw); + if (igb_sysfs_init(adapter)) + dev_err(&pdev->dev, + "failed to allocate sysfs resources\n"); + } else { + adapter->ets = false; + } +#endif + /* Check if Media Autosense is enabled */ + adapter->ei = *ei; + if (hw->dev_spec._82575.mas_capable) + igb_init_mas(adapter); + + /* do hw tstamp init after resetting */ + igb_ptp_init(adapter); + + dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); + /* print bus type/speed/width info, not applicable to i354 */ + if (hw->mac.type != e1000_i354) { + dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", + netdev->name, + ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : + (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : + "unknown"), + ((hw->bus.width == e1000_bus_width_pcie_x4) ? + "Width x4" : + (hw->bus.width == e1000_bus_width_pcie_x2) ? + "Width x2" : + (hw->bus.width == e1000_bus_width_pcie_x1) ? + "Width x1" : "unknown"), netdev->dev_addr); + } + + if ((hw->mac.type == e1000_82576 && + rd32(E1000_EECD) & E1000_EECD_PRES) || + (hw->mac.type >= e1000_i210 || + igb_get_flash_presence_i210(hw))) { + ret_val = igb_read_part_string(hw, part_str, + E1000_PBANUM_LENGTH); + } else { + ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND; + } + + if (ret_val) + strcpy(part_str, "Unknown"); + dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); + dev_info(&pdev->dev, + "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", + (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : + (adapter->flags & IGB_FLAG_HAS_MSI) ? 
"MSI" : "legacy", + adapter->num_rx_queues, adapter->num_tx_queues); + if (hw->phy.media_type == e1000_media_type_copper) { + switch (hw->mac.type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + /* Enable EEE for internal copper PHY devices */ + err = igb_set_eee_i350(hw, true, true); + if ((!err) && + (!hw->dev_spec._82575.eee_disable)) { + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + adapter->flags |= IGB_FLAG_EEE; + } + break; + case e1000_i354: + if ((rd32(E1000_CTRL_EXT) & + E1000_CTRL_EXT_LINK_MODE_SGMII)) { + err = igb_set_eee_i354(hw, true, true); + if ((!err) && + (!hw->dev_spec._82575.eee_disable)) { + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + adapter->flags |= IGB_FLAG_EEE; + } + } + break; + default: + break; + } + } + + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + + pm_runtime_put_noidle(&pdev->dev); + return 0; + +err_register: + igb_release_hw_control(adapter); + memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); +err_eeprom: + if (!igb_check_reset_block(hw)) + igb_reset_phy(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); +err_sw_init: + kfree(adapter->mac_table); + kfree(adapter->shadow_vfta); + igb_clear_interrupt_scheme(adapter); +#ifdef CONFIG_PCI_IOV + igb_disable_sriov(pdev); +#endif + pci_iounmap(pdev, adapter->io_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +#ifdef CONFIG_PCI_IOV +static int igb_disable_sriov(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned long flags; + + /* reclaim resources allocated to VFs */ + if (adapter->vf_data) { + /* disable iov and allow time for transactions to clear */ + if (pci_vfs_assigned(pdev)) { + dev_warn(&pdev->dev, + "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n"); + return -EPERM; + } else { + pci_disable_sriov(pdev); + msleep(500); + } + spin_lock_irqsave(&adapter->vfs_lock, flags); + kfree(adapter->vf_mac_list); + adapter->vf_mac_list = NULL; + kfree(adapter->vf_data); + adapter->vf_data = NULL; + adapter->vfs_allocated_count = 0; + spin_unlock_irqrestore(&adapter->vfs_lock, flags); + wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + wrfl(); + msleep(100); + dev_info(&pdev->dev, "IOV Disabled\n"); + + /* Re-enable DMA Coalescing flag since IOV is turned off */ + adapter->flags |= IGB_FLAG_DMAC; + } + + return 0; +} + +static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + int old_vfs = pci_num_vf(pdev); + struct vf_mac_filter *mac_list; + int err = 0; + int num_vf_mac_filters, i; + + if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) { + err = -EPERM; + goto out; + } + if (!num_vfs) + goto out; + + if (old_vfs) { + dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n", + old_vfs, max_vfs); + adapter->vfs_allocated_count = old_vfs; + } else + adapter->vfs_allocated_count = num_vfs; + + adapter->vf_data = kcalloc(adapter->vfs_allocated_count, + sizeof(struct vf_data_storage), GFP_KERNEL); + + /* if allocation failed then we do not support SR-IOV */ + if (!adapter->vf_data) { + adapter->vfs_allocated_count = 0; + err = -ENOMEM; + goto out; + } + + 
/* Due to the limited number of RAR entries calculate potential
+	 * number of MAC filters available for the VFs. Reserve entries
+	 * for PF default MAC, PF MAC filters and at least one RAR entry
+	 * for each VF for VF MAC.
+	 */
+	num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
+			     (1 + IGB_PF_MAC_FILTERS_RESERVED +
+			      adapter->vfs_allocated_count);
+
+	adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
+				       sizeof(struct vf_mac_filter),
+				       GFP_KERNEL);
+
+	mac_list = adapter->vf_mac_list;
+	INIT_LIST_HEAD(&adapter->vf_macs.l);
+
+	if (adapter->vf_mac_list) {
+		/* Initialize list of VF MAC filters */
+		for (i = 0; i < num_vf_mac_filters; i++) {
+			mac_list->vf = -1;
+			mac_list->free = true;
+			list_add(&mac_list->l, &adapter->vf_macs.l);
+			mac_list++;
+		}
+	} else {
+		/* If we could not allocate memory for the VF MAC filters
+		 * we can continue without this feature but warn user.
+		 */
+		dev_err(&pdev->dev,
+			"Unable to allocate memory for VF MAC filter list\n");
+	}
+
+	/* only call pci_enable_sriov() if no VFs are allocated already */
+	if (!old_vfs) {
+		err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+		if (err)
+			goto err_out;
+	}
+	dev_info(&pdev->dev, "%d VFs allocated\n",
+		 adapter->vfs_allocated_count);
+	for (i = 0; i < adapter->vfs_allocated_count; i++)
+		igb_vf_configure(adapter, i);
+
+	/* DMA Coalescing is not supported in IOV mode. */
+	adapter->flags &= ~IGB_FLAG_DMAC;
+	goto out;
+
+err_out:
+	kfree(adapter->vf_mac_list);
+	adapter->vf_mac_list = NULL;
+	kfree(adapter->vf_data);
+	adapter->vf_data = NULL;
+	adapter->vfs_allocated_count = 0;
+out:
+	return err;
+}
+
+#endif
+/**
+ * igb_remove_i2c - Cleanup I2C interface
+ * @adapter: pointer to adapter structure
+ **/
+static void igb_remove_i2c(struct igb_adapter *adapter)
+{
+	/* free the adapter bus structure */
+	i2c_del_adapter(&adapter->i2c_adap);
+}
+
+/**
+ * igb_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * igb_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void igb_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (adapter->ecdev) {
+		ecdev_close(adapter->ecdev);
+		ecdev_withdraw(adapter->ecdev);
+	}
+
+	pm_runtime_get_noresume(&pdev->dev);
+#ifdef CONFIG_IGB_HWMON
+	igb_sysfs_exit(adapter);
+#endif
+	igb_remove_i2c(adapter);
+	igb_ptp_stop(adapter);
+	/* The watchdog timer may be rescheduled, so explicitly
+	 * disable watchdog from being rescheduled.
+	 */
+	set_bit(__IGB_DOWN, &adapter->state);
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_info_timer);
+
+	cancel_work_sync(&adapter->reset_task);
+	cancel_work_sync(&adapter->watchdog_task);
+
+#ifdef CONFIG_IGB_DCA
+	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
+		dev_info(&pdev->dev, "DCA disabled\n");
+		dca_remove_requester(&pdev->dev);
+		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
+		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
+	}
+#endif
+
+	/* Release control of h/w to f/w. If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+	 */
+	igb_release_hw_control(adapter);
+
+#ifdef CONFIG_PCI_IOV
+	igb_disable_sriov(pdev);
+#endif
+
+	if (!adapter->ecdev) {
+		unregister_netdev(netdev);
+	}
+
+	igb_clear_interrupt_scheme(adapter);
+
+	pci_iounmap(pdev, adapter->io_addr);
+	if (hw->flash_address)
+		iounmap(hw->flash_address);
+	pci_release_mem_regions(pdev);
+
+	kfree(adapter->mac_table);
+	kfree(adapter->shadow_vfta);
+	free_netdev(netdev);
+
+	pci_disable_pcie_error_reporting(pdev);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
+ * @adapter: board private structure to initialize
+ *
+ * This function initializes the vf specific data storage and then attempts to
+ * allocate the VFs. The reason for ordering it this way is because it is much
+ * more expensive time wise to disable SR-IOV than it is to allocate and free
+ * the memory for the VFs.
+ **/
+static void igb_probe_vfs(struct igb_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* Virtualization features not supported on i210 family. */
+	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+		return;
+
+	/* Of the below we really only want the effect of getting
+	 * IGB_FLAG_HAS_MSIX set (if available), without which
+	 * igb_enable_sriov() has no effect.
+	 */
+	igb_set_interrupt_capability(adapter, true);
+	igb_reset_interrupt_capability(adapter);
+
+	pci_sriov_set_totalvfs(pdev, 7);
+	igb_enable_sriov(pdev, max_vfs);
+
+#endif /* CONFIG_PCI_IOV */
+}
+
+unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int max_rss_queues;
+
+	/* Determine the maximum number of RSS queues supported. */
+	switch (hw->mac.type) {
+	case e1000_i211:
+		max_rss_queues = IGB_MAX_RX_QUEUES_I211;
+		break;
+	case e1000_82575:
+	case e1000_i210:
+		max_rss_queues = IGB_MAX_RX_QUEUES_82575;
+		break;
+	case e1000_i350:
+		/* I350 cannot do RSS and SR-IOV at the same time */
+		if (!!adapter->vfs_allocated_count) {
+			max_rss_queues = 1;
+			break;
+		}
+		fallthrough;
+	case e1000_82576:
+		if (!!adapter->vfs_allocated_count) {
+			max_rss_queues = 2;
+			break;
+		}
+		fallthrough;
+	case e1000_82580:
+	case e1000_i354:
+	default:
+		max_rss_queues = IGB_MAX_RX_QUEUES;
+		break;
+	}
+
+	return max_rss_queues;
+}
+
+static void igb_init_queue_configuration(struct igb_adapter *adapter)
+{
+	u32 max_rss_queues;
+
+	max_rss_queues = igb_get_max_rss_queues(adapter);
+	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+
+	igb_set_flag_queue_pairs(adapter, max_rss_queues);
+}
+
+void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
+			      const u32 max_rss_queues)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* Determine if we need to pair queues. */
+	switch (hw->mac.type) {
+	case e1000_82575:
+	case e1000_i211:
+		/* Device supports enough interrupts without queue pairing. */
+		break;
+	case e1000_82576:
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i354:
+	case e1000_i210:
+	default:
+		/* If rss_queues > half of max_rss_queues, pair the queues in
+		 * order to conserve interrupts due to limited supply.
+ */ + if (adapter->rss_queues > (max_rss_queues / 2)) + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + else + adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS; + break; + } +} + +/** + * igb_sw_init - Initialize general software structures (struct igb_adapter) + * @adapter: board private structure to initialize + * + * igb_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int igb_sw_init(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGB_DEFAULT_TXD; + adapter->rx_ring_count = IGB_DEFAULT_RXD; + + /* set default ITR values */ + adapter->rx_itr_setting = IGB_DEFAULT_ITR; + adapter->tx_itr_setting = IGB_DEFAULT_ITR; + + /* set default work limits */ + adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; + + adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + spin_lock_init(&adapter->nfc_lock); + spin_lock_init(&adapter->stats64_lock); + + /* init spinlock to avoid concurrency of VF resources */ + spin_lock_init(&adapter->vfs_lock); +#ifdef CONFIG_PCI_IOV + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + if (max_vfs > 7) { + dev_warn(&pdev->dev, + "Maximum of 7 VFs per PF, using max\n"); + max_vfs = adapter->vfs_allocated_count = 7; + } else + adapter->vfs_allocated_count = max_vfs; + if (adapter->vfs_allocated_count) + dev_warn(&pdev->dev, + "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n"); + break; + default: + break; + } +#endif /* CONFIG_PCI_IOV */ + + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ + adapter->flags |= IGB_FLAG_HAS_MSIX; + + adapter->mac_table = kcalloc(hw->mac.rar_entry_count, + sizeof(struct igb_mac_addr), + GFP_KERNEL); + if (!adapter->mac_table) + return -ENOMEM; + + igb_probe_vfs(adapter); + + igb_init_queue_configuration(adapter); + + /* Setup and initialize a copy of the hw vlan table array */ + adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), + GFP_KERNEL); + if (!adapter->shadow_vfta) + return -ENOMEM; + + /* This call may decrease the number of queues */ + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igb_irq_disable(adapter); + + if (hw->mac.type >= e1000_i350) + adapter->flags &= ~IGB_FLAG_DMAC; + + set_bit(__IGB_DOWN, &adapter->state); + return 0; +} + +/** + * __igb_open - Called when a network interface is made active + * @netdev: network interface device structure + * @resuming: indicates whether we are in a resume call + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
+ **/ +static int __igb_open(struct net_device *netdev, bool resuming) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + int i; + + /* disallow open during test */ + if (test_bit(__IGB_TESTING, &adapter->state)) { + WARN_ON(resuming); + return -EBUSY; + } + + if (!resuming) + pm_runtime_get_sync(&pdev->dev); + + if (adapter->ecdev) { + ecdev_set_link(adapter->ecdev, 0); + } else { + netif_carrier_off(netdev); + } + + /* allocate transmit descriptors */ + err = igb_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igb_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + igb_power_up_link(adapter); + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + igb_configure(adapter); + + err = igb_request_irq(adapter); + if (err) + goto err_req_irq; + + if (!adapter->ecdev) { + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(adapter->netdev, + adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(adapter->netdev, + adapter->num_rx_queues); + if (err) + goto err_set_queues; + } + + /* From here on the code is the same as igb_up() */ + clear_bit(__IGB_DOWN, &adapter->state); + + if (!adapter->ecdev) { + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&(adapter->q_vector[i]->napi)); + } + + /* Clear any pending interrupts. */ + rd32(E1000_TSICR); + rd32(E1000_ICR); + + igb_irq_enable(adapter); + + /* notify VFs that reset has been completed */ + if (adapter->vfs_allocated_count) { + u32 reg_data = rd32(E1000_CTRL_EXT); + + reg_data |= E1000_CTRL_EXT_PFRSTD; + wr32(E1000_CTRL_EXT, reg_data); + } + + if (!adapter->ecdev) { + netif_tx_start_all_queues(netdev); + } + + if (!resuming) + pm_runtime_put(&pdev->dev); + + if (!adapter->ecdev) { + /* start the watchdog. */ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + } + + return 0; + +err_set_queues: + igb_free_irq(adapter); +err_req_irq: + igb_release_hw_control(adapter); + igb_power_down_link(adapter); + igb_free_all_rx_resources(adapter); +err_setup_rx: + igb_free_all_tx_resources(adapter); +err_setup_tx: + igb_reset(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); + + return err; +} + +int igb_open(struct net_device *netdev) +{ + return __igb_open(netdev, false); +} + +/** + * __igb_close - Disables a network interface + * @netdev: network interface device structure + * @suspending: indicates we are in a suspend call + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ **/ +static int __igb_close(struct net_device *netdev, bool suspending) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); + + if (!suspending) + pm_runtime_get_sync(&pdev->dev); + + igb_down(adapter); + igb_free_irq(adapter); + + igb_free_all_tx_resources(adapter); + igb_free_all_rx_resources(adapter); + + if (!suspending) + pm_runtime_put_sync(&pdev->dev); + return 0; +} + +int igb_close(struct net_device *netdev) +{ + if (netif_device_present(netdev) || netdev->dismantle) + return __igb_close(netdev, false); + return 0; +} + +/** + * igb_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int igb_setup_tx_resources(struct igb_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int size; + + size = sizeof(struct igb_tx_buffer) * tx_ring->count; + + tx_ring->tx_buffer_info = vmalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igb_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int igb_setup_all_tx_resources(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igb_setup_tx_resources(adapter->tx_ring[i]); + if (err) { + dev_err(&pdev->dev, + "Allocation for Tx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_tx_resources(adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * igb_setup_tctl - configure the transmit control registers + * @adapter: Board private structure + **/ +void igb_setup_tctl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tctl; + + /* disable queue 0 which is enabled by default on 82575 and 82576 */ + wr32(E1000_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ + tctl = rd32(E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + igb_config_collision_dist(hw); + + /* Enable transmits */ + tctl |= E1000_TCTL_EN; + + wr32(E1000_TCTL, tctl); +} + +/** + * igb_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. 
+ **/ +void igb_configure_tx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + u32 txdctl = 0; + u64 tdba = ring->dma; + int reg_idx = ring->reg_idx; + + wr32(E1000_TDLEN(reg_idx), + ring->count * sizeof(union e1000_adv_tx_desc)); + wr32(E1000_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(E1000_TDBAH(reg_idx), tdba >> 32); + + ring->tail = adapter->io_addr + E1000_TDT(reg_idx); + wr32(E1000_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGB_TX_PTHRESH; + txdctl |= IGB_TX_HTHRESH << 8; + txdctl |= IGB_TX_WTHRESH << 16; + + /* reinitialize tx_buffer_info */ + memset(ring->tx_buffer_info, 0, + sizeof(struct igb_tx_buffer) * ring->count); + + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + wr32(E1000_TXDCTL(reg_idx), txdctl); +} + +/** + * igb_configure_tx - Configure transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void igb_configure_tx(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + /* disable the queues */ + for (i = 0; i < adapter->num_tx_queues; i++) + wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0); + + wrfl(); + usleep_range(10000, 20000); + + for (i = 0; i < adapter->num_tx_queues; i++) + igb_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +/** + * igb_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: Rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int igb_setup_rx_resources(struct igb_ring *rx_ring) +{ + struct igb_adapter *adapter = netdev_priv(rx_ring->netdev); + struct device *dev = rx_ring->dev; + int size, res; + + if (!adapter->ecdev) { + /* XDP RX-queue info */ + if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, + rx_ring->queue_index, 0); + if (res < 0) { + dev_err(dev, "Failed to register xdp_rxq index %u\n", + rx_ring->queue_index); + return res; + } + } + + size = sizeof(struct igb_rx_buffer) * rx_ring->count; + + rx_ring->rx_buffer_info = vmalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + if (!adapter->ecdev) + rx_ring->xdp_prog = adapter->xdp_prog; + + return 0; + +err: + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igb_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int igb_setup_all_rx_resources(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igb_setup_rx_resources(adapter->rx_ring[i]); + if (err) { + dev_err(&pdev->dev, + "Allocation for Rx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_rx_resources(adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * igb_setup_mrqc - configure the multiple receive queue 
control registers + * @adapter: Board private structure + **/ +static void igb_setup_mrqc(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 mrqc, rxcsum; + u32 j, num_rx_queues; + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (j = 0; j < 10; j++) + wr32(E1000_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + switch (hw->mac.type) { + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + num_rx_queues = 2; + break; + default: + break; + } + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGB_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGB_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igb_write_rss_indir_tbl(adapter); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(E1000_RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + if (adapter->hw.mac.type >= e1000_82576) + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= E1000_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(E1000_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + + /* If VMDq is enabled then we set the appropriate mode for that, else + * we default to RSS so that an RSS hash is calculated per packet even + * if we are only using one queue + */ + if (adapter->vfs_allocated_count) { + if (hw->mac.type > e1000_82575) { + /* Set the default pool for the PF's first queue */ + u32 vtctl = rd32(E1000_VT_CTL); + + vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | + E1000_VT_CTL_DISABLE_DEF_POOL); + vtctl |= adapter->vfs_allocated_count << + E1000_VT_CTL_DEFAULT_POOL_SHIFT; + wr32(E1000_VT_CTL, vtctl); + } + if (adapter->rss_queues > 1) + mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ; + else + mrqc |= E1000_MRQC_ENABLE_VMDQ; + } else { + mrqc |= E1000_MRQC_ENABLE_RSS_MQ; + } + igb_vmm_control(adapter); + + wr32(E1000_MRQC, mrqc); +} + +/** + * igb_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +void igb_setup_rctl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = rd32(E1000_RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* enable stripping of CRC. It's unlikely this will break BMC + * redirection as it did with e1000. Newer features require + * that the HW strips the CRC. + */ + rctl |= E1000_RCTL_SECRC; + + /* disable store bad packets and clear size bits. */ + rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); + + /* enable LPE to allow for reception of jumbo frames */ + rctl |= E1000_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ + wr32(E1000_RXDCTL(0), 0); + + /* Attention!!! 
For SR-IOV PF driver operations you must enable + * queue drop for all VF and PF queues to prevent head of line blocking + * if an un-trusted VF does not provide descriptors to hardware. + */ + if (adapter->vfs_allocated_count) { + /* set all queue drop enable bits */ + wr32(E1000_QDE, ALL_QUEUES); + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + wr32(E1000_RCTL, rctl); +} + +static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, + int vfn) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + + if (size > MAX_JUMBO_FRAME_SIZE) + size = MAX_JUMBO_FRAME_SIZE; + + vmolr = rd32(E1000_VMOLR(vfn)); + vmolr &= ~E1000_VMOLR_RLPML_MASK; + vmolr |= size | E1000_VMOLR_LPE; + wr32(E1000_VMOLR(vfn), vmolr); + + return 0; +} + +static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter, + int vfn, bool enable) +{ + struct e1000_hw *hw = &adapter->hw; + u32 val, reg; + + if (hw->mac.type < e1000_82576) + return; + + if (hw->mac.type == e1000_i350) + reg = E1000_DVMOLR(vfn); + else + reg = E1000_VMOLR(vfn); + + val = rd32(reg); + if (enable) + val |= E1000_VMOLR_STRVLAN; + else + val &= ~(E1000_VMOLR_STRVLAN); + wr32(reg, val); +} + +static inline void igb_set_vmolr(struct igb_adapter *adapter, + int vfn, bool aupe) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + + /* This register exists only on 82576 and newer so if we are older then + * we should exit and do nothing + */ + if (hw->mac.type < e1000_82576) + return; + + vmolr = rd32(E1000_VMOLR(vfn)); + if (aupe) + vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ + else + vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ + + /* clear all bits that might not be set */ + vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); + + if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) + vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ + /* for VMDq only allow the VFs and pool 0 to accept broadcast and + * multicast packets + */ + if (vfn <= adapter->vfs_allocated_count) + vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ + + wr32(E1000_VMOLR(vfn), vmolr); +} + +/** + * igb_setup_srrctl - configure the split and replication receive control + * registers + * @adapter: Board private structure + * @ring: receive ring to be configured + **/ +void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + int reg_idx = ring->reg_idx; + u32 srrctl = 0; + + srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + if (ring_uses_large_buffer(ring)) + srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + if (hw->mac.type >= e1000_82580) + srrctl |= E1000_SRRCTL_TIMESTAMP; + /* Only set Drop Enable if VFs allocated, or we are supporting multiple + * queues and rx flow control is disabled + */ + if (adapter->vfs_allocated_count || + (!(hw->fc.current_mode & e1000_fc_rx_pause) && + adapter->num_rx_queues > 1)) + srrctl |= E1000_SRRCTL_DROP_EN; + + 
wr32(E1000_SRRCTL(reg_idx), srrctl); +} + +/** + * igb_configure_rx_ring - Configure a receive ring after Reset + * @adapter: board private structure + * @ring: receive ring to be configured + * + * Configure the Rx unit of the MAC after a reset. + **/ +void igb_configure_rx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + union e1000_adv_rx_desc *rx_desc; + u64 rdba = ring->dma; + int reg_idx = ring->reg_idx; + u32 rxdctl = 0; + + if (!adapter->ecdev) { + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL)); + } + + /* disable the queue */ + wr32(E1000_RXDCTL(reg_idx), 0); + + /* Set DMA base address registers */ + wr32(E1000_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(E1000_RDBAH(reg_idx), rdba >> 32); + wr32(E1000_RDLEN(reg_idx), + ring->count * sizeof(union e1000_adv_rx_desc)); + + /* initialize head and tail */ + ring->tail = adapter->io_addr + E1000_RDT(reg_idx); + wr32(E1000_RDH(reg_idx), 0); + writel(0, ring->tail); + + /* set descriptor configuration */ + igb_setup_srrctl(adapter, ring); + + /* set filtering for VMDQ pools */ + igb_set_vmolr(adapter, reg_idx & 0x7, true); + + rxdctl |= IGB_RX_PTHRESH; + rxdctl |= IGB_RX_HTHRESH << 8; + rxdctl |= IGB_RX_WTHRESH << 16; + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct igb_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IGB_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* enable receive descriptor fetching */ + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + wr32(E1000_RXDCTL(reg_idx), rxdctl); +} + +static void igb_set_rx_buffer_len(struct igb_adapter *adapter, + struct igb_ring *rx_ring) +{ + /* set build_skb and buffer size flags */ + clear_ring_build_skb_enabled(rx_ring); + clear_ring_uses_large_buffer(rx_ring); + + if (adapter->flags & IGB_FLAG_RX_LEGACY) + return; + + set_ring_build_skb_enabled(rx_ring); + +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) + return; + + set_ring_uses_large_buffer(rx_ring); +#endif +} + +/** + * igb_configure_rx - Configure receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ **/ +static void igb_configure_rx(struct igb_adapter *adapter) +{ + int i; + + /* set the correct pool for the PF default MAC address in entry 0 */ + igb_set_default_mac_filter(adapter); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *rx_ring = adapter->rx_ring[i]; + + igb_set_rx_buffer_len(adapter, rx_ring); + igb_configure_rx_ring(adapter, rx_ring); + } +} + +/** + * igb_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void igb_free_tx_resources(struct igb_ring *tx_ring) +{ + igb_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igb_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void igb_free_all_tx_resources(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igb_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * igb_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void igb_clean_tx_ring(struct igb_ring *tx_ring) +{ + u16 i = tx_ring->next_to_clean; + struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + + while (i != tx_ring->next_to_use) { + union e1000_adv_tx_desc *eop_desc, *tx_desc; + + /* Free all the Tx ring sk_buffs or xdp frames */ + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); + if (!adapter->ecdev) { + if (tx_buffer->type == IGB_TYPE_SKB) + dev_kfree_skb_any(tx_buffer->skb); + else + xdp_return_frame(tx_buffer->xdpf); + } + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IGB_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + + tx_buffer->next_to_watch = NULL; + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * igb_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void igb_clean_all_tx_rings(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igb_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * igb_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void 
igb_free_rx_resources(struct igb_ring *rx_ring) +{ + igb_clean_rx_ring(rx_ring); + + rx_ring->xdp_prog = NULL; + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * igb_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void igb_free_all_rx_resources(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igb_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * igb_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void igb_clean_rx_ring(struct igb_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, + buffer_info->dma, + buffer_info->page_offset, + igb_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, + buffer_info->dma, + igb_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGB_RX_DMA_ATTR); + __page_frag_cache_drain(buffer_info->page, + buffer_info->pagecnt_bias); + + i++; + if (i == rx_ring->count) + i = 0; + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * igb_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void igb_clean_all_rx_rings(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igb_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * igb_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int igb_set_mac(struct net_device *netdev, void *p) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + /* set the correct pool for the new PF MAC address in entry 0 */ + igb_set_default_mac_filter(adapter); + + return 0; +} + +/** + * igb_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. 
+ * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int igb_write_mc_addr_list(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + igb_update_mc_addr_list(hw, NULL, 0); + igb_restore_vf_multicasts(adapter); + return 0; + } + + mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* The shared function expects a packed array of only addresses. */ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + igb_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +static int igb_vlan_promisc_enable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 i, pf_id; + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + case e1000_i350: + /* VLAN filtering needed for VLAN prio filter */ + if (adapter->netdev->features & NETIF_F_NTUPLE) + break; + fallthrough; + case e1000_82576: + case e1000_82580: + case e1000_i354: + /* VLAN filtering needed for pool filtering */ + if (adapter->vfs_allocated_count) + break; + fallthrough; + default: + return 1; + } + + /* We are already in VLAN promisc, nothing to do */ + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + return 0; + + if (!adapter->vfs_allocated_count) + goto set_vfta; + + /* Add PF to all active pools */ + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + + for (i = E1000_VLVF_ARRAY_SIZE; --i;) { + u32 vlvf = rd32(E1000_VLVF(i)); + + vlvf |= BIT(pf_id); + wr32(E1000_VLVF(i), vlvf); + } + +set_vfta: + /* Set all bits in the VLAN filter table array */ + for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;) + hw->mac.ops.write_vfta(hw, i, ~0U); + + /* Set flag so we don't redo unnecessary work */ + adapter->flags |= IGB_FLAG_VLAN_PROMISC; + + return 0; +} + +#define VFTA_BLOCK_SIZE 8 +static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; + u32 vid_start = vfta_offset * 32; + u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); + u32 i, vid, word, bits, pf_id; + + /* guarantee that we don't scrub out management VLAN */ + vid = adapter->mng_vlan_id; + if (vid >= vid_start && vid < vid_end) + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); + + if (!adapter->vfs_allocated_count) + goto set_vfta; + + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + + for (i = E1000_VLVF_ARRAY_SIZE; --i;) { + u32 vlvf = rd32(E1000_VLVF(i)); + + /* pull VLAN ID from VLVF */ + vid = vlvf & VLAN_VID_MASK; + + /* only concern ourselves with a certain range */ + if (vid < vid_start || vid >= vid_end) + continue; + + if (vlvf & E1000_VLVF_VLANID_ENABLE) { + /* record VLAN ID in VFTA */ + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); + + /* if PF is part of this then continue */ + if (test_bit(vid, adapter->active_vlans)) + continue; + } + + /* remove PF from the pool */ + bits = ~BIT(pf_id); + bits &= rd32(E1000_VLVF(i)); + wr32(E1000_VLVF(i), bits); + } + +set_vfta: + /* extract values from active_vlans and write back to VFTA */ + for (i = VFTA_BLOCK_SIZE; i--;) { + vid = (vfta_offset + i) * 32; + word = vid / BITS_PER_LONG; + bits = vid % BITS_PER_LONG; + + vfta[i] |= adapter->active_vlans[word] >> bits; + + 
hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]); + } +} + +static void igb_vlan_promisc_disable(struct igb_adapter *adapter) +{ + u32 i; + + /* We are not in VLAN promisc, nothing to do */ + if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; + + for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE) + igb_scrub_vfta(adapter, i); +} + +/** + * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void igb_set_rx_mode(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned int vfn = adapter->vfs_allocated_count; + u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE; + int count; + + /* Check for Promiscuous and All Multicast modes */ + if (netdev->flags & IFF_PROMISC) { + rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + + /* enable use of UTA filter to force packets to default pool */ + if (hw->mac.type == e1000_82576) + vmolr |= E1000_VMOLR_ROPE; + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = igb_write_mc_addr_list(netdev); + if (count < 0) { + rctl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else if (count) { + vmolr |= E1000_VMOLR_ROMPE; + } + } + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) { + rctl |= E1000_RCTL_UPE; + vmolr |= E1000_VMOLR_ROPE; + } + + /* enable VLAN filtering by default */ + rctl |= E1000_RCTL_VFE; + + /* disable VLAN filtering for modes that require it */ + if ((netdev->flags & IFF_PROMISC) || + (netdev->features & NETIF_F_RXALL)) { + /* if we fail to set all rules then just clear VFE */ + if (igb_vlan_promisc_enable(adapter)) + rctl &= ~E1000_RCTL_VFE; + } else { + igb_vlan_promisc_disable(adapter); + } + + /* update state of unicast, multicast, and VLAN filtering modes */ + rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE | + E1000_RCTL_VFE); + wr32(E1000_RCTL, rctl); + +#if (PAGE_SIZE < 8192) + if (!adapter->vfs_allocated_count) { + if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) + rlpml = IGB_MAX_FRAME_BUILD_SKB; + } +#endif + wr32(E1000_RLPML, rlpml); + + /* In order to support SR-IOV and eventually VMDq it is necessary to set + * the VMOLR to enable the appropriate modes. 
Without this workaround + * we will have issues with VLAN tag stripping not being done for frames + * that are only arriving because we are the default pool + */ + if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) + return; + + /* set UTA to appropriate mode */ + igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE)); + + vmolr |= rd32(E1000_VMOLR(vfn)) & + ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); + + /* enable Rx jumbo frames, restrict as needed to support build_skb */ + vmolr &= ~E1000_VMOLR_RLPML_MASK; +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) + vmolr |= IGB_MAX_FRAME_BUILD_SKB; + else +#endif + vmolr |= MAX_JUMBO_FRAME_SIZE; + vmolr |= E1000_VMOLR_LPE; + + wr32(E1000_VMOLR(vfn), vmolr); + + igb_restore_vf_multicasts(adapter); +} + +static void igb_check_wvbr(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 wvbr = 0; + + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + wvbr = rd32(E1000_WVBR); + if (!wvbr) + return; + break; + default: + break; + } + + adapter->wvbr |= wvbr; +} + +#define IGB_STAGGERED_QUEUE_OFFSET 8 + +static void igb_spoof_check(struct igb_adapter *adapter) +{ + int j; + + if (!adapter->wvbr) + return; + + for (j = 0; j < adapter->vfs_allocated_count; j++) { + if (adapter->wvbr & BIT(j) || + adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) { + dev_warn(&adapter->pdev->dev, + "Spoof event(s) detected on VF %d\n", j); + adapter->wvbr &= + ~(BIT(j) | + BIT(j + IGB_STAGGERED_QUEUE_OFFSET)); + } + } +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void igb_update_phy_info(struct timer_list *t) +{ + struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer); + igb_get_phy_info(&adapter->hw); +} + +/** + * igb_has_link - check shared code for link and determine up/down + * @adapter: pointer to driver private info + **/ +bool igb_has_link(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay + * false until the e1000_check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + if (!hw->mac.get_link_status) + return true; + fallthrough; + case e1000_media_type_internal_serdes: + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + break; + default: + case e1000_media_type_unknown: + break; + } + + if (((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) && + (hw->phy.id == I210_I_PHY_ID)) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + + return link_active; +} + +static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) +{ + bool ret = false; + u32 ctrl_ext, thstat; + + /* check for thermal sensor event on i350 copper only */ + if (hw->mac.type == e1000_i350) { + thstat = rd32(E1000_THSTAT); + ctrl_ext = rd32(E1000_CTRL_EXT); + + if ((hw->phy.media_type == e1000_media_type_copper) && + !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) + ret = !!(thstat & event); + } + + return ret; +} + +/** + * igb_check_lvmmc - check for malformed packets received + * and indicated in LVMMC register + * @adapter: pointer to adapter + **/ +static void igb_check_lvmmc(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 lvmmc; + + lvmmc = rd32(E1000_LVMMC); + if (lvmmc) { + if (unlikely(net_ratelimit())) { + netdev_warn(adapter->netdev, + "malformed Tx packet detected and dropped, LVMMC:0x%08x\n", + lvmmc); + } + } +} + +/** + * igb_watchdog - Timer Call-back + * @t: pointer to timer_list containing our private info pointer + **/ +static void igb_watchdog(struct timer_list *t) +{ + struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer); + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igb_watchdog_task(struct work_struct *work) +{ + struct igb_adapter *adapter = container_of(work, + struct igb_adapter, + watchdog_task); + struct e1000_hw *hw = &adapter->hw; + struct e1000_phy_info *phy = &hw->phy; + struct net_device *netdev = adapter->netdev; + u32 link; + int i; + u32 connsw; + u16 phy_data, retry_count = 20; + + link = igb_has_link(adapter); + + if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + + /* Force link down if we have fiber to swap to */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + if (hw->phy.media_type == e1000_media_type_copper) { + connsw = rd32(E1000_CONNSW); + if (!(connsw & E1000_CONNSW_AUTOSENSE_EN)) + link = 0; + } + } + if (link) { + /* Perform a reset if the media type changed. */ + if (hw->dev_spec._82575.media_changed) { + hw->dev_spec._82575.media_changed = false; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + igb_reset(adapter); + } + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = rd32(E1000_CTRL); + /* Links status message must follow this format */ + netdev_info(netdev, + "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? 
+ "Full" : "Half", + (ctrl & E1000_CTRL_TFCE) && + (ctrl & E1000_CTRL_RFCE) ? "RX/TX" : + (ctrl & E1000_CTRL_RFCE) ? "RX" : + (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); + + /* disable EEE if enabled */ + if ((adapter->flags & IGB_FLAG_EEE) && + (adapter->link_duplex == HALF_DUPLEX)) { + dev_info(&adapter->pdev->dev, + "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n"); + adapter->hw.dev_spec._82575.eee_disable = true; + adapter->flags &= ~IGB_FLAG_EEE; + } + + /* check if SmartSpeed worked */ + igb_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); + + /* check for thermal sensor event */ + if (igb_thermal_sensor_event(hw, + E1000_THSTAT_LINK_THROTTLE)) + netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n"); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + /* maybe add some timeout factor ? */ + break; + } + + if (adapter->link_speed != SPEED_1000 || + !hw->phy.ops.read_reg) + goto no_wait; + + /* wait for Remote receiver status OK */ +retry_read_status: + if (!igb_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data)) { + if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && + retry_count) { + msleep(100); + retry_count--; + goto retry_read_status; + } else if (!retry_count) { + dev_err(&adapter->pdev->dev, "exceed max 2 second\n"); + } + } else { + dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n"); + } +no_wait: + netif_carrier_on(netdev); + + igb_ping_all_vfs(adapter); + igb_check_vf_rate_limit(adapter); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* check for thermal sensor event */ + if (igb_thermal_sensor_event(hw, + E1000_THSTAT_PWR_DOWN)) { + netdev_err(netdev, "The network adapter was stopped because it overheated\n"); + } + + /* Links status message must follow this format */ + netdev_info(netdev, "igb: %s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); + + igb_ping_all_vfs(adapter); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + /* link is down, time to check for alternate media */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + igb_check_swap_media(adapter); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + pm_schedule_suspend(netdev->dev.parent, + MSEC_PER_SEC * 5); + + /* also check for alternate media here */ + } else if (!netif_carrier_ok(netdev) && + (adapter->flags & IGB_FLAG_MAS_ENABLE)) { + igb_check_swap_media(adapter); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + } + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *tx_ring = adapter->tx_ring[i]; + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to 
get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context).
+ */
+ if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
+ }
+ }
+
+ /* Force detection of hung controller every watchdog period */
+ set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+ }
+
+ /* Cause software interrupt to ensure Rx ring is cleaned */
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+ u32 eics = 0;
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ eics |= adapter->q_vector[i]->eims_value;
+ wr32(E1000_EICS, eics);
+ } else {
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ }
+
+ igb_spoof_check(adapter);
+ igb_ptp_rx_hang(adapter);
+ igb_ptp_tx_hang(adapter);
+
+ /* Check LVMMC register on i350/i354 only */
+ if ((adapter->hw.mac.type == e1000_i350) ||
+ (adapter->hw.mac.type == e1000_i354))
+ igb_check_lvmmc(adapter);
+
+ /* Reset the timer */
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + HZ));
+ else
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
+}
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+/**
+ * igb_update_ring_itr - update the dynamic ITR value based on packet size
+ * @q_vector: pointer to q_vector
+ *
+ * Stores a new ITR value based strictly on packet size. This
+ * algorithm is less sophisticated than that used in igb_update_itr,
+ * due to the difficulty of synchronizing statistics across multiple
+ * receive rings. The divisors and thresholds used by this function
+ * were determined based on theoretical maximum wire speed and testing
+ * data, in order to minimize response time while increasing bulk
+ * throughput.
+ * This functionality is controlled by ethtool's coalescing settings.
+ * NOTE: This function is called only when operating in a multiqueue
+ * receive environment.
+ **/
+static void igb_update_ring_itr(struct igb_q_vector *q_vector)
+{
+ int new_val = q_vector->itr_val;
+ int avg_wire_size = 0;
+ struct igb_adapter *adapter = q_vector->adapter;
+ unsigned int packets;
+
+ /* For non-gigabit speeds, just fix the interrupt rate at 4000
+ * ints/sec - ITR timer value of 120 ticks. 
+ */ + if (adapter->link_speed != SPEED_1000) { + new_val = IGB_4K_ITR; + goto set_itr_val; + } + + packets = q_vector->rx.total_packets; + if (packets) + avg_wire_size = q_vector->rx.total_bytes / packets; + + packets = q_vector->tx.total_packets; + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + q_vector->tx.total_bytes / packets); + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); + + /* Give a little boost to mid-size frames */ + if ((avg_wire_size > 300) && (avg_wire_size < 1200)) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (new_val < IGB_20K_ITR && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + new_val = IGB_20K_ITR; + +set_itr_val: + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; + } +clear_counts: + q_vector->rx.total_bytes = 0; + q_vector->rx.total_packets = 0; + q_vector->tx.total_bytes = 0; + q_vector->tx.total_packets = 0; +} + +/** + * igb_update_itr - update the dynamic ITR value based on statistics + * @q_vector: pointer to q_vector + * @ring_container: ring info to update the itr for + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * This functionality is controlled by ethtool's coalescing settings. + * NOTE: These calculations are only valid when operating in a single- + * queue environment. 
+ **/ +static void igb_update_itr(struct igb_q_vector *q_vector, + struct igb_ring_container *ring_container) +{ + unsigned int packets = ring_container->total_packets; + unsigned int bytes = ring_container->total_bytes; + u8 itrval = ring_container->itr; + + /* no packets, exit with status unchanged */ + if (packets == 0) + return; + + switch (itrval) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes/packets > 8000) + itrval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + itrval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes/packets > 8000) + itrval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + itrval = bulk_latency; + else if ((packets > 35)) + itrval = lowest_latency; + } else if (bytes/packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + itrval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + itrval = low_latency; + } else if (bytes < 1500) { + itrval = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itrval; +} + +static void igb_set_itr(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + u32 new_itr = q_vector->itr_val; + u8 current_itr = 0; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (adapter->link_speed != SPEED_1000) { + current_itr = 0; + new_itr = IGB_4K_ITR; + goto set_itr_now; + } + + igb_update_itr(q_vector, &q_vector->tx); + igb_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (current_itr == lowest_latency && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + current_itr = low_latency; + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IGB_70K_ITR; /* 70,000 ints/sec */ + break; + case low_latency: + new_itr = IGB_20K_ITR; /* 20,000 ints/sec */ + break; + case bulk_latency: + new_itr = IGB_4K_ITR; /* 4,000 ints/sec */ + break; + default: + break; + } + +set_itr_now: + if (new_itr != q_vector->itr_val) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR + * value at the beginning of the next interrupt so the timing + * ends up being correct. + */ + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; + } +} + +static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct e1000_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + struct timespec64 ts; + + context_desc = IGB_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT; + + /* For 82575, context index must be unique per ring. */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + mss_l4len_idx |= tx_ring->reg_idx << 4; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + /* We assume there is always a valid tx time available. Invalid times + * should have been handled by the upper layers. + */ + if (tx_ring->launchtime_enable) { + ts = ktime_to_timespec64(first->skb->tstamp); + skb_txtime_consumed(first->skb); + context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); + } else { + context_desc->seqnum_seed = 0; + } +} + +static int igb_tso(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? + E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP; + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); + type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first->tx_flags |= IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_CSUM | + IGB_TX_FLAGS_IPV4; + } else { + ip.v6->payload_len = 0; + first->tx_flags |= IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_CSUM; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) { + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + } else { + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* MSS L4LEN IDX */ + mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; + + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; + + igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, + type_tucmd, mss_l4len_idx); + + return 1; +} + +static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; 
+ u32 vlan_macip_lens = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) && + !tx_ring->launchtime_enable) + return; + goto no_csum; + } + + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; + fallthrough; + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (skb_csum_is_sctp(skb)) { + type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; + break; + } + fallthrough; + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= IGB_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: + vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; + + igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); +} + +#define IGB_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? \ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = E1000_ADVTXD_DTYP_DATA | + E1000_ADVTXD_DCMD_DEXT | + E1000_ADVTXD_DCMD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN, + (E1000_ADVTXD_DCMD_VLE)); + + /* set segmentation bits for TSO */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO, + (E1000_ADVTXD_DCMD_TSE)); + + /* set timestamp bit if present */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP, + (E1000_ADVTXD_MAC_TSTAMP)); + + /* insert frame checksum */ + cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS); + + return cmd_type; +} + +static void igb_tx_olinfo_status(struct igb_ring *tx_ring, + union e1000_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT; + + /* 82575 requires a unique index per ring */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + olinfo_status |= tx_ring->reg_idx << 4; + + /* insert L4 checksum */ + olinfo_status |= IGB_SET_FLAG(tx_flags, + IGB_TX_FLAGS_CSUM, + (E1000_TXD_POPTS_TXSM << 8)); + + /* insert IPv4 checksum */ + olinfo_status |= IGB_SET_FLAG(tx_flags, + IGB_TX_FLAGS_IPV4, + (E1000_TXD_POPTS_IXSM << 8)); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + struct net_device *netdev = tx_ring->netdev; + struct igb_adapter *adapter = netdev_priv(netdev); + + if (!adapter->ecdev) { + netif_stop_subqueue(netdev, tx_ring->queue_index); + } + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (igb_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! 
*/ + if (!adapter->ecdev) { + netif_wake_subqueue(netdev, tx_ring->queue_index); + } + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + if (igb_desc_unused(tx_ring) >= size) + return 0; + return __igb_maybe_stop_tx(tx_ring, size); +} + +static int igb_tx_map(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct igb_tx_buffer *tx_buffer; + union e1000_adv_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u32 cmd_type = igb_tx_cmd_type(skb, tx_flags); + u16 i = tx_ring->next_to_use; + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); + + tx_desc = IGB_TX_DESC(tx_ring, i); + + igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > IGB_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGB_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IGB_MAX_DATA_PER_TXD; + size -= IGB_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGB_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, + size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IGB_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + dma_wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. 
*/ + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); + } + return 0; + +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; + + /* clear dma mappings for failed tx_buffer_info map */ + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + if (i-- == 0) + i += tx_ring->count; + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + if (!adapter->ecdev) { + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + } + + tx_ring->next_to_use = i; + + return -1; +} + +int igb_xmit_xdp_ring(struct igb_adapter *adapter, + struct igb_ring *tx_ring, + struct xdp_frame *xdpf) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); + u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; + u16 count, i, index = tx_ring->next_to_use; + struct igb_tx_buffer *tx_head = &tx_ring->tx_buffer_info[index]; + struct igb_tx_buffer *tx_buffer = tx_head; + union e1000_adv_tx_desc *tx_desc = IGB_TX_DESC(tx_ring, index); + u32 len = xdpf->len, cmd_type, olinfo_status; + void *data = xdpf->data; + + count = TXD_USE_COUNT(len); + for (i = 0; i < nr_frags; i++) + count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); + + if (igb_maybe_stop_tx(tx_ring, count + 3)) + return IGB_XDP_CONSUMED; + + i = 0; + /* record the location of the first descriptor for this packet */ + tx_head->bytecount = xdp_get_frame_len(xdpf); + tx_head->type = IGB_TYPE_XDP; + tx_head->gso_segs = 1; + tx_head->xdpf = xdpf; + + olinfo_status = tx_head->bytecount << E1000_ADVTXD_PAYLEN_SHIFT; + /* 82575 requires a unique index per ring */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + olinfo_status |= tx_ring->reg_idx << 4; + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + + for (;;) { + dma_addr_t dma; + + dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + goto unmap; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, len); + dma_unmap_addr_set(tx_buffer, dma, dma); + + /* put descriptor type bits */ + cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT | + E1000_ADVTXD_DCMD_IFCS | len; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + tx_buffer->protocol = 0; + + if (++index == tx_ring->count) + index = 0; + + if (i == nr_frags) + break; + + tx_buffer = &tx_ring->tx_buffer_info[index]; + tx_desc = IGB_TX_DESC(tx_ring, index); + tx_desc->read.olinfo_status = 0; + + data = skb_frag_address(&sinfo->frags[i]); + len = skb_frag_size(&sinfo->frags[i]); + i++; + } + tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_TXD_DCMD); + + netdev_tx_sent_queue(txring_txq(tx_ring), tx_head->bytecount); + /* set the timestamp */ + tx_head->time_stamp = jiffies; + + /* Avoid any potential race with xdp_xmit and cleanup */ + smp_wmb(); + + /* set next_to_watch value indicating a packet is present */ + tx_head->next_to_watch = tx_desc; + tx_ring->next_to_use = index; + + /* Make sure there is space in the ring for the next send. 
*/ + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) + writel(index, tx_ring->tail); + + return IGB_XDP_TX; + +unmap: + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[index]; + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + if (tx_buffer == tx_head) + break; + + if (!index) + index += tx_ring->count; + index--; + } + + return IGB_XDP_CONSUMED; +} + +netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, + struct igb_ring *tx_ring) +{ + struct igb_tx_buffer *first; + int tso; + u32 tx_flags = 0; + unsigned short f; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + u8 hdr_len = 0; + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); + + /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); + + if (igb_maybe_stop_tx(tx_ring, count + 3)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->type = IGB_TYPE_SKB; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + if (unlikely(!adapter->ecdev && + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + + if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && + !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGB_TX_FLAGS_TSTAMP; + + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + if (adapter->hw.mac.type == e1000_82576) + schedule_work(&adapter->ptp_tx_work); + } else { + adapter->tx_hwtstamp_skipped++; + } + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= IGB_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + tso = igb_tso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + igb_tx_csum(tx_ring, first); + + if (igb_tx_map(tx_ring, first, hdr_len)) + goto cleanup_tx_tstamp; + + return NETDEV_TX_OK; + +out_drop: + if (!adapter->ecdev) { + dev_kfree_skb_any(first->skb); + first->skb = NULL; + } +cleanup_tx_tstamp: + if (unlikely(!adapter->ecdev && tx_flags & IGB_TX_FLAGS_TSTAMP)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + if (adapter->hw.mac.type == e1000_82576) + cancel_work_sync(&adapter->ptp_tx_work); + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + } + + return NETDEV_TX_OK; +} + +static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, + struct sk_buff *skb) +{ + unsigned int r_idx = skb->queue_mapping; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + /* The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size 
requirement. + */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); +} + +/** + * igb_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: number of the Tx queue that hung (unused) + **/ +static void igb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + + if (hw->mac.type >= e1000_82580) + hw->dev_spec._82575.global_device_reset = true; + + schedule_work(&adapter->reset_task); + wr32(E1000_EICS, + (adapter->eims_enable_mask & ~adapter->eims_other)); +} + +static void igb_reset_task(struct work_struct *work) +{ + struct igb_adapter *adapter; + adapter = container_of(work, struct igb_adapter, reset_task); + + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGB_DOWN, &adapter->state) || + test_bit(__IGB_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + + igb_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); + igb_reinit_locked(adapter); + rtnl_unlock(); +} + +/** + * igb_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + **/ +static void igb_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); +} + +/** + * igb_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int igb_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD; + + if (adapter->xdp_prog) { + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + + if (max_frame > igb_rx_bufsz(ring)) { + netdev_warn(adapter->netdev, + "Requested MTU size is not supported with XDP. 
Max frame size is %d\n", + max_frame); + return -EINVAL; + } + } + } + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + /* igb_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + + if (netif_running(netdev)) + igb_down(adapter); + + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igb_up(adapter); + else + igb_reset(adapter); + + clear_bit(__IGB_RESETTING, &adapter->state); + + return 0; +} + +/** + * igb_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void igb_update_stats(struct igb_adapter *adapter) +{ + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + u32 reg, mpc; + int i; + u64 bytes, packets; + unsigned int start; + u64 _bytes, _packets; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. + */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + bytes = 0; + packets = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(E1000_RQDPC(i)); + if (hw->mac.type >= e1000_i210) + wr32(E1000_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *ring = adapter->tx_ring[i]; + do { + start = u64_stats_fetch_begin(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(E1000_CRCERRS); + adapter->stats.gprc += rd32(E1000_GPRC); + adapter->stats.gorc += rd32(E1000_GORCL); + rd32(E1000_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(E1000_BPRC); + adapter->stats.mprc += rd32(E1000_MPRC); + adapter->stats.roc += rd32(E1000_ROC); + + adapter->stats.prc64 += rd32(E1000_PRC64); + adapter->stats.prc127 += rd32(E1000_PRC127); + adapter->stats.prc255 += rd32(E1000_PRC255); + adapter->stats.prc511 += rd32(E1000_PRC511); + adapter->stats.prc1023 += rd32(E1000_PRC1023); + adapter->stats.prc1522 += rd32(E1000_PRC1522); + adapter->stats.symerrs += rd32(E1000_SYMERRS); + adapter->stats.sec += rd32(E1000_SEC); + + mpc = rd32(E1000_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(E1000_SCC); + adapter->stats.ecol += rd32(E1000_ECOL); + adapter->stats.mcc += rd32(E1000_MCC); + adapter->stats.latecol += rd32(E1000_LATECOL); + adapter->stats.dc += rd32(E1000_DC); + adapter->stats.rlec += rd32(E1000_RLEC); + adapter->stats.xonrxc += rd32(E1000_XONRXC); + adapter->stats.xontxc += rd32(E1000_XONTXC); + 
adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); + adapter->stats.xofftxc += rd32(E1000_XOFFTXC); + adapter->stats.fcruc += rd32(E1000_FCRUC); + adapter->stats.gptc += rd32(E1000_GPTC); + adapter->stats.gotc += rd32(E1000_GOTCL); + rd32(E1000_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(E1000_RNBC); + adapter->stats.ruc += rd32(E1000_RUC); + adapter->stats.rfc += rd32(E1000_RFC); + adapter->stats.rjc += rd32(E1000_RJC); + adapter->stats.tor += rd32(E1000_TORH); + adapter->stats.tot += rd32(E1000_TOTH); + adapter->stats.tpr += rd32(E1000_TPR); + + adapter->stats.ptc64 += rd32(E1000_PTC64); + adapter->stats.ptc127 += rd32(E1000_PTC127); + adapter->stats.ptc255 += rd32(E1000_PTC255); + adapter->stats.ptc511 += rd32(E1000_PTC511); + adapter->stats.ptc1023 += rd32(E1000_PTC1023); + adapter->stats.ptc1522 += rd32(E1000_PTC1522); + + adapter->stats.mptc += rd32(E1000_MPTC); + adapter->stats.bptc += rd32(E1000_BPTC); + + adapter->stats.tpt += rd32(E1000_TPT); + adapter->stats.colc += rd32(E1000_COLC); + + adapter->stats.algnerrc += rd32(E1000_ALGNERRC); + /* read internal phy specific stats */ + reg = rd32(E1000_CTRL_EXT); + if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { + adapter->stats.rxerrc += rd32(E1000_RXERRC); + + /* this stat has invalid values on i210/i211 */ + if ((hw->mac.type != e1000_i210) && + (hw->mac.type != e1000_i211)) + adapter->stats.tncrs += rd32(E1000_TNCRS); + } + + adapter->stats.tsctc += rd32(E1000_TSCTC); + adapter->stats.tsctfc += rd32(E1000_TSCTFC); + + adapter->stats.iac += rd32(E1000_IAC); + adapter->stats.icrxoc += rd32(E1000_ICRXOC); + adapter->stats.icrxptc += rd32(E1000_ICRXPTC); + adapter->stats.icrxatc += rd32(E1000_ICRXATC); + adapter->stats.ictxptc += rd32(E1000_ICTXPTC); + adapter->stats.ictxatc += rd32(E1000_ICTXATC); + adapter->stats.ictxqec += rd32(E1000_ICTXQEC); + adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); + adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += rd32(E1000_MGTPTC); + adapter->stats.mgprc += rd32(E1000_MGTPRC); + adapter->stats.mgpdc += rd32(E1000_MGTPDC); + + /* OS2BMC Stats */ + reg = rd32(E1000_MANC); + if (reg & E1000_MANC_EN_BMC2OS) { + adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); + adapter->stats.o2bspc += rd32(E1000_O2BSPC); + adapter->stats.b2ospc += rd32(E1000_B2OSPC); + adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); + } +} + +static void igb_perout(struct igb_adapter *adapter, int tsintr_tt) +{ + int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_PEROUT, tsintr_tt); + struct e1000_hw *hw = 
&adapter->hw; + struct timespec64 ts; + u32 tsauxc; + + if (pin < 0 || pin >= IGB_N_SDP) + return; + + spin_lock(&adapter->tmreg_lock); + + if (hw->mac.type == e1000_82580 || + hw->mac.type == e1000_i354 || + hw->mac.type == e1000_i350) { + s64 ns = timespec64_to_ns(&adapter->perout[tsintr_tt].period); + u32 systiml, systimh, level_mask, level, rem; + u64 systim, now; + + /* read systim registers in sequence */ + rd32(E1000_SYSTIMR); + systiml = rd32(E1000_SYSTIML); + systimh = rd32(E1000_SYSTIMH); + systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml); + now = timecounter_cyc2time(&adapter->tc, systim); + + if (pin < 2) { + level_mask = (tsintr_tt == 1) ? 0x80000 : 0x40000; + level = (rd32(E1000_CTRL) & level_mask) ? 1 : 0; + } else { + level_mask = (tsintr_tt == 1) ? 0x80 : 0x40; + level = (rd32(E1000_CTRL_EXT) & level_mask) ? 1 : 0; + } + + div_u64_rem(now, ns, &rem); + systim = systim + (ns - rem); + + /* synchronize pin level with rising/falling edges */ + div_u64_rem(now, ns << 1, &rem); + if (rem < ns) { + /* first half of period */ + if (level == 0) { + /* output is already low, skip this period */ + systim += ns; + pr_notice("igb: periodic output on %s missed falling edge\n", + adapter->sdp_config[pin].name); + } + } else { + /* second half of period */ + if (level == 1) { + /* output is already high, skip this period */ + systim += ns; + pr_notice("igb: periodic output on %s missed rising edge\n", + adapter->sdp_config[pin].name); + } + } + + /* for this chip family tv_sec is the upper part of the binary value, + * so not seconds + */ + ts.tv_nsec = (u32)systim; + ts.tv_sec = ((u32)(systim >> 32)) & 0xFF; + } else { + ts = timespec64_add(adapter->perout[tsintr_tt].start, + adapter->perout[tsintr_tt].period); + } + + /* u32 conversion of tv_sec is safe until y2106 */ + wr32((tsintr_tt == 1) ? E1000_TRGTTIML1 : E1000_TRGTTIML0, ts.tv_nsec); + wr32((tsintr_tt == 1) ? E1000_TRGTTIMH1 : E1000_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(E1000_TSAUXC); + tsauxc |= TSAUXC_EN_TT0; + wr32(E1000_TSAUXC, tsauxc); + adapter->perout[tsintr_tt].start = ts; + + spin_unlock(&adapter->tmreg_lock); +} + +static void igb_extts(struct igb_adapter *adapter, int tsintr_tt) +{ + int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_EXTTS, tsintr_tt); + int auxstmpl = (tsintr_tt == 1) ? E1000_AUXSTMPL1 : E1000_AUXSTMPL0; + int auxstmph = (tsintr_tt == 1) ? 
E1000_AUXSTMPH1 : E1000_AUXSTMPH0; + struct e1000_hw *hw = &adapter->hw; + struct ptp_clock_event event; + struct timespec64 ts; + unsigned long flags; + + if (pin < 0 || pin >= IGB_N_SDP) + return; + + if (hw->mac.type == e1000_82580 || + hw->mac.type == e1000_i354 || + hw->mac.type == e1000_i350) { + u64 ns = rd32(auxstmpl); + + ns += ((u64)(rd32(auxstmph) & 0xFF)) << 32; + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->tc, ns); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + ts = ns_to_timespec64(ns); + } else { + ts.tv_nsec = rd32(auxstmpl); + ts.tv_sec = rd32(auxstmph); + } + + event.type = PTP_CLOCK_EXTTS; + event.index = tsintr_tt; + event.timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec; + ptp_clock_event(adapter->ptp_clock, &event); +} + +static void igb_tsync_interrupt(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ack = 0, tsicr = rd32(E1000_TSICR); + struct ptp_clock_event event; + + if (tsicr & TSINTR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); + ack |= TSINTR_SYS_WRAP; + } + + if (tsicr & E1000_TSICR_TXTS) { + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + ack |= E1000_TSICR_TXTS; + } + + if (tsicr & TSINTR_TT0) { + igb_perout(adapter, 0); + ack |= TSINTR_TT0; + } + + if (tsicr & TSINTR_TT1) { + igb_perout(adapter, 1); + ack |= TSINTR_TT1; + } + + if (tsicr & TSINTR_AUTT0) { + igb_extts(adapter, 0); + ack |= TSINTR_AUTT0; + } + + if (tsicr & TSINTR_AUTT1) { + igb_extts(adapter, 1); + ack |= TSINTR_AUTT1; + } + + /* acknowledge the interrupts */ + wr32(E1000_TSICR, ack); +} + +static irqreturn_t igb_msix_other(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct e1000_hw *hw = &adapter->hw; + u32 icr = rd32(E1000_ICR); + /* reading ICR causes bit 31 of EICR to be cleared */ + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + /* The DMA Out of Sync is also indication of a spoof event + * in IOV mode. Check the Wrong VM Behavior register to + * see if it is really a spoof event. + */ + igb_check_wvbr(adapter); + } + + /* Check for a mailbox event */ + if (icr & E1000_ICR_VMMB) + igb_msg_task(adapter); + + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & E1000_ICR_TS) + igb_tsync_interrupt(adapter); + + wr32(E1000_EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static void igb_write_itr(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + u32 itr_val = q_vector->itr_val & 0x7FFC; + + if (!q_vector->set_itr) + return; + + if (!itr_val) + itr_val = 0x4; + + if (adapter->hw.mac.type == e1000_82575) + itr_val |= itr_val << 16; + else + itr_val |= E1000_EITR_CNT_IGNR; + + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; +} + +static irqreturn_t igb_msix_ring(int irq, void *data) +{ + struct igb_q_vector *q_vector = data; + + /* Write the ITR value calculated from the previous interrupt. 
*/ + igb_write_itr(q_vector); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_IGB_DCA +static void igb_update_tx_dca(struct igb_adapter *adapter, + struct igb_ring *tx_ring, + int cpu) +{ + struct e1000_hw *hw = &adapter->hw; + u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); + + if (hw->mac.type != e1000_82575) + txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; + + /* We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ + txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN | + E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_DESC_DCA_EN; + + wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); +} + +static void igb_update_rx_dca(struct igb_adapter *adapter, + struct igb_ring *rx_ring, + int cpu) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); + + if (hw->mac.type != e1000_82575) + rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; + + /* We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ + rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN | + E1000_DCA_RXCTRL_DESC_DCA_EN; + + wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); +} + +static void igb_update_dca(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + int cpu = get_cpu(); + + if (q_vector->cpu == cpu) + goto out_no_update; + + if (q_vector->tx.ring) + igb_update_tx_dca(adapter, q_vector->tx.ring, cpu); + + if (q_vector->rx.ring) + igb_update_rx_dca(adapter, q_vector->rx.ring, cpu); + + q_vector->cpu = cpu; +out_no_update: + put_cpu(); +} + +static void igb_setup_dca(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + if (!(adapter->flags & IGB_FLAG_DCA_ENABLED)) + return; + + /* Always use CB2 mode, difference is masked in the CB driver. */ + wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); + + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->cpu = -1; + igb_update_dca(adapter->q_vector[i]); + } +} + +static int __igb_notify_dca(struct device *dev, void *data) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + unsigned long event = *(unsigned long *)data; + + switch (event) { + case DCA_PROVIDER_ADD: + /* if already enabled, don't do it again */ + if (adapter->flags & IGB_FLAG_DCA_ENABLED) + break; + if (dca_add_requester(dev) == 0) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; + dev_info(&pdev->dev, "DCA enabled\n"); + igb_setup_dca(adapter); + break; + } + fallthrough; /* since DCA is disabled. */ + case DCA_PROVIDER_REMOVE: + if (adapter->flags & IGB_FLAG_DCA_ENABLED) { + /* without this a class_device is left + * hanging around in the sysfs model + */ + dca_remove_requester(dev); + dev_info(&pdev->dev, "DCA disabled\n"); + adapter->flags &= ~IGB_FLAG_DCA_ENABLED; + wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); + } + break; + } + + return 0; +} + +static int igb_notify_dca(struct notifier_block *nb, unsigned long event, + void *p) +{ + int ret_val; + + ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, + __igb_notify_dca); + + return ret_val ? 
NOTIFY_BAD : NOTIFY_DONE;
+}
+#endif /* CONFIG_IGB_DCA */
+
+#ifdef CONFIG_PCI_IOV
+static int igb_vf_configure(struct igb_adapter *adapter, int vf)
+{
+ unsigned char mac_addr[ETH_ALEN];
+
+ eth_zero_addr(mac_addr);
+ igb_set_vf_mac(adapter, vf, mac_addr);
+
+ /* By default spoof check is enabled for all VFs */
+ adapter->vf_data[vf].spoofchk_enabled = true;
+
+ /* By default VFs are not trusted */
+ adapter->vf_data[vf].trusted = false;
+
+ return 0;
+}
+
+#endif
+static void igb_ping_all_vfs(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ping;
+ int i;
+
+ for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
+ ping = E1000_PF_CONTROL_MSG;
+ if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
+ ping |= E1000_VT_MSGTYPE_CTS;
+ igb_write_mbx(hw, &ping, 1, i);
+ }
+}
+
+static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vmolr = rd32(E1000_VMOLR(vf));
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+
+ vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
+ IGB_VF_FLAG_MULTI_PROMISC);
+ vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
+ if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
+ vmolr |= E1000_VMOLR_MPME;
+ vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
+ *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
+ } else {
+ /* if we have hashes and we are clearing a multicast promisc
+ * flag we need to write the hashes to the MTA as this step
+ * was previously skipped
+ */
+ if (vf_data->num_vf_mc_hashes > 30) {
+ vmolr |= E1000_VMOLR_MPME;
+ } else if (vf_data->num_vf_mc_hashes) {
+ int j;
+
+ vmolr |= E1000_VMOLR_ROMPE;
+ for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+ igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+ }
+ }
+
+ wr32(E1000_VMOLR(vf), vmolr);
+
+ /* there are flags left unprocessed, likely not supported */
+ if (*msgbuf & E1000_VT_MSGINFO_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int igb_set_vf_multicasts(struct igb_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+ u16 *hash_list = (u16 *)&msgbuf[1];
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+ int i;
+
+ /* salt away the number of multicast addresses assigned
+ * to this VF for later use to restore when the PF multicast
+ * list changes
+ */
+ vf_data->num_vf_mc_hashes = n;
+
+ /* only up to 30 hash values supported */
+ if (n > 30)
+ n = 30;
+
+ /* store the hashes for later use */
+ for (i = 0; i < n; i++)
+ vf_data->vf_mc_hashes[i] = hash_list[i];
+
+ /* Flush and reset the mta with the new values */
+ igb_set_rx_mode(adapter->netdev);
+
+ return 0;
+}
+
+static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct vf_data_storage *vf_data;
+ int i, j;
+
+ for (i = 0; i < adapter->vfs_allocated_count; i++) {
+ u32 vmolr = rd32(E1000_VMOLR(i));
+
+ vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
+ vf_data = &adapter->vf_data[i];
+
+ if ((vf_data->num_vf_mc_hashes > 30) ||
+ (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
+ vmolr |= E1000_VMOLR_MPME;
+ } else if (vf_data->num_vf_mc_hashes) {
+ vmolr |= E1000_VMOLR_ROMPE;
+ for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+ igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+ }
+ wr32(E1000_VMOLR(i), vmolr);
+ }
+}
+
+static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 pool_mask, vlvf_mask, i;
+
+ /* create mask for VF and other pools */
+ pool_mask = 
E1000_VLVF_POOLSEL_MASK; + vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf); + + /* drop PF from pool bits */ + pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT + + adapter->vfs_allocated_count); + + /* Find the vlan filter for this id */ + for (i = E1000_VLVF_ARRAY_SIZE; i--;) { + u32 vlvf = rd32(E1000_VLVF(i)); + u32 vfta_mask, vid, vfta; + + /* remove the vf from the pool */ + if (!(vlvf & vlvf_mask)) + continue; + + /* clear out bit from VLVF */ + vlvf ^= vlvf_mask; + + /* if other pools are present, just remove ourselves */ + if (vlvf & pool_mask) + goto update_vlvfb; + + /* if PF is present, leave VFTA */ + if (vlvf & E1000_VLVF_POOLSEL_MASK) + goto update_vlvf; + + vid = vlvf & E1000_VLVF_VLANID_MASK; + vfta_mask = BIT(vid % 32); + + /* clear bit from VFTA */ + vfta = adapter->shadow_vfta[vid / 32]; + if (vfta & vfta_mask) + hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask); +update_vlvf: + /* clear pool selection enable */ + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + vlvf &= E1000_VLVF_POOLSEL_MASK; + else + vlvf = 0; +update_vlvfb: + /* clear pool bits */ + wr32(E1000_VLVF(i), vlvf); + } +} + +static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan) +{ + u32 vlvf; + int idx; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the VLAN id in the VLVF entries */ + for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) { + vlvf = rd32(E1000_VLVF(idx)); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + return idx; +} + +static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) +{ + struct e1000_hw *hw = &adapter->hw; + u32 bits, pf_id; + int idx; + + idx = igb_find_vlvf_entry(hw, vid); + if (!idx) + return; + + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK; + bits &= rd32(E1000_VLVF(idx)); + + /* Disable the filter so this falls into the default pool. */ + if (!bits) { + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + wr32(E1000_VLVF(idx), BIT(pf_id)); + else + wr32(E1000_VLVF(idx), 0); + } +} + +static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid, + bool add, u32 vf) +{ + int pf_id = adapter->vfs_allocated_count; + struct e1000_hw *hw = &adapter->hw; + int err; + + /* If VLAN overlaps with one the PF is currently monitoring make + * sure that we are able to allocate a VLVF entry. This may be + * redundant but it guarantees PF will maintain visibility to + * the VLAN. + */ + if (add && test_bit(vid, adapter->active_vlans)) { + err = igb_vfta_set(hw, vid, pf_id, true, false); + if (err) + return err; + } + + err = igb_vfta_set(hw, vid, vf, add, false); + + if (add && !err) + return err; + + /* If we failed to add the VF VLAN or we are removing the VF VLAN + * we may need to drop the PF pool bit in order to allow us to free + * up the VLVF resources. 
+ */ + if (test_bit(vid, adapter->active_vlans) || + (adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_update_pf_vlvf(adapter, vid); + + return err; +} + +static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + + if (vid) + wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); + else + wr32(E1000_VMVIR(vf), 0); +} + +static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf, + u16 vlan, u8 qos) +{ + int err; + + err = igb_set_vf_vlan(adapter, vlan, true, vf); + if (err) + return err; + + igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vlan); + + /* revoke access to previous VLAN */ + if (vlan != adapter->vf_data[vf].pf_vlan) + igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); + + adapter->vf_data[vf].pf_vlan = vlan; + adapter->vf_data[vf].pf_qos = qos; + igb_set_vf_vlan_strip(adapter, vf, true); + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + + return err; +} + +static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf) +{ + /* Restore tagless access via VLAN 0 */ + igb_set_vf_vlan(adapter, 0, true, vf); + + igb_set_vmvir(adapter, 0, vf); + igb_set_vmolr(adapter, vf, true); + + /* Remove any PF assigned VLAN */ + if (adapter->vf_data[vf].pf_vlan) + igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); + + adapter->vf_data[vf].pf_vlan = 0; + adapter->vf_data[vf].pf_qos = 0; + igb_set_vf_vlan_strip(adapter, vf, false); + + return 0; +} + +static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf, + u16 vlan, u8 qos, __be16 vlan_proto) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) + return -EINVAL; + + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + return (vlan || qos) ? 
igb_enable_port_vlan(adapter, vf, vlan, qos) : + igb_disable_port_vlan(adapter, vf); +} + +static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +{ + int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); + int ret; + + if (adapter->vf_data[vf].pf_vlan) + return -1; + + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + ret = igb_set_vf_vlan(adapter, vid, !!add, vf); + if (!ret) + igb_set_vf_vlan_strip(adapter, vf, !!vid); + return ret; +} + +static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) +{ + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + + /* clear flags - except flag that indicates PF has set the MAC */ + vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC; + vf_data->last_nack = jiffies; + + /* reset vlans for device */ + igb_clear_vf_vfta(adapter, vf); + igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf); + igb_set_vmvir(adapter, vf_data->pf_vlan | + (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vf_data->pf_vlan); + igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan)); + + /* reset multicast table array for vf */ + adapter->vf_data[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + igb_set_rx_mode(adapter->netdev); +} + +static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) +{ + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; + + /* clear mac address as we were hotplug removed/added */ + if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) + eth_zero_addr(vf_mac); + + /* process remaining reset events */ + igb_vf_reset(adapter, vf); +} + +static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; + u32 reg, msgbuf[3] = {}; + u8 *addr = (u8 *)(&msgbuf[1]); + + /* process all the same items cleared in a function level reset */ + igb_vf_reset(adapter, vf); + + /* set vf mac address */ + igb_set_vf_mac(adapter, vf, vf_mac); + + /* enable transmit and receive for vf */ + reg = rd32(E1000_VFTE); + wr32(E1000_VFTE, reg | BIT(vf)); + reg = rd32(E1000_VFRE); + wr32(E1000_VFRE, reg | BIT(vf)); + + adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; + + /* reply to reset with ack and vf mac address */ + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK; + } + igb_write_mbx(hw, msgbuf, 3, vf); +} + +static void igb_flush_mac_table(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < hw->mac.rar_entry_count; i++) { + adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; + eth_zero_addr(adapter->mac_table[i].addr); + adapter->mac_table[i].queue = 0; + igb_rar_set_index(adapter, i); + } +} + +static int igb_available_rars(struct igb_adapter *adapter, u8 queue) +{ + struct e1000_hw *hw = &adapter->hw; + /* do not count rar entries reserved for VFs MAC addresses */ + int rar_entries = hw->mac.rar_entry_count - + adapter->vfs_allocated_count; + int i, count = 0; + + for (i = 0; i < rar_entries; i++) { + /* do not count default entries */ + if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) + continue; + + /* do not count "in use" entries for different queues */ + if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) && + (adapter->mac_table[i].queue 
!= queue)) + continue; + + count++; + } + + return count; +} + +/* Set default MAC address for the PF in the first RAR entry */ +static void igb_set_default_mac_filter(struct igb_adapter *adapter) +{ + struct igb_mac_addr *mac_table = &adapter->mac_table[0]; + + ether_addr_copy(mac_table->addr, adapter->hw.mac.addr); + mac_table->queue = adapter->vfs_allocated_count; + mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; + + igb_rar_set_index(adapter, 0); +} + +/* If the filter to be added and an already existing filter express + * the same address and address type, it should be possible to only + * override the other configurations, for example the queue to steer + * traffic. + */ +static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry, + const u8 *addr, const u8 flags) +{ + if (!(entry->state & IGB_MAC_STATE_IN_USE)) + return true; + + if ((entry->state & IGB_MAC_STATE_SRC_ADDR) != + (flags & IGB_MAC_STATE_SRC_ADDR)) + return false; + + if (!ether_addr_equal(addr, entry->addr)) + return false; + + return true; +} + +/* Add a MAC filter for 'addr' directing matching traffic to 'queue', + * 'flags' is used to indicate what kind of match is made, match is by + * default for the destination address, if matching by source address + * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used. + */ +static int igb_add_mac_filter_flags(struct igb_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) +{ + struct e1000_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count - + adapter->vfs_allocated_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for the first empty entry in the MAC table. + * Do not touch entries at the end of the table reserved for the VF MAC + * addresses. + */ + for (i = 0; i < rar_entries; i++) { + if (!igb_mac_entry_can_be_used(&adapter->mac_table[i], + addr, flags)) + continue; + + ether_addr_copy(adapter->mac_table[i].addr, addr); + adapter->mac_table[i].queue = queue; + adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags; + + igb_rar_set_index(adapter, i); + return i; + } + + return -ENOSPC; +} + +static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, + const u8 queue) +{ + return igb_add_mac_filter_flags(adapter, addr, queue, 0); +} + +/* Remove a MAC filter for 'addr' directing matching traffic to + * 'queue', 'flags' is used to indicate what kind of match need to be + * removed, match is by default for the destination address, if + * matching by source address is to be removed the flag + * IGB_MAC_STATE_SRC_ADDR can be used. + */ +static int igb_del_mac_filter_flags(struct igb_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) +{ + struct e1000_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count - + adapter->vfs_allocated_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for matching entry in the MAC table based on given address + * and queue. Do not touch entries at the end of the table reserved + * for the VF MAC addresses. 
+ */ + for (i = 0; i < rar_entries; i++) { + if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)) + continue; + if ((adapter->mac_table[i].state & flags) != flags) + continue; + if (adapter->mac_table[i].queue != queue) + continue; + if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) + continue; + + /* When a filter for the default address is "deleted", + * we return it to its initial configuration + */ + if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) { + adapter->mac_table[i].state = + IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; + adapter->mac_table[i].queue = + adapter->vfs_allocated_count; + } else { + adapter->mac_table[i].state = 0; + adapter->mac_table[i].queue = 0; + eth_zero_addr(adapter->mac_table[i].addr); + } + + igb_rar_set_index(adapter, i); + return 0; + } + + return -ENOENT; +} + +static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, + const u8 queue) +{ + return igb_del_mac_filter_flags(adapter, addr, queue, 0); +} + +int igb_add_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + struct e1000_hw *hw = &adapter->hw; + + /* In theory, this should be supported on 82575 as well, but + * that part wasn't easily accessible during development. + */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + return igb_add_mac_filter_flags(adapter, addr, queue, + IGB_MAC_STATE_QUEUE_STEERING | flags); +} + +int igb_del_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + return igb_del_mac_filter_flags(adapter, addr, queue, + IGB_MAC_STATE_QUEUE_STEERING | flags); +} + +static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int ret; + + ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count); + + return min_t(int, ret, 0); +} + +static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count); + + return 0; +} + +static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, + const u32 info, const u8 *addr) +{ + struct pci_dev *pdev = adapter->pdev; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + struct list_head *pos; + struct vf_mac_filter *entry = NULL; + int ret = 0; + + if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && + !vf_data->trusted) { + dev_warn(&pdev->dev, + "VF %d requested MAC filter but is administratively denied\n", + vf); + return -EINVAL; + } + if (!is_valid_ether_addr(addr)) { + dev_warn(&pdev->dev, + "VF %d attempted to set invalid MAC filter\n", + vf); + return -EINVAL; + } + + switch (info) { + case E1000_VF_MAC_FILTER_CLR: + /* remove all unicast MAC filters related to the current VF */ + list_for_each(pos, &adapter->vf_macs.l) { + entry = list_entry(pos, struct vf_mac_filter, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + igb_del_mac_filter(adapter, entry->vf_mac, vf); + } + } + break; + case E1000_VF_MAC_FILTER_ADD: + /* try to find empty slot in the list */ + list_for_each(pos, &adapter->vf_macs.l) { + entry = list_entry(pos, struct vf_mac_filter, l); + if (entry->free) + break; + } + + if (entry && entry->free) { + entry->free = false; + entry->vf = vf; + ether_addr_copy(entry->vf_mac, addr); + + ret = igb_add_mac_filter(adapter, addr, vf); + ret = min_t(int, ret, 0); + } else { + ret = -ENOSPC; + } + + if (ret == -ENOSPC) + dev_warn(&pdev->dev, + "VF %d 
has requested MAC filter but there is no space for it\n", + vf); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) +{ + struct pci_dev *pdev = adapter->pdev; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + u32 info = msg[0] & E1000_VT_MSGINFO_MASK; + + /* The VF MAC Address is stored in a packed array of bytes + * starting at the second 32 bit word of the msg array + */ + unsigned char *addr = (unsigned char *)&msg[1]; + int ret = 0; + + if (!info) { + if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && + !vf_data->trusted) { + dev_warn(&pdev->dev, + "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", + vf); + return -EINVAL; + } + + if (!is_valid_ether_addr(addr)) { + dev_warn(&pdev->dev, + "VF %d attempted to set invalid MAC\n", + vf); + return -EINVAL; + } + + ret = igb_set_vf_mac(adapter, vf, addr); + } else { + ret = igb_set_vf_mac_filter(adapter, vf, info, addr); + } + + return ret; +} + +static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + u32 msg = E1000_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!(vf_data->flags & IGB_VF_FLAG_CTS) && + time_after(jiffies, vf_data->last_nack + (2 * HZ))) { + igb_write_mbx(hw, &msg, 1, vf); + vf_data->last_nack = jiffies; + } +} + +static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) +{ + struct pci_dev *pdev = adapter->pdev; + u32 msgbuf[E1000_VFMAILBOX_SIZE]; + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + s32 retval; + + retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false); + + if (retval) { + /* if receive failed revoke VF CTS stats and restart init */ + dev_err(&pdev->dev, "Error receiving message from VF\n"); + vf_data->flags &= ~IGB_VF_FLAG_CTS; + if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) + goto unlock; + goto out; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) + goto unlock; + + /* until the vf completes a reset it should not be + * allowed to start any configuration. 
+ */ + if (msgbuf[0] == E1000_VF_RESET) { + /* unlocks mailbox */ + igb_vf_reset_msg(adapter, vf); + return; + } + + if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { + if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) + goto unlock; + retval = -1; + goto out; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case E1000_VF_SET_MAC_ADDR: + retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); + break; + case E1000_VF_SET_PROMISC: + retval = igb_set_vf_promisc(adapter, msgbuf, vf); + break; + case E1000_VF_SET_MULTICAST: + retval = igb_set_vf_multicasts(adapter, msgbuf, vf); + break; + case E1000_VF_SET_LPE: + retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); + break; + case E1000_VF_SET_VLAN: + retval = -1; + if (vf_data->pf_vlan) + dev_warn(&pdev->dev, + "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", + vf); + else + retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + default: + dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); + retval = -1; + break; + } + + msgbuf[0] |= E1000_VT_MSGTYPE_CTS; +out: + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= E1000_VT_MSGTYPE_NACK; + else + msgbuf[0] |= E1000_VT_MSGTYPE_ACK; + + /* unlocks mailbox */ + igb_write_mbx(hw, msgbuf, 1, vf); + return; + +unlock: + igb_unlock_mbx(hw, vf); +} + +static void igb_msg_task(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long flags; + u32 vf; + + spin_lock_irqsave(&adapter->vfs_lock, flags); + for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { + /* process any reset requests */ + if (!igb_check_for_rst(hw, vf)) + igb_vf_reset_event(adapter, vf); + + /* process any messages pending */ + if (!igb_check_for_msg(hw, vf)) + igb_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!igb_check_for_ack(hw, vf)) + igb_rcv_ack_from_vf(adapter, vf); + } + spin_unlock_irqrestore(&adapter->vfs_lock, flags); +} + +/** + * igb_set_uta - Set unicast filter table address + * @adapter: board private structure + * @set: boolean indicating if we are setting or clearing bits + * + * The unicast table address is a register array of 32-bit registers. + * The table is meant to be used in a way similar to how the MTA is used + * however due to certain limitations in the hardware it is necessary to + * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous + * enable bit to allow vlan tag stripping when promiscuous mode is enabled + **/ +static void igb_set_uta(struct igb_adapter *adapter, bool set) +{ + struct e1000_hw *hw = &adapter->hw; + u32 uta = set ? 
~0 : 0; + int i; + + /* we only need to do this if VMDq is enabled */ + if (!adapter->vfs_allocated_count) + return; + + for (i = hw->mac.uta_reg_count; i--;) + array_wr32(E1000_UTA, i, uta); +} + +/** + * igb_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t igb_intr_msi(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct igb_q_vector *q_vector = adapter->q_vector[0]; + struct e1000_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ + u32 icr = rd32(E1000_ICR); + + igb_write_itr(q_vector); + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & E1000_ICR_TS) + igb_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igb_intr - Legacy Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t igb_intr(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct igb_q_vector *q_vector = adapter->q_vector[0]; + struct e1000_hw *hw = &adapter->hw; + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No + * need for the IMC write + */ + u32 icr = rd32(E1000_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & E1000_ICR_INT_ASSERTED)) + return IRQ_NONE; + + igb_write_itr(q_vector); + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & E1000_ICR_TS) + igb_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void igb_ring_irq_enable(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { + if ((adapter->num_q_vectors == 1) && !adapter->vf_data) + igb_set_itr(q_vector); + else + igb_update_ring_itr(q_vector); + } + + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (adapter->flags & IGB_FLAG_HAS_MSIX) + wr32(E1000_EIMS, q_vector->eims_value); + else + igb_irq_enable(adapter); + } +} + +/** + * igb_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + **/ +static int igb_poll(struct napi_struct *napi, int budget) +{ + struct igb_q_vector *q_vector = container_of(napi, + struct igb_q_vector, + napi); + bool clean_complete = true; + int work_done = 0; + +#ifdef CONFIG_IGB_DCA + if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) + igb_update_dca(q_vector); +#endif + if (q_vector->tx.ring) + clean_complete = igb_clean_tx_irq(q_vector, budget); + + if (q_vector->rx.ring) { + int cleaned = igb_clean_rx_irq(q_vector, budget); + + work_done += 
cleaned; + if (cleaned >= budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + igb_ring_irq_enable(q_vector); + + return work_done; +} + +/** + * igb_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * @napi_budget: Used to determine if we are in netpoll + * + * returns true if ring is completely cleaned + **/ +static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct igb_ring *tx_ring = q_vector->tx.ring; + struct igb_tx_buffer *tx_buffer; + union e1000_adv_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__IGB_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IGB_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + if (!adapter->ecdev) { + if (tx_buffer->type == IGB_TYPE_SKB) + napi_consume_skb(tx_buffer->skb, napi_budget); + else + xdp_return_frame(tx_buffer->xdpf); + } + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + dma_unmap_len_set(tx_buffer, len, 0); + + /* clear last DMA location and unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + if (!adapter->ecdev) { + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + } + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.bytes += total_bytes; + tx_ring->tx_stats.packets += total_packets; + u64_stats_update_end(&tx_ring->tx_syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (!adapter->ecdev && + 
test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { + struct e1000_hw *hw = &adapter->hw; + + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + dev_err(tx_ring->dev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%p>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + rd32(E1000_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_buffer->time_stamp, + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + /* we are about to reset, no point in enabling stuff */ + return true; + } + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(!adapter->ecdev && total_packets && + netif_carrier_ok(tx_ring->netdev) && + igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !(test_bit(__IGB_DOWN, &adapter->state))) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.restart_queue++; + u64_stats_update_end(&tx_ring->tx_syncp); + } + } + + return !!budget; +} + +/** + * igb_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void igb_reuse_rx_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *old_buff) +{ + struct igb_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls. + */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, + int rx_buf_pgcnt) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote and pfmemalloc pages */ + if (!dev_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) + return false; +#else +#define IGB_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048) + + if (rx_buffer->page_offset > IGB_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. 
+ */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * igb_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: size of buffer to be added + * + * This function will add the data contained in rx_buffer->page to the skb. + **/ +static void igb_add_rx_frag(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IGB_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + ktime_t timestamp) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + unsigned int size = xdp->data_end - xdp->data; + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data); + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); + if (unlikely(!skb)) + return NULL; + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IGB_RX_HDR_LEN) + headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (xdp->data + headlen) - page_address(rx_buffer->page), + size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + ktime_t timestamp) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data_meta); + + /* build an skb around the page buffer */ + skb = napi_build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, xdp->data_end - xdp->data); + + if (metasize) + skb_metadata_set(skb, metasize); + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return 
skb; +} + +static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter, + struct igb_ring *rx_ring, + struct xdp_buff *xdp) +{ + int err, result = IGB_XDP_PASS; + struct bpf_prog *xdp_prog; + u32 act; + + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + + if (!xdp_prog) + goto xdp_out; + + prefetchw(xdp->data_hard_start); /* xdp_frame write */ + + act = bpf_prog_run_xdp(xdp_prog, xdp); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + result = igb_xdp_xmit_back(adapter, xdp); + if (result == IGB_XDP_CONSUMED) + goto out_failure; + break; + case XDP_REDIRECT: + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); + if (err) + goto out_failure; + result = IGB_XDP_REDIR; + break; + default: + bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_DROP: + result = IGB_XDP_CONSUMED; + break; + } +xdp_out: + return ERR_PTR(-result); +} + +static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ +#else + truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IGB_SKB_PAD + size) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : + SKB_DATA_ALIGN(size); +#endif + return truesize; +} + +static void igb_rx_buffer_flip(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + unsigned int size) +{ + unsigned int truesize = igb_rx_frame_truesize(rx_ring, size); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static inline void igb_rx_checksum(struct igb_ring *ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set */ + if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM)) + return; + + /* Rx checksum disabled via ethtool */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (igb_test_staterr(rx_desc, + E1000_RXDEXT_STATERR_TCPE | + E1000_RXDEXT_STATERR_IPE)) { + /* work around errata with sctp packets where the TCPE aka + * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) + * packets, (aka let the stack check the crc32c) + */ + if (!((skb->len == 60) && + test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.csum_err++; + u64_stats_update_end(&ring->rx_syncp); + } + /* let the stack verify checksum errors */ + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | + E1000_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + dev_dbg(ring->dev, "cksum success: bits %08X\n", + le32_to_cpu(rx_desc->wb.upper.status_error)); +} + +static inline void igb_rx_hash(struct igb_ring *ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (ring->netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, + le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + PKT_HASH_TYPE_L3); +} + +/** + * igb_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * This function updates next to clean. 
If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool igb_is_non_eop(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGB_RX_DESC(rx_ring, ntc)); + + if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))) + return false; + + return true; +} + +/** + * igb_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool igb_cleanup_headers(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + + if (unlikely((igb_test_staterr(rx_desc, + E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { + struct net_device *netdev = rx_ring->netdev; + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * igb_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +static void igb_process_skb_fields(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + + igb_rx_hash(rx_ring, rx_desc, skb); + + igb_rx_checksum(rx_ring, rx_desc, skb); + + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && + !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) + igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { + u16 vid; + + if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && + test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) + vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); + else + vid = le16_to_cpu(rx_desc->wb.upper.vlan); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static unsigned int igb_rx_offset(struct igb_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? 
IGB_SKB_PAD : 0; +} + +static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring, + const unsigned int size, int *rx_buf_pgcnt) +{ + struct igb_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + *rx_buf_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buffer->page); +#else + 0; +#endif + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void igb_put_rx_buffer(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt) +{ + if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) { + /* hand second half of page back to the ring */ + igb_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + IGB_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + +static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct igb_ring *rx_ring = q_vector->rx.ring; + struct sk_buff *skb = rx_ring->skb; + unsigned int total_bytes = 0, total_packets = 0; + u16 cleaned_count = igb_desc_unused(rx_ring); + unsigned int xdp_xmit = 0; + struct xdp_buff xdp; + u32 frame_sz = 0; + int rx_buf_pgcnt; + + /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ +#if (PAGE_SIZE < 8192) + frame_sz = igb_rx_frame_truesize(rx_ring, 0); +#endif + xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); + + while (likely(total_packets < budget)) { + union e1000_adv_rx_desc *rx_desc; + struct igb_rx_buffer *rx_buffer; + ktime_t timestamp = 0; + int pkt_offset = 0; + unsigned int size; + void *pktbuf; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGB_RX_BUFFER_WRITE) { + igb_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt); + pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; + + if (adapter->ecdev) { + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); + ecdev_receive(adapter->ecdev, pktbuf, size); + adapter->ec_watchdog_jiffies = jiffies; + igb_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* pull rx packet timestamp if available and valid */ + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { + int ts_hdr_len; + + ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector, + pktbuf, &timestamp); + + pkt_offset += ts_hdr_len; + size -= ts_hdr_len; + } + + /* retrieve a buffer from the ring */ + if (!skb) { + unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring); + unsigned int offset = pkt_offset + igb_rx_offset(rx_ring); + + xdp_prepare_buff(&xdp, hard_start, offset, size, true); + xdp_buff_clear_frags_flag(&xdp); + #if (PAGE_SIZE > 4096) + /* At larger PAGE_SIZE, frame_sz depend on len size */ + xdp.frame_sz = 
igb_rx_frame_truesize(rx_ring, size); + #endif + skb = igb_run_xdp(adapter, rx_ring, &xdp); + } + + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) { + xdp_xmit |= xdp_res; + igb_rx_buffer_flip(rx_ring, rx_buffer, size); + } else { + rx_buffer->pagecnt_bias++; + } + total_packets++; + total_bytes += size; + } else if (skb) + igb_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = igb_build_skb(rx_ring, rx_buffer, &xdp, + timestamp); + else + skb = igb_construct_skb(rx_ring, rx_buffer, + &xdp, timestamp); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt); + } + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (igb_is_non_eop(rx_ring, rx_desc)) + continue; + + if (adapter->ecdev) { + total_packets++; + continue; + } + + /* verify the packet layout is correct */ + if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + igb_process_skb_fields(rx_ring, rx_desc, skb); + + napi_gro_receive(&q_vector->napi, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + if (xdp_xmit & IGB_XDP_REDIR) + xdp_do_flush(); + + if (xdp_xmit & IGB_XDP_TX) { + struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter); + + igb_xdp_ring_update_tail(tx_ring); + } + + u64_stats_update_begin(&rx_ring->rx_syncp); + rx_ring->rx_stats.packets += total_packets; + rx_ring->rx_stats.bytes += total_bytes; + u64_stats_update_end(&rx_ring->rx_syncp); + q_vector->rx.total_packets += total_packets; + q_vector->rx.total_bytes += total_bytes; + + if (cleaned_count) + igb_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_packets; +} + +static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(igb_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + igb_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGB_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, igb_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = igb_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +/** + * igb_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: rx descriptor ring to allocate new receive buffers + * @cleaned_count: count of buffers to allocate + **/ +void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) +{ + union e1000_adv_rx_desc *rx_desc; + struct igb_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + u16 bufsz; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = 
IGB_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + bufsz = igb_rx_bufsz(rx_ring); + + do { + if (!igb_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IGB_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + writel(i, rx_ring->tail); + } +} + +/** + * igb_mii_ioctl - + * @netdev: pointer to netdev struct + * @ifr: interface structure + * @cmd: ioctl command to execute + **/ +static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + + if (adapter->hw.phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: + if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) + return -EIO; + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } + return 0; +} + +/** + * igb_ioctl - + * @netdev: pointer to netdev struct + * @ifr: interface structure + * @cmd: ioctl command to execute + **/ +static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return igb_mii_ioctl(netdev, ifr, cmd); + case SIOCGHWTSTAMP: + return igb_ptp_get_ts_config(netdev, ifr); + case SIOCSHWTSTAMP: + return igb_ptp_set_ts_config(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + +s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + if (pcie_capability_read_word(adapter->pdev, reg, value)) + return -E1000_ERR_CONFIG; + + return 0; +} + +s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + if (pcie_capability_write_word(adapter->pdev, reg, *value)) + return -E1000_ERR_CONFIG; + + return 0; +} + +static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl; + bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + + if (enable) { + /* enable VLAN tag insert/strip */ + ctrl = 
rd32(E1000_CTRL); + ctrl |= E1000_CTRL_VME; + wr32(E1000_CTRL, ctrl); + + /* Disable CFI check */ + rctl = rd32(E1000_RCTL); + rctl &= ~E1000_RCTL_CFIEN; + wr32(E1000_RCTL, rctl); + } else { + /* disable VLAN tag insert/strip */ + ctrl = rd32(E1000_CTRL); + ctrl &= ~E1000_CTRL_VME; + wr32(E1000_CTRL, ctrl); + } + + igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable); +} + +static int igb_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int pf_id = adapter->vfs_allocated_count; + + /* add the filter since PF can receive vlans w/o entry in vlvf */ + if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_vfta_set(hw, vid, pf_id, true, !!vid); + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int igb_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int pf_id = adapter->vfs_allocated_count; + struct e1000_hw *hw = &adapter->hw; + + /* remove VID from filter table */ + if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_vfta_set(hw, vid, pf_id, false, true); + + clear_bit(vid, adapter->active_vlans); + + return 0; +} + +static void igb_restore_vlan(struct igb_adapter *adapter) +{ + u16 vid = 1; + + igb_vlan_mode(adapter->netdev, adapter->netdev->features); + igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); + + for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) + igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NIC's only allow 1000 gbps Full duplex + * and 100Mbps Full duplex for 100baseFx sfp + */ + if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + case SPEED_10 + DUPLEX_FULL: + case SPEED_100 + DUPLEX_HALF: + goto err_inval; + default: + break; + } + } + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; + break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl, status; + u32 wufc = runtime ? 
E1000_WUFC_LNKC : adapter->wol; + bool wake; + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) + __igb_close(netdev, true); + + igb_ptp_suspend(adapter); + + igb_clear_interrupt_scheme(adapter); + rtnl_unlock(); + + status = rd32(E1000_STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + igb_setup_rctl(adapter); + igb_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { + rctl = rd32(E1000_RCTL); + rctl |= E1000_RCTL_MPE; + wr32(E1000_RCTL, rctl); + } + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_ADVD3WUC; + wr32(E1000_CTRL, ctrl); + + /* Allow time for pending master requests to run */ + igb_disable_pcie_master(hw); + + wr32(E1000_WUC, E1000_WUC_PME_EN); + wr32(E1000_WUFC, wufc); + } else { + wr32(E1000_WUC, 0); + wr32(E1000_WUFC, 0); + } + + wake = wufc || adapter->en_mng_pt; + if (!wake) + igb_power_down_link(adapter); + else + igb_power_up_link(adapter); + + if (enable_wake) + *enable_wake = wake; + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igb_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +static void igb_deliver_wake_packet(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sk_buff *skb; + u32 wupl; + + wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK; + + /* WUPM stores only the first 128 bytes of the wake packet. + * Read the packet only if we have the whole thing. + */ + if ((wupl == 0) || (wupl > E1000_WUPM_BYTES)) + return; + + skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES); + if (!skb) + return; + + skb_put(skb, wupl); + + /* Ensure reads are 32-bit aligned */ + wupl = roundup(wupl, 4); + + memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl); + + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); +} + +static int __maybe_unused igb_suspend(struct device *dev) +{ + return __igb_shutdown(to_pci_dev(dev), NULL, 0); +} + +static int __maybe_unused __igb_resume(struct device *dev, bool rpm) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err, val; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!pci_device_is_present(pdev)) + return -ENODEV; + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, + "igb: Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igb_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igb_get_hw_control(adapter); + + val = rd32(E1000_WUS); + if (val & WAKE_PKT_WUS) + igb_deliver_wake_packet(netdev); + + wr32(E1000_WUS, ~0); + + if (!rpm) + rtnl_lock(); + if (!err && netif_running(netdev)) + err = __igb_open(netdev, true); + + if (!err) + netif_device_attach(netdev); + if (!rpm) + rtnl_unlock(); + + return err; +} + +static int __maybe_unused igb_resume(struct device *dev) +{ + return __igb_resume(dev, false); +} + +static int __maybe_unused igb_runtime_idle(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + + if (!igb_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC * 5); + + return -EBUSY; +} + +static int __maybe_unused igb_runtime_suspend(struct device *dev) +{ + return __igb_shutdown(to_pci_dev(dev), NULL, 1); +} + +static int __maybe_unused igb_runtime_resume(struct device *dev) +{ + return __igb_resume(dev, true); +} + +static void igb_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __igb_shutdown(pdev, &wake, 0); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_PCI_IOV +static int igb_sriov_reinit(struct pci_dev *dev) +{ + struct net_device *netdev = pci_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + rtnl_lock(); + + if (netif_running(netdev)) + igb_close(netdev); + else + igb_reset(adapter); + + igb_clear_interrupt_scheme(adapter); + + igb_init_queue_configuration(adapter); + + if (igb_init_interrupt_scheme(adapter, true)) { + rtnl_unlock(); + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + igb_open(netdev); + + rtnl_unlock(); + + return 0; +} + +static int igb_pci_disable_sriov(struct pci_dev *dev) +{ + int err = igb_disable_sriov(dev); + + if (!err) + err = igb_sriov_reinit(dev); + + return err; +} + +static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs) +{ + int err = igb_enable_sriov(dev, num_vfs); + + if (err) + goto out; + + err = igb_sriov_reinit(dev); + if (!err) + return num_vfs; + +out: + return err; +} + +#endif +static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + if (num_vfs == 0) + return igb_pci_disable_sriov(dev); + else + return igb_pci_enable_sriov(dev, num_vfs); +#endif + return 0; +} + +/** + * igb_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + **/ +static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igb_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igb_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the __igb_resume routine. 
+ **/ +static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + pci_ers_result_t result; + + if (pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + /* In case of PCI error, adapter lose its HW address + * so we should re-assign it here. + */ + hw->hw_addr = adapter->io_addr; + + igb_reset(adapter); + wr32(E1000_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + return result; +} + +/** + * igb_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the __igb_resume routine. + */ +static void igb_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (igb_up(adapter)) { + dev_err(&pdev->dev, "igb_up failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igb_get_hw_control(adapter); +} + +/** + * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table + * @adapter: Pointer to adapter structure + * @index: Index of the RAR entry which need to be synced with MAC table + **/ +static void igb_rar_set_index(struct igb_adapter *adapter, u32 index) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rar_low, rar_high; + u8 *addr = adapter->mac_table[index].addr; + + /* HW expects these to be in network order when they are plugged + * into the registers which are little endian. In order to guarantee + * that ordering we need to do an leXX_to_cpup here in order to be + * ready for the byteswap that occurs with writel + */ + rar_low = le32_to_cpup((__le32 *)(addr)); + rar_high = le16_to_cpup((__le16 *)(addr + 4)); + + /* Indicate to hardware the Address is Valid. 
*/ + if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) { + if (is_valid_ether_addr(addr)) + rar_high |= E1000_RAH_AV; + + if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR) + rar_high |= E1000_RAH_ASEL_SRC_ADDR; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_i210: + if (adapter->mac_table[index].state & + IGB_MAC_STATE_QUEUE_STEERING) + rar_high |= E1000_RAH_QSEL_ENABLE; + + rar_high |= E1000_RAH_POOL_1 * + adapter->mac_table[index].queue; + break; + default: + rar_high |= E1000_RAH_POOL_1 << + adapter->mac_table[index].queue; + break; + } + } + + wr32(E1000_RAL(index), rar_low); + wrfl(); + wr32(E1000_RAH(index), rar_high); + wrfl(); +} + +static int igb_set_vf_mac(struct igb_adapter *adapter, + int vf, unsigned char *mac_addr) +{ + struct e1000_hw *hw = &adapter->hw; + /* VF MAC addresses start at end of receive addresses and moves + * towards the first, as a result a collision should not be possible + */ + int rar_entry = hw->mac.rar_entry_count - (vf + 1); + unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses; + + ether_addr_copy(vf_mac_addr, mac_addr); + ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr); + adapter->mac_table[rar_entry].queue = vf; + adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE; + igb_rar_set_index(adapter, rar_entry); + + return 0; +} + +static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + + /* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC + * flag and allows to overwrite the MAC via VF netdev. This + * is necessary to allow libvirt a way to restore the original + * MAC after unbinding vfio-pci and reloading igbvf after shutting + * down a VM. + */ + if (is_zero_ether_addr(mac)) { + adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, + "remove administratively set MAC on VF %d\n", + vf); + } else if (is_valid_ether_addr(mac)) { + adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", + mac, vf); + dev_info(&adapter->pdev->dev, + "Reload the VF driver to make this change effective."); + /* Generate additional warning if PF is down */ + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF MAC address has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + } else { + return -EINVAL; + } + return igb_set_vf_mac(adapter, vf, mac); +} + +static int igb_link_mbps(int internal_link_speed) +{ + switch (internal_link_speed) { + case SPEED_100: + return 100; + case SPEED_1000: + return 1000; + default: + return 0; + } +} + +static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, + int link_speed) +{ + int rf_dec, rf_int; + u32 bcnrc_val; + + if (tx_rate != 0) { + /* Calculate the rate factor values to set */ + rf_int = link_speed / tx_rate; + rf_dec = (link_speed - (rf_int * tx_rate)); + rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) / + tx_rate; + + bcnrc_val = E1000_RTTBCNRC_RS_ENA; + bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) & + E1000_RTTBCNRC_RF_INT_MASK); + bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); + } else { + bcnrc_val = 0; + } + + wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ + /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. 
MMW_SIZE=0x014 if 9728-byte jumbo is supported. + */ + wr32(E1000_RTTBCNRM, 0x14); + wr32(E1000_RTTBCNRC, bcnrc_val); +} + +static void igb_check_vf_rate_limit(struct igb_adapter *adapter) +{ + int actual_link_speed, i; + bool reset_rate = false; + + /* VF TX rate limit was not set or not supported */ + if ((adapter->vf_rate_link_speed == 0) || + (adapter->hw.mac.type != e1000_82576)) + return; + + actual_link_speed = igb_link_mbps(adapter->link_speed); + if (actual_link_speed != adapter->vf_rate_link_speed) { + reset_rate = true; + adapter->vf_rate_link_speed = 0; + dev_info(&adapter->pdev->dev, + "Link speed has been changed. VF Transmit rate is disabled\n"); + } + + for (i = 0; i < adapter->vfs_allocated_count; i++) { + if (reset_rate) + adapter->vf_data[i].tx_rate = 0; + + igb_set_vf_rate_limit(&adapter->hw, i, + adapter->vf_data[i].tx_rate, + actual_link_speed); + } +} + +static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, + int min_tx_rate, int max_tx_rate) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int actual_link_speed; + + if (hw->mac.type != e1000_82576) + return -EOPNOTSUPP; + + if (min_tx_rate) + return -EINVAL; + + actual_link_speed = igb_link_mbps(adapter->link_speed); + if ((vf >= adapter->vfs_allocated_count) || + (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || + (max_tx_rate < 0) || + (max_tx_rate > actual_link_speed)) + return -EINVAL; + + adapter->vf_rate_link_speed = actual_link_speed; + adapter->vf_data[vf].tx_rate = (u16)max_tx_rate; + igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed); + + return 0; +} + +static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, + bool setting) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 reg_val, reg_offset; + + if (!adapter->vfs_allocated_count) + return -EOPNOTSUPP; + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + + reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC; + reg_val = rd32(reg_offset); + if (setting) + reg_val |= (BIT(vf) | + BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); + else + reg_val &= ~(BIT(vf) | + BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); + wr32(reg_offset, reg_val); + + adapter->vf_data[vf].spoofchk_enabled = setting; + return 0; +} + +static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + if (adapter->vf_data[vf].trusted == setting) + return 0; + + adapter->vf_data[vf].trusted = setting; + + dev_info(&adapter->pdev->dev, "VF %u is %strusted\n", + vf, setting ? 
"" : "not "); + return 0; +} + +static int igb_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); + ivi->max_tx_rate = adapter->vf_data[vf].tx_rate; + ivi->min_tx_rate = 0; + ivi->vlan = adapter->vf_data[vf].pf_vlan; + ivi->qos = adapter->vf_data[vf].pf_qos; + ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; + ivi->trusted = adapter->vf_data[vf].trusted; + return 0; +} + +static void igb_vmm_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_i210: + case e1000_i211: + case e1000_i354: + default: + /* replication is not supported for 82575 */ + return; + case e1000_82576: + /* notify HW that the MAC is adding vlan tags */ + reg = rd32(E1000_DTXCTL); + reg |= E1000_DTXCTL_VLAN_ADDED; + wr32(E1000_DTXCTL, reg); + fallthrough; + case e1000_82580: + /* enable replication vlan tag stripping */ + reg = rd32(E1000_RPLOLR); + reg |= E1000_RPLOLR_STRVLAN; + wr32(E1000_RPLOLR, reg); + fallthrough; + case e1000_i350: + /* none of the above registers are supported by i350 */ + break; + } + + if (adapter->vfs_allocated_count) { + igb_vmdq_set_loopback_pf(hw, true); + igb_vmdq_set_replication_pf(hw, true); + igb_vmdq_set_anti_spoofing_pf(hw, true, + adapter->vfs_allocated_count); + } else { + igb_vmdq_set_loopback_pf(hw, false); + igb_vmdq_set_replication_pf(hw, false); + } +} + +static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) +{ + struct e1000_hw *hw = &adapter->hw; + u32 dmac_thr; + u16 hwm; + u32 reg; + + if (hw->mac.type > e1000_82580) { + if (adapter->flags & IGB_FLAG_DMAC) { + /* force threshold to 0. */ + wr32(E1000_DMCTXTH, 0); + + /* DMA Coalescing high water mark needs to be greater + * than the Rx threshold. Set hwm to PBA - max frame + * size in 16B units, capping it at PBA - 6KB. + */ + hwm = 64 * (pba - 6); + reg = rd32(E1000_FCRTC); + reg &= ~E1000_FCRTC_RTH_COAL_MASK; + reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) + & E1000_FCRTC_RTH_COAL_MASK); + wr32(E1000_FCRTC, reg); + + /* Set the DMA Coalescing Rx threshold to PBA - 2 * max + * frame size, capping it at PBA - 10KB. 
+ */ + dmac_thr = pba - 10; + reg = rd32(E1000_DMACR); + reg &= ~E1000_DMACR_DMACTHR_MASK; + reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) + & E1000_DMACR_DMACTHR_MASK); + + /* transition to L0x or L1 if available..*/ + reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); + + /* watchdog timer= +-1000 usec in 32usec intervals */ + reg |= (1000 >> 5); + + /* Disable BMC-to-OS Watchdog Enable */ + if (hw->mac.type != e1000_i354) + reg &= ~E1000_DMACR_DC_BMC2OSW_EN; + wr32(E1000_DMACR, reg); + + /* no lower threshold to disable + * coalescing(smart fifb)-UTRESH=0 + */ + wr32(E1000_DMCRTRH, 0); + + reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4); + + wr32(E1000_DMCTLX, reg); + + /* free space in tx packet buffer to wake from + * DMA coal + */ + wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - + (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); + } + + if (hw->mac.type >= e1000_i210 || + (adapter->flags & IGB_FLAG_DMAC)) { + reg = rd32(E1000_PCIEMISC); + reg |= E1000_PCIEMISC_LX_DECISION; + wr32(E1000_PCIEMISC, reg); + } /* endif adapter->dmac is not disabled */ + } else if (hw->mac.type == e1000_82580) { + u32 reg = rd32(E1000_PCIEMISC); + + wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION); + wr32(E1000_DMACR, 0); + } +} + +/** + * igb_read_i2c_byte - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: device address + * @data: value read + * + * Performs byte read operation over I2C interface at + * a specified device address. + **/ +s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); + struct i2c_client *this_client = adapter->i2c_client; + s32 status; + u16 swfw_mask = 0; + + if (!this_client) + return E1000_ERR_I2C; + + swfw_mask = E1000_SWFW_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return E1000_ERR_SWFW_SYNC; + + status = i2c_smbus_read_byte_data(this_client, byte_offset); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + if (status < 0) + return E1000_ERR_I2C; + else { + *data = status; + return 0; + } +} + +/** + * igb_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: value to write + * + * Performs byte write operation over I2C interface at + * a specified device address. 
+ **/ +s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); + struct i2c_client *this_client = adapter->i2c_client; + s32 status; + u16 swfw_mask = E1000_SWFW_PHY0_SM; + + if (!this_client) + return E1000_ERR_I2C; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return E1000_ERR_SWFW_SYNC; + status = i2c_smbus_write_byte_data(this_client, byte_offset, data); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + if (status) + return E1000_ERR_I2C; + else + return 0; + +} + +int igb_reinit_queues(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (netif_running(netdev)) + igb_close(netdev); + + igb_reset_interrupt_capability(adapter); + + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + err = igb_open(netdev); + + return err; +} + +static void igb_nfc_filter_exit(struct igb_adapter *adapter) +{ + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igb_erase_filter(adapter, rule); + + hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node) + igb_erase_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); +} + +static void igb_nfc_filter_restore(struct igb_adapter *adapter) +{ + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igb_add_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); +} +/* igb_main.c */ diff --git a/devices/igb/igb_main-6.1-orig.c b/devices/igb/igb_main-6.1-orig.c new file mode 100644 index 00000000..18ffbc89 --- /dev/null +++ b/devices/igb/igb_main-6.1-orig.c @@ -0,0 +1,10162 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_IGB_DCA +#include +#endif +#include +#include "igb.h" + +enum queue_mode { + QUEUE_MODE_STRICT_PRIORITY, + QUEUE_MODE_STREAM_RESERVATION, +}; + +enum tx_queue_prio { + TX_QUEUE_PRIO_HIGH, + TX_QUEUE_PRIO_LOW, +}; + +char igb_driver_name[] = "igb"; +static const char igb_driver_string[] = + "Intel(R) Gigabit Ethernet Network Driver"; +static const char igb_copyright[] = + "Copyright (c) 2007-2014 Intel Corporation."; + +static const struct e1000_info *igb_info_tbl[] = { + [board_82575] = &e1000_82575_info, +}; + +static const struct pci_device_id igb_pci_tbl[] = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, igb_pci_tbl); + +static int igb_setup_all_tx_resources(struct igb_adapter *); +static int igb_setup_all_rx_resources(struct igb_adapter *); +static void igb_free_all_tx_resources(struct igb_adapter *); 
+static void igb_free_all_rx_resources(struct igb_adapter *); +static void igb_setup_mrqc(struct igb_adapter *); +static int igb_probe(struct pci_dev *, const struct pci_device_id *); +static void igb_remove(struct pci_dev *pdev); +static int igb_sw_init(struct igb_adapter *); +int igb_open(struct net_device *); +int igb_close(struct net_device *); +static void igb_configure(struct igb_adapter *); +static void igb_configure_tx(struct igb_adapter *); +static void igb_configure_rx(struct igb_adapter *); +static void igb_clean_all_tx_rings(struct igb_adapter *); +static void igb_clean_all_rx_rings(struct igb_adapter *); +static void igb_clean_tx_ring(struct igb_ring *); +static void igb_clean_rx_ring(struct igb_ring *); +static void igb_set_rx_mode(struct net_device *); +static void igb_update_phy_info(struct timer_list *); +static void igb_watchdog(struct timer_list *); +static void igb_watchdog_task(struct work_struct *); +static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); +static void igb_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); +static int igb_change_mtu(struct net_device *, int); +static int igb_set_mac(struct net_device *, void *); +static void igb_set_uta(struct igb_adapter *adapter, bool set); +static irqreturn_t igb_intr(int irq, void *); +static irqreturn_t igb_intr_msi(int irq, void *); +static irqreturn_t igb_msix_other(int irq, void *); +static irqreturn_t igb_msix_ring(int irq, void *); +#ifdef CONFIG_IGB_DCA +static void igb_update_dca(struct igb_q_vector *); +static void igb_setup_dca(struct igb_adapter *); +#endif /* CONFIG_IGB_DCA */ +static int igb_poll(struct napi_struct *, int); +static bool igb_clean_tx_irq(struct igb_q_vector *, int); +static int igb_clean_rx_irq(struct igb_q_vector *, int); +static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); +static void igb_tx_timeout(struct net_device *, unsigned int txqueue); +static void igb_reset_task(struct work_struct *); +static void igb_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16); +static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16); +static void igb_restore_vlan(struct igb_adapter *); +static void igb_rar_set_index(struct igb_adapter *, u32); +static void igb_ping_all_vfs(struct igb_adapter *); +static void igb_msg_task(struct igb_adapter *); +static void igb_vmm_control(struct igb_adapter *); +static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); +static void igb_flush_mac_table(struct igb_adapter *); +static int igb_available_rars(struct igb_adapter *, u8); +static void igb_set_default_mac_filter(struct igb_adapter *); +static int igb_uc_sync(struct net_device *, const unsigned char *); +static int igb_uc_unsync(struct net_device *, const unsigned char *); +static void igb_restore_vf_multicasts(struct igb_adapter *adapter); +static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); +static int igb_ndo_set_vf_vlan(struct net_device *netdev, + int vf, u16 vlan, u8 qos, __be16 vlan_proto); +static int igb_ndo_set_vf_bw(struct net_device *, int, int, int); +static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, + bool setting); +static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, + bool setting); +static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi); +static void igb_check_vf_rate_limit(struct igb_adapter *); +static void igb_nfc_filter_exit(struct 
igb_adapter *adapter); +static void igb_nfc_filter_restore(struct igb_adapter *adapter); + +#ifdef CONFIG_PCI_IOV +static int igb_vf_configure(struct igb_adapter *adapter, int vf); +static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); +static int igb_disable_sriov(struct pci_dev *dev); +static int igb_pci_disable_sriov(struct pci_dev *dev); +#endif + +static int igb_suspend(struct device *); +static int igb_resume(struct device *); +static int igb_runtime_suspend(struct device *dev); +static int igb_runtime_resume(struct device *dev); +static int igb_runtime_idle(struct device *dev); +static const struct dev_pm_ops igb_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) + SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, + igb_runtime_idle) +}; +static void igb_shutdown(struct pci_dev *); +static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +#ifdef CONFIG_IGB_DCA +static int igb_notify_dca(struct notifier_block *, unsigned long, void *); +static struct notifier_block dca_notifier = { + .notifier_call = igb_notify_dca, + .next = NULL, + .priority = 0 +}; +#endif +#ifdef CONFIG_PCI_IOV +static unsigned int max_vfs; +module_param(max_vfs, uint, 0); +MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function"); +#endif /* CONFIG_PCI_IOV */ + +static pci_ers_result_t igb_io_error_detected(struct pci_dev *, + pci_channel_state_t); +static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); +static void igb_io_resume(struct pci_dev *); + +static const struct pci_error_handlers igb_err_handler = { + .error_detected = igb_io_error_detected, + .slot_reset = igb_io_slot_reset, + .resume = igb_io_resume, +}; + +static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); + +static struct pci_driver igb_driver = { + .name = igb_driver_name, + .id_table = igb_pci_tbl, + .probe = igb_probe, + .remove = igb_remove, +#ifdef CONFIG_PM + .driver.pm = &igb_pm_ops, +#endif + .shutdown = igb_shutdown, + .sriov_configure = igb_pci_sriov_configure, + .err_handler = &igb_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); +MODULE_LICENSE("GPL v2"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +struct igb_reg_info { + u32 ofs; + char *name; +}; + +static const struct igb_reg_info igb_reg_info_tbl[] = { + + /* General Registers */ + {E1000_CTRL, "CTRL"}, + {E1000_STATUS, "STATUS"}, + {E1000_CTRL_EXT, "CTRL_EXT"}, + + /* Interrupt Registers */ + {E1000_ICR, "ICR"}, + + /* RX Registers */ + {E1000_RCTL, "RCTL"}, + {E1000_RDLEN(0), "RDLEN"}, + {E1000_RDH(0), "RDH"}, + {E1000_RDT(0), "RDT"}, + {E1000_RXDCTL(0), "RXDCTL"}, + {E1000_RDBAL(0), "RDBAL"}, + {E1000_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {E1000_TCTL, "TCTL"}, + {E1000_TDBAL(0), "TDBAL"}, + {E1000_TDBAH(0), "TDBAH"}, + {E1000_TDLEN(0), "TDLEN"}, + {E1000_TDH(0), "TDH"}, + {E1000_TDT(0), "TDT"}, + {E1000_TXDCTL(0), "TXDCTL"}, + {E1000_TDFH, "TDFH"}, + {E1000_TDFT, "TDFT"}, + {E1000_TDFHS, "TDFHS"}, + {E1000_TDFPC, "TDFPC"}, + + /* List Terminator */ + {} +}; + +/* igb_regdump - register printout routine */ +static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) +{ + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case E1000_RDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDLEN(n)); + break; 
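+		/* The remaining cases mirror the E1000_RDLEN(0) case above:
+		 * each one reads the per-queue instance of its register for
+		 * queues 0-3 so the values can be printed as a single row
+		 * below. (Descriptive comment added for readability; it is
+		 * not in the upstream source.)
+		 */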
+ case E1000_RDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDH(n)); + break; + case E1000_RDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDT(n)); + break; + case E1000_RXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RXDCTL(n)); + break; + case E1000_RDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDBAL(n)); + break; + case E1000_RDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDBAH(n)); + break; + case E1000_TDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDBAL(n)); + break; + case E1000_TDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDBAH(n)); + break; + case E1000_TDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDLEN(n)); + break; + case E1000_TDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDH(n)); + break; + case E1000_TDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDT(n)); + break; + case E1000_TXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TXDCTL(n)); + break; + default: + pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); + pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], + regs[2], regs[3]); +} + +/* igb_dump - Print registers, Tx-rings and Rx-rings */ +static void igb_dump(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct igb_reg_info *reginfo; + struct igb_ring *tx_ring; + union e1000_adv_tx_desc *tx_desc; + struct my_u0 { __le64 a; __le64 b; } *u0; + struct igb_ring *rx_ring; + union e1000_adv_rx_desc *rx_desc; + u32 staterr; + u16 i, n; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + pr_info("Device Name state trans_start\n"); + pr_info("%-15s %016lX %016lX\n", netdev->name, + netdev->state, dev_trans_start(netdev)); + } + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl; + reginfo->name; reginfo++) { + igb_regdump(hw, reginfo); + } + + /* Print TX Ring Summary */ + if (!netdev || !netif_running(netdev)) + goto exit; + + dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + for (n = 0; n < adapter->num_tx_queues; n++) { + struct igb_tx_buffer *buffer_info; + tx_ring = adapter->tx_ring[n]; + buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 15 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = 
%d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + struct igb_tx_buffer *buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, i); + buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", + i, le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + dma_unmap_len(buffer_info, len), + true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info(" %5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + struct igb_rx_buffer *buffer_info; + buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IGB_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + next_desc); + } else { + pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, + le64_to_cpu(u0->a), 
+ le64_to_cpu(u0->b), + (u64)buffer_info->dma, + next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->page) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + page_address(buffer_info->page) + + buffer_info->page_offset, + igb_rx_bufsz(rx_ring), true); + } + } + } + } + +exit: + return; +} + +/** + * igb_get_i2c_data - Reads the I2C SDA data bit + * @data: opaque pointer to adapter struct + * + * Returns the I2C data bit value + **/ +static int igb_get_i2c_data(void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + return !!(i2cctl & E1000_I2C_DATA_IN); +} + +/** + * igb_set_i2c_data - Sets the I2C data bit + * @data: pointer to hardware structure + * @state: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + **/ +static void igb_set_i2c_data(void *data, int state) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + if (state) { + i2cctl |= E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N; + } else { + i2cctl &= ~E1000_I2C_DATA_OE_N; + i2cctl &= ~E1000_I2C_DATA_OUT; + } + + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); +} + +/** + * igb_set_i2c_clk - Sets the I2C SCL clock + * @data: pointer to hardware structure + * @state: state to set clock + * + * Sets the I2C clock line to state + **/ +static void igb_set_i2c_clk(void *data, int state) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + if (state) { + i2cctl |= E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N; + } else { + i2cctl &= ~E1000_I2C_CLK_OUT; + i2cctl &= ~E1000_I2C_CLK_OE_N; + } + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); +} + +/** + * igb_get_i2c_clk - Gets the I2C SCL clock state + * @data: pointer to hardware structure + * + * Gets the I2C clock state + **/ +static int igb_get_i2c_clk(void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + return !!(i2cctl & E1000_I2C_CLK_IN); +} + +static const struct i2c_algo_bit_data igb_i2c_algo = { + .setsda = igb_set_i2c_data, + .setscl = igb_set_i2c_clk, + .getsda = igb_get_i2c_data, + .getscl = igb_get_i2c_clk, + .udelay = 5, + .timeout = 20, +}; + +/** + * igb_get_hw_dev - return device + * @hw: pointer to hardware structure + * + * used by hardware layer to print debugging information + **/ +struct net_device *igb_get_hw_dev(struct e1000_hw *hw) +{ + struct igb_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * igb_init_module - Driver Registration Routine + * + * igb_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init igb_init_module(void) +{ + int ret; + + pr_info("%s\n", igb_driver_string); + pr_info("%s\n", igb_copyright); + +#ifdef CONFIG_IGB_DCA + dca_register_notify(&dca_notifier); +#endif + ret = pci_register_driver(&igb_driver); + return ret; +} + +module_init(igb_init_module); + +/** + * igb_exit_module - Driver Exit Cleanup Routine + * + * igb_exit_module is called just before the driver is removed + * from memory. 
+ **/ +static void __exit igb_exit_module(void) +{ +#ifdef CONFIG_IGB_DCA + dca_unregister_notify(&dca_notifier); +#endif + pci_unregister_driver(&igb_driver); +} + +module_exit(igb_exit_module); + +#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) +/** + * igb_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + **/ +static void igb_cache_ring_register(struct igb_adapter *adapter) +{ + int i = 0, j = 0; + u32 rbase_offset = adapter->vfs_allocated_count; + + switch (adapter->hw.mac.type) { + case e1000_82576: + /* The queues are allocated for virtualization such that VF 0 + * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc. + * In order to avoid collision we start at the first free queue + * and continue consuming queues in the same sequence + */ + if (adapter->vfs_allocated_count) { + for (; i < adapter->rss_queues; i++) + adapter->rx_ring[i]->reg_idx = rbase_offset + + Q_IDX_82576(i); + } + fallthrough; + case e1000_82575: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + default: + for (; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = rbase_offset + i; + for (; j < adapter->num_tx_queues; j++) + adapter->tx_ring[j]->reg_idx = rbase_offset + j; + break; + } +} + +u32 igb_rd32(struct e1000_hw *hw, u32 reg) +{ + struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); + u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); + u32 value = 0; + + if (E1000_REMOVED(hw_addr)) + return ~value; + + value = readl(&hw_addr[reg]); + + /* reads should not return all F's */ + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct net_device *netdev = igb->netdev; + hw->hw_addr = NULL; + netdev_err(netdev, "PCIe link lost\n"); + WARN(pci_device_is_present(igb->pdev), + "igb: Failed to read reg 0x%x!\n", reg); + } + + return value; +} + +/** + * igb_write_ivar - configure ivar for given MSI-X vector + * @hw: pointer to the HW structure + * @msix_vector: vector number we are allocating to a given ring + * @index: row index of IVAR register to write within IVAR table + * @offset: column offset of in IVAR, should be multiple of 8 + * + * This function is intended to handle the writing of the IVAR register + * for adapters 82576 and newer. The IVAR table consists of 2 columns, + * each containing an cause allocation for an Rx and Tx ring, and a + * variable number of rows depending on the number of queues supported. + **/ +static void igb_write_ivar(struct e1000_hw *hw, int msix_vector, + int index, int offset) +{ + u32 ivar = array_rd32(E1000_IVAR0, index); + + /* clear any bits that are currently set */ + ivar &= ~((u32)0xFF << offset); + + /* write vector and valid bit */ + ivar |= (msix_vector | E1000_IVAR_VALID) << offset; + + array_wr32(E1000_IVAR0, index, ivar); +} + +#define IGB_N0_QUEUE -1 +static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + int rx_queue = IGB_N0_QUEUE; + int tx_queue = IGB_N0_QUEUE; + u32 msixbm = 0; + + if (q_vector->rx.ring) + rx_queue = q_vector->rx.ring->reg_idx; + if (q_vector->tx.ring) + tx_queue = q_vector->tx.ring->reg_idx; + + switch (hw->mac.type) { + case e1000_82575: + /* The 82575 assigns vectors using a bitmask, which matches the + * bitmask for the EICR/EIMS/EIMC registers. 
To assign one + * or more queues to a vector, we write the appropriate bits + * into the MSIXBM register for that vector. + */ + if (rx_queue > IGB_N0_QUEUE) + msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; + if (tx_queue > IGB_N0_QUEUE) + msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; + if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0) + msixbm |= E1000_EIMS_OTHER; + array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); + q_vector->eims_value = msixbm; + break; + case e1000_82576: + /* 82576 uses a table that essentially consists of 2 columns + * with 8 rows. The ordering is column-major so we use the + * lower 3 bits as the row index, and the 4th bit as the + * column offset. + */ + if (rx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + rx_queue & 0x7, + (rx_queue & 0x8) << 1); + if (tx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + tx_queue & 0x7, + ((tx_queue & 0x8) << 1) + 8); + q_vector->eims_value = BIT(msix_vector); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + /* On 82580 and newer adapters the scheme is similar to 82576 + * however instead of ordering column-major we have things + * ordered row-major. So we traverse the table by using + * bit 0 as the column offset, and the remaining bits as the + * row index. + */ + if (rx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + rx_queue >> 1, + (rx_queue & 0x1) << 4); + if (tx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + tx_queue >> 1, + ((tx_queue & 0x1) << 4) + 8); + q_vector->eims_value = BIT(msix_vector); + break; + default: + BUG(); + break; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + + /* configure q_vector to set itr on first interrupt */ + q_vector->set_itr = 1; +} + +/** + * igb_configure_msix - Configure MSI-X hardware + * @adapter: board private structure to initialize + * + * igb_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + **/ +static void igb_configure_msix(struct igb_adapter *adapter) +{ + u32 tmp; + int i, vector = 0; + struct e1000_hw *hw = &adapter->hw; + + adapter->eims_enable_mask = 0; + + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case e1000_82575: + tmp = rd32(E1000_CTRL_EXT); + /* enable MSI-X PBA support*/ + tmp |= E1000_CTRL_EXT_PBA_CLR; + + /* Auto-Mask interrupts upon ICR read. */ + tmp |= E1000_CTRL_EXT_EIAME; + tmp |= E1000_CTRL_EXT_IRCA; + + wr32(E1000_CTRL_EXT, tmp); + + /* enable msix_other interrupt */ + array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); + adapter->eims_other = E1000_EIMS_OTHER; + + break; + + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. 
+ */ + wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | + E1000_GPIE_PBA | E1000_GPIE_EIAME | + E1000_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = BIT(vector); + tmp = (vector++ | E1000_IVAR_VALID) << 8; + + wr32(E1000_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) + igb_assign_vector(adapter->q_vector[i], vector++); + + wrfl(); +} + +/** + * igb_request_msix - Initialize MSI-X interrupts + * @adapter: board private structure to initialize + * + * igb_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. + **/ +static int igb_request_msix(struct igb_adapter *adapter) +{ + unsigned int num_q_vectors = adapter->num_q_vectors; + struct net_device *netdev = adapter->netdev; + int i, err = 0, vector = 0, free_vector = 0; + + err = request_irq(adapter->msix_entries[vector].vector, + igb_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + + vector++; + + q_vector->itr_register = adapter->io_addr + E1000_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else if (q_vector->tx.ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx.ring->queue_index); + else if (q_vector->rx.ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else + sprintf(q_vector->name, "%s-unused", netdev->name); + + err = request_irq(adapter->msix_entries[vector].vector, + igb_msix_ring, 0, q_vector->name, + q_vector); + if (err) + goto err_free; + } + + igb_configure_msix(adapter); + return 0; + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: + return err; +} + +/** + * igb_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + **/ +static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + adapter->q_vector[v_idx] = NULL; + + /* igb_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + if (q_vector) + kfree_rcu(q_vector, rcu); +} + +/** + * igb_reset_q_vector - Reset config for interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be reset + * + * If NAPI is enabled it will delete any references to the + * NAPI struct. This is preparation for igb_free_q_vector. + **/ +static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + /* Coming from igb_set_interrupt_capability, the vectors are not yet + * allocated. So, q_vector is NULL so we should stop here. 
+ */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; + + netif_napi_del(&q_vector->napi); + +} + +static void igb_reset_interrupt_capability(struct igb_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) + pci_disable_msix(adapter->pdev); + else if (adapter->flags & IGB_FLAG_HAS_MSI) + pci_disable_msi(adapter->pdev); + + while (v_idx--) + igb_reset_q_vector(adapter, v_idx); +} + +/** + * igb_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void igb_free_q_vectors(struct igb_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) { + igb_reset_q_vector(adapter, v_idx); + igb_free_q_vector(adapter, v_idx); + } +} + +/** + * igb_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: board private structure to initialize + * + * This function resets the device so that it has 0 Rx queues, Tx queues, and + * MSI-X interrupts allocated. + */ +static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) +{ + igb_free_q_vectors(adapter); + igb_reset_interrupt_capability(adapter); +} + +/** + * igb_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: board private structure to initialize + * @msix: boolean value of MSIX capability + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) +{ + int err; + int numvecs, i; + + if (!msix) + goto msi_only; + adapter->flags |= IGB_FLAG_HAS_MSIX; + + /* Number of supported queues. 
*/ + adapter->num_rx_queues = adapter->rss_queues; + if (adapter->vfs_allocated_count) + adapter->num_tx_queues = 1; + else + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every Rx queue */ + numvecs = adapter->num_rx_queues; + + /* if Tx handler is separate add 1 for every Tx queue */ + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) + return; + + igb_reset_interrupt_capability(adapter); + + /* If we can't do MSI-X, try MSI */ +msi_only: + adapter->flags &= ~IGB_FLAG_HAS_MSIX; +#ifdef CONFIG_PCI_IOV + /* disable SR-IOV for non MSI-X configurations */ + if (adapter->vf_data) { + struct e1000_hw *hw = &adapter->hw; + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); + msleep(500); + + kfree(adapter->vf_mac_list); + adapter->vf_mac_list = NULL; + kfree(adapter->vf_data); + adapter->vf_data = NULL; + wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + wrfl(); + msleep(100); + dev_info(&adapter->pdev->dev, "IOV Disabled\n"); + } +#endif + adapter->vfs_allocated_count = 0; + adapter->rss_queues = 1; + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGB_FLAG_HAS_MSI; +} + +static void igb_add_ring(struct igb_ring *ring, + struct igb_ring_container *head) +{ + head->ring = ring; + head->count++; +} + +/** + * igb_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
+ **/ +static int igb_alloc_q_vector(struct igb_adapter *adapter, + int v_count, int v_idx, + int txr_count, int txr_idx, + int rxr_count, int rxr_idx) +{ + struct igb_q_vector *q_vector; + struct igb_ring *ring; + int ring_count; + size_t size; + + /* igb only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + size = struct_size(q_vector, ring, ring_count); + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; + if (!q_vector) { + q_vector = kzalloc(size, GFP_KERNEL); + } else if (size > ksize(q_vector)) { + struct igb_q_vector *new_q_vector; + + new_q_vector = kzalloc(size, GFP_KERNEL); + if (new_q_vector) + kfree_rcu(q_vector, rcu); + q_vector = new_q_vector; + } else { + memset(q_vector, 0, size); + } + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ + q_vector->itr_register = adapter->io_addr + E1000_EITR(0); + q_vector->itr_val = IGB_START_ITR; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* intialize ITR */ + if (rxr_count) { + /* rx or rx/tx vector */ + if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) + q_vector->itr_val = adapter->rx_itr_setting; + } else { + /* tx only vector */ + if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) + q_vector->itr_val = adapter->tx_itr_setting; + } + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + igb_add_ring(ring, &q_vector->tx); + + /* For 82575, context index must be unique per ring. */ + if (adapter->hw.mac.type == e1000_82575) + set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + ring->cbs_enable = false; + ring->idleslope = 0; + ring->sendslope = 0; + ring->hicredit = 0; + ring->locredit = 0; + + u64_stats_init(&ring->tx_syncp); + u64_stats_init(&ring->tx_syncp2); + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + igb_add_ring(ring, &q_vector->rx); + + /* set flag indicating ring supports SCTP checksum offload */ + if (adapter->hw.mac.type >= e1000_82576) + set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); + + /* On i350, i354, i210, and i211, loopback VLAN packets + * have the tag byte-swapped. 
+ */ + if (adapter->hw.mac.type >= e1000_i350) + set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + u64_stats_init(&ring->rx_syncp); + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; +} + + +/** + * igb_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int igb_alloc_q_vectors(struct igb_adapter *adapter) +{ + int q_vectors = adapter->num_q_vectors; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igb_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * @adapter: board private structure to initialize + * @msix: boolean value of MSIX capability + * + * This function initializes the interrupts and allocates all of the queues. + **/ +static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) +{ + struct pci_dev *pdev = adapter->pdev; + int err; + + igb_set_interrupt_capability(adapter, msix); + + err = igb_alloc_q_vectors(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + igb_cache_ring_register(adapter); + + return 0; + +err_alloc_q_vectors: + igb_reset_interrupt_capability(adapter); + return err; +} + +/** + * igb_request_irq - initialize interrupts + * @adapter: board private structure to initialize + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
+ **/ +static int igb_request_irq(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + err = igb_request_msix(adapter); + if (!err) + goto request_done; + /* fall back to MSI */ + igb_free_all_tx_resources(adapter); + igb_free_all_rx_resources(adapter); + + igb_clear_interrupt_scheme(adapter); + err = igb_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; + + igb_setup_all_tx_resources(adapter); + igb_setup_all_rx_resources(adapter); + igb_configure(adapter); + } + + igb_assign_vector(adapter->q_vector[0], 0); + + if (adapter->flags & IGB_FLAG_HAS_MSI) { + err = request_irq(pdev->irq, igb_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; + + /* fall back to legacy interrupts */ + igb_reset_interrupt_capability(adapter); + adapter->flags &= ~IGB_FLAG_HAS_MSI; + } + + err = request_irq(pdev->irq, igb_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + dev_err(&pdev->dev, "Error %d getting interrupt\n", + err); + +request_done: + return err; +} + +static void igb_free_irq(struct igb_adapter *adapter) +{ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) + free_irq(adapter->msix_entries[vector++].vector, + adapter->q_vector[i]); + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * igb_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void igb_irq_disable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* we need to be careful when disabling interrupts. The VFs are also + * mapped into these registers and so clearing the bits can cause + * issues on the VF drivers so we only need to clear what we set + */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + u32 regval = rd32(E1000_EIAM); + + wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); + wr32(E1000_EIMC, adapter->eims_enable_mask); + regval = rd32(E1000_EIAC); + wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); + } + + wr32(E1000_IAM, 0); + wr32(E1000_IMC, ~0); + wrfl(); + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + int i; + + for (i = 0; i < adapter->num_q_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * igb_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void igb_irq_enable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA; + u32 regval = rd32(E1000_EIAC); + + wr32(E1000_EIAC, regval | adapter->eims_enable_mask); + regval = rd32(E1000_EIAM); + wr32(E1000_EIAM, regval | adapter->eims_enable_mask); + wr32(E1000_EIMS, adapter->eims_enable_mask); + if (adapter->vfs_allocated_count) { + wr32(E1000_MBVFIMR, 0xFF); + ims |= E1000_IMS_VMMB; + } + wr32(E1000_IMS, ims); + } else { + wr32(E1000_IMS, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + wr32(E1000_IAM, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + } +} + +static void igb_update_mng_vlan(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 pf_id = adapter->vfs_allocated_count; + u16 vid = adapter->hw.mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (hw->mng_cookie.status & 
E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + /* add VID to filter table */ + igb_vfta_set(hw, vid, pf_id, true, true); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; + } + + if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) { + /* remove VID from filter table */ + igb_vfta_set(hw, vid, pf_id, false, true); + } +} + +/** + * igb_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + **/ +static void igb_release_hw_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware take over control of h/w */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); +} + +/** + * igb_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. + **/ +static void igb_get_hw_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); +} + +static void enable_fqtss(struct igb_adapter *adapter, bool enable) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + + WARN_ON(hw->mac.type != e1000_i210); + + if (enable) + adapter->flags |= IGB_FLAG_FQTSS; + else + adapter->flags &= ~IGB_FLAG_FQTSS; + + if (netif_running(netdev)) + schedule_work(&adapter->reset_task); +} + +static bool is_fqtss_enabled(struct igb_adapter *adapter) +{ + return (adapter->flags & IGB_FLAG_FQTSS) ? true : false; +} + +static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue, + enum tx_queue_prio prio) +{ + u32 val; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 4); + + val = rd32(E1000_I210_TXDCTL(queue)); + + if (prio == TX_QUEUE_PRIO_HIGH) + val |= E1000_TXDCTL_PRIORITY; + else + val &= ~E1000_TXDCTL_PRIORITY; + + wr32(E1000_I210_TXDCTL(queue), val); +} + +static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode) +{ + u32 val; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 1); + + val = rd32(E1000_I210_TQAVCC(queue)); + + if (mode == QUEUE_MODE_STREAM_RESERVATION) + val |= E1000_TQAVCC_QUEUEMODE; + else + val &= ~E1000_TQAVCC_QUEUEMODE; + + wr32(E1000_I210_TQAVCC(queue), val); +} + +static bool is_any_cbs_enabled(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]->cbs_enable) + return true; + } + + return false; +} + +static bool is_any_txtime_enabled(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]->launchtime_enable) + return true; + } + + return false; +} + +/** + * igb_config_tx_modes - Configure "Qav Tx mode" features on igb + * @adapter: pointer to adapter struct + * @queue: queue number + * + * Configure CBS and Launchtime for a given hardware queue. 
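+ * Only the i210 supports these Qav features, and only on queues 0
+ * and 1 (the stream reservation capable queues).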
+ * Parameters are retrieved from the correct Tx ring, so + * igb_save_cbs_params() and igb_save_txtime_params() should be used + * for setting those correctly prior to this function being called. + **/ +static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct igb_ring *ring; + u32 tqavcc, tqavctrl; + u16 value; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 1); + ring = adapter->tx_ring[queue]; + + /* If any of the Qav features is enabled, configure queues as SR and + * with HIGH PRIO. If none is, then configure them with LOW PRIO and + * as SP. + */ + if (ring->cbs_enable || ring->launchtime_enable) { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); + set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); + } else { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW); + set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY); + } + + /* If CBS is enabled, set DataTranARB and config its parameters. */ + if (ring->cbs_enable || queue == 0) { + /* i210 does not allow the queue 0 to be in the Strict + * Priority mode while the Qav mode is enabled, so, + * instead of disabling strict priority mode, we give + * queue 0 the maximum of credits possible. + * + * See section 8.12.19 of the i210 datasheet, "Note: + * Queue0 QueueMode must be set to 1b when + * TransmitMode is set to Qav." + */ + if (queue == 0 && !ring->cbs_enable) { + /* max "linkspeed" idleslope in kbps */ + ring->idleslope = 1000000; + ring->hicredit = ETH_FRAME_LEN; + } + + /* Always set data transfer arbitration to credit-based + * shaper algorithm on TQAVCTRL if CBS is enabled for any of + * the queues. + */ + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl |= E1000_TQAVCTRL_DATATRANARB; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + + /* According to i210 datasheet section 7.2.7.7, we should set + * the 'idleSlope' field from TQAVCC register following the + * equation: + * + * For 100 Mbps link speed: + * + * value = BW * 0x7735 * 0.2 (E1) + * + * For 1000Mbps link speed: + * + * value = BW * 0x7735 * 2 (E2) + * + * E1 and E2 can be merged into one equation as shown below. + * Note that 'link-speed' is in Mbps. + * + * value = BW * 0x7735 * 2 * link-speed + * -------------- (E3) + * 1000 + * + * 'BW' is the percentage bandwidth out of full link speed + * which can be found with the following equation. Note that + * idleSlope here is the parameter from this function which + * is in kbps. + * + * BW = idleSlope + * ----------------- (E4) + * link-speed * 1000 + * + * That said, we can come up with a generic equation to + * calculate the value we should set it TQAVCC register by + * replacing 'BW' in E3 by E4. The resulting equation is: + * + * value = idleSlope * 0x7735 * 2 * link-speed + * ----------------- -------------- (E5) + * link-speed * 1000 1000 + * + * 'link-speed' is present in both sides of the fraction so + * it is canceled out. The final equation is the following: + * + * value = idleSlope * 61034 + * ----------------- (E6) + * 1000000 + * + * NOTE: For i210, given the above, we can see that idleslope + * is represented in 16.38431 kbps units by the value at + * the TQAVCC register (1Gbps / 61034), which reduces + * the granularity for idleslope increments. + * For instance, if you want to configure a 2576kbps + * idleslope, the value to be written on the register + * would have to be 157.23. 
If rounded down, you end + * up with less bandwidth available than originally + * required (~2572 kbps). If rounded up, you end up + * with a higher bandwidth (~2589 kbps). Below the + * approach we take is to always round up the + * calculated value, so the resulting bandwidth might + * be slightly higher for some configurations. + */ + value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000); + + tqavcc = rd32(E1000_I210_TQAVCC(queue)); + tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; + tqavcc |= value; + wr32(E1000_I210_TQAVCC(queue), tqavcc); + + wr32(E1000_I210_TQAVHC(queue), + 0x80000000 + ring->hicredit * 0x7735); + } else { + + /* Set idleSlope to zero. */ + tqavcc = rd32(E1000_I210_TQAVCC(queue)); + tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; + wr32(E1000_I210_TQAVCC(queue), tqavcc); + + /* Set hiCredit to zero. */ + wr32(E1000_I210_TQAVHC(queue), 0); + + /* If CBS is not enabled for any queues anymore, then return to + * the default state of Data Transmission Arbitration on + * TQAVCTRL. + */ + if (!is_any_cbs_enabled(adapter)) { + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } + } + + /* If LaunchTime is enabled, set DataTranTIM. */ + if (ring->launchtime_enable) { + /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled + * for any of the SR queues, and configure fetchtime delta. + * XXX NOTE: + * - LaunchTime will be enabled for all SR queues. + * - A fixed offset can be added relative to the launch + * time of all packets if configured at reg LAUNCH_OS0. + * We are keeping it as 0 for now (default value). + */ + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl |= E1000_TQAVCTRL_DATATRANTIM | + E1000_TQAVCTRL_FETCHTIME_DELTA; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } else { + /* If Launchtime is not enabled for any SR queues anymore, + * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta, + * effectively disabling Launchtime. + */ + if (!is_any_txtime_enabled(adapter)) { + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM; + tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } + } + + /* XXX: In i210 controller the sendSlope and loCredit parameters from + * CBS are not configurable by software so we don't do any 'controller + * configuration' in respect to these parameters. + */ + + netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n", + ring->cbs_enable ? "enabled" : "disabled", + ring->launchtime_enable ? 
"enabled" : "disabled", + queue, + ring->idleslope, ring->sendslope, + ring->hicredit, ring->locredit); +} + +static int igb_save_txtime_params(struct igb_adapter *adapter, int queue, + bool enable) +{ + struct igb_ring *ring; + + if (queue < 0 || queue > adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + ring->launchtime_enable = enable; + + return 0; +} + +static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + struct igb_ring *ring; + + if (queue < 0 || queue > adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +/** + * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable + * @adapter: pointer to adapter struct + * + * Configure TQAVCTRL register switching the controller's Tx mode + * if FQTSS mode is enabled or disabled. Additionally, will issue + * a call to igb_config_tx_modes() per queue so any previously saved + * Tx parameters are applied. + **/ +static void igb_setup_tx_mode(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 val; + + /* Only i210 controller supports changing the transmission mode. */ + if (hw->mac.type != e1000_i210) + return; + + if (is_fqtss_enabled(adapter)) { + int i, max_queue; + + /* Configure TQAVCTRL register: set transmit mode to 'Qav', + * set data fetch arbitration to 'round robin', set SP_WAIT_SR + * so SP queues wait for SR ones. + */ + val = rd32(E1000_I210_TQAVCTRL); + val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR; + val &= ~E1000_TQAVCTRL_DATAFETCHARB; + wr32(E1000_I210_TQAVCTRL, val); + + /* Configure Tx and Rx packet buffers sizes as described in + * i210 datasheet section 7.2.7.7. + */ + val = rd32(E1000_TXPBS); + val &= ~I210_TXPBSIZE_MASK; + val |= I210_TXPBSIZE_PB0_6KB | I210_TXPBSIZE_PB1_6KB | + I210_TXPBSIZE_PB2_6KB | I210_TXPBSIZE_PB3_6KB; + wr32(E1000_TXPBS, val); + + val = rd32(E1000_RXPBS); + val &= ~I210_RXPBSIZE_MASK; + val |= I210_RXPBSIZE_PB_30KB; + wr32(E1000_RXPBS, val); + + /* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ + * register should not exceed the buffer size programmed in + * TXPBS. The smallest buffer size programmed in TXPBS is 4kB + * so according to the datasheet we should set MAX_TPKT_SIZE to + * 4kB / 64. + * + * However, when we do so, no frame from queue 2 and 3 are + * transmitted. It seems the MAX_TPKT_SIZE should not be great + * or _equal_ to the buffer size programmed in TXPBS. For this + * reason, we set MAX_ TPKT_SIZE to (4kB - 1) / 64. + */ + val = (4096 - 1) / 64; + wr32(E1000_I210_DTXMXPKTSZ, val); + + /* Since FQTSS mode is enabled, apply any CBS configuration + * previously set. If no previous CBS configuration has been + * done, then the initial configuration is applied, which means + * CBS is disabled. + */ + max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ? 
+ adapter->num_tx_queues : I210_SR_QUEUES_NUM; + + for (i = 0; i < max_queue; i++) { + igb_config_tx_modes(adapter, i); + } + } else { + wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); + wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); + wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT); + + val = rd32(E1000_I210_TQAVCTRL); + /* According to Section 8.12.21, the other flags we've set when + * enabling FQTSS are not relevant when disabling FQTSS so we + * don't set they here. + */ + val &= ~E1000_TQAVCTRL_XMIT_MODE; + wr32(E1000_I210_TQAVCTRL, val); + } + + netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ? + "enabled" : "disabled"); +} + +/** + * igb_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void igb_configure(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + igb_get_hw_control(adapter); + igb_set_rx_mode(netdev); + igb_setup_tx_mode(adapter); + + igb_restore_vlan(adapter); + + igb_setup_tctl(adapter); + igb_setup_mrqc(adapter); + igb_setup_rctl(adapter); + + igb_nfc_filter_restore(adapter); + igb_configure_tx(adapter); + igb_configure_rx(adapter); + + igb_rx_fifo_flush_82575(&adapter->hw); + + /* call igb_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); + } +} + +/** + * igb_power_up_link - Power up the phy/serdes link + * @adapter: address of board private structure + **/ +void igb_power_up_link(struct igb_adapter *adapter) +{ + igb_reset_phy(&adapter->hw); + + if (adapter->hw.phy.media_type == e1000_media_type_copper) + igb_power_up_phy_copper(&adapter->hw); + else + igb_power_up_serdes_link_82575(&adapter->hw); + + igb_setup_link(&adapter->hw); +} + +/** + * igb_power_down_link - Power down the phy/serdes link + * @adapter: address of board private structure + */ +static void igb_power_down_link(struct igb_adapter *adapter) +{ + if (adapter->hw.phy.media_type == e1000_media_type_copper) + igb_power_down_phy_copper_82575(&adapter->hw); + else + igb_shutdown_serdes_link_82575(&adapter->hw); +} + +/** + * igb_check_swap_media - Detect and switch function for Media Auto Sense + * @adapter: address of the board private structure + **/ +static void igb_check_swap_media(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext, connsw; + bool swap_now = false; + + ctrl_ext = rd32(E1000_CTRL_EXT); + connsw = rd32(E1000_CONNSW); + + /* need to live swap if current media is copper and we have fiber/serdes + * to go to. 
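+ * In the opposite direction (back to copper) the copper signal is
+ * given a few tries to appear before the switch is made.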
+ */ + + if ((hw->phy.media_type == e1000_media_type_copper) && + (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) { + swap_now = true; + } else if ((hw->phy.media_type != e1000_media_type_copper) && + !(connsw & E1000_CONNSW_SERDESD)) { + /* copper signal takes time to appear */ + if (adapter->copper_tries < 4) { + adapter->copper_tries++; + connsw |= E1000_CONNSW_AUTOSENSE_CONF; + wr32(E1000_CONNSW, connsw); + return; + } else { + adapter->copper_tries = 0; + if ((connsw & E1000_CONNSW_PHYSD) && + (!(connsw & E1000_CONNSW_PHY_PDN))) { + swap_now = true; + connsw &= ~E1000_CONNSW_AUTOSENSE_CONF; + wr32(E1000_CONNSW, connsw); + } + } + } + + if (!swap_now) + return; + + switch (hw->phy.media_type) { + case e1000_media_type_copper: + netdev_info(adapter->netdev, + "MAS: changing media to fiber/serdes\n"); + ctrl_ext |= + E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + adapter->copper_tries = 0; + break; + case e1000_media_type_internal_serdes: + case e1000_media_type_fiber: + netdev_info(adapter->netdev, + "MAS: changing media to copper\n"); + ctrl_ext &= + ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + break; + default: + /* shouldn't get here during regular operation */ + netdev_err(adapter->netdev, + "AMS: Invalid media type found, returning\n"); + break; + } + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +/** + * igb_up - Open the interface and prepare it to handle traffic + * @adapter: board private structure + **/ +int igb_up(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + /* hardware has been reset, we need to reload some things */ + igb_configure(adapter); + + clear_bit(__IGB_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&(adapter->q_vector[i]->napi)); + + if (adapter->flags & IGB_FLAG_HAS_MSIX) + igb_configure_msix(adapter); + else + igb_assign_vector(adapter->q_vector[0], 0); + + /* Clear any pending interrupts. */ + rd32(E1000_TSICR); + rd32(E1000_ICR); + igb_irq_enable(adapter); + + /* notify VFs that reset has been completed */ + if (adapter->vfs_allocated_count) { + u32 reg_data = rd32(E1000_CTRL_EXT); + + reg_data |= E1000_CTRL_EXT_PFRSTD; + wr32(E1000_CTRL_EXT, reg_data); + } + + netif_tx_start_all_queues(adapter->netdev); + + /* start the watchdog. 
*/ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + + if ((adapter->flags & IGB_FLAG_EEE) && + (!hw->dev_spec._82575.eee_disable)) + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; + + return 0; +} + +void igb_down(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tctl, rctl; + int i; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer + */ + set_bit(__IGB_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rctl = rd32(E1000_RCTL); + wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + igb_nfc_filter_exit(adapter); + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + /* disable transmits in the hardware */ + tctl = rd32(E1000_TCTL); + tctl &= ~E1000_TCTL_EN; + wr32(E1000_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 11000); + + igb_irq_disable(adapter); + + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + + for (i = 0; i < adapter->num_q_vectors; i++) { + if (adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } + } + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + /* record the stats before reset*/ + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + igb_reset(adapter); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; + + igb_clean_all_tx_rings(adapter); + igb_clean_all_rx_rings(adapter); +#ifdef CONFIG_IGB_DCA + + /* since we reset the hardware DCA settings were cleared */ + igb_setup_dca(adapter); +#endif +} + +void igb_reinit_locked(struct igb_adapter *adapter) +{ + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + igb_down(adapter); + igb_up(adapter); + clear_bit(__IGB_RESETTING, &adapter->state); +} + +/** igb_enable_mas - Media Autosense re-enable after swap + * + * @adapter: adapter struct + **/ +static void igb_enable_mas(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 connsw = rd32(E1000_CONNSW); + + /* configure for SerDes media detect */ + if ((hw->phy.media_type == e1000_media_type_copper) && + (!(connsw & E1000_CONNSW_SERDESD))) { + connsw |= E1000_CONNSW_ENRGSRC; + connsw |= E1000_CONNSW_AUTOSENSE_EN; + wr32(E1000_CONNSW, connsw); + wrfl(); + } +} + +#ifdef CONFIG_IGB_HWMON +/** + * igb_set_i2c_bb - Init I2C interface + * @hw: pointer to hardware structure + **/ +static void igb_set_i2c_bb(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 i2cctl; + + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_I2C_ENA; + wr32(E1000_CTRL_EXT, ctrl_ext); + wrfl(); + + i2cctl = rd32(E1000_I2CPARAMS); + i2cctl |= E1000_I2CBB_EN + | E1000_I2C_CLK_OE_N + | E1000_I2C_DATA_OE_N; + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); +} +#endif + +void igb_reset(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_fc_info *fc = &hw->fc; + u32 pba, hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. 
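+ * The resulting value is in kilobytes and also feeds the flow
+ * control high water mark calculation further down.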
+ */ + switch (mac->type) { + case e1000_i350: + case e1000_i354: + case e1000_82580: + pba = rd32(E1000_RXPBS); + pba = igb_rxpbs_adjust_82580(pba); + break; + case e1000_82576: + pba = rd32(E1000_RXPBS); + pba &= E1000_RXPBS_SIZE_MASK_82576; + break; + case e1000_82575: + case e1000_i210: + case e1000_i211: + default: + pba = E1000_PBA_34K; + break; + } + + if (mac->type == e1000_82575) { + u32 min_rx_space, min_tx_space, needed_tx_space; + + /* write Rx PBA so that hardware can report correct Tx PBA */ + wr32(E1000_PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024); + + /* The Tx FIFO also stores 16 bytes of information about the Tx + * but don't include Ethernet FCS because hardware appends it. + * We only need to round down to the nearest 512 byte block + * count since the value we care about is 2 frames, not 1. + */ + min_tx_space = adapter->max_frame_size; + min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN; + min_tx_space = DIV_ROUND_UP(min_tx_space, 512); + + /* upper 16 bits has Tx packet buffer allocation size in KB */ + needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16); + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation. + */ + if (needed_tx_space < pba) { + pba -= needed_tx_space; + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + + /* adjust PBA for jumbo frames */ + wr32(E1000_PBA, pba); + } + + /* flow control settings + * The high water mark must be low enough to fit one full frame + * after transmitting the pause frame. As such we must have enough + * space to allow for us to complete our current transmit and then + * receive the frame that is in progress from the link partner. 
+ * Set it to: + * - the full Rx FIFO size minus one full Tx plus one full Rx frame + */ + hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); + + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + fc->pause_time = 0xFFFF; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + /* disable receive for all VFs and wait one second */ + if (adapter->vfs_allocated_count) { + int i; + + for (i = 0 ; i < adapter->vfs_allocated_count; i++) + adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; + + /* ping all the active vfs to let them know we are going down */ + igb_ping_all_vfs(adapter); + + /* disable transmits and receives */ + wr32(E1000_VFRE, 0); + wr32(E1000_VFTE, 0); + } + + /* Allow time for pending master requests to run */ + hw->mac.ops.reset_hw(hw); + wr32(E1000_WUC, 0); + + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + /* need to resetup here after media swap */ + adapter->ei.get_invariants(hw); + adapter->flags &= ~IGB_FLAG_MEDIA_RESET; + } + if ((mac->type == e1000_82575 || mac->type == e1000_i350) && + (adapter->flags & IGB_FLAG_MAS_ENABLE)) { + igb_enable_mas(adapter); + } + if (hw->mac.ops.init_hw(hw)) + dev_err(&pdev->dev, "Hardware Error\n"); + + /* RAR registers were cleared during init_hw, clear mac table */ + igb_flush_mac_table(adapter); + __dev_uc_unsync(adapter->netdev, NULL); + + /* Recover default RAR entry */ + igb_set_default_mac_filter(adapter); + + /* Flow control settings reset on hardware reset, so guarantee flow + * control is off when forcing speed. + */ + if (!hw->mac.autoneg) + igb_force_mac_fc(hw); + + igb_init_dmac(adapter, pba); +#ifdef CONFIG_IGB_HWMON + /* Re-initialize the thermal sensor on i350 devices. */ + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (mac->type == e1000_i350 && hw->bus.func == 0) { + /* If present, re-initialize the external thermal sensor + * interface. + */ + if (adapter->ets) + igb_set_i2c_bb(hw); + mac->ops.init_thermal_sensor_thresh(hw); + } + } +#endif + /* Re-establish EEE setting */ + if (hw->phy.media_type == e1000_media_type_copper) { + switch (mac->type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + igb_set_eee_i350(hw, true, true); + break; + case e1000_i354: + igb_set_eee_i354(hw, true, true); + break; + default: + break; + } + } + if (!netif_running(adapter->netdev)) + igb_power_down_link(adapter); + + igb_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); + + /* Re-enable PTP, where applicable. */ + if (adapter->ptp_flags & IGB_PTP_ENABLED) + igb_ptp_reset(adapter); + + igb_get_phy_info(hw); +} + +static netdev_features_t igb_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. 
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int igb_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct igb_adapter *adapter = netdev_priv(netdev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + igb_vlan_mode(netdev, features); + + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + if (!(features & NETIF_F_NTUPLE)) { + struct hlist_node *node2; + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + hlist_for_each_entry_safe(rule, node2, + &adapter->nfc_filter_list, nfc_node) { + igb_erase_filter(adapter, rule); + hlist_del(&rule->nfc_node); + kfree(rule); + } + spin_unlock(&adapter->nfc_lock); + adapter->nfc_filter_count = 0; + } + + netdev->features = features; + + if (netif_running(netdev)) + igb_reinit_locked(adapter); + else + igb_reset(adapter); + + return 1; +} + +static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + u16 flags, + struct netlink_ext_ack *extack) +{ + /* guarantee we can provide a unique filter for the unicast address */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + struct igb_adapter *adapter = netdev_priv(dev); + int vfn = adapter->vfs_allocated_count; + + if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn)) + return -ENOMEM; + } + + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +} + +#define IGB_MAX_MAC_HDR_LEN 127 +#define IGB_MAX_NETWORK_HDR_LEN 511 + +static netdev_features_t +igb_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_GSO_UDP_L4 | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_GSO_UDP_L4 | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + +static void igb_offload_apply(struct igb_adapter *adapter, s32 queue) +{ + if (!is_fqtss_enabled(adapter)) { + enable_fqtss(adapter, true); + return; + } + + igb_config_tx_modes(adapter, queue); + + if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter)) + enable_fqtss(adapter, false); +} + +static int igb_offload_cbs(struct igb_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct e1000_hw *hw = &adapter->hw; + int err; + + /* CBS offloading is only supported by i210 controller. */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + /* CBS offloading is only supported by queue 0 and queue 1. 
*/ + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + igb_offload_apply(adapter, qopt->queue); + + return 0; +} + +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +#define VLAN_PRIO_FULL_MASK (0x07) + +static int igb_parse_cls_flower(struct igb_adapter *adapter, + struct flow_cls_offload *f, + int traffic_class, + struct igb_nfc_filter *input) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; + struct netlink_ext_ack *extack = f->common.extack; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN))) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported"); + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + if (!is_zero_ether_addr(match.mask->dst)) { + if (!is_broadcast_ether_addr(match.mask->dst)) { + NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address"); + return -EINVAL; + } + + input->filter.match_flags |= + IGB_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(input->filter.dst_addr, match.key->dst); + } + + if (!is_zero_ether_addr(match.mask->src)) { + if (!is_broadcast_ether_addr(match.mask->src)) { + NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address"); + return -EINVAL; + } + + input->filter.match_flags |= + IGB_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(input->filter.src_addr, match.key->src); + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + if (match.mask->n_proto) { + if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) { + NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter"); + return -EINVAL; + } + + input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE; + input->filter.etype = match.key->n_proto; + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_priority) { + if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) { + NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); + return -EINVAL; + } + + input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; + input->filter.vlan_tci = + (__force __be16)match.key->vlan_priority; + } + } + + input->action = traffic_class; + input->cookie = f->cookie; + + return 0; +} + +static int igb_configure_clsflower(struct igb_adapter *adapter, + struct flow_cls_offload *cls_flower) +{ + struct netlink_ext_ack *extack = cls_flower->common.extack; + struct igb_nfc_filter *filter, *f; + int err, tc; + + tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); + if (tc < 0) { + NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class"); + return -EINVAL; + } + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + err = igb_parse_cls_flower(adapter, cls_flower, tc, filter); + if (err < 0) + goto err_parse; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) { + if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { + 
err = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, + "This filter is already set in ethtool"); + goto err_locked; + } + } + + hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) { + if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { + err = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, + "This filter is already set in cls_flower"); + goto err_locked; + } + } + + err = igb_add_filter(adapter, filter); + if (err < 0) { + NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter"); + goto err_locked; + } + + hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list); + + spin_unlock(&adapter->nfc_lock); + + return 0; + +err_locked: + spin_unlock(&adapter->nfc_lock); + +err_parse: + kfree(filter); + + return err; +} + +static int igb_delete_clsflower(struct igb_adapter *adapter, + struct flow_cls_offload *cls_flower) +{ + struct igb_nfc_filter *filter; + int err; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node) + if (filter->cookie == cls_flower->cookie) + break; + + if (!filter) { + err = -ENOENT; + goto out; + } + + err = igb_erase_filter(adapter, filter); + if (err < 0) + goto out; + + hlist_del(&filter->nfc_node); + kfree(filter); + +out: + spin_unlock(&adapter->nfc_lock); + + return err; +} + +static int igb_setup_tc_cls_flower(struct igb_adapter *adapter, + struct flow_cls_offload *cls_flower) +{ + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return igb_configure_clsflower(adapter, cls_flower); + case FLOW_CLS_DESTROY: + return igb_delete_clsflower(adapter, cls_flower); + case FLOW_CLS_STATS: + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } +} + +static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct igb_adapter *adapter = cb_priv; + + if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return igb_setup_tc_cls_flower(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igb_offload_txtime(struct igb_adapter *adapter, + struct tc_etf_qopt_offload *qopt) +{ + struct e1000_hw *hw = &adapter->hw; + int err; + + /* Launchtime offloading is only supported by i210 controller. */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + /* Launchtime offloading is only supported by queues 0 and 1. 
*/ + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable); + if (err) + return err; + + igb_offload_apply(adapter, qopt->queue); + + return 0; +} + +static LIST_HEAD(igb_block_cb_list); + +static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct igb_adapter *adapter = netdev_priv(dev); + + switch (type) { + case TC_SETUP_QDISC_CBS: + return igb_offload_cbs(adapter, type_data); + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple(type_data, + &igb_block_cb_list, + igb_setup_tc_block_cb, + adapter, adapter, true); + + case TC_SETUP_QDISC_ETF: + return igb_offload_txtime(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf) +{ + int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD; + struct igb_adapter *adapter = netdev_priv(dev); + struct bpf_prog *prog = bpf->prog, *old_prog; + bool running = netif_running(dev); + bool need_reset; + + /* verify igb ring attributes are sufficient for XDP */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + + if (frame_size > igb_rx_bufsz(ring)) { + NL_SET_ERR_MSG_MOD(bpf->extack, + "The RX buffer size is too small for the frame size"); + netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n", + igb_rx_bufsz(ring), frame_size); + return -EINVAL; + } + } + + old_prog = xchg(&adapter->xdp_prog, prog); + need_reset = (!!prog != !!old_prog); + + /* device is up and bpf is added/removed, must setup the RX queues */ + if (need_reset && running) { + igb_close(dev); + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + (void)xchg(&adapter->rx_ring[i]->xdp_prog, + adapter->xdp_prog); + } + + if (old_prog) + bpf_prog_put(old_prog); + + /* bpf is just replaced, RXQ and MTU are already setup */ + if (!need_reset) + return 0; + + if (running) + igb_open(dev); + + return 0; +} + +static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return igb_xdp_setup(dev, xdp); + default: + return -EINVAL; + } +} + +static void igb_xdp_ring_update_tail(struct igb_ring *ring) +{ + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter) +{ + unsigned int r_idx = smp_processor_id(); + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct igb_ring *tx_ring; + struct netdev_queue *nq; + u32 ret; + + if (unlikely(!xdpf)) + return IGB_XDP_CONSUMED; + + /* During program transitions its possible adapter->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + tx_ring = adapter->xdp_prog ? 
igb_xdp_tx_queue_mapping(adapter) : NULL; + if (unlikely(!tx_ring)) + return IGB_XDP_CONSUMED; + + nq = txring_txq(tx_ring); + __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); + __netif_tx_unlock(nq); + + return ret; +} + +static int igb_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct igb_adapter *adapter = netdev_priv(dev); + int cpu = smp_processor_id(); + struct igb_ring *tx_ring; + struct netdev_queue *nq; + int nxmit = 0; + int i; + + if (unlikely(test_bit(__IGB_DOWN, &adapter->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + /* During program transitions its possible adapter->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; + if (unlikely(!tx_ring)) + return -ENXIO; + + nq = txring_txq(tx_ring); + __netif_tx_lock(nq, cpu); + + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; + + err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); + if (err != IGB_XDP_TX) + break; + nxmit++; + } + + __netif_tx_unlock(nq); + + if (unlikely(flags & XDP_XMIT_FLUSH)) + igb_xdp_ring_update_tail(tx_ring); + + return nxmit; +} + +static const struct net_device_ops igb_netdev_ops = { + .ndo_open = igb_open, + .ndo_stop = igb_close, + .ndo_start_xmit = igb_xmit_frame, + .ndo_get_stats64 = igb_get_stats64, + .ndo_set_rx_mode = igb_set_rx_mode, + .ndo_set_mac_address = igb_set_mac, + .ndo_change_mtu = igb_change_mtu, + .ndo_eth_ioctl = igb_ioctl, + .ndo_tx_timeout = igb_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, + .ndo_set_vf_mac = igb_ndo_set_vf_mac, + .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, + .ndo_set_vf_rate = igb_ndo_set_vf_bw, + .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, + .ndo_set_vf_trust = igb_ndo_set_vf_trust, + .ndo_get_vf_config = igb_ndo_get_vf_config, + .ndo_fix_features = igb_fix_features, + .ndo_set_features = igb_set_features, + .ndo_fdb_add = igb_ndo_fdb_add, + .ndo_features_check = igb_features_check, + .ndo_setup_tc = igb_setup_tc, + .ndo_bpf = igb_xdp, + .ndo_xdp_xmit = igb_xdp_xmit, +}; + +/** + * igb_set_fw_version - Configure version string for ethtool + * @adapter: adapter struct + **/ +void igb_set_fw_version(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_fw_version fw; + + igb_get_fw_version(hw, &fw); + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (!(igb_get_flash_presence_i210(hw))) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, + fw.invm_img_type); + break; + } + fallthrough; + default: + /* if option is rom valid, display its version too */ + if (fw.or_valid) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x, %d.%d.%d", + fw.eep_major, fw.eep_minor, fw.etrack_id, + fw.or_major, fw.or_build, fw.or_patch); + /* no option rom */ + } else if (fw.etrack_id != 0X0000) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, fw.etrack_id); + } else { + snprintf(adapter->fw_version, + 
sizeof(adapter->fw_version), + "%d.%d.%d", + fw.eep_major, fw.eep_minor, fw.eep_build); + } + break; + } +} + +/** + * igb_init_mas - init Media Autosense feature if enabled in the NVM + * + * @adapter: adapter struct + **/ +static void igb_init_mas(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 eeprom_data; + + hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data); + switch (hw->bus.func) { + case E1000_FUNC_0: + if (eeprom_data & IGB_MAS_ENABLE_0) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_1: + if (eeprom_data & IGB_MAS_ENABLE_1) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_2: + if (eeprom_data & IGB_MAS_ENABLE_2) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_3: + if (eeprom_data & IGB_MAS_ENABLE_3) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + default: + /* Shouldn't get here */ + netdev_err(adapter->netdev, + "MAS: Invalid port configuration, returning\n"); + break; + } +} + +/** + * igb_init_i2c - Init I2C interface + * @adapter: pointer to adapter structure + **/ +static s32 igb_init_i2c(struct igb_adapter *adapter) +{ + s32 status = 0; + + /* I2C interface supported on i350 devices */ + if (adapter->hw.mac.type != e1000_i350) + return 0; + + /* Initialize the i2c bus which is controlled by the registers. + * This bus will use the i2c_algo_bit structure that implements + * the protocol through toggling of the 4 bits in the register. + */ + adapter->i2c_adap.owner = THIS_MODULE; + adapter->i2c_algo = igb_i2c_algo; + adapter->i2c_algo.data = adapter; + adapter->i2c_adap.algo_data = &adapter->i2c_algo; + adapter->i2c_adap.dev.parent = &adapter->pdev->dev; + strscpy(adapter->i2c_adap.name, "igb BB", + sizeof(adapter->i2c_adap.name)); + status = i2c_bit_add_bus(&adapter->i2c_adap); + return status; +} + +/** + * igb_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igb_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igb_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct igb_adapter *adapter; + struct e1000_hw *hw; + u16 eeprom_data = 0; + s32 ret_val; + static int global_quad_port_a; /* global quad port a indication */ + const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; + u8 part_str[E1000_PBANUM_LENGTH]; + int err; + + /* Catch broken hardware that put the wrong VF device ID in + * the PCIe SR-IOV capability. 
+ */ + if (pdev->is_virtfn) { + WARN(1, KERN_ERR "%s (%x:%x) should not be a VF!\n", + pci_name(pdev), pdev->vendor, pdev->device); + return -EINVAL; + } + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + + err = pci_request_mem_regions(pdev, igb_driver_name); + if (err) + goto err_pci_reg; + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + pci_save_state(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), + IGB_MAX_TX_QUEUES); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + err = -EIO; + adapter->io_addr = pci_iomap(pdev, 0, 0); + if (!adapter->io_addr) + goto err_ioremap; + /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; + + netdev->netdev_ops = &igb_netdev_ops; + igb_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Copy the default MAC, PHY and NVM function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); + /* Initialize skew-specific constants */ + err = ei->get_invariants(hw); + if (err) + goto err_sw_init; + + /* setup the private structure */ + err = igb_sw_init(adapter); + if (err) + goto err_sw_init; + + igb_get_bus_info_pcie(hw); + + hw->phy.autoneg_wait_to_complete = false; + + /* Copper options */ + if (hw->phy.media_type == e1000_media_type_copper) { + hw->phy.mdix = AUTO_ALL_MODES; + hw->phy.disable_polarity_correction = false; + hw->phy.ms_type = e1000_ms_hw_default; + } + + if (igb_check_reset_block(hw)) + dev_info(&pdev->dev, + "PHY reset is blocked due to SOL/IDER session.\n"); + + /* features is initialized to 0 in allocation, it might have bits + * set by igb_sw_init so we should use an or instead of an + * assignment. 
+ */ + netdev->features |= NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM; + + if (hw->mac.type >= e1000_82576) + netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4; + + if (hw->mac.type >= e1000_i350) + netdev->features |= NETIF_F_HW_TC; + +#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_RXALL; + + if (hw->mac.type >= e1000_i350) + netdev->hw_features |= NETIF_F_NTUPLE; + + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; + + /* set this bit last since it cannot be part of vlan_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* MTU range: 68 - 9216 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; + + adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ + hw->mac.ops.reset_hw(hw); + + /* make sure the NVM is good , i211/i210 parts can have special NVM + * that doesn't contain a checksum + */ + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (igb_get_flash_presence_i210(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, + "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + break; + default: + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + break; + } + + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + } + + eth_hw_addr_set(netdev, hw->mac.addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + igb_set_default_mac_filter(adapter); + + /* get firmware version for ethtool -i */ + igb_set_fw_version(adapter); + + /* configure RXPBSIZE and TXPBSIZE */ + if (hw->mac.type == e1000_i210) { + wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); + wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); + } + + timer_setup(&adapter->watchdog_timer, igb_watchdog, 0); + timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0); + + INIT_WORK(&adapter->reset_task, igb_reset_task); + INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = 0x2f; + + hw->fc.requested_mode = e1000_fc_default; + hw->fc.current_mode = e1000_fc_default; + + igb_validate_mdi_setting(hw); + + /* By default, support wake on port A */ + if (hw->bus.func == 0) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + + /* Check the NVM for wake support on non-port A ports */ + if (hw->mac.type >= 
e1000_82580) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &eeprom_data); + else if (hw->bus.func == 1) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + + if (eeprom_data & IGB_EEPROM_APME) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + + /* now that we have the eeprom settings, apply the special cases where + * the eeprom may be wrong or the board simply won't support wake on + * lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82575GB_QUAD_COPPER: + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + break; + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + break; + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + else + adapter->flags |= IGB_FLAG_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + default: + /* If the device can't wake, don't set software support */ + if (!device_can_wakeup(&adapter->pdev->dev)) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + } + + /* initialize the wol settings based on the eeprom settings */ + if (adapter->flags & IGB_FLAG_WOL_SUPPORTED) + adapter->wol |= E1000_WUFC_MAG; + + /* Some vendors want WoL disabled by default, but still supported */ + if ((hw->mac.type == e1000_i350) && + (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + + /* Some vendors want the ability to Use the EEPROM setting as + * enable/disable only, and not for capability + */ + if (((hw->mac.type == e1000_i350) || + (hw->mac.type == e1000_i354)) && + (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + if (hw->mac.type == e1000_i350) { + if (((pdev->subsystem_device == 0x5001) || + (pdev->subsystem_device == 0x5002)) && + (hw->bus.func == 0)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + if (pdev->subsystem_device == 0x1F52) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + } + + device_set_wakeup_enable(&adapter->pdev->dev, + adapter->flags & IGB_FLAG_WOL_SUPPORTED); + + /* reset the hardware with the new settings */ + igb_reset(adapter); + + /* Init the I2C interface */ + err = igb_init_i2c(adapter); + if (err) { + dev_err(&pdev->dev, "failed to init i2c interface\n"); + goto err_eeprom; + } + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igb_get_hw_control(adapter); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + +#ifdef CONFIG_IGB_DCA + if (dca_add_requester(&pdev->dev) == 0) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; + dev_info(&pdev->dev, "DCA enabled\n"); + igb_setup_dca(adapter); + } + +#endif +#ifdef CONFIG_IGB_HWMON + /* Initialize the thermal sensor on i350 devices. */ + if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { + u16 ets_word; + + /* Read the NVM to determine if this i350 device supports an + * external thermal sensor. 
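+ * A raw value of 0x0000 or 0xFFFF in that word means no external
+ * sensor is fitted.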
+ */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); + if (ets_word != 0x0000 && ets_word != 0xFFFF) + adapter->ets = true; + else + adapter->ets = false; + /* Only enable I2C bit banging if an external thermal + * sensor is supported. + */ + if (adapter->ets) + igb_set_i2c_bb(hw); + hw->mac.ops.init_thermal_sensor_thresh(hw); + if (igb_sysfs_init(adapter)) + dev_err(&pdev->dev, + "failed to allocate sysfs resources\n"); + } else { + adapter->ets = false; + } +#endif + /* Check if Media Autosense is enabled */ + adapter->ei = *ei; + if (hw->dev_spec._82575.mas_capable) + igb_init_mas(adapter); + + /* do hw tstamp init after resetting */ + igb_ptp_init(adapter); + + dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); + /* print bus type/speed/width info, not applicable to i354 */ + if (hw->mac.type != e1000_i354) { + dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", + netdev->name, + ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : + (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : + "unknown"), + ((hw->bus.width == e1000_bus_width_pcie_x4) ? + "Width x4" : + (hw->bus.width == e1000_bus_width_pcie_x2) ? + "Width x2" : + (hw->bus.width == e1000_bus_width_pcie_x1) ? + "Width x1" : "unknown"), netdev->dev_addr); + } + + if ((hw->mac.type == e1000_82576 && + rd32(E1000_EECD) & E1000_EECD_PRES) || + (hw->mac.type >= e1000_i210 || + igb_get_flash_presence_i210(hw))) { + ret_val = igb_read_part_string(hw, part_str, + E1000_PBANUM_LENGTH); + } else { + ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND; + } + + if (ret_val) + strcpy(part_str, "Unknown"); + dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); + dev_info(&pdev->dev, + "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", + (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : + (adapter->flags & IGB_FLAG_HAS_MSI) ? 
"MSI" : "legacy", + adapter->num_rx_queues, adapter->num_tx_queues); + if (hw->phy.media_type == e1000_media_type_copper) { + switch (hw->mac.type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + /* Enable EEE for internal copper PHY devices */ + err = igb_set_eee_i350(hw, true, true); + if ((!err) && + (!hw->dev_spec._82575.eee_disable)) { + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + adapter->flags |= IGB_FLAG_EEE; + } + break; + case e1000_i354: + if ((rd32(E1000_CTRL_EXT) & + E1000_CTRL_EXT_LINK_MODE_SGMII)) { + err = igb_set_eee_i354(hw, true, true); + if ((!err) && + (!hw->dev_spec._82575.eee_disable)) { + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + adapter->flags |= IGB_FLAG_EEE; + } + } + break; + default: + break; + } + } + + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + + pm_runtime_put_noidle(&pdev->dev); + return 0; + +err_register: + igb_release_hw_control(adapter); + memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); +err_eeprom: + if (!igb_check_reset_block(hw)) + igb_reset_phy(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); +err_sw_init: + kfree(adapter->mac_table); + kfree(adapter->shadow_vfta); + igb_clear_interrupt_scheme(adapter); +#ifdef CONFIG_PCI_IOV + igb_disable_sriov(pdev); +#endif + pci_iounmap(pdev, adapter->io_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +#ifdef CONFIG_PCI_IOV +static int igb_disable_sriov(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned long flags; + + /* reclaim resources allocated to VFs */ + if (adapter->vf_data) { + /* disable iov and allow time for transactions to clear */ + if (pci_vfs_assigned(pdev)) { + dev_warn(&pdev->dev, + "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n"); + return -EPERM; + } else { + pci_disable_sriov(pdev); + msleep(500); + } + spin_lock_irqsave(&adapter->vfs_lock, flags); + kfree(adapter->vf_mac_list); + adapter->vf_mac_list = NULL; + kfree(adapter->vf_data); + adapter->vf_data = NULL; + adapter->vfs_allocated_count = 0; + spin_unlock_irqrestore(&adapter->vfs_lock, flags); + wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + wrfl(); + msleep(100); + dev_info(&pdev->dev, "IOV Disabled\n"); + + /* Re-enable DMA Coalescing flag since IOV is turned off */ + adapter->flags |= IGB_FLAG_DMAC; + } + + return 0; +} + +static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + int old_vfs = pci_num_vf(pdev); + struct vf_mac_filter *mac_list; + int err = 0; + int num_vf_mac_filters, i; + + if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) { + err = -EPERM; + goto out; + } + if (!num_vfs) + goto out; + + if (old_vfs) { + dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n", + old_vfs, max_vfs); + adapter->vfs_allocated_count = old_vfs; + } else + adapter->vfs_allocated_count = num_vfs; + + adapter->vf_data = kcalloc(adapter->vfs_allocated_count, + sizeof(struct vf_data_storage), GFP_KERNEL); + + /* if allocation failed then we do not support SR-IOV */ + if (!adapter->vf_data) { + adapter->vfs_allocated_count = 0; + err = -ENOMEM; + goto out; + } + + 
/* Due to the limited number of RAR entries calculate potential
+	 * number of MAC filters available for the VFs. Reserve entries
+	 * for PF default MAC, PF MAC filters and at least one RAR entry
+	 * for each VF for VF MAC.
+	 */
+	num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
+			     (1 + IGB_PF_MAC_FILTERS_RESERVED +
+			      adapter->vfs_allocated_count);
+
+	adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
+				       sizeof(struct vf_mac_filter),
+				       GFP_KERNEL);
+
+	mac_list = adapter->vf_mac_list;
+	INIT_LIST_HEAD(&adapter->vf_macs.l);
+
+	if (adapter->vf_mac_list) {
+		/* Initialize list of VF MAC filters */
+		for (i = 0; i < num_vf_mac_filters; i++) {
+			mac_list->vf = -1;
+			mac_list->free = true;
+			list_add(&mac_list->l, &adapter->vf_macs.l);
+			mac_list++;
+		}
+	} else {
+		/* If we could not allocate memory for the VF MAC filters
+		 * we can continue without this feature but warn the user.
+		 */
+		dev_err(&pdev->dev,
+			"Unable to allocate memory for VF MAC filter list\n");
+	}
+
+	/* only call pci_enable_sriov() if no VFs are allocated already */
+	if (!old_vfs) {
+		err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+		if (err)
+			goto err_out;
+	}
+	dev_info(&pdev->dev, "%d VFs allocated\n",
+		 adapter->vfs_allocated_count);
+	for (i = 0; i < adapter->vfs_allocated_count; i++)
+		igb_vf_configure(adapter, i);
+
+	/* DMA Coalescing is not supported in IOV mode. */
+	adapter->flags &= ~IGB_FLAG_DMAC;
+	goto out;
+
+err_out:
+	kfree(adapter->vf_mac_list);
+	adapter->vf_mac_list = NULL;
+	kfree(adapter->vf_data);
+	adapter->vf_data = NULL;
+	adapter->vfs_allocated_count = 0;
+out:
+	return err;
+}
+
+#endif
+/**
+ * igb_remove_i2c - Cleanup I2C interface
+ * @adapter: pointer to adapter structure
+ **/
+static void igb_remove_i2c(struct igb_adapter *adapter)
+{
+	/* free the adapter bus structure */
+	i2c_del_adapter(&adapter->i2c_adap);
+}
+
+/**
+ * igb_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * igb_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void igb_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	pm_runtime_get_noresume(&pdev->dev);
+#ifdef CONFIG_IGB_HWMON
+	igb_sysfs_exit(adapter);
+#endif
+	igb_remove_i2c(adapter);
+	igb_ptp_stop(adapter);
+	/* The watchdog timer may be rescheduled, so explicitly
+	 * disable watchdog from being rescheduled.
+	 */
+	set_bit(__IGB_DOWN, &adapter->state);
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_info_timer);
+
+	cancel_work_sync(&adapter->reset_task);
+	cancel_work_sync(&adapter->watchdog_task);
+
+#ifdef CONFIG_IGB_DCA
+	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
+		dev_info(&pdev->dev, "DCA disabled\n");
+		dca_remove_requester(&pdev->dev);
+		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
+		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
+	}
+#endif
+
+	/* Release control of h/w to f/w. If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+	 */
+	igb_release_hw_control(adapter);
+
+#ifdef CONFIG_PCI_IOV
+	igb_disable_sriov(pdev);
+#endif
+
+	unregister_netdev(netdev);
+
+	igb_clear_interrupt_scheme(adapter);
+
+	pci_iounmap(pdev, adapter->io_addr);
+	if (hw->flash_address)
+		iounmap(hw->flash_address);
+	pci_release_mem_regions(pdev);
+
+	kfree(adapter->mac_table);
+	kfree(adapter->shadow_vfta);
+	free_netdev(netdev);
+
+	pci_disable_pcie_error_reporting(pdev);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
+ * @adapter: board private structure to initialize
+ *
+ * This function initializes the vf specific data storage and then attempts to
+ * allocate the VFs. The reason for ordering it this way is because it is much
+ * more expensive time wise to disable SR-IOV than it is to allocate and free
+ * the memory for the VFs.
+ **/
+static void igb_probe_vfs(struct igb_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* Virtualization features not supported on i210 family. */
+	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+		return;
+
+	/* Of the below we really only want the effect of getting
+	 * IGB_FLAG_HAS_MSIX set (if available), without which
+	 * igb_enable_sriov() has no effect.
+	 */
+	igb_set_interrupt_capability(adapter, true);
+	igb_reset_interrupt_capability(adapter);
+
+	pci_sriov_set_totalvfs(pdev, 7);
+	igb_enable_sriov(pdev, max_vfs);
+
+#endif /* CONFIG_PCI_IOV */
+}
+
+unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int max_rss_queues;
+
+	/* Determine the maximum number of RSS queues supported. */
+	switch (hw->mac.type) {
+	case e1000_i211:
+		max_rss_queues = IGB_MAX_RX_QUEUES_I211;
+		break;
+	case e1000_82575:
+	case e1000_i210:
+		max_rss_queues = IGB_MAX_RX_QUEUES_82575;
+		break;
+	case e1000_i350:
+		/* I350 cannot do RSS and SR-IOV at the same time */
+		if (!!adapter->vfs_allocated_count) {
+			max_rss_queues = 1;
+			break;
+		}
+		fallthrough;
+	case e1000_82576:
+		if (!!adapter->vfs_allocated_count) {
+			max_rss_queues = 2;
+			break;
+		}
+		fallthrough;
+	case e1000_82580:
+	case e1000_i354:
+	default:
+		max_rss_queues = IGB_MAX_RX_QUEUES;
+		break;
+	}
+
+	return max_rss_queues;
+}
+
+static void igb_init_queue_configuration(struct igb_adapter *adapter)
+{
+	u32 max_rss_queues;
+
+	max_rss_queues = igb_get_max_rss_queues(adapter);
+	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+
+	igb_set_flag_queue_pairs(adapter, max_rss_queues);
+}
+
+void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
+			      const u32 max_rss_queues)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* Determine if we need to pair queues. */
+	switch (hw->mac.type) {
+	case e1000_82575:
+	case e1000_i211:
+		/* Device supports enough interrupts without queue pairing. */
+		break;
+	case e1000_82576:
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i354:
+	case e1000_i210:
+	default:
+		/* If rss_queues > half of max_rss_queues, pair the queues in
+		 * order to conserve interrupts due to limited supply.
+		 */
+		if (adapter->rss_queues > (max_rss_queues / 2))
+			adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+		else
+			adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
+		break;
+	}
+}
+
+/**
+ * igb_sw_init - Initialize general software structures (struct igb_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * igb_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int igb_sw_init(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGB_DEFAULT_TXD; + adapter->rx_ring_count = IGB_DEFAULT_RXD; + + /* set default ITR values */ + adapter->rx_itr_setting = IGB_DEFAULT_ITR; + adapter->tx_itr_setting = IGB_DEFAULT_ITR; + + /* set default work limits */ + adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; + + adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + spin_lock_init(&adapter->nfc_lock); + spin_lock_init(&adapter->stats64_lock); + + /* init spinlock to avoid concurrency of VF resources */ + spin_lock_init(&adapter->vfs_lock); +#ifdef CONFIG_PCI_IOV + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + if (max_vfs > 7) { + dev_warn(&pdev->dev, + "Maximum of 7 VFs per PF, using max\n"); + max_vfs = adapter->vfs_allocated_count = 7; + } else + adapter->vfs_allocated_count = max_vfs; + if (adapter->vfs_allocated_count) + dev_warn(&pdev->dev, + "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n"); + break; + default: + break; + } +#endif /* CONFIG_PCI_IOV */ + + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ + adapter->flags |= IGB_FLAG_HAS_MSIX; + + adapter->mac_table = kcalloc(hw->mac.rar_entry_count, + sizeof(struct igb_mac_addr), + GFP_KERNEL); + if (!adapter->mac_table) + return -ENOMEM; + + igb_probe_vfs(adapter); + + igb_init_queue_configuration(adapter); + + /* Setup and initialize a copy of the hw vlan table array */ + adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), + GFP_KERNEL); + if (!adapter->shadow_vfta) + return -ENOMEM; + + /* This call may decrease the number of queues */ + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igb_irq_disable(adapter); + + if (hw->mac.type >= e1000_i350) + adapter->flags &= ~IGB_FLAG_DMAC; + + set_bit(__IGB_DOWN, &adapter->state); + return 0; +} + +/** + * __igb_open - Called when a network interface is made active + * @netdev: network interface device structure + * @resuming: indicates whether we are in a resume call + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
+ **/ +static int __igb_open(struct net_device *netdev, bool resuming) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + int i; + + /* disallow open during test */ + if (test_bit(__IGB_TESTING, &adapter->state)) { + WARN_ON(resuming); + return -EBUSY; + } + + if (!resuming) + pm_runtime_get_sync(&pdev->dev); + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = igb_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igb_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + igb_power_up_link(adapter); + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + igb_configure(adapter); + + err = igb_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(adapter->netdev, + adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(adapter->netdev, + adapter->num_rx_queues); + if (err) + goto err_set_queues; + + /* From here on the code is the same as igb_up() */ + clear_bit(__IGB_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&(adapter->q_vector[i]->napi)); + + /* Clear any pending interrupts. */ + rd32(E1000_TSICR); + rd32(E1000_ICR); + + igb_irq_enable(adapter); + + /* notify VFs that reset has been completed */ + if (adapter->vfs_allocated_count) { + u32 reg_data = rd32(E1000_CTRL_EXT); + + reg_data |= E1000_CTRL_EXT_PFRSTD; + wr32(E1000_CTRL_EXT, reg_data); + } + + netif_tx_start_all_queues(netdev); + + if (!resuming) + pm_runtime_put(&pdev->dev); + + /* start the watchdog. */ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + + return 0; + +err_set_queues: + igb_free_irq(adapter); +err_req_irq: + igb_release_hw_control(adapter); + igb_power_down_link(adapter); + igb_free_all_rx_resources(adapter); +err_setup_rx: + igb_free_all_tx_resources(adapter); +err_setup_tx: + igb_reset(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); + + return err; +} + +int igb_open(struct net_device *netdev) +{ + return __igb_open(netdev, false); +} + +/** + * __igb_close - Disables a network interface + * @netdev: network interface device structure + * @suspending: indicates we are in a suspend call + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ **/ +static int __igb_close(struct net_device *netdev, bool suspending) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); + + if (!suspending) + pm_runtime_get_sync(&pdev->dev); + + igb_down(adapter); + igb_free_irq(adapter); + + igb_free_all_tx_resources(adapter); + igb_free_all_rx_resources(adapter); + + if (!suspending) + pm_runtime_put_sync(&pdev->dev); + return 0; +} + +int igb_close(struct net_device *netdev) +{ + if (netif_device_present(netdev) || netdev->dismantle) + return __igb_close(netdev, false); + return 0; +} + +/** + * igb_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int igb_setup_tx_resources(struct igb_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int size; + + size = sizeof(struct igb_tx_buffer) * tx_ring->count; + + tx_ring->tx_buffer_info = vmalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igb_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int igb_setup_all_tx_resources(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igb_setup_tx_resources(adapter->tx_ring[i]); + if (err) { + dev_err(&pdev->dev, + "Allocation for Tx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_tx_resources(adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * igb_setup_tctl - configure the transmit control registers + * @adapter: Board private structure + **/ +void igb_setup_tctl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tctl; + + /* disable queue 0 which is enabled by default on 82575 and 82576 */ + wr32(E1000_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ + tctl = rd32(E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + igb_config_collision_dist(hw); + + /* Enable transmits */ + tctl |= E1000_TCTL_EN; + + wr32(E1000_TCTL, tctl); +} + +/** + * igb_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. 
+ **/ +void igb_configure_tx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + u32 txdctl = 0; + u64 tdba = ring->dma; + int reg_idx = ring->reg_idx; + + wr32(E1000_TDLEN(reg_idx), + ring->count * sizeof(union e1000_adv_tx_desc)); + wr32(E1000_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(E1000_TDBAH(reg_idx), tdba >> 32); + + ring->tail = adapter->io_addr + E1000_TDT(reg_idx); + wr32(E1000_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGB_TX_PTHRESH; + txdctl |= IGB_TX_HTHRESH << 8; + txdctl |= IGB_TX_WTHRESH << 16; + + /* reinitialize tx_buffer_info */ + memset(ring->tx_buffer_info, 0, + sizeof(struct igb_tx_buffer) * ring->count); + + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + wr32(E1000_TXDCTL(reg_idx), txdctl); +} + +/** + * igb_configure_tx - Configure transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void igb_configure_tx(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + /* disable the queues */ + for (i = 0; i < adapter->num_tx_queues; i++) + wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0); + + wrfl(); + usleep_range(10000, 20000); + + for (i = 0; i < adapter->num_tx_queues; i++) + igb_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +/** + * igb_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: Rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int igb_setup_rx_resources(struct igb_ring *rx_ring) +{ + struct igb_adapter *adapter = netdev_priv(rx_ring->netdev); + struct device *dev = rx_ring->dev; + int size, res; + + /* XDP RX-queue info */ + if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, + rx_ring->queue_index, 0); + if (res < 0) { + dev_err(dev, "Failed to register xdp_rxq index %u\n", + rx_ring->queue_index); + return res; + } + + size = sizeof(struct igb_rx_buffer) * rx_ring->count; + + rx_ring->rx_buffer_info = vmalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + rx_ring->xdp_prog = adapter->xdp_prog; + + return 0; + +err: + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igb_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int igb_setup_all_rx_resources(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igb_setup_rx_resources(adapter->rx_ring[i]); + if (err) { + dev_err(&pdev->dev, + "Allocation for Rx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_rx_resources(adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * igb_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure 
+ **/ +static void igb_setup_mrqc(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 mrqc, rxcsum; + u32 j, num_rx_queues; + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (j = 0; j < 10; j++) + wr32(E1000_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + switch (hw->mac.type) { + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + num_rx_queues = 2; + break; + default: + break; + } + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGB_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGB_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igb_write_rss_indir_tbl(adapter); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(E1000_RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + if (adapter->hw.mac.type >= e1000_82576) + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= E1000_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(E1000_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + + /* If VMDq is enabled then we set the appropriate mode for that, else + * we default to RSS so that an RSS hash is calculated per packet even + * if we are only using one queue + */ + if (adapter->vfs_allocated_count) { + if (hw->mac.type > e1000_82575) { + /* Set the default pool for the PF's first queue */ + u32 vtctl = rd32(E1000_VT_CTL); + + vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | + E1000_VT_CTL_DISABLE_DEF_POOL); + vtctl |= adapter->vfs_allocated_count << + E1000_VT_CTL_DEFAULT_POOL_SHIFT; + wr32(E1000_VT_CTL, vtctl); + } + if (adapter->rss_queues > 1) + mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ; + else + mrqc |= E1000_MRQC_ENABLE_VMDQ; + } else { + mrqc |= E1000_MRQC_ENABLE_RSS_MQ; + } + igb_vmm_control(adapter); + + wr32(E1000_MRQC, mrqc); +} + +/** + * igb_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +void igb_setup_rctl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = rd32(E1000_RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* enable stripping of CRC. It's unlikely this will break BMC + * redirection as it did with e1000. Newer features require + * that the HW strips the CRC. + */ + rctl |= E1000_RCTL_SECRC; + + /* disable store bad packets and clear size bits. */ + rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); + + /* enable LPE to allow for reception of jumbo frames */ + rctl |= E1000_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ + wr32(E1000_RXDCTL(0), 0); + + /* Attention!!! 
For SR-IOV PF driver operations you must enable + * queue drop for all VF and PF queues to prevent head of line blocking + * if an un-trusted VF does not provide descriptors to hardware. + */ + if (adapter->vfs_allocated_count) { + /* set all queue drop enable bits */ + wr32(E1000_QDE, ALL_QUEUES); + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + wr32(E1000_RCTL, rctl); +} + +static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, + int vfn) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + + if (size > MAX_JUMBO_FRAME_SIZE) + size = MAX_JUMBO_FRAME_SIZE; + + vmolr = rd32(E1000_VMOLR(vfn)); + vmolr &= ~E1000_VMOLR_RLPML_MASK; + vmolr |= size | E1000_VMOLR_LPE; + wr32(E1000_VMOLR(vfn), vmolr); + + return 0; +} + +static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter, + int vfn, bool enable) +{ + struct e1000_hw *hw = &adapter->hw; + u32 val, reg; + + if (hw->mac.type < e1000_82576) + return; + + if (hw->mac.type == e1000_i350) + reg = E1000_DVMOLR(vfn); + else + reg = E1000_VMOLR(vfn); + + val = rd32(reg); + if (enable) + val |= E1000_VMOLR_STRVLAN; + else + val &= ~(E1000_VMOLR_STRVLAN); + wr32(reg, val); +} + +static inline void igb_set_vmolr(struct igb_adapter *adapter, + int vfn, bool aupe) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + + /* This register exists only on 82576 and newer so if we are older then + * we should exit and do nothing + */ + if (hw->mac.type < e1000_82576) + return; + + vmolr = rd32(E1000_VMOLR(vfn)); + if (aupe) + vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ + else + vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ + + /* clear all bits that might not be set */ + vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); + + if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) + vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ + /* for VMDq only allow the VFs and pool 0 to accept broadcast and + * multicast packets + */ + if (vfn <= adapter->vfs_allocated_count) + vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ + + wr32(E1000_VMOLR(vfn), vmolr); +} + +/** + * igb_setup_srrctl - configure the split and replication receive control + * registers + * @adapter: Board private structure + * @ring: receive ring to be configured + **/ +void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + int reg_idx = ring->reg_idx; + u32 srrctl = 0; + + srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + if (ring_uses_large_buffer(ring)) + srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + if (hw->mac.type >= e1000_82580) + srrctl |= E1000_SRRCTL_TIMESTAMP; + /* Only set Drop Enable if VFs allocated, or we are supporting multiple + * queues and rx flow control is disabled + */ + if (adapter->vfs_allocated_count || + (!(hw->fc.current_mode & e1000_fc_rx_pause) && + adapter->num_rx_queues > 1)) + srrctl |= E1000_SRRCTL_DROP_EN; + + 
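+	/* commit the assembled SRRCTL value (buffer sizes, descriptor type, drop enable) for this queue */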
wr32(E1000_SRRCTL(reg_idx), srrctl); +} + +/** + * igb_configure_rx_ring - Configure a receive ring after Reset + * @adapter: board private structure + * @ring: receive ring to be configured + * + * Configure the Rx unit of the MAC after a reset. + **/ +void igb_configure_rx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + union e1000_adv_rx_desc *rx_desc; + u64 rdba = ring->dma; + int reg_idx = ring->reg_idx; + u32 rxdctl = 0; + + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL)); + + /* disable the queue */ + wr32(E1000_RXDCTL(reg_idx), 0); + + /* Set DMA base address registers */ + wr32(E1000_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(E1000_RDBAH(reg_idx), rdba >> 32); + wr32(E1000_RDLEN(reg_idx), + ring->count * sizeof(union e1000_adv_rx_desc)); + + /* initialize head and tail */ + ring->tail = adapter->io_addr + E1000_RDT(reg_idx); + wr32(E1000_RDH(reg_idx), 0); + writel(0, ring->tail); + + /* set descriptor configuration */ + igb_setup_srrctl(adapter, ring); + + /* set filtering for VMDQ pools */ + igb_set_vmolr(adapter, reg_idx & 0x7, true); + + rxdctl |= IGB_RX_PTHRESH; + rxdctl |= IGB_RX_HTHRESH << 8; + rxdctl |= IGB_RX_WTHRESH << 16; + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct igb_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IGB_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* enable receive descriptor fetching */ + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + wr32(E1000_RXDCTL(reg_idx), rxdctl); +} + +static void igb_set_rx_buffer_len(struct igb_adapter *adapter, + struct igb_ring *rx_ring) +{ + /* set build_skb and buffer size flags */ + clear_ring_build_skb_enabled(rx_ring); + clear_ring_uses_large_buffer(rx_ring); + + if (adapter->flags & IGB_FLAG_RX_LEGACY) + return; + + set_ring_build_skb_enabled(rx_ring); + +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) + return; + + set_ring_uses_large_buffer(rx_ring); +#endif +} + +/** + * igb_configure_rx - Configure receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ **/ +static void igb_configure_rx(struct igb_adapter *adapter) +{ + int i; + + /* set the correct pool for the PF default MAC address in entry 0 */ + igb_set_default_mac_filter(adapter); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *rx_ring = adapter->rx_ring[i]; + + igb_set_rx_buffer_len(adapter, rx_ring); + igb_configure_rx_ring(adapter, rx_ring); + } +} + +/** + * igb_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void igb_free_tx_resources(struct igb_ring *tx_ring) +{ + igb_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igb_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void igb_free_all_tx_resources(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igb_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * igb_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void igb_clean_tx_ring(struct igb_ring *tx_ring) +{ + u16 i = tx_ring->next_to_clean; + struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + + while (i != tx_ring->next_to_use) { + union e1000_adv_tx_desc *eop_desc, *tx_desc; + + /* Free all the Tx ring sk_buffs or xdp frames */ + if (tx_buffer->type == IGB_TYPE_SKB) + dev_kfree_skb_any(tx_buffer->skb); + else + xdp_return_frame(tx_buffer->xdpf); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IGB_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + + tx_buffer->next_to_watch = NULL; + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * igb_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void igb_clean_all_tx_rings(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igb_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * igb_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void igb_free_rx_resources(struct igb_ring *rx_ring) +{ + igb_clean_rx_ring(rx_ring); + + 
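+	/* detach the XDP program and unregister this ring's xdp_rxq info */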
rx_ring->xdp_prog = NULL; + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * igb_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void igb_free_all_rx_resources(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igb_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * igb_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void igb_clean_rx_ring(struct igb_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, + buffer_info->dma, + buffer_info->page_offset, + igb_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, + buffer_info->dma, + igb_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGB_RX_DMA_ATTR); + __page_frag_cache_drain(buffer_info->page, + buffer_info->pagecnt_bias); + + i++; + if (i == rx_ring->count) + i = 0; + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * igb_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void igb_clean_all_rx_rings(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igb_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * igb_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int igb_set_mac(struct net_device *netdev, void *p) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + /* set the correct pool for the new PF MAC address in entry 0 */ + igb_set_default_mac_filter(adapter); + + return 0; +} + +/** + * igb_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int igb_write_mc_addr_list(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + igb_update_mc_addr_list(hw, NULL, 0); + igb_restore_vf_multicasts(adapter); + return 0; + } + + mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* The shared function expects a packed array of only addresses. 
*/ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + igb_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +static int igb_vlan_promisc_enable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 i, pf_id; + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + case e1000_i350: + /* VLAN filtering needed for VLAN prio filter */ + if (adapter->netdev->features & NETIF_F_NTUPLE) + break; + fallthrough; + case e1000_82576: + case e1000_82580: + case e1000_i354: + /* VLAN filtering needed for pool filtering */ + if (adapter->vfs_allocated_count) + break; + fallthrough; + default: + return 1; + } + + /* We are already in VLAN promisc, nothing to do */ + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + return 0; + + if (!adapter->vfs_allocated_count) + goto set_vfta; + + /* Add PF to all active pools */ + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + + for (i = E1000_VLVF_ARRAY_SIZE; --i;) { + u32 vlvf = rd32(E1000_VLVF(i)); + + vlvf |= BIT(pf_id); + wr32(E1000_VLVF(i), vlvf); + } + +set_vfta: + /* Set all bits in the VLAN filter table array */ + for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;) + hw->mac.ops.write_vfta(hw, i, ~0U); + + /* Set flag so we don't redo unnecessary work */ + adapter->flags |= IGB_FLAG_VLAN_PROMISC; + + return 0; +} + +#define VFTA_BLOCK_SIZE 8 +static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; + u32 vid_start = vfta_offset * 32; + u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); + u32 i, vid, word, bits, pf_id; + + /* guarantee that we don't scrub out management VLAN */ + vid = adapter->mng_vlan_id; + if (vid >= vid_start && vid < vid_end) + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); + + if (!adapter->vfs_allocated_count) + goto set_vfta; + + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + + for (i = E1000_VLVF_ARRAY_SIZE; --i;) { + u32 vlvf = rd32(E1000_VLVF(i)); + + /* pull VLAN ID from VLVF */ + vid = vlvf & VLAN_VID_MASK; + + /* only concern ourselves with a certain range */ + if (vid < vid_start || vid >= vid_end) + continue; + + if (vlvf & E1000_VLVF_VLANID_ENABLE) { + /* record VLAN ID in VFTA */ + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); + + /* if PF is part of this then continue */ + if (test_bit(vid, adapter->active_vlans)) + continue; + } + + /* remove PF from the pool */ + bits = ~BIT(pf_id); + bits &= rd32(E1000_VLVF(i)); + wr32(E1000_VLVF(i), bits); + } + +set_vfta: + /* extract values from active_vlans and write back to VFTA */ + for (i = VFTA_BLOCK_SIZE; i--;) { + vid = (vfta_offset + i) * 32; + word = vid / BITS_PER_LONG; + bits = vid % BITS_PER_LONG; + + vfta[i] |= adapter->active_vlans[word] >> bits; + + hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]); + } +} + +static void igb_vlan_promisc_disable(struct igb_adapter *adapter) +{ + u32 i; + + /* We are not in VLAN promisc, nothing to do */ + if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; + + for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE) + igb_scrub_vfta(adapter, i); +} + +/** + * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address 
lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void igb_set_rx_mode(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned int vfn = adapter->vfs_allocated_count; + u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE; + int count; + + /* Check for Promiscuous and All Multicast modes */ + if (netdev->flags & IFF_PROMISC) { + rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + + /* enable use of UTA filter to force packets to default pool */ + if (hw->mac.type == e1000_82576) + vmolr |= E1000_VMOLR_ROPE; + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = igb_write_mc_addr_list(netdev); + if (count < 0) { + rctl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else if (count) { + vmolr |= E1000_VMOLR_ROMPE; + } + } + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) { + rctl |= E1000_RCTL_UPE; + vmolr |= E1000_VMOLR_ROPE; + } + + /* enable VLAN filtering by default */ + rctl |= E1000_RCTL_VFE; + + /* disable VLAN filtering for modes that require it */ + if ((netdev->flags & IFF_PROMISC) || + (netdev->features & NETIF_F_RXALL)) { + /* if we fail to set all rules then just clear VFE */ + if (igb_vlan_promisc_enable(adapter)) + rctl &= ~E1000_RCTL_VFE; + } else { + igb_vlan_promisc_disable(adapter); + } + + /* update state of unicast, multicast, and VLAN filtering modes */ + rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE | + E1000_RCTL_VFE); + wr32(E1000_RCTL, rctl); + +#if (PAGE_SIZE < 8192) + if (!adapter->vfs_allocated_count) { + if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) + rlpml = IGB_MAX_FRAME_BUILD_SKB; + } +#endif + wr32(E1000_RLPML, rlpml); + + /* In order to support SR-IOV and eventually VMDq it is necessary to set + * the VMOLR to enable the appropriate modes. 
Without this workaround + * we will have issues with VLAN tag stripping not being done for frames + * that are only arriving because we are the default pool + */ + if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) + return; + + /* set UTA to appropriate mode */ + igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE)); + + vmolr |= rd32(E1000_VMOLR(vfn)) & + ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); + + /* enable Rx jumbo frames, restrict as needed to support build_skb */ + vmolr &= ~E1000_VMOLR_RLPML_MASK; +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB) + vmolr |= IGB_MAX_FRAME_BUILD_SKB; + else +#endif + vmolr |= MAX_JUMBO_FRAME_SIZE; + vmolr |= E1000_VMOLR_LPE; + + wr32(E1000_VMOLR(vfn), vmolr); + + igb_restore_vf_multicasts(adapter); +} + +static void igb_check_wvbr(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 wvbr = 0; + + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + wvbr = rd32(E1000_WVBR); + if (!wvbr) + return; + break; + default: + break; + } + + adapter->wvbr |= wvbr; +} + +#define IGB_STAGGERED_QUEUE_OFFSET 8 + +static void igb_spoof_check(struct igb_adapter *adapter) +{ + int j; + + if (!adapter->wvbr) + return; + + for (j = 0; j < adapter->vfs_allocated_count; j++) { + if (adapter->wvbr & BIT(j) || + adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) { + dev_warn(&adapter->pdev->dev, + "Spoof event(s) detected on VF %d\n", j); + adapter->wvbr &= + ~(BIT(j) | + BIT(j + IGB_STAGGERED_QUEUE_OFFSET)); + } + } +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void igb_update_phy_info(struct timer_list *t) +{ + struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer); + igb_get_phy_info(&adapter->hw); +} + +/** + * igb_has_link - check shared code for link and determine up/down + * @adapter: pointer to driver private info + **/ +bool igb_has_link(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay + * false until the e1000_check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + if (!hw->mac.get_link_status) + return true; + fallthrough; + case e1000_media_type_internal_serdes: + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + break; + default: + case e1000_media_type_unknown: + break; + } + + if (((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) && + (hw->phy.id == I210_I_PHY_ID)) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + + return link_active; +} + +static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) +{ + bool ret = false; + u32 ctrl_ext, thstat; + + /* check for thermal sensor event on i350 copper only */ + if (hw->mac.type == e1000_i350) { + thstat = rd32(E1000_THSTAT); + ctrl_ext = rd32(E1000_CTRL_EXT); + + if ((hw->phy.media_type == e1000_media_type_copper) && + !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) + ret = !!(thstat & event); + } + + return ret; +} + +/** + * igb_check_lvmmc - check for malformed packets received + * and indicated in LVMMC register + * @adapter: pointer to adapter + **/ +static void igb_check_lvmmc(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 lvmmc; + + lvmmc = rd32(E1000_LVMMC); + if (lvmmc) { + if (unlikely(net_ratelimit())) { + netdev_warn(adapter->netdev, + "malformed Tx packet detected and dropped, LVMMC:0x%08x\n", + lvmmc); + } + } +} + +/** + * igb_watchdog - Timer Call-back + * @t: pointer to timer_list containing our private info pointer + **/ +static void igb_watchdog(struct timer_list *t) +{ + struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer); + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igb_watchdog_task(struct work_struct *work) +{ + struct igb_adapter *adapter = container_of(work, + struct igb_adapter, + watchdog_task); + struct e1000_hw *hw = &adapter->hw; + struct e1000_phy_info *phy = &hw->phy; + struct net_device *netdev = adapter->netdev; + u32 link; + int i; + u32 connsw; + u16 phy_data, retry_count = 20; + + link = igb_has_link(adapter); + + if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + + /* Force link down if we have fiber to swap to */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + if (hw->phy.media_type == e1000_media_type_copper) { + connsw = rd32(E1000_CONNSW); + if (!(connsw & E1000_CONNSW_AUTOSENSE_EN)) + link = 0; + } + } + if (link) { + /* Perform a reset if the media type changed. */ + if (hw->dev_spec._82575.media_changed) { + hw->dev_spec._82575.media_changed = false; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + igb_reset(adapter); + } + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = rd32(E1000_CTRL); + /* Links status message must follow this format */ + netdev_info(netdev, + "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? 
+ "Full" : "Half", + (ctrl & E1000_CTRL_TFCE) && + (ctrl & E1000_CTRL_RFCE) ? "RX/TX" : + (ctrl & E1000_CTRL_RFCE) ? "RX" : + (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); + + /* disable EEE if enabled */ + if ((adapter->flags & IGB_FLAG_EEE) && + (adapter->link_duplex == HALF_DUPLEX)) { + dev_info(&adapter->pdev->dev, + "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n"); + adapter->hw.dev_spec._82575.eee_disable = true; + adapter->flags &= ~IGB_FLAG_EEE; + } + + /* check if SmartSpeed worked */ + igb_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); + + /* check for thermal sensor event */ + if (igb_thermal_sensor_event(hw, + E1000_THSTAT_LINK_THROTTLE)) + netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n"); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + /* maybe add some timeout factor ? */ + break; + } + + if (adapter->link_speed != SPEED_1000 || + !hw->phy.ops.read_reg) + goto no_wait; + + /* wait for Remote receiver status OK */ +retry_read_status: + if (!igb_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data)) { + if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && + retry_count) { + msleep(100); + retry_count--; + goto retry_read_status; + } else if (!retry_count) { + dev_err(&adapter->pdev->dev, "exceed max 2 second\n"); + } + } else { + dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n"); + } +no_wait: + netif_carrier_on(netdev); + + igb_ping_all_vfs(adapter); + igb_check_vf_rate_limit(adapter); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* check for thermal sensor event */ + if (igb_thermal_sensor_event(hw, + E1000_THSTAT_PWR_DOWN)) { + netdev_err(netdev, "The network adapter was stopped because it overheated\n"); + } + + /* Links status message must follow this format */ + netdev_info(netdev, "igb: %s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); + + igb_ping_all_vfs(adapter); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + /* link is down, time to check for alternate media */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + igb_check_swap_media(adapter); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + pm_schedule_suspend(netdev->dev.parent, + MSEC_PER_SEC * 5); + + /* also check for alternate media here */ + } else if (!netif_carrier_ok(netdev) && + (adapter->flags & IGB_FLAG_MAS_ENABLE)) { + igb_check_swap_media(adapter); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + } + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *tx_ring = adapter->tx_ring[i]; + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to 
get done, so reset controller to flush Tx.
+			 * (Do the reset outside of interrupt context).
+			 */
+			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
+				adapter->tx_timeout_count++;
+				schedule_work(&adapter->reset_task);
+				/* return immediately since reset is imminent */
+				return;
+			}
+		}
+
+		/* Force detection of hung controller every watchdog period */
+		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+	}
+
+	/* Cause software interrupt to ensure Rx ring is cleaned */
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+		u32 eics = 0;
+
+		for (i = 0; i < adapter->num_q_vectors; i++)
+			eics |= adapter->q_vector[i]->eims_value;
+		wr32(E1000_EICS, eics);
+	} else {
+		wr32(E1000_ICS, E1000_ICS_RXDMT0);
+	}
+
+	igb_spoof_check(adapter);
+	igb_ptp_rx_hang(adapter);
+	igb_ptp_tx_hang(adapter);
+
+	/* Check LVMMC register on i350/i354 only */
+	if ((adapter->hw.mac.type == e1000_i350) ||
+	    (adapter->hw.mac.type == e1000_i354))
+		igb_check_lvmmc(adapter);
+
+	/* Reset the timer */
+	if (!test_bit(__IGB_DOWN, &adapter->state)) {
+		if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
+			mod_timer(&adapter->watchdog_timer,
+				  round_jiffies(jiffies + HZ));
+		else
+			mod_timer(&adapter->watchdog_timer,
+				  round_jiffies(jiffies + 2 * HZ));
+	}
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+/**
+ * igb_update_ring_itr - update the dynamic ITR value based on packet size
+ * @q_vector: pointer to q_vector
+ *
+ * Stores a new ITR value based strictly on packet size. This
+ * algorithm is less sophisticated than that used in igb_update_itr,
+ * due to the difficulty of synchronizing statistics across multiple
+ * receive rings. The divisors and thresholds used by this function
+ * were determined based on theoretical maximum wire speed and testing
+ * data, in order to minimize response time while increasing bulk
+ * throughput.
+ * This functionality is controlled by ethtool's coalescing settings.
+ * NOTE: This function is called only when operating in a multiqueue
+ * receive environment.
+ **/
+static void igb_update_ring_itr(struct igb_q_vector *q_vector)
+{
+	int new_val = q_vector->itr_val;
+	int avg_wire_size = 0;
+	struct igb_adapter *adapter = q_vector->adapter;
+	unsigned int packets;
+
+	/* For non-gigabit speeds, just fix the interrupt rate at 4000
+	 * ints/sec - ITR timer value of 120 ticks.
+ */ + if (adapter->link_speed != SPEED_1000) { + new_val = IGB_4K_ITR; + goto set_itr_val; + } + + packets = q_vector->rx.total_packets; + if (packets) + avg_wire_size = q_vector->rx.total_bytes / packets; + + packets = q_vector->tx.total_packets; + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + q_vector->tx.total_bytes / packets); + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); + + /* Give a little boost to mid-size frames */ + if ((avg_wire_size > 300) && (avg_wire_size < 1200)) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (new_val < IGB_20K_ITR && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + new_val = IGB_20K_ITR; + +set_itr_val: + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; + } +clear_counts: + q_vector->rx.total_bytes = 0; + q_vector->rx.total_packets = 0; + q_vector->tx.total_bytes = 0; + q_vector->tx.total_packets = 0; +} + +/** + * igb_update_itr - update the dynamic ITR value based on statistics + * @q_vector: pointer to q_vector + * @ring_container: ring info to update the itr for + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * This functionality is controlled by ethtool's coalescing settings. + * NOTE: These calculations are only valid when operating in a single- + * queue environment. 
+ **/ +static void igb_update_itr(struct igb_q_vector *q_vector, + struct igb_ring_container *ring_container) +{ + unsigned int packets = ring_container->total_packets; + unsigned int bytes = ring_container->total_bytes; + u8 itrval = ring_container->itr; + + /* no packets, exit with status unchanged */ + if (packets == 0) + return; + + switch (itrval) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes/packets > 8000) + itrval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + itrval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes/packets > 8000) + itrval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + itrval = bulk_latency; + else if ((packets > 35)) + itrval = lowest_latency; + } else if (bytes/packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + itrval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + itrval = low_latency; + } else if (bytes < 1500) { + itrval = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itrval; +} + +static void igb_set_itr(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + u32 new_itr = q_vector->itr_val; + u8 current_itr = 0; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (adapter->link_speed != SPEED_1000) { + current_itr = 0; + new_itr = IGB_4K_ITR; + goto set_itr_now; + } + + igb_update_itr(q_vector, &q_vector->tx); + igb_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (current_itr == lowest_latency && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + current_itr = low_latency; + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IGB_70K_ITR; /* 70,000 ints/sec */ + break; + case low_latency: + new_itr = IGB_20K_ITR; /* 20,000 ints/sec */ + break; + case bulk_latency: + new_itr = IGB_4K_ITR; /* 4,000 ints/sec */ + break; + default: + break; + } + +set_itr_now: + if (new_itr != q_vector->itr_val) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR + * value at the beginning of the next interrupt so the timing + * ends up being correct. + */ + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; + } +} + +static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct e1000_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + struct timespec64 ts; + + context_desc = IGB_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT; + + /* For 82575, context index must be unique per ring. */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + mss_l4len_idx |= tx_ring->reg_idx << 4; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + /* We assume there is always a valid tx time available. Invalid times + * should have been handled by the upper layers. + */ + if (tx_ring->launchtime_enable) { + ts = ktime_to_timespec64(first->skb->tstamp); + skb_txtime_consumed(first->skb); + context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); + } else { + context_desc->seqnum_seed = 0; + } +} + +static int igb_tso(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? + E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP; + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); + type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first->tx_flags |= IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_CSUM | + IGB_TX_FLAGS_IPV4; + } else { + ip.v6->payload_len = 0; + first->tx_flags |= IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_CSUM; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) { + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + } else { + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* MSS L4LEN IDX */ + mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; + + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; + + igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, + type_tucmd, mss_l4len_idx); + + return 1; +} + +static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; 
+ u32 vlan_macip_lens = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) && + !tx_ring->launchtime_enable) + return; + goto no_csum; + } + + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; + fallthrough; + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (skb_csum_is_sctp(skb)) { + type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; + break; + } + fallthrough; + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= IGB_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: + vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; + + igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); +} + +#define IGB_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? \ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = E1000_ADVTXD_DTYP_DATA | + E1000_ADVTXD_DCMD_DEXT | + E1000_ADVTXD_DCMD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN, + (E1000_ADVTXD_DCMD_VLE)); + + /* set segmentation bits for TSO */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO, + (E1000_ADVTXD_DCMD_TSE)); + + /* set timestamp bit if present */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP, + (E1000_ADVTXD_MAC_TSTAMP)); + + /* insert frame checksum */ + cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS); + + return cmd_type; +} + +static void igb_tx_olinfo_status(struct igb_ring *tx_ring, + union e1000_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT; + + /* 82575 requires a unique index per ring */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + olinfo_status |= tx_ring->reg_idx << 4; + + /* insert L4 checksum */ + olinfo_status |= IGB_SET_FLAG(tx_flags, + IGB_TX_FLAGS_CSUM, + (E1000_TXD_POPTS_TXSM << 8)); + + /* insert IPv4 checksum */ + olinfo_status |= IGB_SET_FLAG(tx_flags, + IGB_TX_FLAGS_IPV4, + (E1000_TXD_POPTS_IXSM << 8)); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + struct net_device *netdev = tx_ring->netdev; + + netif_stop_subqueue(netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (igb_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! 
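+ * Another CPU freed descriptors after the queue was stopped, so the
+ * queue can be woken back up.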
*/ + netif_wake_subqueue(netdev, tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + if (igb_desc_unused(tx_ring) >= size) + return 0; + return __igb_maybe_stop_tx(tx_ring, size); +} + +static int igb_tx_map(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct igb_tx_buffer *tx_buffer; + union e1000_adv_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u32 cmd_type = igb_tx_cmd_type(skb, tx_flags); + u16 i = tx_ring->next_to_use; + + tx_desc = IGB_TX_DESC(tx_ring, i); + + igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > IGB_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGB_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IGB_MAX_DATA_PER_TXD; + size -= IGB_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGB_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, + size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IGB_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + dma_wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. 
*/ + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); + } + return 0; + +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; + + /* clear dma mappings for failed tx_buffer_info map */ + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + if (i-- == 0) + i += tx_ring->count; + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + + tx_ring->next_to_use = i; + + return -1; +} + +int igb_xmit_xdp_ring(struct igb_adapter *adapter, + struct igb_ring *tx_ring, + struct xdp_frame *xdpf) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); + u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; + u16 count, i, index = tx_ring->next_to_use; + struct igb_tx_buffer *tx_head = &tx_ring->tx_buffer_info[index]; + struct igb_tx_buffer *tx_buffer = tx_head; + union e1000_adv_tx_desc *tx_desc = IGB_TX_DESC(tx_ring, index); + u32 len = xdpf->len, cmd_type, olinfo_status; + void *data = xdpf->data; + + count = TXD_USE_COUNT(len); + for (i = 0; i < nr_frags; i++) + count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); + + if (igb_maybe_stop_tx(tx_ring, count + 3)) + return IGB_XDP_CONSUMED; + + i = 0; + /* record the location of the first descriptor for this packet */ + tx_head->bytecount = xdp_get_frame_len(xdpf); + tx_head->type = IGB_TYPE_XDP; + tx_head->gso_segs = 1; + tx_head->xdpf = xdpf; + + olinfo_status = tx_head->bytecount << E1000_ADVTXD_PAYLEN_SHIFT; + /* 82575 requires a unique index per ring */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + olinfo_status |= tx_ring->reg_idx << 4; + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + + for (;;) { + dma_addr_t dma; + + dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + goto unmap; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, len); + dma_unmap_addr_set(tx_buffer, dma, dma); + + /* put descriptor type bits */ + cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT | + E1000_ADVTXD_DCMD_IFCS | len; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + tx_buffer->protocol = 0; + + if (++index == tx_ring->count) + index = 0; + + if (i == nr_frags) + break; + + tx_buffer = &tx_ring->tx_buffer_info[index]; + tx_desc = IGB_TX_DESC(tx_ring, index); + tx_desc->read.olinfo_status = 0; + + data = skb_frag_address(&sinfo->frags[i]); + len = skb_frag_size(&sinfo->frags[i]); + i++; + } + tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_TXD_DCMD); + + netdev_tx_sent_queue(txring_txq(tx_ring), tx_head->bytecount); + /* set the timestamp */ + tx_head->time_stamp = jiffies; + + /* Avoid any potential race with xdp_xmit and cleanup */ + smp_wmb(); + + /* set next_to_watch value indicating a packet is present */ + tx_head->next_to_watch = tx_desc; + tx_ring->next_to_use = index; + + /* Make sure there is space in the ring for the next send. 
*/ + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) + writel(index, tx_ring->tail); + + return IGB_XDP_TX; + +unmap: + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[index]; + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + if (tx_buffer == tx_head) + break; + + if (!index) + index += tx_ring->count; + index--; + } + + return IGB_XDP_CONSUMED; +} + +netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, + struct igb_ring *tx_ring) +{ + struct igb_tx_buffer *first; + int tso; + u32 tx_flags = 0; + unsigned short f; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + u8 hdr_len = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); + + if (igb_maybe_stop_tx(tx_ring, count + 3)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->type = IGB_TYPE_SKB; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); + + if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && + !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGB_TX_FLAGS_TSTAMP; + + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + if (adapter->hw.mac.type == e1000_82576) + schedule_work(&adapter->ptp_tx_work); + } else { + adapter->tx_hwtstamp_skipped++; + } + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= IGB_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + tso = igb_tso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + igb_tx_csum(tx_ring, first); + + if (igb_tx_map(tx_ring, first, hdr_len)) + goto cleanup_tx_tstamp; + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; +cleanup_tx_tstamp: + if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) { + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); + + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + if (adapter->hw.mac.type == e1000_82576) + cancel_work_sync(&adapter->ptp_tx_work); + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + } + + return NETDEV_TX_OK; +} + +static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, + struct sk_buff *skb) +{ + unsigned int r_idx = skb->queue_mapping; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + /* The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size 
requirement. + */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); +} + +/** + * igb_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: number of the Tx queue that hung (unused) + **/ +static void igb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + + if (hw->mac.type >= e1000_82580) + hw->dev_spec._82575.global_device_reset = true; + + schedule_work(&adapter->reset_task); + wr32(E1000_EICS, + (adapter->eims_enable_mask & ~adapter->eims_other)); +} + +static void igb_reset_task(struct work_struct *work) +{ + struct igb_adapter *adapter; + adapter = container_of(work, struct igb_adapter, reset_task); + + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGB_DOWN, &adapter->state) || + test_bit(__IGB_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + + igb_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); + igb_reinit_locked(adapter); + rtnl_unlock(); +} + +/** + * igb_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + **/ +static void igb_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); +} + +/** + * igb_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int igb_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD; + + if (adapter->xdp_prog) { + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + + if (max_frame > igb_rx_bufsz(ring)) { + netdev_warn(adapter->netdev, + "Requested MTU size is not supported with XDP. 
Max frame size is %d\n", + max_frame); + return -EINVAL; + } + } + } + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + /* igb_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + + if (netif_running(netdev)) + igb_down(adapter); + + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igb_up(adapter); + else + igb_reset(adapter); + + clear_bit(__IGB_RESETTING, &adapter->state); + + return 0; +} + +/** + * igb_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void igb_update_stats(struct igb_adapter *adapter) +{ + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + u32 reg, mpc; + int i; + u64 bytes, packets; + unsigned int start; + u64 _bytes, _packets; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. + */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + bytes = 0; + packets = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(E1000_RQDPC(i)); + if (hw->mac.type >= e1000_i210) + wr32(E1000_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *ring = adapter->tx_ring[i]; + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(E1000_CRCERRS); + adapter->stats.gprc += rd32(E1000_GPRC); + adapter->stats.gorc += rd32(E1000_GORCL); + rd32(E1000_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(E1000_BPRC); + adapter->stats.mprc += rd32(E1000_MPRC); + adapter->stats.roc += rd32(E1000_ROC); + + adapter->stats.prc64 += rd32(E1000_PRC64); + adapter->stats.prc127 += rd32(E1000_PRC127); + adapter->stats.prc255 += rd32(E1000_PRC255); + adapter->stats.prc511 += rd32(E1000_PRC511); + adapter->stats.prc1023 += rd32(E1000_PRC1023); + adapter->stats.prc1522 += rd32(E1000_PRC1522); + adapter->stats.symerrs += rd32(E1000_SYMERRS); + adapter->stats.sec += rd32(E1000_SEC); + + mpc = rd32(E1000_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(E1000_SCC); + adapter->stats.ecol += rd32(E1000_ECOL); + adapter->stats.mcc += rd32(E1000_MCC); + adapter->stats.latecol += rd32(E1000_LATECOL); + adapter->stats.dc += rd32(E1000_DC); + adapter->stats.rlec += rd32(E1000_RLEC); + adapter->stats.xonrxc += rd32(E1000_XONRXC); + adapter->stats.xontxc += rd32(E1000_XONTXC); + 
adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); + adapter->stats.xofftxc += rd32(E1000_XOFFTXC); + adapter->stats.fcruc += rd32(E1000_FCRUC); + adapter->stats.gptc += rd32(E1000_GPTC); + adapter->stats.gotc += rd32(E1000_GOTCL); + rd32(E1000_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(E1000_RNBC); + adapter->stats.ruc += rd32(E1000_RUC); + adapter->stats.rfc += rd32(E1000_RFC); + adapter->stats.rjc += rd32(E1000_RJC); + adapter->stats.tor += rd32(E1000_TORH); + adapter->stats.tot += rd32(E1000_TOTH); + adapter->stats.tpr += rd32(E1000_TPR); + + adapter->stats.ptc64 += rd32(E1000_PTC64); + adapter->stats.ptc127 += rd32(E1000_PTC127); + adapter->stats.ptc255 += rd32(E1000_PTC255); + adapter->stats.ptc511 += rd32(E1000_PTC511); + adapter->stats.ptc1023 += rd32(E1000_PTC1023); + adapter->stats.ptc1522 += rd32(E1000_PTC1522); + + adapter->stats.mptc += rd32(E1000_MPTC); + adapter->stats.bptc += rd32(E1000_BPTC); + + adapter->stats.tpt += rd32(E1000_TPT); + adapter->stats.colc += rd32(E1000_COLC); + + adapter->stats.algnerrc += rd32(E1000_ALGNERRC); + /* read internal phy specific stats */ + reg = rd32(E1000_CTRL_EXT); + if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { + adapter->stats.rxerrc += rd32(E1000_RXERRC); + + /* this stat has invalid values on i210/i211 */ + if ((hw->mac.type != e1000_i210) && + (hw->mac.type != e1000_i211)) + adapter->stats.tncrs += rd32(E1000_TNCRS); + } + + adapter->stats.tsctc += rd32(E1000_TSCTC); + adapter->stats.tsctfc += rd32(E1000_TSCTFC); + + adapter->stats.iac += rd32(E1000_IAC); + adapter->stats.icrxoc += rd32(E1000_ICRXOC); + adapter->stats.icrxptc += rd32(E1000_ICRXPTC); + adapter->stats.icrxatc += rd32(E1000_ICRXATC); + adapter->stats.ictxptc += rd32(E1000_ICTXPTC); + adapter->stats.ictxatc += rd32(E1000_ICTXATC); + adapter->stats.ictxqec += rd32(E1000_ICTXQEC); + adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); + adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += rd32(E1000_MGTPTC); + adapter->stats.mgprc += rd32(E1000_MGTPRC); + adapter->stats.mgpdc += rd32(E1000_MGTPDC); + + /* OS2BMC Stats */ + reg = rd32(E1000_MANC); + if (reg & E1000_MANC_EN_BMC2OS) { + adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); + adapter->stats.o2bspc += rd32(E1000_O2BSPC); + adapter->stats.b2ospc += rd32(E1000_B2OSPC); + adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); + } +} + +static void igb_perout(struct igb_adapter *adapter, int tsintr_tt) +{ + int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_PEROUT, tsintr_tt); + struct e1000_hw *hw = 
&adapter->hw; + struct timespec64 ts; + u32 tsauxc; + + if (pin < 0 || pin >= IGB_N_SDP) + return; + + spin_lock(&adapter->tmreg_lock); + + if (hw->mac.type == e1000_82580 || + hw->mac.type == e1000_i354 || + hw->mac.type == e1000_i350) { + s64 ns = timespec64_to_ns(&adapter->perout[tsintr_tt].period); + u32 systiml, systimh, level_mask, level, rem; + u64 systim, now; + + /* read systim registers in sequence */ + rd32(E1000_SYSTIMR); + systiml = rd32(E1000_SYSTIML); + systimh = rd32(E1000_SYSTIMH); + systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml); + now = timecounter_cyc2time(&adapter->tc, systim); + + if (pin < 2) { + level_mask = (tsintr_tt == 1) ? 0x80000 : 0x40000; + level = (rd32(E1000_CTRL) & level_mask) ? 1 : 0; + } else { + level_mask = (tsintr_tt == 1) ? 0x80 : 0x40; + level = (rd32(E1000_CTRL_EXT) & level_mask) ? 1 : 0; + } + + div_u64_rem(now, ns, &rem); + systim = systim + (ns - rem); + + /* synchronize pin level with rising/falling edges */ + div_u64_rem(now, ns << 1, &rem); + if (rem < ns) { + /* first half of period */ + if (level == 0) { + /* output is already low, skip this period */ + systim += ns; + pr_notice("igb: periodic output on %s missed falling edge\n", + adapter->sdp_config[pin].name); + } + } else { + /* second half of period */ + if (level == 1) { + /* output is already high, skip this period */ + systim += ns; + pr_notice("igb: periodic output on %s missed rising edge\n", + adapter->sdp_config[pin].name); + } + } + + /* for this chip family tv_sec is the upper part of the binary value, + * so not seconds + */ + ts.tv_nsec = (u32)systim; + ts.tv_sec = ((u32)(systim >> 32)) & 0xFF; + } else { + ts = timespec64_add(adapter->perout[tsintr_tt].start, + adapter->perout[tsintr_tt].period); + } + + /* u32 conversion of tv_sec is safe until y2106 */ + wr32((tsintr_tt == 1) ? E1000_TRGTTIML1 : E1000_TRGTTIML0, ts.tv_nsec); + wr32((tsintr_tt == 1) ? E1000_TRGTTIMH1 : E1000_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(E1000_TSAUXC); + tsauxc |= TSAUXC_EN_TT0; + wr32(E1000_TSAUXC, tsauxc); + adapter->perout[tsintr_tt].start = ts; + + spin_unlock(&adapter->tmreg_lock); +} + +static void igb_extts(struct igb_adapter *adapter, int tsintr_tt) +{ + int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_EXTTS, tsintr_tt); + int auxstmpl = (tsintr_tt == 1) ? E1000_AUXSTMPL1 : E1000_AUXSTMPL0; + int auxstmph = (tsintr_tt == 1) ? 
E1000_AUXSTMPH1 : E1000_AUXSTMPH0; + struct e1000_hw *hw = &adapter->hw; + struct ptp_clock_event event; + struct timespec64 ts; + unsigned long flags; + + if (pin < 0 || pin >= IGB_N_SDP) + return; + + if (hw->mac.type == e1000_82580 || + hw->mac.type == e1000_i354 || + hw->mac.type == e1000_i350) { + u64 ns = rd32(auxstmpl); + + ns += ((u64)(rd32(auxstmph) & 0xFF)) << 32; + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->tc, ns); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + ts = ns_to_timespec64(ns); + } else { + ts.tv_nsec = rd32(auxstmpl); + ts.tv_sec = rd32(auxstmph); + } + + event.type = PTP_CLOCK_EXTTS; + event.index = tsintr_tt; + event.timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec; + ptp_clock_event(adapter->ptp_clock, &event); +} + +static void igb_tsync_interrupt(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ack = 0, tsicr = rd32(E1000_TSICR); + struct ptp_clock_event event; + + if (tsicr & TSINTR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); + ack |= TSINTR_SYS_WRAP; + } + + if (tsicr & E1000_TSICR_TXTS) { + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + ack |= E1000_TSICR_TXTS; + } + + if (tsicr & TSINTR_TT0) { + igb_perout(adapter, 0); + ack |= TSINTR_TT0; + } + + if (tsicr & TSINTR_TT1) { + igb_perout(adapter, 1); + ack |= TSINTR_TT1; + } + + if (tsicr & TSINTR_AUTT0) { + igb_extts(adapter, 0); + ack |= TSINTR_AUTT0; + } + + if (tsicr & TSINTR_AUTT1) { + igb_extts(adapter, 1); + ack |= TSINTR_AUTT1; + } + + /* acknowledge the interrupts */ + wr32(E1000_TSICR, ack); +} + +static irqreturn_t igb_msix_other(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct e1000_hw *hw = &adapter->hw; + u32 icr = rd32(E1000_ICR); + /* reading ICR causes bit 31 of EICR to be cleared */ + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + /* The DMA Out of Sync is also indication of a spoof event + * in IOV mode. Check the Wrong VM Behavior register to + * see if it is really a spoof event. + */ + igb_check_wvbr(adapter); + } + + /* Check for a mailbox event */ + if (icr & E1000_ICR_VMMB) + igb_msg_task(adapter); + + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & E1000_ICR_TS) + igb_tsync_interrupt(adapter); + + wr32(E1000_EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static void igb_write_itr(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + u32 itr_val = q_vector->itr_val & 0x7FFC; + + if (!q_vector->set_itr) + return; + + if (!itr_val) + itr_val = 0x4; + + if (adapter->hw.mac.type == e1000_82575) + itr_val |= itr_val << 16; + else + itr_val |= E1000_EITR_CNT_IGNR; + + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; +} + +static irqreturn_t igb_msix_ring(int irq, void *data) +{ + struct igb_q_vector *q_vector = data; + + /* Write the ITR value calculated from the previous interrupt. 
*/ + igb_write_itr(q_vector); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_IGB_DCA +static void igb_update_tx_dca(struct igb_adapter *adapter, + struct igb_ring *tx_ring, + int cpu) +{ + struct e1000_hw *hw = &adapter->hw; + u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); + + if (hw->mac.type != e1000_82575) + txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; + + /* We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ + txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN | + E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_DESC_DCA_EN; + + wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); +} + +static void igb_update_rx_dca(struct igb_adapter *adapter, + struct igb_ring *rx_ring, + int cpu) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); + + if (hw->mac.type != e1000_82575) + rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; + + /* We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ + rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN | + E1000_DCA_RXCTRL_DESC_DCA_EN; + + wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); +} + +static void igb_update_dca(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + int cpu = get_cpu(); + + if (q_vector->cpu == cpu) + goto out_no_update; + + if (q_vector->tx.ring) + igb_update_tx_dca(adapter, q_vector->tx.ring, cpu); + + if (q_vector->rx.ring) + igb_update_rx_dca(adapter, q_vector->rx.ring, cpu); + + q_vector->cpu = cpu; +out_no_update: + put_cpu(); +} + +static void igb_setup_dca(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + if (!(adapter->flags & IGB_FLAG_DCA_ENABLED)) + return; + + /* Always use CB2 mode, difference is masked in the CB driver. */ + wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); + + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->cpu = -1; + igb_update_dca(adapter->q_vector[i]); + } +} + +static int __igb_notify_dca(struct device *dev, void *data) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + unsigned long event = *(unsigned long *)data; + + switch (event) { + case DCA_PROVIDER_ADD: + /* if already enabled, don't do it again */ + if (adapter->flags & IGB_FLAG_DCA_ENABLED) + break; + if (dca_add_requester(dev) == 0) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; + dev_info(&pdev->dev, "DCA enabled\n"); + igb_setup_dca(adapter); + break; + } + fallthrough; /* since DCA is disabled. */ + case DCA_PROVIDER_REMOVE: + if (adapter->flags & IGB_FLAG_DCA_ENABLED) { + /* without this a class_device is left + * hanging around in the sysfs model + */ + dca_remove_requester(dev); + dev_info(&pdev->dev, "DCA disabled\n"); + adapter->flags &= ~IGB_FLAG_DCA_ENABLED; + wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); + } + break; + } + + return 0; +} + +static int igb_notify_dca(struct notifier_block *nb, unsigned long event, + void *p) +{ + int ret_val; + + ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, + __igb_notify_dca); + + return ret_val ? 
NOTIFY_BAD : NOTIFY_DONE; +} +#endif /* CONFIG_IGB_DCA */ + +#ifdef CONFIG_PCI_IOV +static int igb_vf_configure(struct igb_adapter *adapter, int vf) +{ + unsigned char mac_addr[ETH_ALEN]; + + eth_zero_addr(mac_addr); + igb_set_vf_mac(adapter, vf, mac_addr); + + /* By default spoof check is enabled for all VFs */ + adapter->vf_data[vf].spoofchk_enabled = true; + + /* By default VFs are not trusted */ + adapter->vf_data[vf].trusted = false; + + return 0; +} + +#endif +static void igb_ping_all_vfs(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ping; + int i; + + for (i = 0 ; i < adapter->vfs_allocated_count; i++) { + ping = E1000_PF_CONTROL_MSG; + if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) + ping |= E1000_VT_MSGTYPE_CTS; + igb_write_mbx(hw, &ping, 1, i); + } +} + +static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr = rd32(E1000_VMOLR(vf)); + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + + vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | + IGB_VF_FLAG_MULTI_PROMISC); + vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); + + if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { + vmolr |= E1000_VMOLR_MPME; + vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; + *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; + } else { + /* if we have hashes and we are clearing a multicast promisc + * flag we need to write the hashes to the MTA as this step + * was previously skipped + */ + if (vf_data->num_vf_mc_hashes > 30) { + vmolr |= E1000_VMOLR_MPME; + } else if (vf_data->num_vf_mc_hashes) { + int j; + + vmolr |= E1000_VMOLR_ROMPE; + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) + igb_mta_set(hw, vf_data->vf_mc_hashes[j]); + } + } + + wr32(E1000_VMOLR(vf), vmolr); + + /* there are flags left unprocessed, likely not supported */ + if (*msgbuf & E1000_VT_MSGINFO_MASK) + return -EINVAL; + + return 0; +} + +static int igb_set_vf_multicasts(struct igb_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + int i; + + /* salt away the number of multicast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vf_data->num_vf_mc_hashes = n; + + /* only up to 30 hash values supported */ + if (n > 30) + n = 30; + + /* store the hashes for later use */ + for (i = 0; i < n; i++) + vf_data->vf_mc_hashes[i] = hash_list[i]; + + /* Flush and reset the mta with the new values */ + igb_set_rx_mode(adapter->netdev); + + return 0; +} + +static void igb_restore_vf_multicasts(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data; + int i, j; + + for (i = 0; i < adapter->vfs_allocated_count; i++) { + u32 vmolr = rd32(E1000_VMOLR(i)); + + vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); + + vf_data = &adapter->vf_data[i]; + + if ((vf_data->num_vf_mc_hashes > 30) || + (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) { + vmolr |= E1000_VMOLR_MPME; + } else if (vf_data->num_vf_mc_hashes) { + vmolr |= E1000_VMOLR_ROMPE; + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) + igb_mta_set(hw, vf_data->vf_mc_hashes[j]); + } + wr32(E1000_VMOLR(i), vmolr); + } +} + +static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pool_mask, vlvf_mask, i; + + /* create mask for VF and other pools */ + pool_mask = 
E1000_VLVF_POOLSEL_MASK; + vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf); + + /* drop PF from pool bits */ + pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT + + adapter->vfs_allocated_count); + + /* Find the vlan filter for this id */ + for (i = E1000_VLVF_ARRAY_SIZE; i--;) { + u32 vlvf = rd32(E1000_VLVF(i)); + u32 vfta_mask, vid, vfta; + + /* remove the vf from the pool */ + if (!(vlvf & vlvf_mask)) + continue; + + /* clear out bit from VLVF */ + vlvf ^= vlvf_mask; + + /* if other pools are present, just remove ourselves */ + if (vlvf & pool_mask) + goto update_vlvfb; + + /* if PF is present, leave VFTA */ + if (vlvf & E1000_VLVF_POOLSEL_MASK) + goto update_vlvf; + + vid = vlvf & E1000_VLVF_VLANID_MASK; + vfta_mask = BIT(vid % 32); + + /* clear bit from VFTA */ + vfta = adapter->shadow_vfta[vid / 32]; + if (vfta & vfta_mask) + hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask); +update_vlvf: + /* clear pool selection enable */ + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + vlvf &= E1000_VLVF_POOLSEL_MASK; + else + vlvf = 0; +update_vlvfb: + /* clear pool bits */ + wr32(E1000_VLVF(i), vlvf); + } +} + +static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan) +{ + u32 vlvf; + int idx; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the VLAN id in the VLVF entries */ + for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) { + vlvf = rd32(E1000_VLVF(idx)); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + return idx; +} + +static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) +{ + struct e1000_hw *hw = &adapter->hw; + u32 bits, pf_id; + int idx; + + idx = igb_find_vlvf_entry(hw, vid); + if (!idx) + return; + + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK; + bits &= rd32(E1000_VLVF(idx)); + + /* Disable the filter so this falls into the default pool. */ + if (!bits) { + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + wr32(E1000_VLVF(idx), BIT(pf_id)); + else + wr32(E1000_VLVF(idx), 0); + } +} + +static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid, + bool add, u32 vf) +{ + int pf_id = adapter->vfs_allocated_count; + struct e1000_hw *hw = &adapter->hw; + int err; + + /* If VLAN overlaps with one the PF is currently monitoring make + * sure that we are able to allocate a VLVF entry. This may be + * redundant but it guarantees PF will maintain visibility to + * the VLAN. + */ + if (add && test_bit(vid, adapter->active_vlans)) { + err = igb_vfta_set(hw, vid, pf_id, true, false); + if (err) + return err; + } + + err = igb_vfta_set(hw, vid, vf, add, false); + + if (add && !err) + return err; + + /* If we failed to add the VF VLAN or we are removing the VF VLAN + * we may need to drop the PF pool bit in order to allow us to free + * up the VLVF resources. 
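+ * This only needs to happen while the PF itself still tracks the VLAN or
+ * VLAN promiscuous mode is enabled, which is what the check below tests.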
+ */ + if (test_bit(vid, adapter->active_vlans) || + (adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_update_pf_vlvf(adapter, vid); + + return err; +} + +static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + + if (vid) + wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); + else + wr32(E1000_VMVIR(vf), 0); +} + +static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf, + u16 vlan, u8 qos) +{ + int err; + + err = igb_set_vf_vlan(adapter, vlan, true, vf); + if (err) + return err; + + igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vlan); + + /* revoke access to previous VLAN */ + if (vlan != adapter->vf_data[vf].pf_vlan) + igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); + + adapter->vf_data[vf].pf_vlan = vlan; + adapter->vf_data[vf].pf_qos = qos; + igb_set_vf_vlan_strip(adapter, vf, true); + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + + return err; +} + +static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf) +{ + /* Restore tagless access via VLAN 0 */ + igb_set_vf_vlan(adapter, 0, true, vf); + + igb_set_vmvir(adapter, 0, vf); + igb_set_vmolr(adapter, vf, true); + + /* Remove any PF assigned VLAN */ + if (adapter->vf_data[vf].pf_vlan) + igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); + + adapter->vf_data[vf].pf_vlan = 0; + adapter->vf_data[vf].pf_qos = 0; + igb_set_vf_vlan_strip(adapter, vf, false); + + return 0; +} + +static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf, + u16 vlan, u8 qos, __be16 vlan_proto) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) + return -EINVAL; + + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + return (vlan || qos) ? 
igb_enable_port_vlan(adapter, vf, vlan, qos) : + igb_disable_port_vlan(adapter, vf); +} + +static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +{ + int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); + int ret; + + if (adapter->vf_data[vf].pf_vlan) + return -1; + + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + ret = igb_set_vf_vlan(adapter, vid, !!add, vf); + if (!ret) + igb_set_vf_vlan_strip(adapter, vf, !!vid); + return ret; +} + +static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) +{ + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + + /* clear flags - except flag that indicates PF has set the MAC */ + vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC; + vf_data->last_nack = jiffies; + + /* reset vlans for device */ + igb_clear_vf_vfta(adapter, vf); + igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf); + igb_set_vmvir(adapter, vf_data->pf_vlan | + (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vf_data->pf_vlan); + igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan)); + + /* reset multicast table array for vf */ + adapter->vf_data[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + igb_set_rx_mode(adapter->netdev); +} + +static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) +{ + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; + + /* clear mac address as we were hotplug removed/added */ + if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) + eth_zero_addr(vf_mac); + + /* process remaining reset events */ + igb_vf_reset(adapter, vf); +} + +static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; + u32 reg, msgbuf[3] = {}; + u8 *addr = (u8 *)(&msgbuf[1]); + + /* process all the same items cleared in a function level reset */ + igb_vf_reset(adapter, vf); + + /* set vf mac address */ + igb_set_vf_mac(adapter, vf, vf_mac); + + /* enable transmit and receive for vf */ + reg = rd32(E1000_VFTE); + wr32(E1000_VFTE, reg | BIT(vf)); + reg = rd32(E1000_VFRE); + wr32(E1000_VFRE, reg | BIT(vf)); + + adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; + + /* reply to reset with ack and vf mac address */ + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK; + } + igb_write_mbx(hw, msgbuf, 3, vf); +} + +static void igb_flush_mac_table(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < hw->mac.rar_entry_count; i++) { + adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; + eth_zero_addr(adapter->mac_table[i].addr); + adapter->mac_table[i].queue = 0; + igb_rar_set_index(adapter, i); + } +} + +static int igb_available_rars(struct igb_adapter *adapter, u8 queue) +{ + struct e1000_hw *hw = &adapter->hw; + /* do not count rar entries reserved for VFs MAC addresses */ + int rar_entries = hw->mac.rar_entry_count - + adapter->vfs_allocated_count; + int i, count = 0; + + for (i = 0; i < rar_entries; i++) { + /* do not count default entries */ + if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) + continue; + + /* do not count "in use" entries for different queues */ + if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) && + (adapter->mac_table[i].queue 
!= queue)) + continue; + + count++; + } + + return count; +} + +/* Set default MAC address for the PF in the first RAR entry */ +static void igb_set_default_mac_filter(struct igb_adapter *adapter) +{ + struct igb_mac_addr *mac_table = &adapter->mac_table[0]; + + ether_addr_copy(mac_table->addr, adapter->hw.mac.addr); + mac_table->queue = adapter->vfs_allocated_count; + mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; + + igb_rar_set_index(adapter, 0); +} + +/* If the filter to be added and an already existing filter express + * the same address and address type, it should be possible to only + * override the other configurations, for example the queue to steer + * traffic. + */ +static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry, + const u8 *addr, const u8 flags) +{ + if (!(entry->state & IGB_MAC_STATE_IN_USE)) + return true; + + if ((entry->state & IGB_MAC_STATE_SRC_ADDR) != + (flags & IGB_MAC_STATE_SRC_ADDR)) + return false; + + if (!ether_addr_equal(addr, entry->addr)) + return false; + + return true; +} + +/* Add a MAC filter for 'addr' directing matching traffic to 'queue', + * 'flags' is used to indicate what kind of match is made, match is by + * default for the destination address, if matching by source address + * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used. + */ +static int igb_add_mac_filter_flags(struct igb_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) +{ + struct e1000_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count - + adapter->vfs_allocated_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for the first empty entry in the MAC table. + * Do not touch entries at the end of the table reserved for the VF MAC + * addresses. + */ + for (i = 0; i < rar_entries; i++) { + if (!igb_mac_entry_can_be_used(&adapter->mac_table[i], + addr, flags)) + continue; + + ether_addr_copy(adapter->mac_table[i].addr, addr); + adapter->mac_table[i].queue = queue; + adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags; + + igb_rar_set_index(adapter, i); + return i; + } + + return -ENOSPC; +} + +static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, + const u8 queue) +{ + return igb_add_mac_filter_flags(adapter, addr, queue, 0); +} + +/* Remove a MAC filter for 'addr' directing matching traffic to + * 'queue', 'flags' is used to indicate what kind of match need to be + * removed, match is by default for the destination address, if + * matching by source address is to be removed the flag + * IGB_MAC_STATE_SRC_ADDR can be used. + */ +static int igb_del_mac_filter_flags(struct igb_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) +{ + struct e1000_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count - + adapter->vfs_allocated_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for matching entry in the MAC table based on given address + * and queue. Do not touch entries at the end of the table reserved + * for the VF MAC addresses. 
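+ * Returns 0 when a matching entry was found and cleared (or reset to its
+ * default state for the default MAC address), -ENOENT otherwise.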
+ */ + for (i = 0; i < rar_entries; i++) { + if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)) + continue; + if ((adapter->mac_table[i].state & flags) != flags) + continue; + if (adapter->mac_table[i].queue != queue) + continue; + if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) + continue; + + /* When a filter for the default address is "deleted", + * we return it to its initial configuration + */ + if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) { + adapter->mac_table[i].state = + IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; + adapter->mac_table[i].queue = + adapter->vfs_allocated_count; + } else { + adapter->mac_table[i].state = 0; + adapter->mac_table[i].queue = 0; + eth_zero_addr(adapter->mac_table[i].addr); + } + + igb_rar_set_index(adapter, i); + return 0; + } + + return -ENOENT; +} + +static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, + const u8 queue) +{ + return igb_del_mac_filter_flags(adapter, addr, queue, 0); +} + +int igb_add_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + struct e1000_hw *hw = &adapter->hw; + + /* In theory, this should be supported on 82575 as well, but + * that part wasn't easily accessible during development. + */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + return igb_add_mac_filter_flags(adapter, addr, queue, + IGB_MAC_STATE_QUEUE_STEERING | flags); +} + +int igb_del_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + return igb_del_mac_filter_flags(adapter, addr, queue, + IGB_MAC_STATE_QUEUE_STEERING | flags); +} + +static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int ret; + + ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count); + + return min_t(int, ret, 0); +} + +static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count); + + return 0; +} + +static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, + const u32 info, const u8 *addr) +{ + struct pci_dev *pdev = adapter->pdev; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + struct list_head *pos; + struct vf_mac_filter *entry = NULL; + int ret = 0; + + if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && + !vf_data->trusted) { + dev_warn(&pdev->dev, + "VF %d requested MAC filter but is administratively denied\n", + vf); + return -EINVAL; + } + if (!is_valid_ether_addr(addr)) { + dev_warn(&pdev->dev, + "VF %d attempted to set invalid MAC filter\n", + vf); + return -EINVAL; + } + + switch (info) { + case E1000_VF_MAC_FILTER_CLR: + /* remove all unicast MAC filters related to the current VF */ + list_for_each(pos, &adapter->vf_macs.l) { + entry = list_entry(pos, struct vf_mac_filter, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + igb_del_mac_filter(adapter, entry->vf_mac, vf); + } + } + break; + case E1000_VF_MAC_FILTER_ADD: + /* try to find empty slot in the list */ + list_for_each(pos, &adapter->vf_macs.l) { + entry = list_entry(pos, struct vf_mac_filter, l); + if (entry->free) + break; + } + + if (entry && entry->free) { + entry->free = false; + entry->vf = vf; + ether_addr_copy(entry->vf_mac, addr); + + ret = igb_add_mac_filter(adapter, addr, vf); + ret = min_t(int, ret, 0); + } else { + ret = -ENOSPC; + } + + if (ret == -ENOSPC) + dev_warn(&pdev->dev, + "VF %d 
has requested MAC filter but there is no space for it\n", + vf); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) +{ + struct pci_dev *pdev = adapter->pdev; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + u32 info = msg[0] & E1000_VT_MSGINFO_MASK; + + /* The VF MAC Address is stored in a packed array of bytes + * starting at the second 32 bit word of the msg array + */ + unsigned char *addr = (unsigned char *)&msg[1]; + int ret = 0; + + if (!info) { + if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && + !vf_data->trusted) { + dev_warn(&pdev->dev, + "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", + vf); + return -EINVAL; + } + + if (!is_valid_ether_addr(addr)) { + dev_warn(&pdev->dev, + "VF %d attempted to set invalid MAC\n", + vf); + return -EINVAL; + } + + ret = igb_set_vf_mac(adapter, vf, addr); + } else { + ret = igb_set_vf_mac_filter(adapter, vf, info, addr); + } + + return ret; +} + +static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + u32 msg = E1000_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!(vf_data->flags & IGB_VF_FLAG_CTS) && + time_after(jiffies, vf_data->last_nack + (2 * HZ))) { + igb_write_mbx(hw, &msg, 1, vf); + vf_data->last_nack = jiffies; + } +} + +static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) +{ + struct pci_dev *pdev = adapter->pdev; + u32 msgbuf[E1000_VFMAILBOX_SIZE]; + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + s32 retval; + + retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false); + + if (retval) { + /* if receive failed revoke VF CTS stats and restart init */ + dev_err(&pdev->dev, "Error receiving message from VF\n"); + vf_data->flags &= ~IGB_VF_FLAG_CTS; + if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) + goto unlock; + goto out; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) + goto unlock; + + /* until the vf completes a reset it should not be + * allowed to start any configuration. 
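+ * A VF_RESET message is handled first; other requests are rejected until
+ * the VF is marked clear to send (IGB_VF_FLAG_CTS).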
+ */ + if (msgbuf[0] == E1000_VF_RESET) { + /* unlocks mailbox */ + igb_vf_reset_msg(adapter, vf); + return; + } + + if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { + if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) + goto unlock; + retval = -1; + goto out; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case E1000_VF_SET_MAC_ADDR: + retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); + break; + case E1000_VF_SET_PROMISC: + retval = igb_set_vf_promisc(adapter, msgbuf, vf); + break; + case E1000_VF_SET_MULTICAST: + retval = igb_set_vf_multicasts(adapter, msgbuf, vf); + break; + case E1000_VF_SET_LPE: + retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); + break; + case E1000_VF_SET_VLAN: + retval = -1; + if (vf_data->pf_vlan) + dev_warn(&pdev->dev, + "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", + vf); + else + retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + default: + dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); + retval = -1; + break; + } + + msgbuf[0] |= E1000_VT_MSGTYPE_CTS; +out: + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= E1000_VT_MSGTYPE_NACK; + else + msgbuf[0] |= E1000_VT_MSGTYPE_ACK; + + /* unlocks mailbox */ + igb_write_mbx(hw, msgbuf, 1, vf); + return; + +unlock: + igb_unlock_mbx(hw, vf); +} + +static void igb_msg_task(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long flags; + u32 vf; + + spin_lock_irqsave(&adapter->vfs_lock, flags); + for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { + /* process any reset requests */ + if (!igb_check_for_rst(hw, vf)) + igb_vf_reset_event(adapter, vf); + + /* process any messages pending */ + if (!igb_check_for_msg(hw, vf)) + igb_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!igb_check_for_ack(hw, vf)) + igb_rcv_ack_from_vf(adapter, vf); + } + spin_unlock_irqrestore(&adapter->vfs_lock, flags); +} + +/** + * igb_set_uta - Set unicast filter table address + * @adapter: board private structure + * @set: boolean indicating if we are setting or clearing bits + * + * The unicast table address is a register array of 32-bit registers. + * The table is meant to be used in a way similar to how the MTA is used + * however due to certain limitations in the hardware it is necessary to + * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous + * enable bit to allow vlan tag stripping when promiscuous mode is enabled + **/ +static void igb_set_uta(struct igb_adapter *adapter, bool set) +{ + struct e1000_hw *hw = &adapter->hw; + u32 uta = set ? 
~0 : 0; + int i; + + /* we only need to do this if VMDq is enabled */ + if (!adapter->vfs_allocated_count) + return; + + for (i = hw->mac.uta_reg_count; i--;) + array_wr32(E1000_UTA, i, uta); +} + +/** + * igb_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t igb_intr_msi(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct igb_q_vector *q_vector = adapter->q_vector[0]; + struct e1000_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ + u32 icr = rd32(E1000_ICR); + + igb_write_itr(q_vector); + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & E1000_ICR_TS) + igb_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igb_intr - Legacy Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t igb_intr(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct igb_q_vector *q_vector = adapter->q_vector[0]; + struct e1000_hw *hw = &adapter->hw; + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No + * need for the IMC write + */ + u32 icr = rd32(E1000_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & E1000_ICR_INT_ASSERTED)) + return IRQ_NONE; + + igb_write_itr(q_vector); + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & E1000_ICR_TS) + igb_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void igb_ring_irq_enable(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { + if ((adapter->num_q_vectors == 1) && !adapter->vf_data) + igb_set_itr(q_vector); + else + igb_update_ring_itr(q_vector); + } + + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (adapter->flags & IGB_FLAG_HAS_MSIX) + wr32(E1000_EIMS, q_vector->eims_value); + else + igb_irq_enable(adapter); + } +} + +/** + * igb_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + **/ +static int igb_poll(struct napi_struct *napi, int budget) +{ + struct igb_q_vector *q_vector = container_of(napi, + struct igb_q_vector, + napi); + bool clean_complete = true; + int work_done = 0; + +#ifdef CONFIG_IGB_DCA + if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) + igb_update_dca(q_vector); +#endif + if (q_vector->tx.ring) + clean_complete = igb_clean_tx_irq(q_vector, budget); + + if (q_vector->rx.ring) { + int cleaned = igb_clean_rx_irq(q_vector, budget); + + work_done += 
cleaned; + if (cleaned >= budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + igb_ring_irq_enable(q_vector); + + return work_done; +} + +/** + * igb_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * @napi_budget: Used to determine if we are in netpoll + * + * returns true if ring is completely cleaned + **/ +static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct igb_ring *tx_ring = q_vector->tx.ring; + struct igb_tx_buffer *tx_buffer; + union e1000_adv_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__IGB_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IGB_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + if (tx_buffer->type == IGB_TYPE_SKB) + napi_consume_skb(tx_buffer->skb, napi_budget); + else + xdp_return_frame(tx_buffer->xdpf); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + dma_unmap_len_set(tx_buffer, len, 0); + + /* clear last DMA location and unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.bytes += total_bytes; + tx_ring->tx_stats.packets += total_packets; + u64_stats_update_end(&tx_ring->tx_syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { + struct e1000_hw *hw = &adapter->hw; + + /* Detect a 
transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + dev_err(tx_ring->dev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%p>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + rd32(E1000_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_buffer->time_stamp, + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + /* we are about to reset, no point in enabling stuff */ + return true; + } + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && + netif_carrier_ok(tx_ring->netdev) && + igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !(test_bit(__IGB_DOWN, &adapter->state))) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.restart_queue++; + u64_stats_update_end(&tx_ring->tx_syncp); + } + } + + return !!budget; +} + +/** + * igb_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void igb_reuse_rx_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *old_buff) +{ + struct igb_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls. + */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, + int rx_buf_pgcnt) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote and pfmemalloc pages */ + if (!dev_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) + return false; +#else +#define IGB_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048) + + if (rx_buffer->page_offset > IGB_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. 
+ */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * igb_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: size of buffer to be added + * + * This function will add the data contained in rx_buffer->page to the skb. + **/ +static void igb_add_rx_frag(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IGB_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + ktime_t timestamp) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + unsigned int size = xdp->data_end - xdp->data; + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data); + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); + if (unlikely(!skb)) + return NULL; + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IGB_RX_HDR_LEN) + headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (xdp->data + headlen) - page_address(rx_buffer->page), + size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + ktime_t timestamp) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data_meta); + + /* build an skb around the page buffer */ + skb = napi_build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, xdp->data_end - xdp->data); + + if (metasize) + skb_metadata_set(skb, metasize); + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return 
skb; +} + +static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter, + struct igb_ring *rx_ring, + struct xdp_buff *xdp) +{ + int err, result = IGB_XDP_PASS; + struct bpf_prog *xdp_prog; + u32 act; + + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + + if (!xdp_prog) + goto xdp_out; + + prefetchw(xdp->data_hard_start); /* xdp_frame write */ + + act = bpf_prog_run_xdp(xdp_prog, xdp); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + result = igb_xdp_xmit_back(adapter, xdp); + if (result == IGB_XDP_CONSUMED) + goto out_failure; + break; + case XDP_REDIRECT: + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); + if (err) + goto out_failure; + result = IGB_XDP_REDIR; + break; + default: + bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_DROP: + result = IGB_XDP_CONSUMED; + break; + } +xdp_out: + return ERR_PTR(-result); +} + +static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ +#else + truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IGB_SKB_PAD + size) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : + SKB_DATA_ALIGN(size); +#endif + return truesize; +} + +static void igb_rx_buffer_flip(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + unsigned int size) +{ + unsigned int truesize = igb_rx_frame_truesize(rx_ring, size); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static inline void igb_rx_checksum(struct igb_ring *ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set */ + if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM)) + return; + + /* Rx checksum disabled via ethtool */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (igb_test_staterr(rx_desc, + E1000_RXDEXT_STATERR_TCPE | + E1000_RXDEXT_STATERR_IPE)) { + /* work around errata with sctp packets where the TCPE aka + * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) + * packets, (aka let the stack check the crc32c) + */ + if (!((skb->len == 60) && + test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.csum_err++; + u64_stats_update_end(&ring->rx_syncp); + } + /* let the stack verify checksum errors */ + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | + E1000_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + dev_dbg(ring->dev, "cksum success: bits %08X\n", + le32_to_cpu(rx_desc->wb.upper.status_error)); +} + +static inline void igb_rx_hash(struct igb_ring *ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (ring->netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, + le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + PKT_HASH_TYPE_L3); +} + +/** + * igb_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * This function updates next to clean. 
If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool igb_is_non_eop(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGB_RX_DESC(rx_ring, ntc)); + + if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))) + return false; + + return true; +} + +/** + * igb_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool igb_cleanup_headers(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + + if (unlikely((igb_test_staterr(rx_desc, + E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { + struct net_device *netdev = rx_ring->netdev; + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * igb_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +static void igb_process_skb_fields(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + + igb_rx_hash(rx_ring, rx_desc, skb); + + igb_rx_checksum(rx_ring, rx_desc, skb); + + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && + !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) + igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { + u16 vid; + + if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && + test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) + vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); + else + vid = le16_to_cpu(rx_desc->wb.upper.vlan); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static unsigned int igb_rx_offset(struct igb_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? 
IGB_SKB_PAD : 0;
+}
+
+static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
+					       const unsigned int size, int *rx_buf_pgcnt)
+{
+	struct igb_rx_buffer *rx_buffer;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	*rx_buf_pgcnt =
+#if (PAGE_SIZE < 8192)
+		page_count(rx_buffer->page);
+#else
+		0;
+#endif
+	prefetchw(rx_buffer->page);
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      size,
+				      DMA_FROM_DEVICE);
+
+	rx_buffer->pagecnt_bias--;
+
+	return rx_buffer;
+}
+
+static void igb_put_rx_buffer(struct igb_ring *rx_ring,
+			      struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
+{
+	if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
+		/* hand second half of page back to the ring */
+		igb_reuse_rx_page(rx_ring, rx_buffer);
+	} else {
+		/* We are not reusing the buffer so unmap it and free
+		 * any references we are holding to it
+		 */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
+				     IGB_RX_DMA_ATTR);
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+	}
+
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+}
+
+static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct igb_ring *rx_ring = q_vector->rx.ring;
+	struct sk_buff *skb = rx_ring->skb;
+	unsigned int total_bytes = 0, total_packets = 0;
+	u16 cleaned_count = igb_desc_unused(rx_ring);
+	unsigned int xdp_xmit = 0;
+	struct xdp_buff xdp;
+	u32 frame_sz = 0;
+	int rx_buf_pgcnt;
+
+	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+	frame_sz = igb_rx_frame_truesize(rx_ring, 0);
+#endif
+	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+
+	while (likely(total_packets < budget)) {
+		union e1000_adv_rx_desc *rx_desc;
+		struct igb_rx_buffer *rx_buffer;
+		ktime_t timestamp = 0;
+		int pkt_offset = 0;
+		unsigned int size;
+		void *pktbuf;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
+			igb_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
+		size = le16_to_cpu(rx_desc->wb.upper.length);
+		if (!size)
+			break;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		dma_rmb();
+
+		rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
+		pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
+
+		/* pull rx packet timestamp if available and valid */
+		if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+			int ts_hdr_len;
+
+			ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+							 pktbuf, &timestamp);
+
+			pkt_offset += ts_hdr_len;
+			size -= ts_hdr_len;
+		}
+
+		/* retrieve a buffer from the ring */
+		if (!skb) {
+			unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
+			unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
+
+			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+			xdp_buff_clear_frags_flag(&xdp);
+#if (PAGE_SIZE > 4096)
+			/* At larger PAGE_SIZE, frame_sz depend on len size */
+			xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
+#endif
+			skb = igb_run_xdp(adapter, rx_ring, &xdp);
+		}
+
+		if (IS_ERR(skb)) {
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
+				xdp_xmit |= xdp_res;
+
igb_rx_buffer_flip(rx_ring, rx_buffer, size); + } else { + rx_buffer->pagecnt_bias++; + } + total_packets++; + total_bytes += size; + } else if (skb) + igb_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = igb_build_skb(rx_ring, rx_buffer, &xdp, + timestamp); + else + skb = igb_construct_skb(rx_ring, rx_buffer, + &xdp, timestamp); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt); + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (igb_is_non_eop(rx_ring, rx_desc)) + continue; + + /* verify the packet layout is correct */ + if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + igb_process_skb_fields(rx_ring, rx_desc, skb); + + napi_gro_receive(&q_vector->napi, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + if (xdp_xmit & IGB_XDP_REDIR) + xdp_do_flush(); + + if (xdp_xmit & IGB_XDP_TX) { + struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter); + + igb_xdp_ring_update_tail(tx_ring); + } + + u64_stats_update_begin(&rx_ring->rx_syncp); + rx_ring->rx_stats.packets += total_packets; + rx_ring->rx_stats.bytes += total_bytes; + u64_stats_update_end(&rx_ring->rx_syncp); + q_vector->rx.total_packets += total_packets; + q_vector->rx.total_bytes += total_bytes; + + if (cleaned_count) + igb_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_packets; +} + +static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(igb_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + igb_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGB_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, igb_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = igb_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +/** + * igb_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: rx descriptor ring to allocate new receive buffers + * @cleaned_count: count of buffers to allocate + **/ +void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) +{ + union e1000_adv_rx_desc *rx_desc; + struct igb_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + u16 bufsz; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = IGB_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + bufsz = igb_rx_bufsz(rx_ring); + + do { + if (!igb_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, 
bufsz, + DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IGB_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + dma_wmb(); + writel(i, rx_ring->tail); + } +} + +/** + * igb_mii_ioctl - + * @netdev: pointer to netdev struct + * @ifr: interface structure + * @cmd: ioctl command to execute + **/ +static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + + if (adapter->hw.phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: + if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) + return -EIO; + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } + return 0; +} + +/** + * igb_ioctl - + * @netdev: pointer to netdev struct + * @ifr: interface structure + * @cmd: ioctl command to execute + **/ +static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return igb_mii_ioctl(netdev, ifr, cmd); + case SIOCGHWTSTAMP: + return igb_ptp_get_ts_config(netdev, ifr); + case SIOCSHWTSTAMP: + return igb_ptp_set_ts_config(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + +s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + if (pcie_capability_read_word(adapter->pdev, reg, value)) + return -E1000_ERR_CONFIG; + + return 0; +} + +s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + if (pcie_capability_write_word(adapter->pdev, reg, *value)) + return -E1000_ERR_CONFIG; + + return 0; +} + +static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl; + bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + + if (enable) { + /* enable VLAN tag insert/strip */ + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_VME; + wr32(E1000_CTRL, ctrl); + + /* Disable CFI check */ + rctl = rd32(E1000_RCTL); + rctl &= ~E1000_RCTL_CFIEN; + wr32(E1000_RCTL, rctl); + } else { + /* disable VLAN tag insert/strip */ + ctrl = rd32(E1000_CTRL); + ctrl &= ~E1000_CTRL_VME; + wr32(E1000_CTRL, ctrl); + } + 
+ igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable); +} + +static int igb_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int pf_id = adapter->vfs_allocated_count; + + /* add the filter since PF can receive vlans w/o entry in vlvf */ + if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_vfta_set(hw, vid, pf_id, true, !!vid); + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int igb_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int pf_id = adapter->vfs_allocated_count; + struct e1000_hw *hw = &adapter->hw; + + /* remove VID from filter table */ + if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_vfta_set(hw, vid, pf_id, false, true); + + clear_bit(vid, adapter->active_vlans); + + return 0; +} + +static void igb_restore_vlan(struct igb_adapter *adapter) +{ + u16 vid = 1; + + igb_vlan_mode(adapter->netdev, adapter->netdev->features); + igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); + + for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) + igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NIC's only allow 1000 gbps Full duplex + * and 100Mbps Full duplex for 100baseFx sfp + */ + if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + case SPEED_10 + DUPLEX_FULL: + case SPEED_100 + DUPLEX_HALF: + goto err_inval; + default: + break; + } + } + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; + break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl, status; + u32 wufc = runtime ? 
E1000_WUFC_LNKC : adapter->wol; + bool wake; + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) + __igb_close(netdev, true); + + igb_ptp_suspend(adapter); + + igb_clear_interrupt_scheme(adapter); + rtnl_unlock(); + + status = rd32(E1000_STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + igb_setup_rctl(adapter); + igb_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { + rctl = rd32(E1000_RCTL); + rctl |= E1000_RCTL_MPE; + wr32(E1000_RCTL, rctl); + } + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_ADVD3WUC; + wr32(E1000_CTRL, ctrl); + + /* Allow time for pending master requests to run */ + igb_disable_pcie_master(hw); + + wr32(E1000_WUC, E1000_WUC_PME_EN); + wr32(E1000_WUFC, wufc); + } else { + wr32(E1000_WUC, 0); + wr32(E1000_WUFC, 0); + } + + wake = wufc || adapter->en_mng_pt; + if (!wake) + igb_power_down_link(adapter); + else + igb_power_up_link(adapter); + + if (enable_wake) + *enable_wake = wake; + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igb_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +static void igb_deliver_wake_packet(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sk_buff *skb; + u32 wupl; + + wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK; + + /* WUPM stores only the first 128 bytes of the wake packet. + * Read the packet only if we have the whole thing. + */ + if ((wupl == 0) || (wupl > E1000_WUPM_BYTES)) + return; + + skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES); + if (!skb) + return; + + skb_put(skb, wupl); + + /* Ensure reads are 32-bit aligned */ + wupl = roundup(wupl, 4); + + memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl); + + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); +} + +static int __maybe_unused igb_suspend(struct device *dev) +{ + return __igb_shutdown(to_pci_dev(dev), NULL, 0); +} + +static int __maybe_unused __igb_resume(struct device *dev, bool rpm) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err, val; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!pci_device_is_present(pdev)) + return -ENODEV; + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, + "igb: Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igb_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igb_get_hw_control(adapter); + + val = rd32(E1000_WUS); + if (val & WAKE_PKT_WUS) + igb_deliver_wake_packet(netdev); + + wr32(E1000_WUS, ~0); + + if (!rpm) + rtnl_lock(); + if (!err && netif_running(netdev)) + err = __igb_open(netdev, true); + + if (!err) + netif_device_attach(netdev); + if (!rpm) + rtnl_unlock(); + + return err; +} + +static int __maybe_unused igb_resume(struct device *dev) +{ + return __igb_resume(dev, false); +} + +static int __maybe_unused igb_runtime_idle(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + + if (!igb_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC * 5); + + return -EBUSY; +} + +static int __maybe_unused igb_runtime_suspend(struct device *dev) +{ + return __igb_shutdown(to_pci_dev(dev), NULL, 1); +} + +static int __maybe_unused igb_runtime_resume(struct device *dev) +{ + return __igb_resume(dev, true); +} + +static void igb_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __igb_shutdown(pdev, &wake, 0); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_PCI_IOV +static int igb_sriov_reinit(struct pci_dev *dev) +{ + struct net_device *netdev = pci_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + rtnl_lock(); + + if (netif_running(netdev)) + igb_close(netdev); + else + igb_reset(adapter); + + igb_clear_interrupt_scheme(adapter); + + igb_init_queue_configuration(adapter); + + if (igb_init_interrupt_scheme(adapter, true)) { + rtnl_unlock(); + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + igb_open(netdev); + + rtnl_unlock(); + + return 0; +} + +static int igb_pci_disable_sriov(struct pci_dev *dev) +{ + int err = igb_disable_sriov(dev); + + if (!err) + err = igb_sriov_reinit(dev); + + return err; +} + +static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs) +{ + int err = igb_enable_sriov(dev, num_vfs); + + if (err) + goto out; + + err = igb_sriov_reinit(dev); + if (!err) + return num_vfs; + +out: + return err; +} + +#endif +static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + if (num_vfs == 0) + return igb_pci_disable_sriov(dev); + else + return igb_pci_enable_sriov(dev, num_vfs); +#endif + return 0; +} + +/** + * igb_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + **/ +static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igb_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igb_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the __igb_resume routine. 
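+ *
+ * Return: PCI_ERS_RESULT_RECOVERED if the device could be re-enabled,
+ * PCI_ERS_RESULT_DISCONNECT otherwise.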
+ **/ +static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + pci_ers_result_t result; + + if (pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + /* In case of PCI error, adapter lose its HW address + * so we should re-assign it here. + */ + hw->hw_addr = adapter->io_addr; + + igb_reset(adapter); + wr32(E1000_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + return result; +} + +/** + * igb_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the __igb_resume routine. + */ +static void igb_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (igb_up(adapter)) { + dev_err(&pdev->dev, "igb_up failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igb_get_hw_control(adapter); +} + +/** + * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table + * @adapter: Pointer to adapter structure + * @index: Index of the RAR entry which need to be synced with MAC table + **/ +static void igb_rar_set_index(struct igb_adapter *adapter, u32 index) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rar_low, rar_high; + u8 *addr = adapter->mac_table[index].addr; + + /* HW expects these to be in network order when they are plugged + * into the registers which are little endian. In order to guarantee + * that ordering we need to do an leXX_to_cpup here in order to be + * ready for the byteswap that occurs with writel + */ + rar_low = le32_to_cpup((__le32 *)(addr)); + rar_high = le16_to_cpup((__le16 *)(addr + 4)); + + /* Indicate to hardware the Address is Valid. 
*/ + if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) { + if (is_valid_ether_addr(addr)) + rar_high |= E1000_RAH_AV; + + if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR) + rar_high |= E1000_RAH_ASEL_SRC_ADDR; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_i210: + if (adapter->mac_table[index].state & + IGB_MAC_STATE_QUEUE_STEERING) + rar_high |= E1000_RAH_QSEL_ENABLE; + + rar_high |= E1000_RAH_POOL_1 * + adapter->mac_table[index].queue; + break; + default: + rar_high |= E1000_RAH_POOL_1 << + adapter->mac_table[index].queue; + break; + } + } + + wr32(E1000_RAL(index), rar_low); + wrfl(); + wr32(E1000_RAH(index), rar_high); + wrfl(); +} + +static int igb_set_vf_mac(struct igb_adapter *adapter, + int vf, unsigned char *mac_addr) +{ + struct e1000_hw *hw = &adapter->hw; + /* VF MAC addresses start at end of receive addresses and moves + * towards the first, as a result a collision should not be possible + */ + int rar_entry = hw->mac.rar_entry_count - (vf + 1); + unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses; + + ether_addr_copy(vf_mac_addr, mac_addr); + ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr); + adapter->mac_table[rar_entry].queue = vf; + adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE; + igb_rar_set_index(adapter, rar_entry); + + return 0; +} + +static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + + /* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC + * flag and allows to overwrite the MAC via VF netdev. This + * is necessary to allow libvirt a way to restore the original + * MAC after unbinding vfio-pci and reloading igbvf after shutting + * down a VM. + */ + if (is_zero_ether_addr(mac)) { + adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, + "remove administratively set MAC on VF %d\n", + vf); + } else if (is_valid_ether_addr(mac)) { + adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", + mac, vf); + dev_info(&adapter->pdev->dev, + "Reload the VF driver to make this change effective."); + /* Generate additional warning if PF is down */ + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF MAC address has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + } else { + return -EINVAL; + } + return igb_set_vf_mac(adapter, vf, mac); +} + +static int igb_link_mbps(int internal_link_speed) +{ + switch (internal_link_speed) { + case SPEED_100: + return 100; + case SPEED_1000: + return 1000; + default: + return 0; + } +} + +static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, + int link_speed) +{ + int rf_dec, rf_int; + u32 bcnrc_val; + + if (tx_rate != 0) { + /* Calculate the rate factor values to set */ + rf_int = link_speed / tx_rate; + rf_dec = (link_speed - (rf_int * tx_rate)); + rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) / + tx_rate; + + bcnrc_val = E1000_RTTBCNRC_RS_ENA; + bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) & + E1000_RTTBCNRC_RF_INT_MASK); + bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); + } else { + bcnrc_val = 0; + } + + wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ + /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. 
MMW_SIZE=0x014 if 9728-byte jumbo is supported. + */ + wr32(E1000_RTTBCNRM, 0x14); + wr32(E1000_RTTBCNRC, bcnrc_val); +} + +static void igb_check_vf_rate_limit(struct igb_adapter *adapter) +{ + int actual_link_speed, i; + bool reset_rate = false; + + /* VF TX rate limit was not set or not supported */ + if ((adapter->vf_rate_link_speed == 0) || + (adapter->hw.mac.type != e1000_82576)) + return; + + actual_link_speed = igb_link_mbps(adapter->link_speed); + if (actual_link_speed != adapter->vf_rate_link_speed) { + reset_rate = true; + adapter->vf_rate_link_speed = 0; + dev_info(&adapter->pdev->dev, + "Link speed has been changed. VF Transmit rate is disabled\n"); + } + + for (i = 0; i < adapter->vfs_allocated_count; i++) { + if (reset_rate) + adapter->vf_data[i].tx_rate = 0; + + igb_set_vf_rate_limit(&adapter->hw, i, + adapter->vf_data[i].tx_rate, + actual_link_speed); + } +} + +static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, + int min_tx_rate, int max_tx_rate) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int actual_link_speed; + + if (hw->mac.type != e1000_82576) + return -EOPNOTSUPP; + + if (min_tx_rate) + return -EINVAL; + + actual_link_speed = igb_link_mbps(adapter->link_speed); + if ((vf >= adapter->vfs_allocated_count) || + (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || + (max_tx_rate < 0) || + (max_tx_rate > actual_link_speed)) + return -EINVAL; + + adapter->vf_rate_link_speed = actual_link_speed; + adapter->vf_data[vf].tx_rate = (u16)max_tx_rate; + igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed); + + return 0; +} + +static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, + bool setting) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 reg_val, reg_offset; + + if (!adapter->vfs_allocated_count) + return -EOPNOTSUPP; + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + + reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC; + reg_val = rd32(reg_offset); + if (setting) + reg_val |= (BIT(vf) | + BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); + else + reg_val &= ~(BIT(vf) | + BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); + wr32(reg_offset, reg_val); + + adapter->vf_data[vf].spoofchk_enabled = setting; + return 0; +} + +static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + if (adapter->vf_data[vf].trusted == setting) + return 0; + + adapter->vf_data[vf].trusted = setting; + + dev_info(&adapter->pdev->dev, "VF %u is %strusted\n", + vf, setting ? 
"" : "not "); + return 0; +} + +static int igb_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); + ivi->max_tx_rate = adapter->vf_data[vf].tx_rate; + ivi->min_tx_rate = 0; + ivi->vlan = adapter->vf_data[vf].pf_vlan; + ivi->qos = adapter->vf_data[vf].pf_qos; + ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; + ivi->trusted = adapter->vf_data[vf].trusted; + return 0; +} + +static void igb_vmm_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_i210: + case e1000_i211: + case e1000_i354: + default: + /* replication is not supported for 82575 */ + return; + case e1000_82576: + /* notify HW that the MAC is adding vlan tags */ + reg = rd32(E1000_DTXCTL); + reg |= E1000_DTXCTL_VLAN_ADDED; + wr32(E1000_DTXCTL, reg); + fallthrough; + case e1000_82580: + /* enable replication vlan tag stripping */ + reg = rd32(E1000_RPLOLR); + reg |= E1000_RPLOLR_STRVLAN; + wr32(E1000_RPLOLR, reg); + fallthrough; + case e1000_i350: + /* none of the above registers are supported by i350 */ + break; + } + + if (adapter->vfs_allocated_count) { + igb_vmdq_set_loopback_pf(hw, true); + igb_vmdq_set_replication_pf(hw, true); + igb_vmdq_set_anti_spoofing_pf(hw, true, + adapter->vfs_allocated_count); + } else { + igb_vmdq_set_loopback_pf(hw, false); + igb_vmdq_set_replication_pf(hw, false); + } +} + +static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) +{ + struct e1000_hw *hw = &adapter->hw; + u32 dmac_thr; + u16 hwm; + u32 reg; + + if (hw->mac.type > e1000_82580) { + if (adapter->flags & IGB_FLAG_DMAC) { + /* force threshold to 0. */ + wr32(E1000_DMCTXTH, 0); + + /* DMA Coalescing high water mark needs to be greater + * than the Rx threshold. Set hwm to PBA - max frame + * size in 16B units, capping it at PBA - 6KB. + */ + hwm = 64 * (pba - 6); + reg = rd32(E1000_FCRTC); + reg &= ~E1000_FCRTC_RTH_COAL_MASK; + reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) + & E1000_FCRTC_RTH_COAL_MASK); + wr32(E1000_FCRTC, reg); + + /* Set the DMA Coalescing Rx threshold to PBA - 2 * max + * frame size, capping it at PBA - 10KB. 
+ */ + dmac_thr = pba - 10; + reg = rd32(E1000_DMACR); + reg &= ~E1000_DMACR_DMACTHR_MASK; + reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) + & E1000_DMACR_DMACTHR_MASK); + + /* transition to L0x or L1 if available..*/ + reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); + + /* watchdog timer= +-1000 usec in 32usec intervals */ + reg |= (1000 >> 5); + + /* Disable BMC-to-OS Watchdog Enable */ + if (hw->mac.type != e1000_i354) + reg &= ~E1000_DMACR_DC_BMC2OSW_EN; + wr32(E1000_DMACR, reg); + + /* no lower threshold to disable + * coalescing(smart fifb)-UTRESH=0 + */ + wr32(E1000_DMCRTRH, 0); + + reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4); + + wr32(E1000_DMCTLX, reg); + + /* free space in tx packet buffer to wake from + * DMA coal + */ + wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - + (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); + } + + if (hw->mac.type >= e1000_i210 || + (adapter->flags & IGB_FLAG_DMAC)) { + reg = rd32(E1000_PCIEMISC); + reg |= E1000_PCIEMISC_LX_DECISION; + wr32(E1000_PCIEMISC, reg); + } /* endif adapter->dmac is not disabled */ + } else if (hw->mac.type == e1000_82580) { + u32 reg = rd32(E1000_PCIEMISC); + + wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION); + wr32(E1000_DMACR, 0); + } +} + +/** + * igb_read_i2c_byte - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: device address + * @data: value read + * + * Performs byte read operation over I2C interface at + * a specified device address. + **/ +s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); + struct i2c_client *this_client = adapter->i2c_client; + s32 status; + u16 swfw_mask = 0; + + if (!this_client) + return E1000_ERR_I2C; + + swfw_mask = E1000_SWFW_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return E1000_ERR_SWFW_SYNC; + + status = i2c_smbus_read_byte_data(this_client, byte_offset); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + if (status < 0) + return E1000_ERR_I2C; + else { + *data = status; + return 0; + } +} + +/** + * igb_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: value to write + * + * Performs byte write operation over I2C interface at + * a specified device address. 
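+ *
+ * Return: 0 on success, E1000_ERR_I2C or E1000_ERR_SWFW_SYNC on failure.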
+ **/
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+		       u8 dev_addr, u8 data)
+{
+	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+	struct i2c_client *this_client = adapter->i2c_client;
+	s32 status;
+	u16 swfw_mask = E1000_SWFW_PHY0_SM;
+
+	if (!this_client)
+		return E1000_ERR_I2C;
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+		return E1000_ERR_SWFW_SYNC;
+	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
+	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+	if (status)
+		return E1000_ERR_I2C;
+	else
+		return 0;
+
+}
+
+int igb_reinit_queues(struct igb_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	int err = 0;
+
+	if (netif_running(netdev))
+		igb_close(netdev);
+
+	igb_reset_interrupt_capability(adapter);
+
+	if (igb_init_interrupt_scheme(adapter, true)) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	if (netif_running(netdev))
+		err = igb_open(netdev);
+
+	return err;
+}
+
+static void igb_nfc_filter_exit(struct igb_adapter *adapter)
+{
+	struct igb_nfc_filter *rule;
+
+	spin_lock(&adapter->nfc_lock);
+
+	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+		igb_erase_filter(adapter, rule);
+
+	hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
+		igb_erase_filter(adapter, rule);
+
+	spin_unlock(&adapter->nfc_lock);
+}
+
+static void igb_nfc_filter_restore(struct igb_adapter *adapter)
+{
+	struct igb_nfc_filter *rule;
+
+	spin_lock(&adapter->nfc_lock);
+
+	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+		igb_add_filter(adapter, rule);
+
+	spin_unlock(&adapter->nfc_lock);
+}
+/* igb_main.c */
diff --git a/devices/igb/igb_ptp-5.14-ethercat.c b/devices/igb/igb_ptp-5.14-ethercat.c
new file mode 100644
index 00000000..c4737262
--- /dev/null
+++ b/devices/igb/igb_ptp-5.14-ethercat.c
@@ -0,0 +1,1383 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com> */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/ptp_classify.h>
+
+#include "igb-5.14-ethercat.h"
+
+#define INCVALUE_MASK		0x7fffffff
+#define ISGN			0x80000000
+
+/* The 82580 timesync updates the system timer every 8ns by 8ns,
+ * and this update value cannot be reprogrammed.
+ *
+ * Neither the 82576 nor the 82580 offer registers wide enough to hold
+ * nanoseconds time values for very long. For the 82580, SYSTIM always
+ * counts nanoseconds, but the upper 24 bits are not available. The
+ * frequency is adjusted by changing the 32 bit fractional nanoseconds
+ * register, TIMINCA.
+ *
+ * For the 82576, the SYSTIM register time unit is affected by the
+ * choice of the 24 bit TININCA:IV (incvalue) field. Five bits of this
+ * field are needed to provide the nominal 16 nanosecond period,
+ * leaving 19 bits for fractional nanoseconds.
+ *
+ * We scale the NIC clock cycle by a large factor so that relatively
+ * small clock corrections can be added or subtracted at each clock
+ * tick. The drawbacks of a large factor are a) that the clock
+ * register overflows more quickly (not such a big deal) and b) that
+ * the increment per tick has to fit into 24 bits. As a result we
+ * need to use a shift of 19 so we can fit a value of 16 into the
+ * TIMINCA register.
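+ *
+ * (As a quick check: the nominal 82576 increment is then 16 << 19 =
+ * 0x800000, which still fits in the 24 bit incvalue field.)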
+ * + * + * SYSTIMH SYSTIML + * +--------------+ +---+---+------+ + * 82576 | 32 | | 8 | 5 | 19 | + * +--------------+ +---+---+------+ + * \________ 45 bits _______/ fract + * + * +----------+---+ +--------------+ + * 82580 | 24 | 8 | | 32 | + * +----------+---+ +--------------+ + * reserved \______ 40 bits _____/ + * + * + * The 45 bit 82576 SYSTIM overflows every + * 2^45 * 10^-9 / 3600 = 9.77 hours. + * + * The 40 bit 82580 SYSTIM overflows every + * 2^40 * 10^-9 / 60 = 18.3 minutes. + * + * SYSTIM is converted to real time using a timecounter. As + * timecounter_cyc2time() allows old timestamps, the timecounter needs + * to be updated at least once per half of the SYSTIM interval. + * Scheduling of delayed work is not very accurate, and also the NIC + * clock can be adjusted to run up to 6% faster and the system clock + * up to 10% slower, so we aim for 6 minutes to be sure the actual + * interval in the NIC time is shorter than 9.16 minutes. + */ + +#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 6) +#define IGB_PTP_TX_TIMEOUT (HZ * 15) +#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT) +#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0) +#define INCVALUE_82576 (16u << IGB_82576_TSYNC_SHIFT) +#define IGB_NBITS_82580 40 + +static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); + +/* SYSTIM read access for the 82576 */ +static u64 igb_ptp_read_82576(const struct cyclecounter *cc) +{ + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); + struct e1000_hw *hw = &igb->hw; + u64 val; + u32 lo, hi; + + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; + + return val; +} + +/* SYSTIM read access for the 82580 */ +static u64 igb_ptp_read_82580(const struct cyclecounter *cc) +{ + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); + struct e1000_hw *hw = &igb->hw; + u32 lo, hi; + u64 val; + + /* The timestamp latches on lowest register read. For the 82580 + * the lowest register is SYSTIMR instead of SYSTIML. However we only + * need to provide nanosecond resolution, so we just ignore it. + */ + rd32(E1000_SYSTIMR); + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; + + return val; +} + +/* SYSTIM read access for I210/I211 */ +static void igb_ptp_read_i210(struct igb_adapter *adapter, + struct timespec64 *ts) +{ + struct e1000_hw *hw = &adapter->hw; + u32 sec, nsec; + + /* The timestamp latches on lowest register read. For I210/I211, the + * lowest register is SYSTIMR. Since we only need to provide nanosecond + * resolution, we can ignore it. + */ + rd32(E1000_SYSTIMR); + nsec = rd32(E1000_SYSTIML); + sec = rd32(E1000_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} + +static void igb_ptp_write_i210(struct igb_adapter *adapter, + const struct timespec64 *ts) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Writing the SYSTIMR register is not necessary as it only provides + * sub-nanosecond resolution. + */ + wr32(E1000_SYSTIML, ts->tv_nsec); + wr32(E1000_SYSTIMH, (u32)ts->tv_sec); +} + +/** + * igb_ptp_systim_to_hwtstamp - convert system time value to hw timestamp + * @adapter: board private structure + * @hwtstamps: timestamp structure to update + * @systim: unsigned 64bit system time value. + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions. 
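The overflow figures quoted above, and the choice of a 6 minute IGB_SYSTIM_OVERFLOW_PERIOD, can be sanity-checked with a few lines of arithmetic. This standalone snippet (not part of the patch) just reproduces those numbers:

#include <stdio.h>

int main(void)
{
        /* 45-bit 82576 SYSTIM and 40-bit 82580 SYSTIM, both counting ns */
        double h82576 = (double)(1ULL << 45) * 1e-9 / 3600.0; /* ~9.77 h   */
        double m82580 = (double)(1ULL << 40) * 1e-9 / 60.0;   /* ~18.3 min */

        /* The timecounter must be refreshed at least every half interval
         * (~9.16 min for the 82580); with the NIC clock up to 6% fast and
         * the scheduling clock up to 10% slow, a 6 minute work period stays
         * well below that bound. */
        double worst = 6.0 * 1.06 / 0.90;                     /* ~7.1 min  */

        printf("82576: %.2f h, 82580: %.2f min, worst case: %.2f min\n",
               h82576, m82580, worst);
        return 0;
}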
+ * + * The 'tmreg_lock' spinlock is used to protect the consistency of the + * system time value. This is needed because reading the 64 bit time + * value involves reading two (or three) 32 bit registers. The first + * read latches the value. Ditto for writing. + * + * In addition, here have extended the system time with an overflow + * counter in software. + **/ +static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + unsigned long flags; + u64 ns; + + switch (adapter->hw.mac.type) { + case e1000_82576: + case e1000_82580: + case e1000_i354: + case e1000_i350: + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + ns = timecounter_cyc2time(&adapter->tc, systim); + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(ns); + break; + case e1000_i210: + case e1000_i211: + memset(hwtstamps, 0, sizeof(*hwtstamps)); + /* Upper 32 bits contain s, lower 32 bits contain ns. */ + hwtstamps->hwtstamp = ktime_set(systim >> 32, + systim & 0xFFFFFFFF); + break; + default: + break; + } +} + +/* PTP clock operations */ +static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + int neg_adj = 0; + u64 rate; + u32 incvalue; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + rate = ppb; + rate <<= 14; + rate = div_u64(rate, 1953125); + + incvalue = 16 << IGB_82576_TSYNC_SHIFT; + + if (neg_adj) + incvalue -= rate; + else + incvalue += rate; + + wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK)); + + return 0; +} + +static int igb_ptp_adjfine_82580(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + int neg_adj = 0; + u64 rate; + u32 inca; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + rate = scaled_ppm; + rate <<= 13; + rate = div_u64(rate, 15625); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + wr32(E1000_TIMINCA, inca); + + return 0; +} + +static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + timecounter_adjtime(&igb->tc, delta); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + struct timespec64 now, then = ns_to_timespec64(delta); + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_read_i210(igb, &now); + now = timespec64_add(now, then); + igb_ptp_write_i210(igb, (const struct timespec64 *)&now); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_gettimex_82576(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + unsigned long flags; + u32 lo, hi; + u64 ns; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ptp_read_system_prets(sts); + lo = rd32(E1000_SYSTIML); + ptp_read_system_postts(sts); + hi = rd32(E1000_SYSTIMH); + + ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | 
lo); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int igb_ptp_gettimex_82580(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + unsigned long flags; + u32 lo, hi; + u64 ns; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ptp_read_system_prets(sts); + rd32(E1000_SYSTIMR); + ptp_read_system_postts(sts); + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int igb_ptp_gettimex_i210(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ptp_read_system_prets(sts); + rd32(E1000_SYSTIMR); + ptp_read_system_postts(sts); + ts->tv_nsec = rd32(E1000_SYSTIML); + ts->tv_sec = rd32(E1000_SYSTIMH); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_settime_82576(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + u64 ns; + + ns = timespec64_to_ns(ts); + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + timecounter_init(&igb->tc, &igb->cc, ns); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_settime_i210(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_write_i210(igb, ts); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) +{ + u32 *ptr = pin < 2 ? ctrl : ctrl_ext; + static const u32 mask[IGB_N_SDP] = { + E1000_CTRL_SDP0_DIR, + E1000_CTRL_SDP1_DIR, + E1000_CTRL_EXT_SDP2_DIR, + E1000_CTRL_EXT_SDP3_DIR, + }; + + if (input) + *ptr &= ~mask[pin]; + else + *ptr |= mask[pin]; +} + +static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin) +{ + static const u32 aux0_sel_sdp[IGB_N_SDP] = { + AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, + }; + static const u32 aux1_sel_sdp[IGB_N_SDP] = { + AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, + }; + static const u32 ts_sdp_en[IGB_N_SDP] = { + TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, + }; + struct e1000_hw *hw = &igb->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(E1000_CTRL); + ctrl_ext = rd32(E1000_CTRL_EXT); + tssdp = rd32(E1000_TSSDP); + + igb_pin_direction(pin, 1, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an output. 
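The gettimex64 callbacks above bracket the latching SYSTIM read with ptp_read_system_prets()/ptp_read_system_postts(); userspace can observe exactly that bracket through the PTP_SYS_OFFSET_EXTENDED ioctl on the clock's character device. A rough sketch, assuming the clock shows up as /dev/ptp0:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
        struct ptp_sys_offset_extended req;
        int fd = open("/dev/ptp0", O_RDWR);     /* assumed clock device */

        if (fd < 0)
                return 1;

        memset(&req, 0, sizeof(req));
        req.n_samples = 1;

        /* Each sample is system-pre / device / system-post, i.e. the
         * prets/postts bracket around the SYSTIML read seen above. */
        if (ioctl(fd, PTP_SYS_OFFSET_EXTENDED, &req) == 0)
                printf("dev %lld.%09u (sys pre %lld.%09u, post %lld.%09u)\n",
                       (long long)req.ts[0][1].sec, req.ts[0][1].nsec,
                       (long long)req.ts[0][0].sec, req.ts[0][0].nsec,
                       (long long)req.ts[0][2].sec, req.ts[0][2].nsec);
        return 0;
}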
*/ + tssdp &= ~ts_sdp_en[pin]; + + if (chan == 1) { + tssdp &= ~AUX1_SEL_SDP3; + tssdp |= aux1_sel_sdp[pin] | AUX1_TS_SDP_EN; + } else { + tssdp &= ~AUX0_SEL_SDP3; + tssdp |= aux0_sel_sdp[pin] | AUX0_TS_SDP_EN; + } + + wr32(E1000_TSSDP, tssdp); + wr32(E1000_CTRL, ctrl); + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq) +{ + static const u32 aux0_sel_sdp[IGB_N_SDP] = { + AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, + }; + static const u32 aux1_sel_sdp[IGB_N_SDP] = { + AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, + }; + static const u32 ts_sdp_en[IGB_N_SDP] = { + TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, + }; + static const u32 ts_sdp_sel_tt0[IGB_N_SDP] = { + TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0, + TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0, + }; + static const u32 ts_sdp_sel_tt1[IGB_N_SDP] = { + TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1, + TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1, + }; + static const u32 ts_sdp_sel_fc0[IGB_N_SDP] = { + TS_SDP0_SEL_FC0, TS_SDP1_SEL_FC0, + TS_SDP2_SEL_FC0, TS_SDP3_SEL_FC0, + }; + static const u32 ts_sdp_sel_fc1[IGB_N_SDP] = { + TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, + TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, + }; + static const u32 ts_sdp_sel_clr[IGB_N_SDP] = { + TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, + TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, + }; + struct e1000_hw *hw = &igb->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(E1000_CTRL); + ctrl_ext = rd32(E1000_CTRL_EXT); + tssdp = rd32(E1000_TSSDP); + + igb_pin_direction(pin, 0, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an input. */ + if ((tssdp & AUX0_SEL_SDP3) == aux0_sel_sdp[pin]) + tssdp &= ~AUX0_TS_SDP_EN; + + if ((tssdp & AUX1_SEL_SDP3) == aux1_sel_sdp[pin]) + tssdp &= ~AUX1_TS_SDP_EN; + + tssdp &= ~ts_sdp_sel_clr[pin]; + if (freq) { + if (chan == 1) + tssdp |= ts_sdp_sel_fc1[pin]; + else + tssdp |= ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= ts_sdp_sel_tt1[pin]; + else + tssdp |= ts_sdp_sel_tt0[pin]; + } + tssdp |= ts_sdp_en[pin]; + + wr32(E1000_TSSDP, tssdp); + wr32(E1000_CTRL, ctrl); + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct igb_adapter *igb = + container_of(ptp, struct igb_adapter, ptp_caps); + struct e1000_hw *hw = &igb->hw; + u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; + unsigned long flags; + struct timespec64 ts; + int use_freq = 0, pin = -1; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests failing to enable both edges. 
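The SDP routing set up by igb_pin_extts() and igb_pin_perout() is normally reached from userspace through the PTP character device. A hypothetical caller (device path, channel and pin assignment are assumptions) issues the ioctls from linux/ptp_clock.h, which arrive in igb_ptp_feature_enable_i210() as PTP_CLK_REQ_EXTTS and PTP_CLK_REQ_PEROUT:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
        struct ptp_extts_request extts;
        struct ptp_perout_request perout;
        int fd = open("/dev/ptp0", O_RDWR);     /* assumed clock device */

        if (fd < 0)
                return 1;

        /* Timestamp rising edges on external timestamp channel 0. */
        memset(&extts, 0, sizeof(extts));
        extts.index = 0;
        extts.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;
        if (ioctl(fd, PTP_EXTTS_REQUEST, &extts))
                perror("PTP_EXTTS_REQUEST");

        /* 1 Hz periodic output on channel 0 (a 500000000 ns half period,
         * which the driver maps onto the frequency-output path). */
        memset(&perout, 0, sizeof(perout));
        perout.index = 0;
        perout.period.sec = 1;
        if (ioctl(fd, PTP_PEROUT_REQUEST, &perout))
                perror("PTP_PEROUT_REQUEST");

        return 0;
}

Before either request can succeed, the SDP pin has to be assigned to the channel with the PTP_PIN_SETFUNC ioctl (or via sysfs); otherwise ptp_find_pin() finds no match and the driver returns -EBUSY.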
*/ + if ((rq->extts.flags & PTP_STRICT_FLAGS) && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = TSAUXC_EN_TS1; + tsim_mask = TSINTR_AUTT1; + } else { + tsauxc_mask = TSAUXC_EN_TS0; + tsim_mask = TSINTR_AUTT0; + } + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsauxc = rd32(E1000_TSAUXC); + tsim = rd32(E1000_TSIM); + if (on) { + igb_pin_extts(igb, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(E1000_TSAUXC, tsauxc); + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && ((ns <= 70000000LL) || (ns == 125000000LL) || + (ns == 250000000LL) || (ns == 500000000LL))) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; + } + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + if (use_freq) { + tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1; + tsim_mask = 0; + } else { + tsauxc_mask = TSAUXC_EN_TT1; + tsim_mask = TSINTR_TT1; + } + trgttiml = E1000_TRGTTIML1; + trgttimh = E1000_TRGTTIMH1; + freqout = E1000_FREQOUT1; + } else { + if (use_freq) { + tsauxc_mask = TSAUXC_EN_CLK0 | TSAUXC_ST0; + tsim_mask = 0; + } else { + tsauxc_mask = TSAUXC_EN_TT0; + tsim_mask = TSINTR_TT0; + } + trgttiml = E1000_TRGTTIML0; + trgttimh = E1000_TRGTTIMH0; + freqout = E1000_FREQOUT0; + } + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsauxc = rd32(E1000_TSAUXC); + tsim = rd32(E1000_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1); + tsim &= ~TSINTR_TT1; + } else { + tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0); + tsim &= ~TSINTR_TT0; + } + if (on) { + int i = rq->perout.index; + igb_pin_perout(igb, i, pin, use_freq); + igb->perout[i].start.tv_sec = rq->perout.start.sec; + igb->perout[i].start.tv_nsec = rq->perout.start.nsec; + igb->perout[i].period.tv_sec = ts.tv_sec; + igb->perout[i].period.tv_nsec = ts.tv_nsec; + wr32(trgttimh, rq->perout.start.sec); + wr32(trgttiml, rq->perout.start.nsec); + if (use_freq) + wr32(freqout, ns); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(E1000_TSAUXC, tsauxc); + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsim = rd32(E1000_TSIM); + if (on) + tsim |= TSINTR_SYS_WRAP; + else + tsim &= ~TSINTR_SYS_WRAP; + igb->pps_sys_wrap_on = !!on; + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + } + + return -EOPNOTSUPP; +} + +static int igb_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +static int igb_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +/** + * 
igb_ptp_tx_work + * @work: pointer to work struct + * + * This work function polls the TSYNCTXCTL valid bit to determine when a + * timestamp has been taken for the current stored skb. + **/ +static void igb_ptp_tx_work(struct work_struct *work) +{ + struct igb_adapter *adapter = container_of(work, struct igb_adapter, + ptp_tx_work); + struct e1000_hw *hw = &adapter->hw; + u32 tsynctxctl; + + if (!adapter->ptp_tx_skb) + return; + + if (time_is_before_jiffies(adapter->ptp_tx_start + + IGB_PTP_TX_TIMEOUT)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + adapter->tx_hwtstamp_timeouts++; + /* Clear the tx valid bit in TSYNCTXCTL register to enable + * interrupt + */ + rd32(E1000_TXSTMPH); + dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); + return; + } + + tsynctxctl = rd32(E1000_TSYNCTXCTL); + if (tsynctxctl & E1000_TSYNCTXCTL_VALID) + igb_ptp_tx_hwtstamp(adapter); + else + /* reschedule to check later */ + schedule_work(&adapter->ptp_tx_work); +} + +static void igb_ptp_overflow_check(struct work_struct *work) +{ + struct igb_adapter *igb = + container_of(work, struct igb_adapter, ptp_overflow_work.work); + struct timespec64 ts; + u64 ns; + + /* Update the timecounter */ + ns = timecounter_read(&igb->tc); + + ts = ns_to_timespec64(ns); + pr_debug("igb overflow check at %lld.%09lu\n", + (long long) ts.tv_sec, ts.tv_nsec); + + schedule_delayed_work(&igb->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); +} + +/** + * igb_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure + * + * This watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. + **/ +void igb_ptp_rx_hang(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); + unsigned long rx_event; + + /* Other hardware uses per-packet timestamps */ + if (hw->mac.type != e1000_82576) + return; + + /* If we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & E1000_TSYNCRXCTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; + } + + /* Determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + if (time_after(adapter->last_rx_timestamp, rx_event)) + rx_event = adapter->last_rx_timestamp; + + /* Only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(E1000_RXSTMPH); + adapter->last_rx_ptp_check = jiffies; + adapter->rx_hwtstamp_cleared++; + dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n"); + } +} + +/** + * igb_ptp_tx_hang - detect error case where Tx timestamp never finishes + * @adapter: private network adapter structure + */ +void igb_ptp_tx_hang(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + IGB_PTP_TX_TIMEOUT); + + if (!adapter->ptp_tx_skb) + return; + + if (!test_bit(__IGB_PTP_TX_IN_PROGRESS, &adapter->state)) + return; + + /* If we haven't received a timestamp within the timeout, it is + * reasonable to assume that it will never occur, so we can unlock the + * timestamp bit when this occurs. 
+ */ + if (timeout) { + cancel_work_sync(&adapter->ptp_tx_work); + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + adapter->tx_hwtstamp_timeouts++; + /* Clear the tx valid bit in TSYNCTXCTL register to enable + * interrupt + */ + rd32(E1000_TXSTMPH); + dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); + } +} + +/** + * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: Board private structure. + * + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we only + * allow only one such packet into the queue. + **/ +static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) +{ + struct sk_buff *skb = adapter->ptp_tx_skb; + struct e1000_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval; + int adjust = 0; + + regval = rd32(E1000_TXSTMPL); + regval |= (u64)rd32(E1000_TXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + /* adjust timestamp for the TX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_TX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_TX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_TX_LATENCY_1000; + break; + } + } + + shhwtstamps.hwtstamp = + ktime_add_ns(shhwtstamps.hwtstamp, adjust); + + /* Clear the lock early before calling skb_tstamp_tx so that + * applications are not woken up before the lock bit is clear. We use + * a copy of the skb pointer to ensure other threads can't change it + * while we're notifying the stack. + */ + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + + /* Notify the stack and free the skb after we've unlocked */ + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); +} + +/** + * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp + * @q_vector: Pointer to interrupt specific structure + * @va: Pointer to address containing Rx buffer + * @timestamp: Pointer where timestamp will be stored + * + * This function is meant to retrieve a timestamp from the first buffer of an + * incoming frame. The value is stored in little endian format starting on + * byte 8 + * + * Returns: The timestamp header length or 0 if not available + **/ +int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, + ktime_t *timestamp) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct skb_shared_hwtstamps ts; + __le64 *regval = (__le64 *)va; + int adjust = 0; + + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return 0; + + /* The timestamp is recorded in little endian format. 
+ * DWORD: 0 1 2 3 + * Field: Reserved Reserved SYSTIML SYSTIMH + */ + + /* check reserved dwords are zero, be/le doesn't matter for zero */ + if (regval[0]) + return 0; + + igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1])); + + /* adjust timestamp for the RX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_RX_LATENCY_1000; + break; + } + } + + *timestamp = ktime_sub_ns(ts.hwtstamp, adjust); + + return IGB_TS_HDR_LEN; +} + +/** + * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register + * @q_vector: Pointer to interrupt specific structure + * @skb: Buffer containing timestamp and packet + * + * This function is meant to retrieve a timestamp from the internal registers + * of the adapter and store it in the skb. + **/ +void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + int adjust = 0; + u64 regval; + + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return; + + /* If this bit is set, then the RX registers contain the time stamp. No + * other packet will be time stamped until we read these registers, so + * read the registers to make them available again. Because only one + * packet can be time stamped at a time, we know that the register + * values must belong to this one here and therefore we don't need to + * compare any of the additional attributes stored for it. + * + * If nothing went wrong, then it should have a shared tx_flags that we + * can turn into a skb_shared_hwtstamps. + */ + if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + + regval = rd32(E1000_RXSTMPL); + regval |= (u64)rd32(E1000_RXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); + + /* adjust timestamp for the RX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_RX_LATENCY_1000; + break; + } + } + skb_hwtstamps(skb)->hwtstamp = + ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); + + /* Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. + */ + adapter->last_rx_timestamp = jiffies; +} + +/** + * igb_ptp_get_ts_config - get hardware time stamping config + * @netdev: netdev struct + * @ifr: interface struct + * + * Get the hwtstamp_config settings to return to the user. Rather than attempt + * to deconstruct the settings from the registers, just return a shadow copy + * of the last known settings. + **/ +int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/** + * igb_ptp_set_timestamp_mode - setup hardware for timestamping + * @adapter: networking device structure + * @config: hwtstamp configuration + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't case any overhead + * when no packet needs it. 
At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". + */ +static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, + struct hwtstamp_config *config) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + u32 tsync_rx_cfg = 0; + bool is_l4 = false; + bool is_l2 = false; + u32 regval; + + /* reserved for future extensions */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + break; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_NTP_ALL: + case HWTSTAMP_FILTER_ALL: + /* 82576 cannot timestamp all packets, which it needs to do to + * support both V1 Sync and Delay_Req messages + */ + if (hw->mac.type != e1000_82576) { + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + fallthrough; + default: + config->rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + if (hw->mac.type == e1000_82575) { + if (tsync_rx_ctl | tsync_tx_ctl) + return -EINVAL; + return 0; + } + + /* Per-packet timestamping only works if all packets are + * timestamped, so enable timestamping in all packets as + * long as one Rx filter was configured. 
+ */ + if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { + tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + is_l2 = true; + is_l4 = true; + + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) { + regval = rd32(E1000_RXPBS); + regval |= E1000_RXPBS_CFG_TS_EN; + wr32(E1000_RXPBS, regval); + } + } + + /* enable/disable TX */ + regval = rd32(E1000_TSYNCTXCTL); + regval &= ~E1000_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; + wr32(E1000_TSYNCTXCTL, regval); + + /* enable/disable RX */ + regval = rd32(E1000_TSYNCRXCTL); + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; + wr32(E1000_TSYNCRXCTL, regval); + + /* define which PTP packets are time stamped */ + wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); + + /* define ethertype filter for timestamped packets */ + if (is_l2) + wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), + (E1000_ETQF_FILTER_ENABLE | /* enable filter */ + E1000_ETQF_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), 0); + + /* L4 Queue Filter[3]: filter by destination port and protocol */ + if (is_l4) { + u32 ftqf = (IPPROTO_UDP /* UDP */ + | E1000_FTQF_VF_BP /* VF not compared */ + | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ + | E1000_FTQF_MASK); /* mask all inputs */ + ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ + + wr32(E1000_IMIR(3), (__force unsigned int)htons(PTP_EV_PORT)); + wr32(E1000_IMIREXT(3), + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); + if (hw->mac.type == e1000_82576) { + /* enable source port check */ + wr32(E1000_SPQF(3), (__force unsigned int)htons(PTP_EV_PORT)); + ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; + } + wr32(E1000_FTQF(3), ftqf); + } else { + wr32(E1000_FTQF(3), E1000_FTQF_MASK); + } + wrfl(); + + /* clear TX/RX time stamp registers, just to be sure */ + regval = rd32(E1000_TXSTMPL); + regval = rd32(E1000_TXSTMPH); + regval = rd32(E1000_RXSTMPL); + regval = rd32(E1000_RXSTMPH); + + return 0; +} + +/** + * igb_ptp_set_ts_config - set hardware time stamping config + * @netdev: netdev struct + * @ifr: interface struct + * + **/ +int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = igb_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * igb_ptp_init - Initialize PTP functionality + * @adapter: Board private structure + * + * This function is called at device probe to initialize the PTP + * functionality. 
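For reference, igb_ptp_get_ts_config() and igb_ptp_set_ts_config() above implement the standard SIOCGHWTSTAMP/SIOCSHWTSTAMP interface, which only applies while the NIC is driven by the normal network stack rather than claimed as an EtherCAT device. A minimal userspace caller might look like this (the interface name is a placeholder); on 82580 and later parts the driver widens the filter and reports HWTSTAMP_FILTER_ALL back in the returned config:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;
        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
        ifr.ifr_data = (void *)&cfg;

        /* Reaches igb_ptp_set_ts_config(); the driver may adjust cfg. */
        if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
                perror("SIOCSHWTSTAMP");
        else
                printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

        return 0;
}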
+ */ +void igb_ptp_init(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int i; + + switch (hw->mac.type) { + case e1000_82576: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 999999881; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82576; + adapter->ptp_caps.settime64 = igb_ptp_settime_82576; + adapter->ptp_caps.enable = igb_ptp_feature_enable; + adapter->cc.read = igb_ptp_read_82576; + adapter->cc.mask = CYCLECOUNTER_MASK(64); + adapter->cc.mult = 1; + adapter->cc.shift = IGB_82576_TSYNC_SHIFT; + adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK; + break; + case e1000_82580: + case e1000_i354: + case e1000_i350: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82580; + adapter->ptp_caps.settime64 = igb_ptp_settime_82576; + adapter->ptp_caps.enable = igb_ptp_feature_enable; + adapter->cc.read = igb_ptp_read_82580; + adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580); + adapter->cc.mult = 1; + adapter->cc.shift = 0; + adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK; + break; + case e1000_i210: + case e1000_i211: + for (i = 0; i < IGB_N_SDP; i++) { + struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS; + adapter->ptp_caps.n_per_out = IGB_N_PEROUT; + adapter->ptp_caps.n_pins = IGB_N_SDP; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_i210; + adapter->ptp_caps.settime64 = igb_ptp_settime_i210; + adapter->ptp_caps.enable = igb_ptp_feature_enable_i210; + adapter->ptp_caps.verify = igb_ptp_verify_pin; + break; + default: + adapter->ptp_clock = NULL; + return; + } + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + INIT_DELAYED_WORK(&adapter->ptp_overflow_work, + igb_ptp_overflow_check); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + igb_ptp_reset(adapter); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n"); + } else if (adapter->ptp_clock) { + dev_info(&adapter->pdev->dev, "added PHC on %s\n", + adapter->netdev->name); + adapter->ptp_flags |= IGB_PTP_ENABLED; + } +} + +/** + * igb_ptp_suspend - Disable PTP work items and prepare for suspend + * @adapter: Board private structure + * + * This function stops the overflow check work and PTP Tx timestamp work, and + * 
will prepare the device for OS suspend. + */ +void igb_ptp_suspend(struct igb_adapter *adapter) +{ + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return; + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + cancel_delayed_work_sync(&adapter->ptp_overflow_work); + + cancel_work_sync(&adapter->ptp_tx_work); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + } +} + +/** + * igb_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. + **/ +void igb_ptp_stop(struct igb_adapter *adapter) +{ + igb_ptp_suspend(adapter); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + dev_info(&adapter->pdev->dev, "removed PHC on %s\n", + adapter->netdev->name); + adapter->ptp_flags &= ~IGB_PTP_ENABLED; + } +} + +/** + * igb_ptp_reset - Re-enable the adapter for PTP following a reset. + * @adapter: Board private structure. + * + * This function handles the reset work required to re-enable the PTP device. + **/ +void igb_ptp_reset(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long flags; + + /* reset the tstamp_config */ + igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + switch (adapter->hw.mac.type) { + case e1000_82576: + /* Dial the nominal frequency. */ + wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); + break; + case e1000_82580: + case e1000_i354: + case e1000_i350: + case e1000_i210: + case e1000_i211: + wr32(E1000_TSAUXC, 0x0); + wr32(E1000_TSSDP, 0x0); + wr32(E1000_TSIM, + TSYNC_INTERRUPTS | + (adapter->pps_sys_wrap_on ? TSINTR_SYS_WRAP : 0)); + wr32(E1000_IMS, E1000_IMS_TS); + break; + default: + /* No work to do. */ + goto out; + } + + /* Re-initialize the timer. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { + struct timespec64 ts = ktime_to_timespec64(ktime_get_real()); + + igb_ptp_write_i210(adapter, &ts); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } +out: + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + wrfl(); + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + schedule_delayed_work(&adapter->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); +} diff --git a/devices/igb/igb_ptp-5.14-orig.c b/devices/igb/igb_ptp-5.14-orig.c new file mode 100644 index 00000000..0011b15e --- /dev/null +++ b/devices/igb/igb_ptp-5.14-orig.c @@ -0,0 +1,1383 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2011 Richard Cochran */ + +#include +#include +#include +#include + +#include "igb.h" + +#define INCVALUE_MASK 0x7fffffff +#define ISGN 0x80000000 + +/* The 82580 timesync updates the system timer every 8ns by 8ns, + * and this update value cannot be reprogrammed. + * + * Neither the 82576 nor the 82580 offer registers wide enough to hold + * nanoseconds time values for very long. For the 82580, SYSTIM always + * counts nanoseconds, but the upper 24 bits are not available. The + * frequency is adjusted by changing the 32 bit fractional nanoseconds + * register, TIMINCA. + * + * For the 82576, the SYSTIM register time unit is affect by the + * choice of the 24 bit TININCA:IV (incvalue) field. Five bits of this + * field are needed to provide the nominal 16 nanosecond period, + * leaving 19 bits for fractional nanoseconds. 
+ * + * We scale the NIC clock cycle by a large factor so that relatively + * small clock corrections can be added or subtracted at each clock + * tick. The drawbacks of a large factor are a) that the clock + * register overflows more quickly (not such a big deal) and b) that + * the increment per tick has to fit into 24 bits. As a result we + * need to use a shift of 19 so we can fit a value of 16 into the + * TIMINCA register. + * + * + * SYSTIMH SYSTIML + * +--------------+ +---+---+------+ + * 82576 | 32 | | 8 | 5 | 19 | + * +--------------+ +---+---+------+ + * \________ 45 bits _______/ fract + * + * +----------+---+ +--------------+ + * 82580 | 24 | 8 | | 32 | + * +----------+---+ +--------------+ + * reserved \______ 40 bits _____/ + * + * + * The 45 bit 82576 SYSTIM overflows every + * 2^45 * 10^-9 / 3600 = 9.77 hours. + * + * The 40 bit 82580 SYSTIM overflows every + * 2^40 * 10^-9 / 60 = 18.3 minutes. + * + * SYSTIM is converted to real time using a timecounter. As + * timecounter_cyc2time() allows old timestamps, the timecounter needs + * to be updated at least once per half of the SYSTIM interval. + * Scheduling of delayed work is not very accurate, and also the NIC + * clock can be adjusted to run up to 6% faster and the system clock + * up to 10% slower, so we aim for 6 minutes to be sure the actual + * interval in the NIC time is shorter than 9.16 minutes. + */ + +#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 6) +#define IGB_PTP_TX_TIMEOUT (HZ * 15) +#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT) +#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0) +#define INCVALUE_82576 (16u << IGB_82576_TSYNC_SHIFT) +#define IGB_NBITS_82580 40 + +static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); + +/* SYSTIM read access for the 82576 */ +static u64 igb_ptp_read_82576(const struct cyclecounter *cc) +{ + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); + struct e1000_hw *hw = &igb->hw; + u64 val; + u32 lo, hi; + + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; + + return val; +} + +/* SYSTIM read access for the 82580 */ +static u64 igb_ptp_read_82580(const struct cyclecounter *cc) +{ + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); + struct e1000_hw *hw = &igb->hw; + u32 lo, hi; + u64 val; + + /* The timestamp latches on lowest register read. For the 82580 + * the lowest register is SYSTIMR instead of SYSTIML. However we only + * need to provide nanosecond resolution, so we just ignore it. + */ + rd32(E1000_SYSTIMR); + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; + + return val; +} + +/* SYSTIM read access for I210/I211 */ +static void igb_ptp_read_i210(struct igb_adapter *adapter, + struct timespec64 *ts) +{ + struct e1000_hw *hw = &adapter->hw; + u32 sec, nsec; + + /* The timestamp latches on lowest register read. For I210/I211, the + * lowest register is SYSTIMR. Since we only need to provide nanosecond + * resolution, we can ignore it. + */ + rd32(E1000_SYSTIMR); + nsec = rd32(E1000_SYSTIML); + sec = rd32(E1000_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} + +static void igb_ptp_write_i210(struct igb_adapter *adapter, + const struct timespec64 *ts) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Writing the SYSTIMR register is not necessary as it only provides + * sub-nanosecond resolution. 
+ */ + wr32(E1000_SYSTIML, ts->tv_nsec); + wr32(E1000_SYSTIMH, (u32)ts->tv_sec); +} + +/** + * igb_ptp_systim_to_hwtstamp - convert system time value to hw timestamp + * @adapter: board private structure + * @hwtstamps: timestamp structure to update + * @systim: unsigned 64bit system time value. + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions. + * + * The 'tmreg_lock' spinlock is used to protect the consistency of the + * system time value. This is needed because reading the 64 bit time + * value involves reading two (or three) 32 bit registers. The first + * read latches the value. Ditto for writing. + * + * In addition, here have extended the system time with an overflow + * counter in software. + **/ +static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + unsigned long flags; + u64 ns; + + switch (adapter->hw.mac.type) { + case e1000_82576: + case e1000_82580: + case e1000_i354: + case e1000_i350: + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + ns = timecounter_cyc2time(&adapter->tc, systim); + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(ns); + break; + case e1000_i210: + case e1000_i211: + memset(hwtstamps, 0, sizeof(*hwtstamps)); + /* Upper 32 bits contain s, lower 32 bits contain ns. */ + hwtstamps->hwtstamp = ktime_set(systim >> 32, + systim & 0xFFFFFFFF); + break; + default: + break; + } +} + +/* PTP clock operations */ +static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + int neg_adj = 0; + u64 rate; + u32 incvalue; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + rate = ppb; + rate <<= 14; + rate = div_u64(rate, 1953125); + + incvalue = 16 << IGB_82576_TSYNC_SHIFT; + + if (neg_adj) + incvalue -= rate; + else + incvalue += rate; + + wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK)); + + return 0; +} + +static int igb_ptp_adjfine_82580(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + int neg_adj = 0; + u64 rate; + u32 inca; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + rate = scaled_ppm; + rate <<= 13; + rate = div_u64(rate, 15625); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + wr32(E1000_TIMINCA, inca); + + return 0; +} + +static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + timecounter_adjtime(&igb->tc, delta); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + struct timespec64 now, then = ns_to_timespec64(delta); + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_read_i210(igb, &now); + now = timespec64_add(now, then); + igb_ptp_write_i210(igb, (const struct timespec64 *)&now); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_gettimex_82576(struct 
ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + unsigned long flags; + u32 lo, hi; + u64 ns; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ptp_read_system_prets(sts); + lo = rd32(E1000_SYSTIML); + ptp_read_system_postts(sts); + hi = rd32(E1000_SYSTIMH); + + ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int igb_ptp_gettimex_82580(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + unsigned long flags; + u32 lo, hi; + u64 ns; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ptp_read_system_prets(sts); + rd32(E1000_SYSTIMR); + ptp_read_system_postts(sts); + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int igb_ptp_gettimex_i210(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ptp_read_system_prets(sts); + rd32(E1000_SYSTIMR); + ptp_read_system_postts(sts); + ts->tv_nsec = rd32(E1000_SYSTIML); + ts->tv_sec = rd32(E1000_SYSTIMH); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_settime_82576(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + u64 ns; + + ns = timespec64_to_ns(ts); + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + timecounter_init(&igb->tc, &igb->cc, ns); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_settime_i210(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_write_i210(igb, ts); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) +{ + u32 *ptr = pin < 2 ? ctrl : ctrl_ext; + static const u32 mask[IGB_N_SDP] = { + E1000_CTRL_SDP0_DIR, + E1000_CTRL_SDP1_DIR, + E1000_CTRL_EXT_SDP2_DIR, + E1000_CTRL_EXT_SDP3_DIR, + }; + + if (input) + *ptr &= ~mask[pin]; + else + *ptr |= mask[pin]; +} + +static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin) +{ + static const u32 aux0_sel_sdp[IGB_N_SDP] = { + AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, + }; + static const u32 aux1_sel_sdp[IGB_N_SDP] = { + AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, + }; + static const u32 ts_sdp_en[IGB_N_SDP] = { + TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, + }; + struct e1000_hw *hw = &igb->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(E1000_CTRL); + ctrl_ext = rd32(E1000_CTRL_EXT); + tssdp = rd32(E1000_TSSDP); + + igb_pin_direction(pin, 1, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an output. 
*/ + tssdp &= ~ts_sdp_en[pin]; + + if (chan == 1) { + tssdp &= ~AUX1_SEL_SDP3; + tssdp |= aux1_sel_sdp[pin] | AUX1_TS_SDP_EN; + } else { + tssdp &= ~AUX0_SEL_SDP3; + tssdp |= aux0_sel_sdp[pin] | AUX0_TS_SDP_EN; + } + + wr32(E1000_TSSDP, tssdp); + wr32(E1000_CTRL, ctrl); + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq) +{ + static const u32 aux0_sel_sdp[IGB_N_SDP] = { + AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, + }; + static const u32 aux1_sel_sdp[IGB_N_SDP] = { + AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, + }; + static const u32 ts_sdp_en[IGB_N_SDP] = { + TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, + }; + static const u32 ts_sdp_sel_tt0[IGB_N_SDP] = { + TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0, + TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0, + }; + static const u32 ts_sdp_sel_tt1[IGB_N_SDP] = { + TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1, + TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1, + }; + static const u32 ts_sdp_sel_fc0[IGB_N_SDP] = { + TS_SDP0_SEL_FC0, TS_SDP1_SEL_FC0, + TS_SDP2_SEL_FC0, TS_SDP3_SEL_FC0, + }; + static const u32 ts_sdp_sel_fc1[IGB_N_SDP] = { + TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, + TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, + }; + static const u32 ts_sdp_sel_clr[IGB_N_SDP] = { + TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, + TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, + }; + struct e1000_hw *hw = &igb->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(E1000_CTRL); + ctrl_ext = rd32(E1000_CTRL_EXT); + tssdp = rd32(E1000_TSSDP); + + igb_pin_direction(pin, 0, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an input. */ + if ((tssdp & AUX0_SEL_SDP3) == aux0_sel_sdp[pin]) + tssdp &= ~AUX0_TS_SDP_EN; + + if ((tssdp & AUX1_SEL_SDP3) == aux1_sel_sdp[pin]) + tssdp &= ~AUX1_TS_SDP_EN; + + tssdp &= ~ts_sdp_sel_clr[pin]; + if (freq) { + if (chan == 1) + tssdp |= ts_sdp_sel_fc1[pin]; + else + tssdp |= ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= ts_sdp_sel_tt1[pin]; + else + tssdp |= ts_sdp_sel_tt0[pin]; + } + tssdp |= ts_sdp_en[pin]; + + wr32(E1000_TSSDP, tssdp); + wr32(E1000_CTRL, ctrl); + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct igb_adapter *igb = + container_of(ptp, struct igb_adapter, ptp_caps); + struct e1000_hw *hw = &igb->hw; + u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; + unsigned long flags; + struct timespec64 ts; + int use_freq = 0, pin = -1; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests failing to enable both edges. 
*/ + if ((rq->extts.flags & PTP_STRICT_FLAGS) && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = TSAUXC_EN_TS1; + tsim_mask = TSINTR_AUTT1; + } else { + tsauxc_mask = TSAUXC_EN_TS0; + tsim_mask = TSINTR_AUTT0; + } + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsauxc = rd32(E1000_TSAUXC); + tsim = rd32(E1000_TSIM); + if (on) { + igb_pin_extts(igb, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(E1000_TSAUXC, tsauxc); + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && ((ns <= 70000000LL) || (ns == 125000000LL) || + (ns == 250000000LL) || (ns == 500000000LL))) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; + } + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + if (use_freq) { + tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1; + tsim_mask = 0; + } else { + tsauxc_mask = TSAUXC_EN_TT1; + tsim_mask = TSINTR_TT1; + } + trgttiml = E1000_TRGTTIML1; + trgttimh = E1000_TRGTTIMH1; + freqout = E1000_FREQOUT1; + } else { + if (use_freq) { + tsauxc_mask = TSAUXC_EN_CLK0 | TSAUXC_ST0; + tsim_mask = 0; + } else { + tsauxc_mask = TSAUXC_EN_TT0; + tsim_mask = TSINTR_TT0; + } + trgttiml = E1000_TRGTTIML0; + trgttimh = E1000_TRGTTIMH0; + freqout = E1000_FREQOUT0; + } + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsauxc = rd32(E1000_TSAUXC); + tsim = rd32(E1000_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1); + tsim &= ~TSINTR_TT1; + } else { + tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0); + tsim &= ~TSINTR_TT0; + } + if (on) { + int i = rq->perout.index; + igb_pin_perout(igb, i, pin, use_freq); + igb->perout[i].start.tv_sec = rq->perout.start.sec; + igb->perout[i].start.tv_nsec = rq->perout.start.nsec; + igb->perout[i].period.tv_sec = ts.tv_sec; + igb->perout[i].period.tv_nsec = ts.tv_nsec; + wr32(trgttimh, rq->perout.start.sec); + wr32(trgttiml, rq->perout.start.nsec); + if (use_freq) + wr32(freqout, ns); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(E1000_TSAUXC, tsauxc); + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsim = rd32(E1000_TSIM); + if (on) + tsim |= TSINTR_SYS_WRAP; + else + tsim &= ~TSINTR_SYS_WRAP; + igb->pps_sys_wrap_on = !!on; + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + } + + return -EOPNOTSUPP; +} + +static int igb_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +static int igb_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +/** + * 
igb_ptp_tx_work + * @work: pointer to work struct + * + * This work function polls the TSYNCTXCTL valid bit to determine when a + * timestamp has been taken for the current stored skb. + **/ +static void igb_ptp_tx_work(struct work_struct *work) +{ + struct igb_adapter *adapter = container_of(work, struct igb_adapter, + ptp_tx_work); + struct e1000_hw *hw = &adapter->hw; + u32 tsynctxctl; + + if (!adapter->ptp_tx_skb) + return; + + if (time_is_before_jiffies(adapter->ptp_tx_start + + IGB_PTP_TX_TIMEOUT)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + adapter->tx_hwtstamp_timeouts++; + /* Clear the tx valid bit in TSYNCTXCTL register to enable + * interrupt + */ + rd32(E1000_TXSTMPH); + dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); + return; + } + + tsynctxctl = rd32(E1000_TSYNCTXCTL); + if (tsynctxctl & E1000_TSYNCTXCTL_VALID) + igb_ptp_tx_hwtstamp(adapter); + else + /* reschedule to check later */ + schedule_work(&adapter->ptp_tx_work); +} + +static void igb_ptp_overflow_check(struct work_struct *work) +{ + struct igb_adapter *igb = + container_of(work, struct igb_adapter, ptp_overflow_work.work); + struct timespec64 ts; + u64 ns; + + /* Update the timecounter */ + ns = timecounter_read(&igb->tc); + + ts = ns_to_timespec64(ns); + pr_debug("igb overflow check at %lld.%09lu\n", + (long long) ts.tv_sec, ts.tv_nsec); + + schedule_delayed_work(&igb->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); +} + +/** + * igb_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure + * + * This watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. + **/ +void igb_ptp_rx_hang(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); + unsigned long rx_event; + + /* Other hardware uses per-packet timestamps */ + if (hw->mac.type != e1000_82576) + return; + + /* If we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & E1000_TSYNCRXCTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; + } + + /* Determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + if (time_after(adapter->last_rx_timestamp, rx_event)) + rx_event = adapter->last_rx_timestamp; + + /* Only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(E1000_RXSTMPH); + adapter->last_rx_ptp_check = jiffies; + adapter->rx_hwtstamp_cleared++; + dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n"); + } +} + +/** + * igb_ptp_tx_hang - detect error case where Tx timestamp never finishes + * @adapter: private network adapter structure + */ +void igb_ptp_tx_hang(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + IGB_PTP_TX_TIMEOUT); + + if (!adapter->ptp_tx_skb) + return; + + if (!test_bit(__IGB_PTP_TX_IN_PROGRESS, &adapter->state)) + return; + + /* If we haven't received a timestamp within the timeout, it is + * reasonable to assume that it will never occur, so we can unlock the + * timestamp bit when this occurs. 
+ */ + if (timeout) { + cancel_work_sync(&adapter->ptp_tx_work); + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + adapter->tx_hwtstamp_timeouts++; + /* Clear the tx valid bit in TSYNCTXCTL register to enable + * interrupt + */ + rd32(E1000_TXSTMPH); + dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); + } +} + +/** + * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: Board private structure. + * + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we only + * allow only one such packet into the queue. + **/ +static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) +{ + struct sk_buff *skb = adapter->ptp_tx_skb; + struct e1000_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval; + int adjust = 0; + + regval = rd32(E1000_TXSTMPL); + regval |= (u64)rd32(E1000_TXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + /* adjust timestamp for the TX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_TX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_TX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_TX_LATENCY_1000; + break; + } + } + + shhwtstamps.hwtstamp = + ktime_add_ns(shhwtstamps.hwtstamp, adjust); + + /* Clear the lock early before calling skb_tstamp_tx so that + * applications are not woken up before the lock bit is clear. We use + * a copy of the skb pointer to ensure other threads can't change it + * while we're notifying the stack. + */ + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + + /* Notify the stack and free the skb after we've unlocked */ + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); +} + +/** + * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp + * @q_vector: Pointer to interrupt specific structure + * @va: Pointer to address containing Rx buffer + * @timestamp: Pointer where timestamp will be stored + * + * This function is meant to retrieve a timestamp from the first buffer of an + * incoming frame. The value is stored in little endian format starting on + * byte 8 + * + * Returns: The timestamp header length or 0 if not available + **/ +int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, + ktime_t *timestamp) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct skb_shared_hwtstamps ts; + __le64 *regval = (__le64 *)va; + int adjust = 0; + + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return 0; + + /* The timestamp is recorded in little endian format. 
+ * DWORD: 0 1 2 3 + * Field: Reserved Reserved SYSTIML SYSTIMH + */ + + /* check reserved dwords are zero, be/le doesn't matter for zero */ + if (regval[0]) + return 0; + + igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1])); + + /* adjust timestamp for the RX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_RX_LATENCY_1000; + break; + } + } + + *timestamp = ktime_sub_ns(ts.hwtstamp, adjust); + + return IGB_TS_HDR_LEN; +} + +/** + * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register + * @q_vector: Pointer to interrupt specific structure + * @skb: Buffer containing timestamp and packet + * + * This function is meant to retrieve a timestamp from the internal registers + * of the adapter and store it in the skb. + **/ +void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + int adjust = 0; + u64 regval; + + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return; + + /* If this bit is set, then the RX registers contain the time stamp. No + * other packet will be time stamped until we read these registers, so + * read the registers to make them available again. Because only one + * packet can be time stamped at a time, we know that the register + * values must belong to this one here and therefore we don't need to + * compare any of the additional attributes stored for it. + * + * If nothing went wrong, then it should have a shared tx_flags that we + * can turn into a skb_shared_hwtstamps. + */ + if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + + regval = rd32(E1000_RXSTMPL); + regval |= (u64)rd32(E1000_RXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); + + /* adjust timestamp for the RX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_RX_LATENCY_1000; + break; + } + } + skb_hwtstamps(skb)->hwtstamp = + ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); + + /* Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. + */ + adapter->last_rx_timestamp = jiffies; +} + +/** + * igb_ptp_get_ts_config - get hardware time stamping config + * @netdev: netdev struct + * @ifr: interface struct + * + * Get the hwtstamp_config settings to return to the user. Rather than attempt + * to deconstruct the settings from the registers, just return a shadow copy + * of the last known settings. + **/ +int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/** + * igb_ptp_set_timestamp_mode - setup hardware for timestamping + * @adapter: networking device structure + * @config: hwtstamp configuration + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't case any overhead + * when no packet needs it. 
At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". + */ +static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, + struct hwtstamp_config *config) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + u32 tsync_rx_cfg = 0; + bool is_l4 = false; + bool is_l2 = false; + u32 regval; + + /* reserved for future extensions */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + break; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_NTP_ALL: + case HWTSTAMP_FILTER_ALL: + /* 82576 cannot timestamp all packets, which it needs to do to + * support both V1 Sync and Delay_Req messages + */ + if (hw->mac.type != e1000_82576) { + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + fallthrough; + default: + config->rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + if (hw->mac.type == e1000_82575) { + if (tsync_rx_ctl | tsync_tx_ctl) + return -EINVAL; + return 0; + } + + /* Per-packet timestamping only works if all packets are + * timestamped, so enable timestamping in all packets as + * long as one Rx filter was configured. 
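+	 * (config->rx_filter is rewritten to HWTSTAMP_FILTER_ALL below, so the
+	 * mode that is actually in effect is what gets reported back to user space.)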
+ */ + if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { + tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + is_l2 = true; + is_l4 = true; + + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) { + regval = rd32(E1000_RXPBS); + regval |= E1000_RXPBS_CFG_TS_EN; + wr32(E1000_RXPBS, regval); + } + } + + /* enable/disable TX */ + regval = rd32(E1000_TSYNCTXCTL); + regval &= ~E1000_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; + wr32(E1000_TSYNCTXCTL, regval); + + /* enable/disable RX */ + regval = rd32(E1000_TSYNCRXCTL); + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; + wr32(E1000_TSYNCRXCTL, regval); + + /* define which PTP packets are time stamped */ + wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); + + /* define ethertype filter for timestamped packets */ + if (is_l2) + wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), + (E1000_ETQF_FILTER_ENABLE | /* enable filter */ + E1000_ETQF_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), 0); + + /* L4 Queue Filter[3]: filter by destination port and protocol */ + if (is_l4) { + u32 ftqf = (IPPROTO_UDP /* UDP */ + | E1000_FTQF_VF_BP /* VF not compared */ + | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ + | E1000_FTQF_MASK); /* mask all inputs */ + ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ + + wr32(E1000_IMIR(3), (__force unsigned int)htons(PTP_EV_PORT)); + wr32(E1000_IMIREXT(3), + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); + if (hw->mac.type == e1000_82576) { + /* enable source port check */ + wr32(E1000_SPQF(3), (__force unsigned int)htons(PTP_EV_PORT)); + ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; + } + wr32(E1000_FTQF(3), ftqf); + } else { + wr32(E1000_FTQF(3), E1000_FTQF_MASK); + } + wrfl(); + + /* clear TX/RX time stamp registers, just to be sure */ + regval = rd32(E1000_TXSTMPL); + regval = rd32(E1000_TXSTMPH); + regval = rd32(E1000_RXSTMPL); + regval = rd32(E1000_RXSTMPH); + + return 0; +} + +/** + * igb_ptp_set_ts_config - set hardware time stamping config + * @netdev: netdev struct + * @ifr: interface struct + * + **/ +int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = igb_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * igb_ptp_init - Initialize PTP functionality + * @adapter: Board private structure + * + * This function is called at device probe to initialize the PTP + * functionality. 
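+ * It sets up the ptp_clock_info callbacks for the detected MAC type and
+ * registers the PTP hardware clock with ptp_clock_register().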
+ */ +void igb_ptp_init(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int i; + + switch (hw->mac.type) { + case e1000_82576: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 999999881; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82576; + adapter->ptp_caps.settime64 = igb_ptp_settime_82576; + adapter->ptp_caps.enable = igb_ptp_feature_enable; + adapter->cc.read = igb_ptp_read_82576; + adapter->cc.mask = CYCLECOUNTER_MASK(64); + adapter->cc.mult = 1; + adapter->cc.shift = IGB_82576_TSYNC_SHIFT; + adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK; + break; + case e1000_82580: + case e1000_i354: + case e1000_i350: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82580; + adapter->ptp_caps.settime64 = igb_ptp_settime_82576; + adapter->ptp_caps.enable = igb_ptp_feature_enable; + adapter->cc.read = igb_ptp_read_82580; + adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580); + adapter->cc.mult = 1; + adapter->cc.shift = 0; + adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK; + break; + case e1000_i210: + case e1000_i211: + for (i = 0; i < IGB_N_SDP; i++) { + struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS; + adapter->ptp_caps.n_per_out = IGB_N_PEROUT; + adapter->ptp_caps.n_pins = IGB_N_SDP; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_i210; + adapter->ptp_caps.settime64 = igb_ptp_settime_i210; + adapter->ptp_caps.enable = igb_ptp_feature_enable_i210; + adapter->ptp_caps.verify = igb_ptp_verify_pin; + break; + default: + adapter->ptp_clock = NULL; + return; + } + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + INIT_DELAYED_WORK(&adapter->ptp_overflow_work, + igb_ptp_overflow_check); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + igb_ptp_reset(adapter); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n"); + } else if (adapter->ptp_clock) { + dev_info(&adapter->pdev->dev, "added PHC on %s\n", + adapter->netdev->name); + adapter->ptp_flags |= IGB_PTP_ENABLED; + } +} + +/** + * igb_ptp_suspend - Disable PTP work items and prepare for suspend + * @adapter: Board private structure + * + * This function stops the overflow check work and PTP Tx timestamp work, and + * 
will prepare the device for OS suspend. + */ +void igb_ptp_suspend(struct igb_adapter *adapter) +{ + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return; + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + cancel_delayed_work_sync(&adapter->ptp_overflow_work); + + cancel_work_sync(&adapter->ptp_tx_work); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + } +} + +/** + * igb_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. + **/ +void igb_ptp_stop(struct igb_adapter *adapter) +{ + igb_ptp_suspend(adapter); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + dev_info(&adapter->pdev->dev, "removed PHC on %s\n", + adapter->netdev->name); + adapter->ptp_flags &= ~IGB_PTP_ENABLED; + } +} + +/** + * igb_ptp_reset - Re-enable the adapter for PTP following a reset. + * @adapter: Board private structure. + * + * This function handles the reset work required to re-enable the PTP device. + **/ +void igb_ptp_reset(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long flags; + + /* reset the tstamp_config */ + igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + switch (adapter->hw.mac.type) { + case e1000_82576: + /* Dial the nominal frequency. */ + wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); + break; + case e1000_82580: + case e1000_i354: + case e1000_i350: + case e1000_i210: + case e1000_i211: + wr32(E1000_TSAUXC, 0x0); + wr32(E1000_TSSDP, 0x0); + wr32(E1000_TSIM, + TSYNC_INTERRUPTS | + (adapter->pps_sys_wrap_on ? TSINTR_SYS_WRAP : 0)); + wr32(E1000_IMS, E1000_IMS_TS); + break; + default: + /* No work to do. */ + goto out; + } + + /* Re-initialize the timer. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { + struct timespec64 ts = ktime_to_timespec64(ktime_get_real()); + + igb_ptp_write_i210(adapter, &ts); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } +out: + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + wrfl(); + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + schedule_delayed_work(&adapter->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); +} diff --git a/devices/igb/igb_ptp-6.1-ethercat.c b/devices/igb/igb_ptp-6.1-ethercat.c new file mode 100644 index 00000000..ae970e21 --- /dev/null +++ b/devices/igb/igb_ptp-6.1-ethercat.c @@ -0,0 +1,1544 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2011 Richard Cochran */ + +#include +#include +#include +#include + +#include "igb-6.1-ethercat.h" + +#define INCVALUE_MASK 0x7fffffff +#define ISGN 0x80000000 + +/* The 82580 timesync updates the system timer every 8ns by 8ns, + * and this update value cannot be reprogrammed. + * + * Neither the 82576 nor the 82580 offer registers wide enough to hold + * nanoseconds time values for very long. For the 82580, SYSTIM always + * counts nanoseconds, but the upper 24 bits are not available. The + * frequency is adjusted by changing the 32 bit fractional nanoseconds + * register, TIMINCA. + * + * For the 82576, the SYSTIM register time unit is affect by the + * choice of the 24 bit TININCA:IV (incvalue) field. Five bits of this + * field are needed to provide the nominal 16 nanosecond period, + * leaving 19 bits for fractional nanoseconds. 
+ * + * We scale the NIC clock cycle by a large factor so that relatively + * small clock corrections can be added or subtracted at each clock + * tick. The drawbacks of a large factor are a) that the clock + * register overflows more quickly (not such a big deal) and b) that + * the increment per tick has to fit into 24 bits. As a result we + * need to use a shift of 19 so we can fit a value of 16 into the + * TIMINCA register. + * + * + * SYSTIMH SYSTIML + * +--------------+ +---+---+------+ + * 82576 | 32 | | 8 | 5 | 19 | + * +--------------+ +---+---+------+ + * \________ 45 bits _______/ fract + * + * +----------+---+ +--------------+ + * 82580 | 24 | 8 | | 32 | + * +----------+---+ +--------------+ + * reserved \______ 40 bits _____/ + * + * + * The 45 bit 82576 SYSTIM overflows every + * 2^45 * 10^-9 / 3600 = 9.77 hours. + * + * The 40 bit 82580 SYSTIM overflows every + * 2^40 * 10^-9 / 60 = 18.3 minutes. + * + * SYSTIM is converted to real time using a timecounter. As + * timecounter_cyc2time() allows old timestamps, the timecounter needs + * to be updated at least once per half of the SYSTIM interval. + * Scheduling of delayed work is not very accurate, and also the NIC + * clock can be adjusted to run up to 6% faster and the system clock + * up to 10% slower, so we aim for 6 minutes to be sure the actual + * interval in the NIC time is shorter than 9.16 minutes. + */ + +#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 6) +#define IGB_PTP_TX_TIMEOUT (HZ * 15) +#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT) +#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0) +#define INCVALUE_82576 (16u << IGB_82576_TSYNC_SHIFT) +#define IGB_NBITS_82580 40 + +static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); +static void igb_ptp_sdp_init(struct igb_adapter *adapter); + +/* SYSTIM read access for the 82576 */ +static u64 igb_ptp_read_82576(const struct cyclecounter *cc) +{ + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); + struct e1000_hw *hw = &igb->hw; + u64 val; + u32 lo, hi; + + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; + + return val; +} + +/* SYSTIM read access for the 82580 */ +static u64 igb_ptp_read_82580(const struct cyclecounter *cc) +{ + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); + struct e1000_hw *hw = &igb->hw; + u32 lo, hi; + u64 val; + + /* The timestamp latches on lowest register read. For the 82580 + * the lowest register is SYSTIMR instead of SYSTIML. However we only + * need to provide nanosecond resolution, so we just ignore it. + */ + rd32(E1000_SYSTIMR); + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; + + return val; +} + +/* SYSTIM read access for I210/I211 */ +static void igb_ptp_read_i210(struct igb_adapter *adapter, + struct timespec64 *ts) +{ + struct e1000_hw *hw = &adapter->hw; + u32 sec, nsec; + + /* The timestamp latches on lowest register read. For I210/I211, the + * lowest register is SYSTIMR. Since we only need to provide nanosecond + * resolution, we can ignore it. + */ + rd32(E1000_SYSTIMR); + nsec = rd32(E1000_SYSTIML); + sec = rd32(E1000_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} + +static void igb_ptp_write_i210(struct igb_adapter *adapter, + const struct timespec64 *ts) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Writing the SYSTIMR register is not necessary as it only provides + * sub-nanosecond resolution. 
+ */ + wr32(E1000_SYSTIML, ts->tv_nsec); + wr32(E1000_SYSTIMH, (u32)ts->tv_sec); +} + +/** + * igb_ptp_systim_to_hwtstamp - convert system time value to hw timestamp + * @adapter: board private structure + * @hwtstamps: timestamp structure to update + * @systim: unsigned 64bit system time value. + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions. + * + * The 'tmreg_lock' spinlock is used to protect the consistency of the + * system time value. This is needed because reading the 64 bit time + * value involves reading two (or three) 32 bit registers. The first + * read latches the value. Ditto for writing. + * + * In addition, here have extended the system time with an overflow + * counter in software. + **/ +static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + unsigned long flags; + u64 ns; + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + + switch (adapter->hw.mac.type) { + case e1000_82576: + case e1000_82580: + case e1000_i354: + case e1000_i350: + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->tc, systim); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + hwtstamps->hwtstamp = ns_to_ktime(ns); + break; + case e1000_i210: + case e1000_i211: + /* Upper 32 bits contain s, lower 32 bits contain ns. */ + hwtstamps->hwtstamp = ktime_set(systim >> 32, + systim & 0xFFFFFFFF); + break; + default: + break; + } +} + +/* PTP clock operations */ +static int igb_ptp_adjfine_82576(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + int neg_adj = 0; + u64 rate; + u32 incvalue; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + + incvalue = INCVALUE_82576; + rate = mul_u64_u64_div_u64(incvalue, (u64)scaled_ppm, + 1000000ULL << 16); + + if (neg_adj) + incvalue -= rate; + else + incvalue += rate; + + wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK)); + + return 0; +} + +static int igb_ptp_adjfine_82580(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + int neg_adj = 0; + u64 rate; + u32 inca; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + rate = scaled_ppm; + rate <<= 13; + rate = div_u64(rate, 15625); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + wr32(E1000_TIMINCA, inca); + + return 0; +} + +static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + timecounter_adjtime(&igb->tc, delta); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + struct timespec64 now, then = ns_to_timespec64(delta); + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_read_i210(igb, &now); + now = timespec64_add(now, then); + igb_ptp_write_i210(igb, (const struct timespec64 *)&now); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_gettimex_82576(struct ptp_clock_info *ptp, + 
struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + unsigned long flags; + u32 lo, hi; + u64 ns; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ptp_read_system_prets(sts); + lo = rd32(E1000_SYSTIML); + ptp_read_system_postts(sts); + hi = rd32(E1000_SYSTIMH); + + ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int igb_ptp_gettimex_82580(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + unsigned long flags; + u32 lo, hi; + u64 ns; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ptp_read_system_prets(sts); + rd32(E1000_SYSTIMR); + ptp_read_system_postts(sts); + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int igb_ptp_gettimex_i210(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ptp_read_system_prets(sts); + rd32(E1000_SYSTIMR); + ptp_read_system_postts(sts); + ts->tv_nsec = rd32(E1000_SYSTIML); + ts->tv_sec = rd32(E1000_SYSTIMH); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_settime_82576(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + u64 ns; + + ns = timespec64_to_ns(ts); + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + timecounter_init(&igb->tc, &igb->cc, ns); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_settime_i210(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_write_i210(igb, ts); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) +{ + u32 *ptr = pin < 2 ? ctrl : ctrl_ext; + static const u32 mask[IGB_N_SDP] = { + E1000_CTRL_SDP0_DIR, + E1000_CTRL_SDP1_DIR, + E1000_CTRL_EXT_SDP2_DIR, + E1000_CTRL_EXT_SDP3_DIR, + }; + + if (input) + *ptr &= ~mask[pin]; + else + *ptr |= mask[pin]; +} + +static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin) +{ + static const u32 aux0_sel_sdp[IGB_N_SDP] = { + AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, + }; + static const u32 aux1_sel_sdp[IGB_N_SDP] = { + AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, + }; + static const u32 ts_sdp_en[IGB_N_SDP] = { + TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, + }; + struct e1000_hw *hw = &igb->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(E1000_CTRL); + ctrl_ext = rd32(E1000_CTRL_EXT); + tssdp = rd32(E1000_TSSDP); + + igb_pin_direction(pin, 1, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an output. 
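+	 * (igb_pin_perout() below does the converse and clears the AUXn_TS_SDP_EN
+	 * input selection before driving the pin as an output.)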
*/ + tssdp &= ~ts_sdp_en[pin]; + + if (chan == 1) { + tssdp &= ~AUX1_SEL_SDP3; + tssdp |= aux1_sel_sdp[pin] | AUX1_TS_SDP_EN; + } else { + tssdp &= ~AUX0_SEL_SDP3; + tssdp |= aux0_sel_sdp[pin] | AUX0_TS_SDP_EN; + } + + wr32(E1000_TSSDP, tssdp); + wr32(E1000_CTRL, ctrl); + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq) +{ + static const u32 aux0_sel_sdp[IGB_N_SDP] = { + AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, + }; + static const u32 aux1_sel_sdp[IGB_N_SDP] = { + AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, + }; + static const u32 ts_sdp_en[IGB_N_SDP] = { + TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, + }; + static const u32 ts_sdp_sel_tt0[IGB_N_SDP] = { + TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0, + TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0, + }; + static const u32 ts_sdp_sel_tt1[IGB_N_SDP] = { + TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1, + TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1, + }; + static const u32 ts_sdp_sel_fc0[IGB_N_SDP] = { + TS_SDP0_SEL_FC0, TS_SDP1_SEL_FC0, + TS_SDP2_SEL_FC0, TS_SDP3_SEL_FC0, + }; + static const u32 ts_sdp_sel_fc1[IGB_N_SDP] = { + TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, + TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, + }; + static const u32 ts_sdp_sel_clr[IGB_N_SDP] = { + TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, + TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, + }; + struct e1000_hw *hw = &igb->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(E1000_CTRL); + ctrl_ext = rd32(E1000_CTRL_EXT); + tssdp = rd32(E1000_TSSDP); + + igb_pin_direction(pin, 0, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an input. */ + if ((tssdp & AUX0_SEL_SDP3) == aux0_sel_sdp[pin]) + tssdp &= ~AUX0_TS_SDP_EN; + + if ((tssdp & AUX1_SEL_SDP3) == aux1_sel_sdp[pin]) + tssdp &= ~AUX1_TS_SDP_EN; + + tssdp &= ~ts_sdp_sel_clr[pin]; + if (freq) { + if (chan == 1) + tssdp |= ts_sdp_sel_fc1[pin]; + else + tssdp |= ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= ts_sdp_sel_tt1[pin]; + else + tssdp |= ts_sdp_sel_tt0[pin]; + } + tssdp |= ts_sdp_en[pin]; + + wr32(E1000_TSSDP, tssdp); + wr32(E1000_CTRL, ctrl); + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct igb_adapter *igb = + container_of(ptp, struct igb_adapter, ptp_caps); + u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, systiml, + systimh, level_mask, level, rem; + struct e1000_hw *hw = &igb->hw; + struct timespec64 ts, start; + unsigned long flags; + u64 systim, now; + int pin = -1; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = TSAUXC_EN_TS1; + tsim_mask = TSINTR_AUTT1; + } else { + tsauxc_mask = TSAUXC_EN_TS0; + tsim_mask = TSINTR_AUTT0; + } + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsauxc = rd32(E1000_TSAUXC); + tsim = rd32(E1000_TSIM); + if (on) { + igb_pin_extts(igb, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(E1000_TSAUXC, tsauxc); + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if 
(rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && ns < 8LL) + return -EINVAL; + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + tsauxc_mask = TSAUXC_EN_TT1; + tsim_mask = TSINTR_TT1; + trgttiml = E1000_TRGTTIML1; + trgttimh = E1000_TRGTTIMH1; + } else { + tsauxc_mask = TSAUXC_EN_TT0; + tsim_mask = TSINTR_TT0; + trgttiml = E1000_TRGTTIML0; + trgttimh = E1000_TRGTTIMH0; + } + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsauxc = rd32(E1000_TSAUXC); + tsim = rd32(E1000_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1); + tsim &= ~TSINTR_TT1; + } else { + tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0); + tsim &= ~TSINTR_TT0; + } + if (on) { + int i = rq->perout.index; + + /* read systim registers in sequence */ + rd32(E1000_SYSTIMR); + systiml = rd32(E1000_SYSTIML); + systimh = rd32(E1000_SYSTIMH); + systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml); + now = timecounter_cyc2time(&igb->tc, systim); + + if (pin < 2) { + level_mask = (i == 1) ? 0x80000 : 0x40000; + level = (rd32(E1000_CTRL) & level_mask) ? 1 : 0; + } else { + level_mask = (i == 1) ? 0x80 : 0x40; + level = (rd32(E1000_CTRL_EXT) & level_mask) ? 1 : 0; + } + + div_u64_rem(now, ns, &rem); + systim = systim + (ns - rem); + + /* synchronize pin level with rising/falling edges */ + div_u64_rem(now, ns << 1, &rem); + if (rem < ns) { + /* first half of period */ + if (level == 0) { + /* output is already low, skip this period */ + systim += ns; + } + } else { + /* second half of period */ + if (level == 1) { + /* output is already high, skip this period */ + systim += ns; + } + } + + start = ns_to_timespec64(systim + (ns - rem)); + igb_pin_perout(igb, i, pin, 0); + igb->perout[i].start.tv_sec = start.tv_sec; + igb->perout[i].start.tv_nsec = start.tv_nsec; + igb->perout[i].period.tv_sec = ts.tv_sec; + igb->perout[i].period.tv_nsec = ts.tv_nsec; + + wr32(trgttiml, (u32)systim); + wr32(trgttimh, ((u32)(systim >> 32)) & 0xFF); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(E1000_TSAUXC, tsauxc); + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct igb_adapter *igb = + container_of(ptp, struct igb_adapter, ptp_caps); + struct e1000_hw *hw = &igb->hw; + u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; + unsigned long flags; + struct timespec64 ts; + int use_freq = 0, pin = -1; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests failing to enable both edges. 
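+		 * (PTP_EXTTS_EDGES is PTP_RISING_EDGE | PTP_FALLING_EDGE, so a
+		 * strict request that asks for only one of the two edges is rejected.)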
*/ + if ((rq->extts.flags & PTP_STRICT_FLAGS) && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = TSAUXC_EN_TS1; + tsim_mask = TSINTR_AUTT1; + } else { + tsauxc_mask = TSAUXC_EN_TS0; + tsim_mask = TSINTR_AUTT0; + } + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsauxc = rd32(E1000_TSAUXC); + tsim = rd32(E1000_TSIM); + if (on) { + igb_pin_extts(igb, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(E1000_TSAUXC, tsauxc); + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && ((ns <= 70000000LL) || (ns == 125000000LL) || + (ns == 250000000LL) || (ns == 500000000LL))) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; + } + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + if (use_freq) { + tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1; + tsim_mask = 0; + } else { + tsauxc_mask = TSAUXC_EN_TT1; + tsim_mask = TSINTR_TT1; + } + trgttiml = E1000_TRGTTIML1; + trgttimh = E1000_TRGTTIMH1; + freqout = E1000_FREQOUT1; + } else { + if (use_freq) { + tsauxc_mask = TSAUXC_EN_CLK0 | TSAUXC_ST0; + tsim_mask = 0; + } else { + tsauxc_mask = TSAUXC_EN_TT0; + tsim_mask = TSINTR_TT0; + } + trgttiml = E1000_TRGTTIML0; + trgttimh = E1000_TRGTTIMH0; + freqout = E1000_FREQOUT0; + } + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsauxc = rd32(E1000_TSAUXC); + tsim = rd32(E1000_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1); + tsim &= ~TSINTR_TT1; + } else { + tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0); + tsim &= ~TSINTR_TT0; + } + if (on) { + int i = rq->perout.index; + igb_pin_perout(igb, i, pin, use_freq); + igb->perout[i].start.tv_sec = rq->perout.start.sec; + igb->perout[i].start.tv_nsec = rq->perout.start.nsec; + igb->perout[i].period.tv_sec = ts.tv_sec; + igb->perout[i].period.tv_nsec = ts.tv_nsec; + wr32(trgttimh, rq->perout.start.sec); + wr32(trgttiml, rq->perout.start.nsec); + if (use_freq) + wr32(freqout, ns); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(E1000_TSAUXC, tsauxc); + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsim = rd32(E1000_TSIM); + if (on) + tsim |= TSINTR_SYS_WRAP; + else + tsim &= ~TSINTR_SYS_WRAP; + igb->pps_sys_wrap_on = !!on; + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + } + + return -EOPNOTSUPP; +} + +static int igb_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +static int igb_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +/** + * 
igb_ptp_tx_work + * @work: pointer to work struct + * + * This work function polls the TSYNCTXCTL valid bit to determine when a + * timestamp has been taken for the current stored skb. + **/ +static void igb_ptp_tx_work(struct work_struct *work) +{ + struct igb_adapter *adapter = container_of(work, struct igb_adapter, + ptp_tx_work); + struct e1000_hw *hw = &adapter->hw; + u32 tsynctxctl; + + if (!adapter->ptp_tx_skb) + return; + + if (time_is_before_jiffies(adapter->ptp_tx_start + + IGB_PTP_TX_TIMEOUT)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + adapter->tx_hwtstamp_timeouts++; + /* Clear the tx valid bit in TSYNCTXCTL register to enable + * interrupt + */ + rd32(E1000_TXSTMPH); + dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); + return; + } + + tsynctxctl = rd32(E1000_TSYNCTXCTL); + if (tsynctxctl & E1000_TSYNCTXCTL_VALID) + igb_ptp_tx_hwtstamp(adapter); + else + /* reschedule to check later */ + schedule_work(&adapter->ptp_tx_work); +} + +static void igb_ptp_overflow_check(struct work_struct *work) +{ + struct igb_adapter *igb = + container_of(work, struct igb_adapter, ptp_overflow_work.work); + struct timespec64 ts; + u64 ns; + + /* Update the timecounter */ + ns = timecounter_read(&igb->tc); + + ts = ns_to_timespec64(ns); + pr_debug("igb overflow check at %lld.%09lu\n", + (long long) ts.tv_sec, ts.tv_nsec); + + schedule_delayed_work(&igb->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); +} + +/** + * igb_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure + * + * This watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. + **/ +void igb_ptp_rx_hang(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); + unsigned long rx_event; + + /* Other hardware uses per-packet timestamps */ + if (hw->mac.type != e1000_82576) + return; + + /* If we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & E1000_TSYNCRXCTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; + } + + /* Determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + if (time_after(adapter->last_rx_timestamp, rx_event)) + rx_event = adapter->last_rx_timestamp; + + /* Only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(E1000_RXSTMPH); + adapter->last_rx_ptp_check = jiffies; + adapter->rx_hwtstamp_cleared++; + dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n"); + } +} + +/** + * igb_ptp_tx_hang - detect error case where Tx timestamp never finishes + * @adapter: private network adapter structure + */ +void igb_ptp_tx_hang(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + IGB_PTP_TX_TIMEOUT); + + if (!adapter->ptp_tx_skb) + return; + + if (!test_bit(__IGB_PTP_TX_IN_PROGRESS, &adapter->state)) + return; + + /* If we haven't received a timestamp within the timeout, it is + * reasonable to assume that it will never occur, so we can unlock the + * timestamp bit when this occurs. 
+ */ + if (timeout) { + cancel_work_sync(&adapter->ptp_tx_work); + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + adapter->tx_hwtstamp_timeouts++; + /* Clear the tx valid bit in TSYNCTXCTL register to enable + * interrupt + */ + rd32(E1000_TXSTMPH); + dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); + } +} + +/** + * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: Board private structure. + * + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we only + * allow only one such packet into the queue. + **/ +static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) +{ + struct sk_buff *skb = adapter->ptp_tx_skb; + struct e1000_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval; + int adjust = 0; + + regval = rd32(E1000_TXSTMPL); + regval |= (u64)rd32(E1000_TXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + /* adjust timestamp for the TX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_TX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_TX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_TX_LATENCY_1000; + break; + } + } + + shhwtstamps.hwtstamp = + ktime_add_ns(shhwtstamps.hwtstamp, adjust); + + /* Clear the lock early before calling skb_tstamp_tx so that + * applications are not woken up before the lock bit is clear. We use + * a copy of the skb pointer to ensure other threads can't change it + * while we're notifying the stack. + */ + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + + /* Notify the stack and free the skb after we've unlocked */ + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); +} + +/** + * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp + * @q_vector: Pointer to interrupt specific structure + * @va: Pointer to address containing Rx buffer + * @timestamp: Pointer where timestamp will be stored + * + * This function is meant to retrieve a timestamp from the first buffer of an + * incoming frame. The value is stored in little endian format starting on + * byte 8 + * + * Returns: The timestamp header length or 0 if not available + **/ +int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, + ktime_t *timestamp) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct skb_shared_hwtstamps ts; + __le64 *regval = (__le64 *)va; + int adjust = 0; + + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return 0; + + /* The timestamp is recorded in little endian format. 
+ * DWORD: 0 1 2 3 + * Field: Reserved Reserved SYSTIML SYSTIMH + */ + + /* check reserved dwords are zero, be/le doesn't matter for zero */ + if (regval[0]) + return 0; + + igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1])); + + /* adjust timestamp for the RX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_RX_LATENCY_1000; + break; + } + } + + *timestamp = ktime_sub_ns(ts.hwtstamp, adjust); + + return IGB_TS_HDR_LEN; +} + +/** + * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register + * @q_vector: Pointer to interrupt specific structure + * @skb: Buffer containing timestamp and packet + * + * This function is meant to retrieve a timestamp from the internal registers + * of the adapter and store it in the skb. + **/ +void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + int adjust = 0; + u64 regval; + + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return; + + /* If this bit is set, then the RX registers contain the time stamp. No + * other packet will be time stamped until we read these registers, so + * read the registers to make them available again. Because only one + * packet can be time stamped at a time, we know that the register + * values must belong to this one here and therefore we don't need to + * compare any of the additional attributes stored for it. + * + * If nothing went wrong, then it should have a shared tx_flags that we + * can turn into a skb_shared_hwtstamps. + */ + if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + + regval = rd32(E1000_RXSTMPL); + regval |= (u64)rd32(E1000_RXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); + + /* adjust timestamp for the RX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_RX_LATENCY_1000; + break; + } + } + skb_hwtstamps(skb)->hwtstamp = + ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); + + /* Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. + */ + adapter->last_rx_timestamp = jiffies; +} + +/** + * igb_ptp_get_ts_config - get hardware time stamping config + * @netdev: netdev struct + * @ifr: interface struct + * + * Get the hwtstamp_config settings to return to the user. Rather than attempt + * to deconstruct the settings from the registers, just return a shadow copy + * of the last known settings. + **/ +int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/** + * igb_ptp_set_timestamp_mode - setup hardware for timestamping + * @adapter: networking device structure + * @config: hwtstamp configuration + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't case any overhead + * when no packet needs it. 
At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". + */ +static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, + struct hwtstamp_config *config) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + u32 tsync_rx_cfg = 0; + bool is_l4 = false; + bool is_l2 = false; + u32 regval; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + break; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_NTP_ALL: + case HWTSTAMP_FILTER_ALL: + /* 82576 cannot timestamp all packets, which it needs to do to + * support both V1 Sync and Delay_Req messages + */ + if (hw->mac.type != e1000_82576) { + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + fallthrough; + default: + config->rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + if (hw->mac.type == e1000_82575) { + if (tsync_rx_ctl | tsync_tx_ctl) + return -EINVAL; + return 0; + } + + /* Per-packet timestamping only works if all packets are + * timestamped, so enable timestamping in all packets as + * long as one Rx filter was configured. 
+ */ + if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { + tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + is_l2 = true; + is_l4 = true; + + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) { + regval = rd32(E1000_RXPBS); + regval |= E1000_RXPBS_CFG_TS_EN; + wr32(E1000_RXPBS, regval); + } + } + + /* enable/disable TX */ + regval = rd32(E1000_TSYNCTXCTL); + regval &= ~E1000_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; + wr32(E1000_TSYNCTXCTL, regval); + + /* enable/disable RX */ + regval = rd32(E1000_TSYNCRXCTL); + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; + wr32(E1000_TSYNCRXCTL, regval); + + /* define which PTP packets are time stamped */ + wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); + + /* define ethertype filter for timestamped packets */ + if (is_l2) + wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), + (E1000_ETQF_FILTER_ENABLE | /* enable filter */ + E1000_ETQF_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), 0); + + /* L4 Queue Filter[3]: filter by destination port and protocol */ + if (is_l4) { + u32 ftqf = (IPPROTO_UDP /* UDP */ + | E1000_FTQF_VF_BP /* VF not compared */ + | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ + | E1000_FTQF_MASK); /* mask all inputs */ + ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ + + wr32(E1000_IMIR(3), (__force unsigned int)htons(PTP_EV_PORT)); + wr32(E1000_IMIREXT(3), + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); + if (hw->mac.type == e1000_82576) { + /* enable source port check */ + wr32(E1000_SPQF(3), (__force unsigned int)htons(PTP_EV_PORT)); + ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; + } + wr32(E1000_FTQF(3), ftqf); + } else { + wr32(E1000_FTQF(3), E1000_FTQF_MASK); + } + wrfl(); + + /* clear TX/RX time stamp registers, just to be sure */ + regval = rd32(E1000_TXSTMPL); + regval = rd32(E1000_TXSTMPH); + regval = rd32(E1000_RXSTMPL); + regval = rd32(E1000_RXSTMPH); + + return 0; +} + +/** + * igb_ptp_set_ts_config - set hardware time stamping config + * @netdev: netdev struct + * @ifr: interface struct + * + **/ +int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = igb_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * igb_ptp_init - Initialize PTP functionality + * @adapter: Board private structure + * + * This function is called at device probe to initialize the PTP + * functionality. 
+ */ +void igb_ptp_init(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + switch (hw->mac.type) { + case e1000_82576: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 999999881; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfine = igb_ptp_adjfine_82576; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82576; + adapter->ptp_caps.settime64 = igb_ptp_settime_82576; + adapter->ptp_caps.enable = igb_ptp_feature_enable; + adapter->cc.read = igb_ptp_read_82576; + adapter->cc.mask = CYCLECOUNTER_MASK(64); + adapter->cc.mult = 1; + adapter->cc.shift = IGB_82576_TSYNC_SHIFT; + adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK; + break; + case e1000_82580: + case e1000_i354: + case e1000_i350: + igb_ptp_sdp_init(adapter); + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS; + adapter->ptp_caps.n_per_out = IGB_N_PEROUT; + adapter->ptp_caps.n_pins = IGB_N_SDP; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82580; + adapter->ptp_caps.settime64 = igb_ptp_settime_82576; + adapter->ptp_caps.enable = igb_ptp_feature_enable_82580; + adapter->ptp_caps.verify = igb_ptp_verify_pin; + adapter->cc.read = igb_ptp_read_82580; + adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580); + adapter->cc.mult = 1; + adapter->cc.shift = 0; + adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK; + break; + case e1000_i210: + case e1000_i211: + igb_ptp_sdp_init(adapter); + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS; + adapter->ptp_caps.n_per_out = IGB_N_PEROUT; + adapter->ptp_caps.n_pins = IGB_N_SDP; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_i210; + adapter->ptp_caps.settime64 = igb_ptp_settime_i210; + adapter->ptp_caps.enable = igb_ptp_feature_enable_i210; + adapter->ptp_caps.verify = igb_ptp_verify_pin; + break; + default: + adapter->ptp_clock = NULL; + return; + } + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + INIT_DELAYED_WORK(&adapter->ptp_overflow_work, + igb_ptp_overflow_check); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + igb_ptp_reset(adapter); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n"); + } else if (adapter->ptp_clock) { + dev_info(&adapter->pdev->dev, "added PHC on %s\n", + adapter->netdev->name); + adapter->ptp_flags |= IGB_PTP_ENABLED; + } +} + +/** + * igb_ptp_sdp_init - utility function which inits the SDP config structs + * @adapter: Board private structure. 
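+ *
+ * Each of the IGB_N_SDP software-definable pins is named "SDPn" and starts
+ * out unassigned (PTP_PF_NONE).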
+ **/ +void igb_ptp_sdp_init(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < IGB_N_SDP; i++) { + struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } +} + +/** + * igb_ptp_suspend - Disable PTP work items and prepare for suspend + * @adapter: Board private structure + * + * This function stops the overflow check work and PTP Tx timestamp work, and + * will prepare the device for OS suspend. + */ +void igb_ptp_suspend(struct igb_adapter *adapter) +{ + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return; + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + cancel_delayed_work_sync(&adapter->ptp_overflow_work); + + cancel_work_sync(&adapter->ptp_tx_work); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + } +} + +/** + * igb_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. + **/ +void igb_ptp_stop(struct igb_adapter *adapter) +{ + igb_ptp_suspend(adapter); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + dev_info(&adapter->pdev->dev, "removed PHC on %s\n", + adapter->netdev->name); + adapter->ptp_flags &= ~IGB_PTP_ENABLED; + } +} + +/** + * igb_ptp_reset - Re-enable the adapter for PTP following a reset. + * @adapter: Board private structure. + * + * This function handles the reset work required to re-enable the PTP device. + **/ +void igb_ptp_reset(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long flags; + + /* reset the tstamp_config */ + igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + switch (adapter->hw.mac.type) { + case e1000_82576: + /* Dial the nominal frequency. */ + wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); + break; + case e1000_82580: + case e1000_i354: + case e1000_i350: + case e1000_i210: + case e1000_i211: + wr32(E1000_TSAUXC, 0x0); + wr32(E1000_TSSDP, 0x0); + wr32(E1000_TSIM, + TSYNC_INTERRUPTS | + (adapter->pps_sys_wrap_on ? TSINTR_SYS_WRAP : 0)); + wr32(E1000_IMS, E1000_IMS_TS); + break; + default: + /* No work to do. */ + goto out; + } + + /* Re-initialize the timer. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { + struct timespec64 ts = ktime_to_timespec64(ktime_get_real()); + + igb_ptp_write_i210(adapter, &ts); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } +out: + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + wrfl(); + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + schedule_delayed_work(&adapter->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); +} diff --git a/devices/igb/igb_ptp-6.1-orig.c b/devices/igb/igb_ptp-6.1-orig.c new file mode 100644 index 00000000..15e57460 --- /dev/null +++ b/devices/igb/igb_ptp-6.1-orig.c @@ -0,0 +1,1544 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2011 Richard Cochran */ + +#include +#include +#include +#include + +#include "igb.h" + +#define INCVALUE_MASK 0x7fffffff +#define ISGN 0x80000000 + +/* The 82580 timesync updates the system timer every 8ns by 8ns, + * and this update value cannot be reprogrammed. + * + * Neither the 82576 nor the 82580 offer registers wide enough to hold + * nanoseconds time values for very long. 
For the 82580, SYSTIM always + * counts nanoseconds, but the upper 24 bits are not available. The + * frequency is adjusted by changing the 32 bit fractional nanoseconds + * register, TIMINCA. + * + * For the 82576, the SYSTIM register time unit is affect by the + * choice of the 24 bit TININCA:IV (incvalue) field. Five bits of this + * field are needed to provide the nominal 16 nanosecond period, + * leaving 19 bits for fractional nanoseconds. + * + * We scale the NIC clock cycle by a large factor so that relatively + * small clock corrections can be added or subtracted at each clock + * tick. The drawbacks of a large factor are a) that the clock + * register overflows more quickly (not such a big deal) and b) that + * the increment per tick has to fit into 24 bits. As a result we + * need to use a shift of 19 so we can fit a value of 16 into the + * TIMINCA register. + * + * + * SYSTIMH SYSTIML + * +--------------+ +---+---+------+ + * 82576 | 32 | | 8 | 5 | 19 | + * +--------------+ +---+---+------+ + * \________ 45 bits _______/ fract + * + * +----------+---+ +--------------+ + * 82580 | 24 | 8 | | 32 | + * +----------+---+ +--------------+ + * reserved \______ 40 bits _____/ + * + * + * The 45 bit 82576 SYSTIM overflows every + * 2^45 * 10^-9 / 3600 = 9.77 hours. + * + * The 40 bit 82580 SYSTIM overflows every + * 2^40 * 10^-9 / 60 = 18.3 minutes. + * + * SYSTIM is converted to real time using a timecounter. As + * timecounter_cyc2time() allows old timestamps, the timecounter needs + * to be updated at least once per half of the SYSTIM interval. + * Scheduling of delayed work is not very accurate, and also the NIC + * clock can be adjusted to run up to 6% faster and the system clock + * up to 10% slower, so we aim for 6 minutes to be sure the actual + * interval in the NIC time is shorter than 9.16 minutes. + */ + +#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 6) +#define IGB_PTP_TX_TIMEOUT (HZ * 15) +#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT) +#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0) +#define INCVALUE_82576 (16u << IGB_82576_TSYNC_SHIFT) +#define IGB_NBITS_82580 40 + +static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); +static void igb_ptp_sdp_init(struct igb_adapter *adapter); + +/* SYSTIM read access for the 82576 */ +static u64 igb_ptp_read_82576(const struct cyclecounter *cc) +{ + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); + struct e1000_hw *hw = &igb->hw; + u64 val; + u32 lo, hi; + + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; + + return val; +} + +/* SYSTIM read access for the 82580 */ +static u64 igb_ptp_read_82580(const struct cyclecounter *cc) +{ + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); + struct e1000_hw *hw = &igb->hw; + u32 lo, hi; + u64 val; + + /* The timestamp latches on lowest register read. For the 82580 + * the lowest register is SYSTIMR instead of SYSTIML. However we only + * need to provide nanosecond resolution, so we just ignore it. + */ + rd32(E1000_SYSTIMR); + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; + + return val; +} + +/* SYSTIM read access for I210/I211 */ +static void igb_ptp_read_i210(struct igb_adapter *adapter, + struct timespec64 *ts) +{ + struct e1000_hw *hw = &adapter->hw; + u32 sec, nsec; + + /* The timestamp latches on lowest register read. For I210/I211, the + * lowest register is SYSTIMR. 
Since we only need to provide nanosecond + * resolution, we can ignore it. + */ + rd32(E1000_SYSTIMR); + nsec = rd32(E1000_SYSTIML); + sec = rd32(E1000_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} + +static void igb_ptp_write_i210(struct igb_adapter *adapter, + const struct timespec64 *ts) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Writing the SYSTIMR register is not necessary as it only provides + * sub-nanosecond resolution. + */ + wr32(E1000_SYSTIML, ts->tv_nsec); + wr32(E1000_SYSTIMH, (u32)ts->tv_sec); +} + +/** + * igb_ptp_systim_to_hwtstamp - convert system time value to hw timestamp + * @adapter: board private structure + * @hwtstamps: timestamp structure to update + * @systim: unsigned 64bit system time value. + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions. + * + * The 'tmreg_lock' spinlock is used to protect the consistency of the + * system time value. This is needed because reading the 64 bit time + * value involves reading two (or three) 32 bit registers. The first + * read latches the value. Ditto for writing. + * + * In addition, here have extended the system time with an overflow + * counter in software. + **/ +static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + unsigned long flags; + u64 ns; + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + + switch (adapter->hw.mac.type) { + case e1000_82576: + case e1000_82580: + case e1000_i354: + case e1000_i350: + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->tc, systim); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + hwtstamps->hwtstamp = ns_to_ktime(ns); + break; + case e1000_i210: + case e1000_i211: + /* Upper 32 bits contain s, lower 32 bits contain ns. 
*/ + hwtstamps->hwtstamp = ktime_set(systim >> 32, + systim & 0xFFFFFFFF); + break; + default: + break; + } +} + +/* PTP clock operations */ +static int igb_ptp_adjfine_82576(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + int neg_adj = 0; + u64 rate; + u32 incvalue; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + + incvalue = INCVALUE_82576; + rate = mul_u64_u64_div_u64(incvalue, (u64)scaled_ppm, + 1000000ULL << 16); + + if (neg_adj) + incvalue -= rate; + else + incvalue += rate; + + wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK)); + + return 0; +} + +static int igb_ptp_adjfine_82580(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + int neg_adj = 0; + u64 rate; + u32 inca; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + rate = scaled_ppm; + rate <<= 13; + rate = div_u64(rate, 15625); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + wr32(E1000_TIMINCA, inca); + + return 0; +} + +static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + timecounter_adjtime(&igb->tc, delta); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + struct timespec64 now, then = ns_to_timespec64(delta); + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_read_i210(igb, &now); + now = timespec64_add(now, then); + igb_ptp_write_i210(igb, (const struct timespec64 *)&now); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_gettimex_82576(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + unsigned long flags; + u32 lo, hi; + u64 ns; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ptp_read_system_prets(sts); + lo = rd32(E1000_SYSTIML); + ptp_read_system_postts(sts); + hi = rd32(E1000_SYSTIMH); + + ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int igb_ptp_gettimex_82580(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + unsigned long flags; + u32 lo, hi; + u64 ns; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ptp_read_system_prets(sts); + rd32(E1000_SYSTIMR); + ptp_read_system_postts(sts); + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int igb_ptp_gettimex_i210(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + unsigned long 
flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ptp_read_system_prets(sts); + rd32(E1000_SYSTIMR); + ptp_read_system_postts(sts); + ts->tv_nsec = rd32(E1000_SYSTIML); + ts->tv_sec = rd32(E1000_SYSTIMH); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_settime_82576(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + u64 ns; + + ns = timespec64_to_ns(ts); + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + timecounter_init(&igb->tc, &igb->cc, ns); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_settime_i210(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_write_i210(igb, ts); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) +{ + u32 *ptr = pin < 2 ? ctrl : ctrl_ext; + static const u32 mask[IGB_N_SDP] = { + E1000_CTRL_SDP0_DIR, + E1000_CTRL_SDP1_DIR, + E1000_CTRL_EXT_SDP2_DIR, + E1000_CTRL_EXT_SDP3_DIR, + }; + + if (input) + *ptr &= ~mask[pin]; + else + *ptr |= mask[pin]; +} + +static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin) +{ + static const u32 aux0_sel_sdp[IGB_N_SDP] = { + AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, + }; + static const u32 aux1_sel_sdp[IGB_N_SDP] = { + AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, + }; + static const u32 ts_sdp_en[IGB_N_SDP] = { + TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, + }; + struct e1000_hw *hw = &igb->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(E1000_CTRL); + ctrl_ext = rd32(E1000_CTRL_EXT); + tssdp = rd32(E1000_TSSDP); + + igb_pin_direction(pin, 1, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an output. 
*/ + tssdp &= ~ts_sdp_en[pin]; + + if (chan == 1) { + tssdp &= ~AUX1_SEL_SDP3; + tssdp |= aux1_sel_sdp[pin] | AUX1_TS_SDP_EN; + } else { + tssdp &= ~AUX0_SEL_SDP3; + tssdp |= aux0_sel_sdp[pin] | AUX0_TS_SDP_EN; + } + + wr32(E1000_TSSDP, tssdp); + wr32(E1000_CTRL, ctrl); + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq) +{ + static const u32 aux0_sel_sdp[IGB_N_SDP] = { + AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, + }; + static const u32 aux1_sel_sdp[IGB_N_SDP] = { + AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, + }; + static const u32 ts_sdp_en[IGB_N_SDP] = { + TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, + }; + static const u32 ts_sdp_sel_tt0[IGB_N_SDP] = { + TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0, + TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0, + }; + static const u32 ts_sdp_sel_tt1[IGB_N_SDP] = { + TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1, + TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1, + }; + static const u32 ts_sdp_sel_fc0[IGB_N_SDP] = { + TS_SDP0_SEL_FC0, TS_SDP1_SEL_FC0, + TS_SDP2_SEL_FC0, TS_SDP3_SEL_FC0, + }; + static const u32 ts_sdp_sel_fc1[IGB_N_SDP] = { + TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, + TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, + }; + static const u32 ts_sdp_sel_clr[IGB_N_SDP] = { + TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, + TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, + }; + struct e1000_hw *hw = &igb->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(E1000_CTRL); + ctrl_ext = rd32(E1000_CTRL_EXT); + tssdp = rd32(E1000_TSSDP); + + igb_pin_direction(pin, 0, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an input. */ + if ((tssdp & AUX0_SEL_SDP3) == aux0_sel_sdp[pin]) + tssdp &= ~AUX0_TS_SDP_EN; + + if ((tssdp & AUX1_SEL_SDP3) == aux1_sel_sdp[pin]) + tssdp &= ~AUX1_TS_SDP_EN; + + tssdp &= ~ts_sdp_sel_clr[pin]; + if (freq) { + if (chan == 1) + tssdp |= ts_sdp_sel_fc1[pin]; + else + tssdp |= ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= ts_sdp_sel_tt1[pin]; + else + tssdp |= ts_sdp_sel_tt0[pin]; + } + tssdp |= ts_sdp_en[pin]; + + wr32(E1000_TSSDP, tssdp); + wr32(E1000_CTRL, ctrl); + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct igb_adapter *igb = + container_of(ptp, struct igb_adapter, ptp_caps); + u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, systiml, + systimh, level_mask, level, rem; + struct e1000_hw *hw = &igb->hw; + struct timespec64 ts, start; + unsigned long flags; + u64 systim, now; + int pin = -1; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = TSAUXC_EN_TS1; + tsim_mask = TSINTR_AUTT1; + } else { + tsauxc_mask = TSAUXC_EN_TS0; + tsim_mask = TSINTR_AUTT0; + } + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsauxc = rd32(E1000_TSAUXC); + tsim = rd32(E1000_TSIM); + if (on) { + igb_pin_extts(igb, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(E1000_TSAUXC, tsauxc); + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if 
(rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && ns < 8LL) + return -EINVAL; + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + tsauxc_mask = TSAUXC_EN_TT1; + tsim_mask = TSINTR_TT1; + trgttiml = E1000_TRGTTIML1; + trgttimh = E1000_TRGTTIMH1; + } else { + tsauxc_mask = TSAUXC_EN_TT0; + tsim_mask = TSINTR_TT0; + trgttiml = E1000_TRGTTIML0; + trgttimh = E1000_TRGTTIMH0; + } + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsauxc = rd32(E1000_TSAUXC); + tsim = rd32(E1000_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1); + tsim &= ~TSINTR_TT1; + } else { + tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0); + tsim &= ~TSINTR_TT0; + } + if (on) { + int i = rq->perout.index; + + /* read systim registers in sequence */ + rd32(E1000_SYSTIMR); + systiml = rd32(E1000_SYSTIML); + systimh = rd32(E1000_SYSTIMH); + systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml); + now = timecounter_cyc2time(&igb->tc, systim); + + if (pin < 2) { + level_mask = (i == 1) ? 0x80000 : 0x40000; + level = (rd32(E1000_CTRL) & level_mask) ? 1 : 0; + } else { + level_mask = (i == 1) ? 0x80 : 0x40; + level = (rd32(E1000_CTRL_EXT) & level_mask) ? 1 : 0; + } + + div_u64_rem(now, ns, &rem); + systim = systim + (ns - rem); + + /* synchronize pin level with rising/falling edges */ + div_u64_rem(now, ns << 1, &rem); + if (rem < ns) { + /* first half of period */ + if (level == 0) { + /* output is already low, skip this period */ + systim += ns; + } + } else { + /* second half of period */ + if (level == 1) { + /* output is already high, skip this period */ + systim += ns; + } + } + + start = ns_to_timespec64(systim + (ns - rem)); + igb_pin_perout(igb, i, pin, 0); + igb->perout[i].start.tv_sec = start.tv_sec; + igb->perout[i].start.tv_nsec = start.tv_nsec; + igb->perout[i].period.tv_sec = ts.tv_sec; + igb->perout[i].period.tv_nsec = ts.tv_nsec; + + wr32(trgttiml, (u32)systim); + wr32(trgttimh, ((u32)(systim >> 32)) & 0xFF); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(E1000_TSAUXC, tsauxc); + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct igb_adapter *igb = + container_of(ptp, struct igb_adapter, ptp_caps); + struct e1000_hw *hw = &igb->hw; + u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; + unsigned long flags; + struct timespec64 ts; + int use_freq = 0, pin = -1; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests failing to enable both edges. 
*/ + if ((rq->extts.flags & PTP_STRICT_FLAGS) && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = TSAUXC_EN_TS1; + tsim_mask = TSINTR_AUTT1; + } else { + tsauxc_mask = TSAUXC_EN_TS0; + tsim_mask = TSINTR_AUTT0; + } + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsauxc = rd32(E1000_TSAUXC); + tsim = rd32(E1000_TSIM); + if (on) { + igb_pin_extts(igb, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(E1000_TSAUXC, tsauxc); + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && ((ns <= 70000000LL) || (ns == 125000000LL) || + (ns == 250000000LL) || (ns == 500000000LL))) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; + } + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + if (use_freq) { + tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1; + tsim_mask = 0; + } else { + tsauxc_mask = TSAUXC_EN_TT1; + tsim_mask = TSINTR_TT1; + } + trgttiml = E1000_TRGTTIML1; + trgttimh = E1000_TRGTTIMH1; + freqout = E1000_FREQOUT1; + } else { + if (use_freq) { + tsauxc_mask = TSAUXC_EN_CLK0 | TSAUXC_ST0; + tsim_mask = 0; + } else { + tsauxc_mask = TSAUXC_EN_TT0; + tsim_mask = TSINTR_TT0; + } + trgttiml = E1000_TRGTTIML0; + trgttimh = E1000_TRGTTIMH0; + freqout = E1000_FREQOUT0; + } + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsauxc = rd32(E1000_TSAUXC); + tsim = rd32(E1000_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1); + tsim &= ~TSINTR_TT1; + } else { + tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0); + tsim &= ~TSINTR_TT0; + } + if (on) { + int i = rq->perout.index; + igb_pin_perout(igb, i, pin, use_freq); + igb->perout[i].start.tv_sec = rq->perout.start.sec; + igb->perout[i].start.tv_nsec = rq->perout.start.nsec; + igb->perout[i].period.tv_sec = ts.tv_sec; + igb->perout[i].period.tv_nsec = ts.tv_nsec; + wr32(trgttimh, rq->perout.start.sec); + wr32(trgttiml, rq->perout.start.nsec); + if (use_freq) + wr32(freqout, ns); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(E1000_TSAUXC, tsauxc); + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsim = rd32(E1000_TSIM); + if (on) + tsim |= TSINTR_SYS_WRAP; + else + tsim &= ~TSINTR_SYS_WRAP; + igb->pps_sys_wrap_on = !!on; + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + } + + return -EOPNOTSUPP; +} + +static int igb_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +static int igb_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +/** + * 
igb_ptp_tx_work + * @work: pointer to work struct + * + * This work function polls the TSYNCTXCTL valid bit to determine when a + * timestamp has been taken for the current stored skb. + **/ +static void igb_ptp_tx_work(struct work_struct *work) +{ + struct igb_adapter *adapter = container_of(work, struct igb_adapter, + ptp_tx_work); + struct e1000_hw *hw = &adapter->hw; + u32 tsynctxctl; + + if (!adapter->ptp_tx_skb) + return; + + if (time_is_before_jiffies(adapter->ptp_tx_start + + IGB_PTP_TX_TIMEOUT)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + adapter->tx_hwtstamp_timeouts++; + /* Clear the tx valid bit in TSYNCTXCTL register to enable + * interrupt + */ + rd32(E1000_TXSTMPH); + dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); + return; + } + + tsynctxctl = rd32(E1000_TSYNCTXCTL); + if (tsynctxctl & E1000_TSYNCTXCTL_VALID) + igb_ptp_tx_hwtstamp(adapter); + else + /* reschedule to check later */ + schedule_work(&adapter->ptp_tx_work); +} + +static void igb_ptp_overflow_check(struct work_struct *work) +{ + struct igb_adapter *igb = + container_of(work, struct igb_adapter, ptp_overflow_work.work); + struct timespec64 ts; + u64 ns; + + /* Update the timecounter */ + ns = timecounter_read(&igb->tc); + + ts = ns_to_timespec64(ns); + pr_debug("igb overflow check at %lld.%09lu\n", + (long long) ts.tv_sec, ts.tv_nsec); + + schedule_delayed_work(&igb->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); +} + +/** + * igb_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure + * + * This watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. + **/ +void igb_ptp_rx_hang(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); + unsigned long rx_event; + + /* Other hardware uses per-packet timestamps */ + if (hw->mac.type != e1000_82576) + return; + + /* If we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & E1000_TSYNCRXCTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; + } + + /* Determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + if (time_after(adapter->last_rx_timestamp, rx_event)) + rx_event = adapter->last_rx_timestamp; + + /* Only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(E1000_RXSTMPH); + adapter->last_rx_ptp_check = jiffies; + adapter->rx_hwtstamp_cleared++; + dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n"); + } +} + +/** + * igb_ptp_tx_hang - detect error case where Tx timestamp never finishes + * @adapter: private network adapter structure + */ +void igb_ptp_tx_hang(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + IGB_PTP_TX_TIMEOUT); + + if (!adapter->ptp_tx_skb) + return; + + if (!test_bit(__IGB_PTP_TX_IN_PROGRESS, &adapter->state)) + return; + + /* If we haven't received a timestamp within the timeout, it is + * reasonable to assume that it will never occur, so we can unlock the + * timestamp bit when this occurs. 
+ */ + if (timeout) { + cancel_work_sync(&adapter->ptp_tx_work); + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + adapter->tx_hwtstamp_timeouts++; + /* Clear the tx valid bit in TSYNCTXCTL register to enable + * interrupt + */ + rd32(E1000_TXSTMPH); + dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); + } +} + +/** + * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: Board private structure. + * + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we only + * allow only one such packet into the queue. + **/ +static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) +{ + struct sk_buff *skb = adapter->ptp_tx_skb; + struct e1000_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval; + int adjust = 0; + + regval = rd32(E1000_TXSTMPL); + regval |= (u64)rd32(E1000_TXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + /* adjust timestamp for the TX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_TX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_TX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_TX_LATENCY_1000; + break; + } + } + + shhwtstamps.hwtstamp = + ktime_add_ns(shhwtstamps.hwtstamp, adjust); + + /* Clear the lock early before calling skb_tstamp_tx so that + * applications are not woken up before the lock bit is clear. We use + * a copy of the skb pointer to ensure other threads can't change it + * while we're notifying the stack. + */ + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + + /* Notify the stack and free the skb after we've unlocked */ + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); +} + +/** + * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp + * @q_vector: Pointer to interrupt specific structure + * @va: Pointer to address containing Rx buffer + * @timestamp: Pointer where timestamp will be stored + * + * This function is meant to retrieve a timestamp from the first buffer of an + * incoming frame. The value is stored in little endian format starting on + * byte 8 + * + * Returns: The timestamp header length or 0 if not available + **/ +int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, + ktime_t *timestamp) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct skb_shared_hwtstamps ts; + __le64 *regval = (__le64 *)va; + int adjust = 0; + + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return 0; + + /* The timestamp is recorded in little endian format. 
+ * DWORD: 0 1 2 3 + * Field: Reserved Reserved SYSTIML SYSTIMH + */ + + /* check reserved dwords are zero, be/le doesn't matter for zero */ + if (regval[0]) + return 0; + + igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1])); + + /* adjust timestamp for the RX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_RX_LATENCY_1000; + break; + } + } + + *timestamp = ktime_sub_ns(ts.hwtstamp, adjust); + + return IGB_TS_HDR_LEN; +} + +/** + * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register + * @q_vector: Pointer to interrupt specific structure + * @skb: Buffer containing timestamp and packet + * + * This function is meant to retrieve a timestamp from the internal registers + * of the adapter and store it in the skb. + **/ +void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + int adjust = 0; + u64 regval; + + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return; + + /* If this bit is set, then the RX registers contain the time stamp. No + * other packet will be time stamped until we read these registers, so + * read the registers to make them available again. Because only one + * packet can be time stamped at a time, we know that the register + * values must belong to this one here and therefore we don't need to + * compare any of the additional attributes stored for it. + * + * If nothing went wrong, then it should have a shared tx_flags that we + * can turn into a skb_shared_hwtstamps. + */ + if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + + regval = rd32(E1000_RXSTMPL); + regval |= (u64)rd32(E1000_RXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); + + /* adjust timestamp for the RX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_RX_LATENCY_1000; + break; + } + } + skb_hwtstamps(skb)->hwtstamp = + ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); + + /* Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. + */ + adapter->last_rx_timestamp = jiffies; +} + +/** + * igb_ptp_get_ts_config - get hardware time stamping config + * @netdev: netdev struct + * @ifr: interface struct + * + * Get the hwtstamp_config settings to return to the user. Rather than attempt + * to deconstruct the settings from the registers, just return a shadow copy + * of the last known settings. + **/ +int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/** + * igb_ptp_set_timestamp_mode - setup hardware for timestamping + * @adapter: networking device structure + * @config: hwtstamp configuration + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't case any overhead + * when no packet needs it. 
At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". + */ +static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, + struct hwtstamp_config *config) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + u32 tsync_rx_cfg = 0; + bool is_l4 = false; + bool is_l2 = false; + u32 regval; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + break; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_NTP_ALL: + case HWTSTAMP_FILTER_ALL: + /* 82576 cannot timestamp all packets, which it needs to do to + * support both V1 Sync and Delay_Req messages + */ + if (hw->mac.type != e1000_82576) { + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + fallthrough; + default: + config->rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + if (hw->mac.type == e1000_82575) { + if (tsync_rx_ctl | tsync_tx_ctl) + return -EINVAL; + return 0; + } + + /* Per-packet timestamping only works if all packets are + * timestamped, so enable timestamping in all packets as + * long as one Rx filter was configured. 
+ */ + if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { + tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + is_l2 = true; + is_l4 = true; + + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) { + regval = rd32(E1000_RXPBS); + regval |= E1000_RXPBS_CFG_TS_EN; + wr32(E1000_RXPBS, regval); + } + } + + /* enable/disable TX */ + regval = rd32(E1000_TSYNCTXCTL); + regval &= ~E1000_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; + wr32(E1000_TSYNCTXCTL, regval); + + /* enable/disable RX */ + regval = rd32(E1000_TSYNCRXCTL); + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; + wr32(E1000_TSYNCRXCTL, regval); + + /* define which PTP packets are time stamped */ + wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); + + /* define ethertype filter for timestamped packets */ + if (is_l2) + wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), + (E1000_ETQF_FILTER_ENABLE | /* enable filter */ + E1000_ETQF_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), 0); + + /* L4 Queue Filter[3]: filter by destination port and protocol */ + if (is_l4) { + u32 ftqf = (IPPROTO_UDP /* UDP */ + | E1000_FTQF_VF_BP /* VF not compared */ + | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ + | E1000_FTQF_MASK); /* mask all inputs */ + ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ + + wr32(E1000_IMIR(3), (__force unsigned int)htons(PTP_EV_PORT)); + wr32(E1000_IMIREXT(3), + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); + if (hw->mac.type == e1000_82576) { + /* enable source port check */ + wr32(E1000_SPQF(3), (__force unsigned int)htons(PTP_EV_PORT)); + ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; + } + wr32(E1000_FTQF(3), ftqf); + } else { + wr32(E1000_FTQF(3), E1000_FTQF_MASK); + } + wrfl(); + + /* clear TX/RX time stamp registers, just to be sure */ + regval = rd32(E1000_TXSTMPL); + regval = rd32(E1000_TXSTMPH); + regval = rd32(E1000_RXSTMPL); + regval = rd32(E1000_RXSTMPH); + + return 0; +} + +/** + * igb_ptp_set_ts_config - set hardware time stamping config + * @netdev: netdev struct + * @ifr: interface struct + * + **/ +int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = igb_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * igb_ptp_init - Initialize PTP functionality + * @adapter: Board private structure + * + * This function is called at device probe to initialize the PTP + * functionality. 
+ */ +void igb_ptp_init(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + switch (hw->mac.type) { + case e1000_82576: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 999999881; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfine = igb_ptp_adjfine_82576; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82576; + adapter->ptp_caps.settime64 = igb_ptp_settime_82576; + adapter->ptp_caps.enable = igb_ptp_feature_enable; + adapter->cc.read = igb_ptp_read_82576; + adapter->cc.mask = CYCLECOUNTER_MASK(64); + adapter->cc.mult = 1; + adapter->cc.shift = IGB_82576_TSYNC_SHIFT; + adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK; + break; + case e1000_82580: + case e1000_i354: + case e1000_i350: + igb_ptp_sdp_init(adapter); + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS; + adapter->ptp_caps.n_per_out = IGB_N_PEROUT; + adapter->ptp_caps.n_pins = IGB_N_SDP; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82580; + adapter->ptp_caps.settime64 = igb_ptp_settime_82576; + adapter->ptp_caps.enable = igb_ptp_feature_enable_82580; + adapter->ptp_caps.verify = igb_ptp_verify_pin; + adapter->cc.read = igb_ptp_read_82580; + adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580); + adapter->cc.mult = 1; + adapter->cc.shift = 0; + adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK; + break; + case e1000_i210: + case e1000_i211: + igb_ptp_sdp_init(adapter); + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS; + adapter->ptp_caps.n_per_out = IGB_N_PEROUT; + adapter->ptp_caps.n_pins = IGB_N_SDP; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_i210; + adapter->ptp_caps.settime64 = igb_ptp_settime_i210; + adapter->ptp_caps.enable = igb_ptp_feature_enable_i210; + adapter->ptp_caps.verify = igb_ptp_verify_pin; + break; + default: + adapter->ptp_clock = NULL; + return; + } + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + INIT_DELAYED_WORK(&adapter->ptp_overflow_work, + igb_ptp_overflow_check); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + igb_ptp_reset(adapter); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n"); + } else if (adapter->ptp_clock) { + dev_info(&adapter->pdev->dev, "added PHC on %s\n", + adapter->netdev->name); + adapter->ptp_flags |= IGB_PTP_ENABLED; + } +} + +/** + * igb_ptp_sdp_init - utility function which inits the SDP config structs + * @adapter: Board private structure. 
+ **/ +void igb_ptp_sdp_init(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < IGB_N_SDP; i++) { + struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } +} + +/** + * igb_ptp_suspend - Disable PTP work items and prepare for suspend + * @adapter: Board private structure + * + * This function stops the overflow check work and PTP Tx timestamp work, and + * will prepare the device for OS suspend. + */ +void igb_ptp_suspend(struct igb_adapter *adapter) +{ + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) + return; + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + cancel_delayed_work_sync(&adapter->ptp_overflow_work); + + cancel_work_sync(&adapter->ptp_tx_work); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + } +} + +/** + * igb_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. + **/ +void igb_ptp_stop(struct igb_adapter *adapter) +{ + igb_ptp_suspend(adapter); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + dev_info(&adapter->pdev->dev, "removed PHC on %s\n", + adapter->netdev->name); + adapter->ptp_flags &= ~IGB_PTP_ENABLED; + } +} + +/** + * igb_ptp_reset - Re-enable the adapter for PTP following a reset. + * @adapter: Board private structure. + * + * This function handles the reset work required to re-enable the PTP device. + **/ +void igb_ptp_reset(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long flags; + + /* reset the tstamp_config */ + igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + switch (adapter->hw.mac.type) { + case e1000_82576: + /* Dial the nominal frequency. */ + wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); + break; + case e1000_82580: + case e1000_i354: + case e1000_i350: + case e1000_i210: + case e1000_i211: + wr32(E1000_TSAUXC, 0x0); + wr32(E1000_TSSDP, 0x0); + wr32(E1000_TSIM, + TSYNC_INTERRUPTS | + (adapter->pps_sys_wrap_on ? TSINTR_SYS_WRAP : 0)); + wr32(E1000_IMS, E1000_IMS_TS); + break; + default: + /* No work to do. */ + goto out; + } + + /* Re-initialize the timer. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { + struct timespec64 ts = ktime_to_timespec64(ktime_get_real()); + + igb_ptp_write_i210(adapter, &ts); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } +out: + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + wrfl(); + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + schedule_delayed_work(&adapter->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); +} diff --git a/devices/igc/Kbuild.in b/devices/igc/Kbuild.in new file mode 100644 index 00000000..59c79b70 --- /dev/null +++ b/devices/igc/Kbuild.in @@ -0,0 +1,66 @@ +#------------------------------------------------------------------------------ +# +# $Id$ +# +# Copyright (C) 2006-2017 Florian Pose, Ingenieurgemeinschaft IgH +# +# This file is part of the IgH EtherCAT Master. +# +# The IgH EtherCAT Master is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License version 2, as +# published by the Free Software Foundation. 
+# +# The IgH EtherCAT Master is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with the IgH EtherCAT Master; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# +# --- +# +# The license mentioned above concerns the source code only. Using the +# EtherCAT technology and brand is only permitted in compliance with the +# industrial property and similar rights of Beckhoff Automation GmbH. +# +# --- +# +# vim: syntax=make +# +#------------------------------------------------------------------------------ + +TOPDIR := $(src)/../.. + +REV := $(shell if test -s $(TOPDIR)/revision; then \ + cat $(TOPDIR)/revision; \ + else \ + git -C $(TOPDIR) describe 2>/dev/null || echo "unknown"; \ + fi) + +ifeq (@ENABLE_IGC@,1) + obj-m += ec_igc.o + + ec_igc-objs := \ + igc_xdp-@KERNEL_IGC@-ethercat.o \ + igc_tsn-@KERNEL_IGC@-ethercat.o \ + igc_base-@KERNEL_IGC@-ethercat.o \ + igc_dump-@KERNEL_IGC@-ethercat.o \ + igc_main-@KERNEL_IGC@-ethercat.o \ + igc_ptp-@KERNEL_IGC@-ethercat.o \ + igc_diag-@KERNEL_IGC@-ethercat.o \ + igc_i225-@KERNEL_IGC@-ethercat.o \ + igc_mac-@KERNEL_IGC@-ethercat.o \ + igc_phy-@KERNEL_IGC@-ethercat.o \ + igc_ethtool-@KERNEL_IGC@-ethercat.o \ + igc_nvm-@KERNEL_IGC@-ethercat.o + + CFLAGS_igc_main-@KERNEL_IGC@-ethercat.o = -DREV=$(REV) +endif + +KBUILD_EXTRA_SYMBOLS := \ + @abs_top_builddir@/$(LINUX_SYMVERS) \ + @abs_top_builddir@/master/$(LINUX_SYMVERS) + +#------------------------------------------------------------------------------ diff --git a/devices/igc/Makefile.am b/devices/igc/Makefile.am new file mode 100644 index 00000000..891f133b --- /dev/null +++ b/devices/igc/Makefile.am @@ -0,0 +1,127 @@ +#------------------------------------------------------------------------------ +# +# Copyright (C) 2006-2021 Florian Pose, Ingenieurgemeinschaft IgH +# +# This file is part of the IgH EtherCAT Master. +# +# The IgH EtherCAT Master is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License version 2, as +# published by the Free Software Foundation. +# +# The IgH EtherCAT Master is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with the IgH EtherCAT Master; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# +# --- +# +# The license mentioned above concerns the source code only. Using the +# EtherCAT technology and brand is only permitted in compliance with the +# industrial property and similar rights of Beckhoff Automation GmbH. 
+#
+#------------------------------------------------------------------------------
+
+include $(top_srcdir)/Makefile.kbuild
+
+EXTRA_DIST = igc-6.6-ethercat.h \
+ igc_base-6.6-orig.h \
+ igc_diag-6.6-orig.c \
+ igc_ethtool-6.6-orig.c \
+ igc_i225-6.6-orig.c \
+ igc_mac-6.6-orig.c \
+ igc_nvm-6.6-ethercat.h \
+ igc_phy-6.6-orig.c \
+ igc_regs-6.6-orig.h \
+ igc_xdp-6.6-ethercat.c \
+ igc-6.6-orig.h \
+ igc_defines-6.6-ethercat.h \
+ igc_diag-6.6-orig.h \
+ igc_hw-6.6-ethercat.h \
+ igc_i225-6.6-orig.h \
+ igc_mac-6.6-orig.h \
+ igc_nvm-6.6-orig.c \
+ igc_phy-6.6-orig.h \
+ igc_tsn-6.6-ethercat.c \
+ igc_xdp-6.6-ethercat.h \
+ igc_base-6.6-ethercat.c \
+ igc_defines-6.6-orig.h \
+ igc_dump-6.6-ethercat.c \
+ igc_hw-6.6-orig.h \
+ igc_main-6.6-ethercat.c \
+ igc_nvm-6.6-orig.h \
+ igc_ptp-6.6-ethercat.c \
+ igc_tsn-6.6-ethercat.h \
+ igc_xdp-6.6-orig.c \
+ igc_base-6.6-ethercat.h \
+ igc_diag-6.6-ethercat.c \
+ igc_dump-6.6-orig.c \
+ igc_i225-6.6-ethercat.c \
+ igc_mac-6.6-ethercat.c \
+ igc_main-6.6-orig.c \
+ igc_phy-6.6-ethercat.c \
+ igc_ptp-6.6-orig.c \
+ igc_tsn-6.6-orig.c \
+ igc_xdp-6.6-orig.h \
+ igc_base-6.6-orig.c \
+ igc_diag-6.6-ethercat.h \
+ igc_ethtool-6.6-ethercat.c \
+ igc_i225-6.6-ethercat.h \
+ igc_mac-6.6-ethercat.h \
+ igc_nvm-6.6-ethercat.c \
+ igc_phy-6.6-ethercat.h \
+ igc_regs-6.6-ethercat.h \
+ igc_tsn-6.6-orig.h \
+ igc-6.4-ethercat.h \
+ igc_base-6.4-orig.h \
+ igc_diag-6.4-orig.c \
+ igc_ethtool-6.4-orig.c \
+ igc_i225-6.4-orig.c \
+ igc_mac-6.4-orig.c \
+ igc_nvm-6.4-ethercat.h \
+ igc_phy-6.4-orig.c \
+ igc_regs-6.4-orig.h \
+ igc_xdp-6.4-ethercat.c \
+ igc-6.4-orig.h \
+ igc_defines-6.4-ethercat.h \
+ igc_diag-6.4-orig.h \
+ igc_hw-6.4-ethercat.h \
+ igc_i225-6.4-orig.h \
+ igc_mac-6.4-orig.h \
+ igc_nvm-6.4-orig.c \
+ igc_phy-6.4-orig.h \
+ igc_tsn-6.4-ethercat.c \
+ igc_xdp-6.4-ethercat.h \
+ igc_base-6.4-ethercat.c \
+ igc_defines-6.4-orig.h \
+ igc_dump-6.4-ethercat.c \
+ igc_hw-6.4-orig.h \
+ igc_main-6.4-ethercat.c \
+ igc_nvm-6.4-orig.h \
+ igc_ptp-6.4-ethercat.c \
+ igc_tsn-6.4-ethercat.h \
+ igc_xdp-6.4-orig.c \
+ igc_base-6.4-ethercat.h \
+ igc_diag-6.4-ethercat.c \
+ igc_dump-6.4-orig.c \
+ igc_i225-6.4-ethercat.c \
+ igc_mac-6.4-ethercat.c \
+ igc_main-6.4-orig.c \
+ igc_phy-6.4-ethercat.c \
+ igc_ptp-6.4-orig.c \
+ igc_tsn-6.4-orig.c \
+ igc_xdp-6.4-orig.h \
+ igc_base-6.4-orig.c \
+ igc_diag-6.4-ethercat.h \
+ igc_ethtool-6.4-ethercat.c \
+ igc_i225-6.4-ethercat.h \
+ igc_mac-6.4-ethercat.h \
+ igc_nvm-6.4-ethercat.c \
+ igc_phy-6.4-ethercat.h \
+ igc_regs-6.4-ethercat.h \
+ igc_tsn-6.4-orig.h
+
+
diff --git a/devices/igc/igc-6.4-ethercat.h b/devices/igc/igc-6.4-ethercat.h
new file mode 100644
index 00000000..52ad0a95
--- /dev/null
+++ b/devices/igc/igc-6.4-ethercat.h
@@ -0,0 +1,699 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_H_
+#define _IGC_H_
+#include "../ecdev.h"
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "igc_hw-6.4-ethercat.h"
+
+void igc_ethtool_set_ops(struct net_device *);
+
+/* Transmit and receive queues */
+#define IGC_MAX_RX_QUEUES 4
+#define IGC_MAX_TX_QUEUES 4
+
+#define MAX_Q_VECTORS 8
+#define MAX_STD_JUMBO_FRAME_SIZE 9216
+
+#define MAX_ETYPE_FILTER 8
+#define IGC_RETA_SIZE 128
+
+/* SDP support */
+#define IGC_N_EXTTS 2
+#define IGC_N_PEROUT 2
+#define IGC_N_SDP 4
+
+#define MAX_FLEX_FILTER 32
+
+enum igc_mac_filter_type {
+ IGC_MAC_FILTER_TYPE_DST = 0,
+ IGC_MAC_FILTER_TYPE_SRC
+};
+
+struct igc_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igc_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igc_rx_packet_stats { + u64 ipv4_packets; /* IPv4 headers processed */ + u64 ipv4e_packets; /* IPv4E headers with extensions processed */ + u64 ipv6_packets; /* IPv6 headers processed */ + u64 ipv6e_packets; /* IPv6E headers with extensions processed */ + u64 tcp_packets; /* TCP headers processed */ + u64 udp_packets; /* UDP headers processed */ + u64 sctp_packets; /* SCTP headers processed */ + u64 nfs_packets; /* NFS headers processe */ + u64 other_packets; +}; + +struct igc_ring_container { + struct igc_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct igc_ring { + struct igc_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct device *dev; /* device for dma mapping */ + union { /* array of buffer info structs */ + struct igc_tx_buffer *tx_buffer_info; + struct igc_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ + ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */ + ktime_t last_ff_cycle; /* Last cycle with an active first flag */ + + u32 start_time; + u32 end_time; + u32 max_sdu; + bool oper_gate_closed; /* Operating gate. True if the TX Queue is closed */ + bool admin_gate_closed; /* Future gate. 
True if the TX Queue will be closed */ + + /* CBS parameters */ + bool cbs_enable; /* indicates if CBS is enabled */ + s32 idleslope; /* idleSlope in kbps */ + s32 sendslope; /* sendSlope in kbps */ + s32 hicredit; /* hiCredit in bytes */ + s32 locredit; /* loCredit in bytes */ + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igc_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + /* RX */ + struct { + struct igc_rx_queue_stats rx_stats; + struct igc_rx_packet_stats pkt_stats; + struct u64_stats_sync rx_syncp; + struct sk_buff *skb; + }; + }; + + struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; +} ____cacheline_internodealigned_in_smp; + +/* Board specific private data structure */ +struct igc_adapter { + struct net_device *netdev; + + struct ethtool_eee eee; + u16 eee_advert; + + unsigned long state; + unsigned int flags; + unsigned int num_q_vectors; + + struct msix_entry *msix_entries; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES]; + + /* RX */ + int num_rx_queues; + struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES]; + + struct timer_list watchdog_timer; + struct timer_list dma_err_timer; + struct timer_list phy_info_timer; + struct hrtimer hrtimer; + + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + + u8 port_num; + + u8 __iomem *io_addr; + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + + struct work_struct reset_task; + struct work_struct watchdog_task; + struct work_struct dma_err_task; + bool fc_autoneg; + + u8 tx_timeout_factor; + + int msg_enable; + u32 max_frame_size; + u32 min_frame_size; + + int tc_setup_type; + ktime_t base_time; + ktime_t cycle_time; + bool taprio_offload_enable; + u32 qbv_config_change_errors; + bool qbv_transition; + unsigned int qbv_count; + + /* OS defined structs */ + struct pci_dev *pdev; + /* lock for statistics */ + spinlock_t stats64_lock; + struct rtnl_link_stats64 stats64; + + /* structs defined in igc_hw.h */ + struct igc_hw hw; + struct igc_hw_stats stats; + + struct igc_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + u16 tx_ring_count; + u16 rx_ring_count; + + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + + u32 rss_queues; + u32 rss_indir_tbl_init; + + /* Any access to elements in nfc_rule_list is protected by the + * nfc_rule_lock. + */ + struct mutex nfc_rule_lock; + struct list_head nfc_rule_list; + unsigned int nfc_rule_count; + + u8 rss_indir_tbl[IGC_RETA_SIZE]; + + unsigned long link_check_timeout; + struct igc_info ei; + + u32 test_icr; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + /* Access to ptp_tx_skb and ptp_tx_start are protected by the + * ptp_tx_lock. 
+ */ + spinlock_t ptp_tx_lock; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned int ptp_flags; + /* System time value lock */ + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */ + ktime_t ptp_reset_start; /* Reset time in clock mono */ + struct system_time_snapshot snapshot; + + char fw_version[32]; + + struct bpf_prog *xdp_prog; + + bool pps_sys_wrap_on; + + struct ptp_pin_desc sdp_config[IGC_N_SDP]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[IGC_N_PEROUT]; + + /* EtherCAT device variables */ + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +void igc_up(struct igc_adapter *adapter); +void igc_down(struct igc_adapter *adapter); +int igc_open(struct net_device *netdev); +int igc_close(struct net_device *netdev); +int igc_setup_tx_resources(struct igc_ring *ring); +int igc_setup_rx_resources(struct igc_ring *ring); +void igc_free_tx_resources(struct igc_ring *ring); +void igc_free_rx_resources(struct igc_ring *ring); +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter); +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues); +int igc_reinit_queues(struct igc_adapter *adapter); +void igc_write_rss_indir_tbl(struct igc_adapter *adapter); +bool igc_has_link(struct igc_adapter *adapter); +void igc_reset(struct igc_adapter *adapter); +void igc_update_stats(struct igc_adapter *adapter); +void igc_disable_rx_ring(struct igc_ring *ring); +void igc_enable_rx_ring(struct igc_ring *ring); +void igc_disable_tx_ring(struct igc_ring *ring); +void igc_enable_tx_ring(struct igc_ring *ring); +int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); + +/* igc_dump declarations */ +void igc_rings_dump(struct igc_adapter *adapter); +void igc_regs_dump(struct igc_adapter *adapter); + +extern char igc_driver_name[]; + +#define IGC_REGS_LEN 740 + +/* flags controlling PTP/1588 function */ +#define IGC_PTP_ENABLED BIT(0) + +/* Flags definitions */ +#define IGC_FLAG_HAS_MSI BIT(0) +#define IGC_FLAG_QUEUE_PAIRS BIT(3) +#define IGC_FLAG_DMAC BIT(4) +#define IGC_FLAG_PTP BIT(8) +#define IGC_FLAG_WOL_SUPPORTED BIT(8) +#define IGC_FLAG_NEED_LINK_UPDATE BIT(9) +#define IGC_FLAG_HAS_MSIX BIT(13) +#define IGC_FLAG_EEE BIT(14) +#define IGC_FLAG_VLAN_PROMISC BIT(15) +#define IGC_FLAG_RX_LEGACY BIT(16) +#define IGC_FLAG_TSN_QBV_ENABLED BIT(17) +#define IGC_FLAG_TSN_QAV_ENABLED BIT(18) + +#define IGC_FLAG_TSN_ANY_ENABLED \ + (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED) + +#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6) +#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7) + +#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002 +#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 + +/* RX-desc Write-Back format RSS Type's */ +enum igc_rss_type_num { + IGC_RSS_TYPE_NO_HASH = 0, + IGC_RSS_TYPE_HASH_TCP_IPV4 = 1, + IGC_RSS_TYPE_HASH_IPV4 = 2, + IGC_RSS_TYPE_HASH_TCP_IPV6 = 3, + IGC_RSS_TYPE_HASH_IPV6_EX = 4, + IGC_RSS_TYPE_HASH_IPV6 = 5, + IGC_RSS_TYPE_HASH_TCP_IPV6_EX = 6, + IGC_RSS_TYPE_HASH_UDP_IPV4 = 7, + IGC_RSS_TYPE_HASH_UDP_IPV6 = 8, + IGC_RSS_TYPE_HASH_UDP_IPV6_EX = 9, + IGC_RSS_TYPE_MAX = 10, +}; +#define IGC_RSS_TYPE_MAX_TABLE 16 +#define IGC_RSS_TYPE_MASK GENMASK(3,0) /* 4-bits (3:0) = mask 0x0F */ + +/* igc_rss_type - Rx descriptor RSS type field */ +static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc) +{ + /* RSS Type 4-bits 
(3:0) number: 0-9 (above 9 is reserved) + * Accessing the same bits via u16 (wb.lower.lo_dword.hs_rss.pkt_info) + * is slightly slower than via u32 (wb.lower.lo_dword.data) + */ + return le32_get_bits(rx_desc->wb.lower.lo_dword.data, IGC_RSS_TYPE_MASK); +} + +/* Interrupt defines */ +#define IGC_START_ITR 648 /* ~6000 ints/sec */ +#define IGC_4K_ITR 980 +#define IGC_20K_ITR 196 +#define IGC_70K_ITR 56 + +#define IGC_DEFAULT_ITR 3 /* dynamic */ +#define IGC_MAX_ITR_USECS 10000 +#define IGC_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_MSIX_ENTRIES 10 + +/* TX/RX descriptor defines */ +#define IGC_DEFAULT_TXD 256 +#define IGC_DEFAULT_TX_WORK 128 +#define IGC_MIN_TXD 80 +#define IGC_MAX_TXD 4096 + +#define IGC_DEFAULT_RXD 256 +#define IGC_MIN_RXD 80 +#define IGC_MAX_RXD 4096 + +/* Supported Rx Buffer Sizes */ +#define IGC_RXBUFFER_256 256 +#define IGC_RXBUFFER_2048 2048 +#define IGC_RXBUFFER_3072 3072 + +#define AUTO_ALL_MODES 0 +#define IGC_RX_HDR_LEN IGC_RXBUFFER_256 + +/* Transmit and receive latency (for PTP timestamps) */ +#define IGC_I225_TX_LATENCY_10 240 +#define IGC_I225_TX_LATENCY_100 58 +#define IGC_I225_TX_LATENCY_1000 80 +#define IGC_I225_TX_LATENCY_2500 1325 +#define IGC_I225_RX_LATENCY_10 6450 +#define IGC_I225_RX_LATENCY_100 185 +#define IGC_I225_RX_LATENCY_1000 300 +#define IGC_I225_RX_LATENCY_2500 1485 + +/* RX and TX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGC_RX_PTHRESH 8 +#define IGC_RX_HTHRESH 8 +#define IGC_TX_PTHRESH 8 +#define IGC_TX_HTHRESH 1 +#define IGC_RX_WTHRESH 4 +#define IGC_TX_WTHRESH 16 + +#define IGC_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define IGC_TS_HDR_LEN 16 + +#define IGC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) + +#if (PAGE_SIZE < 8192) +#define IGC_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN) +#else +#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN) +#endif + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +/* VLAN info */ +#define IGC_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGC_TX_FLAGS_VLAN_SHIFT 16 + +/* igc_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +enum igc_state_t { + __IGC_TESTING, + __IGC_RESETTING, + __IGC_DOWN, +}; + +enum igc_tx_flags { + /* cmd_type flags */ + IGC_TX_FLAGS_VLAN = 0x01, + IGC_TX_FLAGS_TSO = 0x02, + IGC_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGC_TX_FLAGS_IPV4 = 0x10, + IGC_TX_FLAGS_CSUM = 0x20, +}; + +enum igc_boards { + board_base, +}; + +/* The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. 
+ */ +#define IGC_MAX_TXD_PWR 15 +#define IGC_MAX_DATA_PER_TXD BIT(IGC_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +enum igc_tx_buffer_type { + IGC_TX_BUFFER_TYPE_SKB, + IGC_TX_BUFFER_TYPE_XDP, + IGC_TX_BUFFER_TYPE_XSK, +}; + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igc_tx_buffer { + union igc_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + enum igc_tx_buffer_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct igc_rx_buffer { + union { + struct { + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; + }; + struct xdp_buff *xdp; + }; +}; + +/* context wrapper around xdp_buff to provide access to descriptor metadata */ +struct igc_xdp_buff { + struct xdp_buff xdp; + union igc_adv_rx_desc *rx_desc; +}; + +struct igc_q_vector { + struct igc_adapter *adapter; /* backlink */ + void __iomem *itr_register; + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + + struct igc_ring_container rx, tx; + + struct napi_struct napi; + + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + struct net_device poll_dev; + + /* for dynamic allocation of rings associated with this q_vector */ + struct igc_ring ring[] ____cacheline_internodealigned_in_smp; +}; + +enum igc_filter_match_flags { + IGC_FILTER_FLAG_ETHER_TYPE = BIT(0), + IGC_FILTER_FLAG_VLAN_TCI = BIT(1), + IGC_FILTER_FLAG_SRC_MAC_ADDR = BIT(2), + IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3), + IGC_FILTER_FLAG_USER_DATA = BIT(4), + IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5), +}; + +struct igc_nfc_filter { + u8 match_flags; + u16 etype; + __be16 vlan_etype; + u16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; + u8 user_data[8]; + u8 user_mask[8]; + u8 flex_index; + u8 rx_queue; + u8 prio; + u8 immediate_irq; + u8 drop; +}; + +struct igc_nfc_rule { + struct list_head list; + struct igc_nfc_filter filter; + u32 location; + u16 action; + bool flex; +}; + +/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority + * based, 8 ethertype based and 32 Flex filter based rules. + */ +#define IGC_MAX_RXNFC_RULES 64 + +struct igc_flex_filter { + u8 index; + u8 data[128]; + u8 mask[16]; + u8 length; + u8 rx_queue; + u8 prio; + u8 immediate_irq; + u8 drop; +}; + +/* igc_desc_unused - calculate if we have unused descriptors */ +static inline u16 igc_desc_unused(const struct igc_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; +} + +static inline s32 igc_get_phy_info(struct igc_hw *hw) +{ + if (hw->phy.ops.get_phy_info) + return hw->phy.ops.get_phy_info(hw); + + return 0; +} + +static inline s32 igc_reset_phy(struct igc_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return 0; +} + +static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} + +enum igc_ring_flags_t { + IGC_RING_FLAG_RX_3K_BUFFER, + IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, + IGC_RING_FLAG_RX_SCTP_CSUM, + IGC_RING_FLAG_RX_LB_VLAN_BSWAP, + IGC_RING_FLAG_TX_CTX_IDX, + IGC_RING_FLAG_TX_DETECT_HANG, + IGC_RING_FLAG_AF_XDP_ZC, + IGC_RING_FLAG_TX_HWTSTAMP, +}; + +#define ring_uses_large_buffer(ring) \ + test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define set_ring_uses_large_buffer(ring) \ + set_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define clear_ring_uses_large_buffer(ring) \ + clear_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) + +#define ring_uses_build_skb(ring) \ + test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) + +static inline unsigned int igc_rx_bufsz(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return IGC_RXBUFFER_3072; + + if (ring_uses_build_skb(ring)) + return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN; +#endif + return IGC_RXBUFFER_2048; +} + +static inline unsigned int igc_rx_pg_order(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return 1; +#endif + return 0; +} + +static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return -EOPNOTSUPP; +} + +void igc_reinit_locked(struct igc_adapter *); +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location); +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); + +void igc_ptp_init(struct igc_adapter *adapter); +void igc_ptp_reset(struct igc_adapter *adapter); +void igc_ptp_suspend(struct igc_adapter *adapter); +void igc_ptp_stop(struct igc_adapter *adapter); +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf); +int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); +int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +void igc_ptp_tx_hang(struct igc_adapter *adapter); +void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts); +void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter); + +#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) + +#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS) + +#define IGC_RX_DESC(R, i) \ + (&(((union igc_adv_rx_desc *)((R)->desc))[i])) +#define IGC_TX_DESC(R, i) \ + (&(((union igc_adv_tx_desc *)((R)->desc))[i])) +#define IGC_TX_CTXTDESC(R, i) \ + (&(((struct igc_adv_tx_context_desc *)((R)->desc))[i])) + +#endif /* _IGC_H_ */ diff --git a/devices/igc/igc-6.4-orig.h b/devices/igc/igc-6.4-orig.h new file mode 100644 index 00000000..345d3a4e --- /dev/null +++ b/devices/igc/igc-6.4-orig.h @@ -0,0 +1,694 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_H_ +#define _IGC_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "igc_hw.h" + +void 
igc_ethtool_set_ops(struct net_device *); + +/* Transmit and receive queues */ +#define IGC_MAX_RX_QUEUES 4 +#define IGC_MAX_TX_QUEUES 4 + +#define MAX_Q_VECTORS 8 +#define MAX_STD_JUMBO_FRAME_SIZE 9216 + +#define MAX_ETYPE_FILTER 8 +#define IGC_RETA_SIZE 128 + +/* SDP support */ +#define IGC_N_EXTTS 2 +#define IGC_N_PEROUT 2 +#define IGC_N_SDP 4 + +#define MAX_FLEX_FILTER 32 + +enum igc_mac_filter_type { + IGC_MAC_FILTER_TYPE_DST = 0, + IGC_MAC_FILTER_TYPE_SRC +}; + +struct igc_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igc_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igc_rx_packet_stats { + u64 ipv4_packets; /* IPv4 headers processed */ + u64 ipv4e_packets; /* IPv4E headers with extensions processed */ + u64 ipv6_packets; /* IPv6 headers processed */ + u64 ipv6e_packets; /* IPv6E headers with extensions processed */ + u64 tcp_packets; /* TCP headers processed */ + u64 udp_packets; /* UDP headers processed */ + u64 sctp_packets; /* SCTP headers processed */ + u64 nfs_packets; /* NFS headers processe */ + u64 other_packets; +}; + +struct igc_ring_container { + struct igc_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct igc_ring { + struct igc_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct device *dev; /* device for dma mapping */ + union { /* array of buffer info structs */ + struct igc_tx_buffer *tx_buffer_info; + struct igc_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ + ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */ + ktime_t last_ff_cycle; /* Last cycle with an active first flag */ + + u32 start_time; + u32 end_time; + u32 max_sdu; + bool oper_gate_closed; /* Operating gate. True if the TX Queue is closed */ + bool admin_gate_closed; /* Future gate. 
True if the TX Queue will be closed */ + + /* CBS parameters */ + bool cbs_enable; /* indicates if CBS is enabled */ + s32 idleslope; /* idleSlope in kbps */ + s32 sendslope; /* sendSlope in kbps */ + s32 hicredit; /* hiCredit in bytes */ + s32 locredit; /* loCredit in bytes */ + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igc_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + /* RX */ + struct { + struct igc_rx_queue_stats rx_stats; + struct igc_rx_packet_stats pkt_stats; + struct u64_stats_sync rx_syncp; + struct sk_buff *skb; + }; + }; + + struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; +} ____cacheline_internodealigned_in_smp; + +/* Board specific private data structure */ +struct igc_adapter { + struct net_device *netdev; + + struct ethtool_eee eee; + u16 eee_advert; + + unsigned long state; + unsigned int flags; + unsigned int num_q_vectors; + + struct msix_entry *msix_entries; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES]; + + /* RX */ + int num_rx_queues; + struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES]; + + struct timer_list watchdog_timer; + struct timer_list dma_err_timer; + struct timer_list phy_info_timer; + struct hrtimer hrtimer; + + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + + u8 port_num; + + u8 __iomem *io_addr; + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + + struct work_struct reset_task; + struct work_struct watchdog_task; + struct work_struct dma_err_task; + bool fc_autoneg; + + u8 tx_timeout_factor; + + int msg_enable; + u32 max_frame_size; + u32 min_frame_size; + + int tc_setup_type; + ktime_t base_time; + ktime_t cycle_time; + bool taprio_offload_enable; + u32 qbv_config_change_errors; + bool qbv_transition; + unsigned int qbv_count; + + /* OS defined structs */ + struct pci_dev *pdev; + /* lock for statistics */ + spinlock_t stats64_lock; + struct rtnl_link_stats64 stats64; + + /* structs defined in igc_hw.h */ + struct igc_hw hw; + struct igc_hw_stats stats; + + struct igc_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + u16 tx_ring_count; + u16 rx_ring_count; + + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + + u32 rss_queues; + u32 rss_indir_tbl_init; + + /* Any access to elements in nfc_rule_list is protected by the + * nfc_rule_lock. + */ + struct mutex nfc_rule_lock; + struct list_head nfc_rule_list; + unsigned int nfc_rule_count; + + u8 rss_indir_tbl[IGC_RETA_SIZE]; + + unsigned long link_check_timeout; + struct igc_info ei; + + u32 test_icr; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + /* Access to ptp_tx_skb and ptp_tx_start are protected by the + * ptp_tx_lock. 
+ */ + spinlock_t ptp_tx_lock; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned int ptp_flags; + /* System time value lock */ + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */ + ktime_t ptp_reset_start; /* Reset time in clock mono */ + struct system_time_snapshot snapshot; + + char fw_version[32]; + + struct bpf_prog *xdp_prog; + + bool pps_sys_wrap_on; + + struct ptp_pin_desc sdp_config[IGC_N_SDP]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[IGC_N_PEROUT]; +}; + +void igc_up(struct igc_adapter *adapter); +void igc_down(struct igc_adapter *adapter); +int igc_open(struct net_device *netdev); +int igc_close(struct net_device *netdev); +int igc_setup_tx_resources(struct igc_ring *ring); +int igc_setup_rx_resources(struct igc_ring *ring); +void igc_free_tx_resources(struct igc_ring *ring); +void igc_free_rx_resources(struct igc_ring *ring); +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter); +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues); +int igc_reinit_queues(struct igc_adapter *adapter); +void igc_write_rss_indir_tbl(struct igc_adapter *adapter); +bool igc_has_link(struct igc_adapter *adapter); +void igc_reset(struct igc_adapter *adapter); +void igc_update_stats(struct igc_adapter *adapter); +void igc_disable_rx_ring(struct igc_ring *ring); +void igc_enable_rx_ring(struct igc_ring *ring); +void igc_disable_tx_ring(struct igc_ring *ring); +void igc_enable_tx_ring(struct igc_ring *ring); +int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); + +/* igc_dump declarations */ +void igc_rings_dump(struct igc_adapter *adapter); +void igc_regs_dump(struct igc_adapter *adapter); + +extern char igc_driver_name[]; + +#define IGC_REGS_LEN 740 + +/* flags controlling PTP/1588 function */ +#define IGC_PTP_ENABLED BIT(0) + +/* Flags definitions */ +#define IGC_FLAG_HAS_MSI BIT(0) +#define IGC_FLAG_QUEUE_PAIRS BIT(3) +#define IGC_FLAG_DMAC BIT(4) +#define IGC_FLAG_PTP BIT(8) +#define IGC_FLAG_WOL_SUPPORTED BIT(8) +#define IGC_FLAG_NEED_LINK_UPDATE BIT(9) +#define IGC_FLAG_HAS_MSIX BIT(13) +#define IGC_FLAG_EEE BIT(14) +#define IGC_FLAG_VLAN_PROMISC BIT(15) +#define IGC_FLAG_RX_LEGACY BIT(16) +#define IGC_FLAG_TSN_QBV_ENABLED BIT(17) +#define IGC_FLAG_TSN_QAV_ENABLED BIT(18) + +#define IGC_FLAG_TSN_ANY_ENABLED \ + (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED) + +#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6) +#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7) + +#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002 +#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 + +/* RX-desc Write-Back format RSS Type's */ +enum igc_rss_type_num { + IGC_RSS_TYPE_NO_HASH = 0, + IGC_RSS_TYPE_HASH_TCP_IPV4 = 1, + IGC_RSS_TYPE_HASH_IPV4 = 2, + IGC_RSS_TYPE_HASH_TCP_IPV6 = 3, + IGC_RSS_TYPE_HASH_IPV6_EX = 4, + IGC_RSS_TYPE_HASH_IPV6 = 5, + IGC_RSS_TYPE_HASH_TCP_IPV6_EX = 6, + IGC_RSS_TYPE_HASH_UDP_IPV4 = 7, + IGC_RSS_TYPE_HASH_UDP_IPV6 = 8, + IGC_RSS_TYPE_HASH_UDP_IPV6_EX = 9, + IGC_RSS_TYPE_MAX = 10, +}; +#define IGC_RSS_TYPE_MAX_TABLE 16 +#define IGC_RSS_TYPE_MASK GENMASK(3,0) /* 4-bits (3:0) = mask 0x0F */ + +/* igc_rss_type - Rx descriptor RSS type field */ +static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc) +{ + /* RSS Type 4-bits (3:0) number: 0-9 (above 9 is reserved) + * Accessing the same bits via u16 
(wb.lower.lo_dword.hs_rss.pkt_info) + * is slightly slower than via u32 (wb.lower.lo_dword.data) + */ + return le32_get_bits(rx_desc->wb.lower.lo_dword.data, IGC_RSS_TYPE_MASK); +} + +/* Interrupt defines */ +#define IGC_START_ITR 648 /* ~6000 ints/sec */ +#define IGC_4K_ITR 980 +#define IGC_20K_ITR 196 +#define IGC_70K_ITR 56 + +#define IGC_DEFAULT_ITR 3 /* dynamic */ +#define IGC_MAX_ITR_USECS 10000 +#define IGC_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_MSIX_ENTRIES 10 + +/* TX/RX descriptor defines */ +#define IGC_DEFAULT_TXD 256 +#define IGC_DEFAULT_TX_WORK 128 +#define IGC_MIN_TXD 80 +#define IGC_MAX_TXD 4096 + +#define IGC_DEFAULT_RXD 256 +#define IGC_MIN_RXD 80 +#define IGC_MAX_RXD 4096 + +/* Supported Rx Buffer Sizes */ +#define IGC_RXBUFFER_256 256 +#define IGC_RXBUFFER_2048 2048 +#define IGC_RXBUFFER_3072 3072 + +#define AUTO_ALL_MODES 0 +#define IGC_RX_HDR_LEN IGC_RXBUFFER_256 + +/* Transmit and receive latency (for PTP timestamps) */ +#define IGC_I225_TX_LATENCY_10 240 +#define IGC_I225_TX_LATENCY_100 58 +#define IGC_I225_TX_LATENCY_1000 80 +#define IGC_I225_TX_LATENCY_2500 1325 +#define IGC_I225_RX_LATENCY_10 6450 +#define IGC_I225_RX_LATENCY_100 185 +#define IGC_I225_RX_LATENCY_1000 300 +#define IGC_I225_RX_LATENCY_2500 1485 + +/* RX and TX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGC_RX_PTHRESH 8 +#define IGC_RX_HTHRESH 8 +#define IGC_TX_PTHRESH 8 +#define IGC_TX_HTHRESH 1 +#define IGC_RX_WTHRESH 4 +#define IGC_TX_WTHRESH 16 + +#define IGC_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define IGC_TS_HDR_LEN 16 + +#define IGC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) + +#if (PAGE_SIZE < 8192) +#define IGC_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN) +#else +#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN) +#endif + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +/* VLAN info */ +#define IGC_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGC_TX_FLAGS_VLAN_SHIFT 16 + +/* igc_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +enum igc_state_t { + __IGC_TESTING, + __IGC_RESETTING, + __IGC_DOWN, +}; + +enum igc_tx_flags { + /* cmd_type flags */ + IGC_TX_FLAGS_VLAN = 0x01, + IGC_TX_FLAGS_TSO = 0x02, + IGC_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGC_TX_FLAGS_IPV4 = 0x10, + IGC_TX_FLAGS_CSUM = 0x20, +}; + +enum igc_boards { + board_base, +}; + +/* The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. 
+ */ +#define IGC_MAX_TXD_PWR 15 +#define IGC_MAX_DATA_PER_TXD BIT(IGC_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +enum igc_tx_buffer_type { + IGC_TX_BUFFER_TYPE_SKB, + IGC_TX_BUFFER_TYPE_XDP, + IGC_TX_BUFFER_TYPE_XSK, +}; + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igc_tx_buffer { + union igc_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + enum igc_tx_buffer_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct igc_rx_buffer { + union { + struct { + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; + }; + struct xdp_buff *xdp; + }; +}; + +/* context wrapper around xdp_buff to provide access to descriptor metadata */ +struct igc_xdp_buff { + struct xdp_buff xdp; + union igc_adv_rx_desc *rx_desc; +}; + +struct igc_q_vector { + struct igc_adapter *adapter; /* backlink */ + void __iomem *itr_register; + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + + struct igc_ring_container rx, tx; + + struct napi_struct napi; + + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + struct net_device poll_dev; + + /* for dynamic allocation of rings associated with this q_vector */ + struct igc_ring ring[] ____cacheline_internodealigned_in_smp; +}; + +enum igc_filter_match_flags { + IGC_FILTER_FLAG_ETHER_TYPE = BIT(0), + IGC_FILTER_FLAG_VLAN_TCI = BIT(1), + IGC_FILTER_FLAG_SRC_MAC_ADDR = BIT(2), + IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3), + IGC_FILTER_FLAG_USER_DATA = BIT(4), + IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5), +}; + +struct igc_nfc_filter { + u8 match_flags; + u16 etype; + __be16 vlan_etype; + u16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; + u8 user_data[8]; + u8 user_mask[8]; + u8 flex_index; + u8 rx_queue; + u8 prio; + u8 immediate_irq; + u8 drop; +}; + +struct igc_nfc_rule { + struct list_head list; + struct igc_nfc_filter filter; + u32 location; + u16 action; + bool flex; +}; + +/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority + * based, 8 ethertype based and 32 Flex filter based rules. + */ +#define IGC_MAX_RXNFC_RULES 64 + +struct igc_flex_filter { + u8 index; + u8 data[128]; + u8 mask[16]; + u8 length; + u8 rx_queue; + u8 prio; + u8 immediate_irq; + u8 drop; +}; + +/* igc_desc_unused - calculate if we have unused descriptors */ +static inline u16 igc_desc_unused(const struct igc_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; +} + +static inline s32 igc_get_phy_info(struct igc_hw *hw) +{ + if (hw->phy.ops.get_phy_info) + return hw->phy.ops.get_phy_info(hw); + + return 0; +} + +static inline s32 igc_reset_phy(struct igc_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return 0; +} + +static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} + +enum igc_ring_flags_t { + IGC_RING_FLAG_RX_3K_BUFFER, + IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, + IGC_RING_FLAG_RX_SCTP_CSUM, + IGC_RING_FLAG_RX_LB_VLAN_BSWAP, + IGC_RING_FLAG_TX_CTX_IDX, + IGC_RING_FLAG_TX_DETECT_HANG, + IGC_RING_FLAG_AF_XDP_ZC, + IGC_RING_FLAG_TX_HWTSTAMP, +}; + +#define ring_uses_large_buffer(ring) \ + test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define set_ring_uses_large_buffer(ring) \ + set_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define clear_ring_uses_large_buffer(ring) \ + clear_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) + +#define ring_uses_build_skb(ring) \ + test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) + +static inline unsigned int igc_rx_bufsz(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return IGC_RXBUFFER_3072; + + if (ring_uses_build_skb(ring)) + return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN; +#endif + return IGC_RXBUFFER_2048; +} + +static inline unsigned int igc_rx_pg_order(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return 1; +#endif + return 0; +} + +static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return -EOPNOTSUPP; +} + +void igc_reinit_locked(struct igc_adapter *); +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location); +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); + +void igc_ptp_init(struct igc_adapter *adapter); +void igc_ptp_reset(struct igc_adapter *adapter); +void igc_ptp_suspend(struct igc_adapter *adapter); +void igc_ptp_stop(struct igc_adapter *adapter); +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf); +int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); +int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +void igc_ptp_tx_hang(struct igc_adapter *adapter); +void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts); +void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter); + +#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) + +#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS) + +#define IGC_RX_DESC(R, i) \ + (&(((union igc_adv_rx_desc *)((R)->desc))[i])) +#define IGC_TX_DESC(R, i) \ + (&(((union igc_adv_tx_desc *)((R)->desc))[i])) +#define IGC_TX_CTXTDESC(R, i) \ + (&(((struct igc_adv_tx_context_desc *)((R)->desc))[i])) + +#endif /* _IGC_H_ */ diff --git a/devices/igc/igc-6.6-ethercat.h b/devices/igc/igc-6.6-ethercat.h new file mode 100644 index 00000000..18efb14e --- /dev/null +++ b/devices/igc/igc-6.6-ethercat.h @@ -0,0 +1,719 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_H_ +#define _IGC_H_ +#include "../ecdev.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + 
+#include "igc_hw-6.6-ethercat.h" + +void igc_ethtool_set_ops(struct net_device *); + +/* Transmit and receive queues */ +#define IGC_MAX_RX_QUEUES 4 +#define IGC_MAX_TX_QUEUES 4 + +#define MAX_Q_VECTORS 8 +#define MAX_STD_JUMBO_FRAME_SIZE 9216 + +#define MAX_ETYPE_FILTER 8 +#define IGC_RETA_SIZE 128 + +/* SDP support */ +#define IGC_N_EXTTS 2 +#define IGC_N_PEROUT 2 +#define IGC_N_SDP 4 + +#define MAX_FLEX_FILTER 32 + +#define IGC_MAX_TX_TSTAMP_REGS 4 + +enum igc_mac_filter_type { + IGC_MAC_FILTER_TYPE_DST = 0, + IGC_MAC_FILTER_TYPE_SRC +}; + +struct igc_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igc_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igc_rx_packet_stats { + u64 ipv4_packets; /* IPv4 headers processed */ + u64 ipv4e_packets; /* IPv4E headers with extensions processed */ + u64 ipv6_packets; /* IPv6 headers processed */ + u64 ipv6e_packets; /* IPv6E headers with extensions processed */ + u64 tcp_packets; /* TCP headers processed */ + u64 udp_packets; /* UDP headers processed */ + u64 sctp_packets; /* SCTP headers processed */ + u64 nfs_packets; /* NFS headers processe */ + u64 other_packets; +}; + +struct igc_tx_timestamp_request { + struct sk_buff *skb; /* reference to the packet being timestamped */ + unsigned long start; /* when the tstamp request started (jiffies) */ + u32 mask; /* _TSYNCTXCTL_TXTT_{X} bit for this request */ + u32 regl; /* which TXSTMPL_{X} register should be used */ + u32 regh; /* which TXSTMPH_{X} register should be used */ + u32 flags; /* flags that should be added to the tx_buffer */ +}; + +struct igc_ring_container { + struct igc_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct igc_ring { + struct igc_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct device *dev; /* device for dma mapping */ + union { /* array of buffer info structs */ + struct igc_tx_buffer *tx_buffer_info; + struct igc_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ + ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */ + ktime_t last_ff_cycle; /* Last cycle with an active first flag */ + + u32 start_time; + u32 end_time; + u32 max_sdu; + bool oper_gate_closed; /* Operating gate. True if the TX Queue is closed */ + bool admin_gate_closed; /* Future gate. 
True if the TX Queue will be closed */ + + /* CBS parameters */ + bool cbs_enable; /* indicates if CBS is enabled */ + s32 idleslope; /* idleSlope in kbps */ + s32 sendslope; /* sendSlope in kbps */ + s32 hicredit; /* hiCredit in bytes */ + s32 locredit; /* loCredit in bytes */ + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igc_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + /* RX */ + struct { + struct igc_rx_queue_stats rx_stats; + struct igc_rx_packet_stats pkt_stats; + struct u64_stats_sync rx_syncp; + struct sk_buff *skb; + }; + }; + + struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; +} ____cacheline_internodealigned_in_smp; + +/* Board specific private data structure */ +struct igc_adapter { + struct net_device *netdev; + + struct ethtool_eee eee; + u16 eee_advert; + + unsigned long state; + unsigned int flags; + unsigned int num_q_vectors; + + struct msix_entry *msix_entries; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES]; + + /* RX */ + int num_rx_queues; + struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES]; + + struct timer_list watchdog_timer; + struct timer_list dma_err_timer; + struct timer_list phy_info_timer; + struct hrtimer hrtimer; + + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + + u8 port_num; + + u8 __iomem *io_addr; + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + + struct work_struct reset_task; + struct work_struct watchdog_task; + struct work_struct dma_err_task; + bool fc_autoneg; + + u8 tx_timeout_factor; + + int msg_enable; + u32 max_frame_size; + u32 min_frame_size; + + int tc_setup_type; + ktime_t base_time; + ktime_t cycle_time; + bool taprio_offload_enable; + u32 qbv_config_change_errors; + bool qbv_transition; + unsigned int qbv_count; + /* Access to oper_gate_closed, admin_gate_closed and qbv_transition + * are protected by the qbv_tx_lock. + */ + spinlock_t qbv_tx_lock; + + /* OS defined structs */ + struct pci_dev *pdev; + /* lock for statistics */ + spinlock_t stats64_lock; + struct rtnl_link_stats64 stats64; + + /* structs defined in igc_hw.h */ + struct igc_hw hw; + struct igc_hw_stats stats; + + struct igc_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + u16 tx_ring_count; + u16 rx_ring_count; + + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + + u32 rss_queues; + u32 rss_indir_tbl_init; + + /* Any access to elements in nfc_rule_list is protected by the + * nfc_rule_lock. + */ + struct mutex nfc_rule_lock; + struct list_head nfc_rule_list; + unsigned int nfc_rule_count; + + u8 rss_indir_tbl[IGC_RETA_SIZE]; + + unsigned long link_check_timeout; + struct igc_info ei; + + u32 test_icr; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + /* Access to ptp_tx_skb and ptp_tx_start are protected by the + * ptp_tx_lock. 
+ */ + spinlock_t ptp_tx_lock; + struct igc_tx_timestamp_request tx_tstamp[IGC_MAX_TX_TSTAMP_REGS]; + struct hwtstamp_config tstamp_config; + unsigned int ptp_flags; + /* System time value lock */ + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */ + ktime_t ptp_reset_start; /* Reset time in clock mono */ + struct system_time_snapshot snapshot; + + char fw_version[32]; + + struct bpf_prog *xdp_prog; + + bool pps_sys_wrap_on; + + struct ptp_pin_desc sdp_config[IGC_N_SDP]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[IGC_N_PEROUT]; + + /* EtherCAT device variables */ + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +void igc_up(struct igc_adapter *adapter); +void igc_down(struct igc_adapter *adapter); +int igc_open(struct net_device *netdev); +int igc_close(struct net_device *netdev); +int igc_setup_tx_resources(struct igc_ring *ring); +int igc_setup_rx_resources(struct igc_ring *ring); +void igc_free_tx_resources(struct igc_ring *ring); +void igc_free_rx_resources(struct igc_ring *ring); +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter); +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues); +int igc_reinit_queues(struct igc_adapter *adapter); +void igc_write_rss_indir_tbl(struct igc_adapter *adapter); +bool igc_has_link(struct igc_adapter *adapter); +void igc_reset(struct igc_adapter *adapter); +void igc_update_stats(struct igc_adapter *adapter); +void igc_disable_rx_ring(struct igc_ring *ring); +void igc_enable_rx_ring(struct igc_ring *ring); +void igc_disable_tx_ring(struct igc_ring *ring); +void igc_enable_tx_ring(struct igc_ring *ring); +int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); + +/* igc_dump declarations */ +void igc_rings_dump(struct igc_adapter *adapter); +void igc_regs_dump(struct igc_adapter *adapter); + +extern char igc_driver_name[]; + +#define IGC_REGS_LEN 740 + +/* flags controlling PTP/1588 function */ +#define IGC_PTP_ENABLED BIT(0) + +/* Flags definitions */ +#define IGC_FLAG_HAS_MSI BIT(0) +#define IGC_FLAG_QUEUE_PAIRS BIT(3) +#define IGC_FLAG_DMAC BIT(4) +#define IGC_FLAG_PTP BIT(8) +#define IGC_FLAG_WOL_SUPPORTED BIT(8) +#define IGC_FLAG_NEED_LINK_UPDATE BIT(9) +#define IGC_FLAG_HAS_MSIX BIT(13) +#define IGC_FLAG_EEE BIT(14) +#define IGC_FLAG_VLAN_PROMISC BIT(15) +#define IGC_FLAG_RX_LEGACY BIT(16) +#define IGC_FLAG_TSN_QBV_ENABLED BIT(17) +#define IGC_FLAG_TSN_QAV_ENABLED BIT(18) + +#define IGC_FLAG_TSN_ANY_ENABLED \ + (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED) + +#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6) +#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7) + +#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002 +#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 + +/* RX-desc Write-Back format RSS Type's */ +enum igc_rss_type_num { + IGC_RSS_TYPE_NO_HASH = 0, + IGC_RSS_TYPE_HASH_TCP_IPV4 = 1, + IGC_RSS_TYPE_HASH_IPV4 = 2, + IGC_RSS_TYPE_HASH_TCP_IPV6 = 3, + IGC_RSS_TYPE_HASH_IPV6_EX = 4, + IGC_RSS_TYPE_HASH_IPV6 = 5, + IGC_RSS_TYPE_HASH_TCP_IPV6_EX = 6, + IGC_RSS_TYPE_HASH_UDP_IPV4 = 7, + IGC_RSS_TYPE_HASH_UDP_IPV6 = 8, + IGC_RSS_TYPE_HASH_UDP_IPV6_EX = 9, + IGC_RSS_TYPE_MAX = 10, +}; +#define IGC_RSS_TYPE_MAX_TABLE 16 +#define IGC_RSS_TYPE_MASK GENMASK(3,0) /* 4-bits (3:0) = mask 0x0F */ + +/* igc_rss_type - Rx descriptor RSS type field */ +static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc) +{ + /* RSS Type 
4-bits (3:0) number: 0-9 (above 9 is reserved) + * Accessing the same bits via u16 (wb.lower.lo_dword.hs_rss.pkt_info) + * is slightly slower than via u32 (wb.lower.lo_dword.data) + */ + return le32_get_bits(rx_desc->wb.lower.lo_dword.data, IGC_RSS_TYPE_MASK); +} + +/* Interrupt defines */ +#define IGC_START_ITR 648 /* ~6000 ints/sec */ +#define IGC_4K_ITR 980 +#define IGC_20K_ITR 196 +#define IGC_70K_ITR 56 + +#define IGC_DEFAULT_ITR 3 /* dynamic */ +#define IGC_MAX_ITR_USECS 10000 +#define IGC_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_MSIX_ENTRIES 10 + +/* TX/RX descriptor defines */ +#define IGC_DEFAULT_TXD 256 +#define IGC_DEFAULT_TX_WORK 128 +#define IGC_MIN_TXD 64 +#define IGC_MAX_TXD 4096 + +#define IGC_DEFAULT_RXD 256 +#define IGC_MIN_RXD 64 +#define IGC_MAX_RXD 4096 + +/* Supported Rx Buffer Sizes */ +#define IGC_RXBUFFER_256 256 +#define IGC_RXBUFFER_2048 2048 +#define IGC_RXBUFFER_3072 3072 + +#define AUTO_ALL_MODES 0 +#define IGC_RX_HDR_LEN IGC_RXBUFFER_256 + +/* Transmit and receive latency (for PTP timestamps) */ +#define IGC_I225_TX_LATENCY_10 240 +#define IGC_I225_TX_LATENCY_100 58 +#define IGC_I225_TX_LATENCY_1000 80 +#define IGC_I225_TX_LATENCY_2500 1325 +#define IGC_I225_RX_LATENCY_10 6450 +#define IGC_I225_RX_LATENCY_100 185 +#define IGC_I225_RX_LATENCY_1000 300 +#define IGC_I225_RX_LATENCY_2500 1485 + +/* RX and TX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGC_RX_PTHRESH 8 +#define IGC_RX_HTHRESH 8 +#define IGC_TX_PTHRESH 8 +#define IGC_TX_HTHRESH 1 +#define IGC_RX_WTHRESH 4 +#define IGC_TX_WTHRESH 16 + +#define IGC_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define IGC_TS_HDR_LEN 16 + +#define IGC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) + +#if (PAGE_SIZE < 8192) +#define IGC_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN) +#else +#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN) +#endif + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +/* VLAN info */ +#define IGC_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGC_TX_FLAGS_VLAN_SHIFT 16 + +/* igc_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +enum igc_state_t { + __IGC_TESTING, + __IGC_RESETTING, + __IGC_DOWN, +}; + +enum igc_tx_flags { + /* cmd_type flags */ + IGC_TX_FLAGS_VLAN = 0x01, + IGC_TX_FLAGS_TSO = 0x02, + IGC_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGC_TX_FLAGS_IPV4 = 0x10, + IGC_TX_FLAGS_CSUM = 0x20, + + IGC_TX_FLAGS_TSTAMP_1 = 0x100, + IGC_TX_FLAGS_TSTAMP_2 = 0x200, + IGC_TX_FLAGS_TSTAMP_3 = 0x400, +}; + +enum igc_boards { + board_base, +}; + +/* The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. 
+ */ +#define IGC_MAX_TXD_PWR 15 +#define IGC_MAX_DATA_PER_TXD BIT(IGC_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +enum igc_tx_buffer_type { + IGC_TX_BUFFER_TYPE_SKB, + IGC_TX_BUFFER_TYPE_XDP, + IGC_TX_BUFFER_TYPE_XSK, +}; + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igc_tx_buffer { + union igc_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + enum igc_tx_buffer_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct igc_rx_buffer { + union { + struct { + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; + }; + struct xdp_buff *xdp; + }; +}; + +/* context wrapper around xdp_buff to provide access to descriptor metadata */ +struct igc_xdp_buff { + struct xdp_buff xdp; + union igc_adv_rx_desc *rx_desc; + ktime_t rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */ +}; + +struct igc_q_vector { + struct igc_adapter *adapter; /* backlink */ + void __iomem *itr_register; + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + + struct igc_ring_container rx, tx; + + struct napi_struct napi; + + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + struct net_device poll_dev; + + /* for dynamic allocation of rings associated with this q_vector */ + struct igc_ring ring[] ____cacheline_internodealigned_in_smp; +}; + +enum igc_filter_match_flags { + IGC_FILTER_FLAG_ETHER_TYPE = BIT(0), + IGC_FILTER_FLAG_VLAN_TCI = BIT(1), + IGC_FILTER_FLAG_SRC_MAC_ADDR = BIT(2), + IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3), + IGC_FILTER_FLAG_USER_DATA = BIT(4), + IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5), +}; + +struct igc_nfc_filter { + u8 match_flags; + u16 etype; + __be16 vlan_etype; + u16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; + u8 user_data[8]; + u8 user_mask[8]; + u8 flex_index; + u8 rx_queue; + u8 prio; + u8 immediate_irq; + u8 drop; +}; + +struct igc_nfc_rule { + struct list_head list; + struct igc_nfc_filter filter; + u32 location; + u16 action; + bool flex; +}; + +/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority + * based, 8 ethertype based and 32 Flex filter based rules. + */ +#define IGC_MAX_RXNFC_RULES 64 + +struct igc_flex_filter { + u8 index; + u8 data[128]; + u8 mask[16]; + u8 length; + u8 rx_queue; + u8 prio; + u8 immediate_irq; + u8 drop; +}; + +/* igc_desc_unused - calculate if we have unused descriptors */ +static inline u16 igc_desc_unused(const struct igc_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; +} + +static inline s32 igc_get_phy_info(struct igc_hw *hw) +{ + if (hw->phy.ops.get_phy_info) + return hw->phy.ops.get_phy_info(hw); + + return 0; +} + +static inline s32 igc_reset_phy(struct igc_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return 0; +} + +static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} + +enum igc_ring_flags_t { + IGC_RING_FLAG_RX_3K_BUFFER, + IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, + IGC_RING_FLAG_RX_SCTP_CSUM, + IGC_RING_FLAG_RX_LB_VLAN_BSWAP, + IGC_RING_FLAG_TX_CTX_IDX, + IGC_RING_FLAG_TX_DETECT_HANG, + IGC_RING_FLAG_AF_XDP_ZC, + IGC_RING_FLAG_TX_HWTSTAMP, +}; + +#define ring_uses_large_buffer(ring) \ + test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define set_ring_uses_large_buffer(ring) \ + set_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define clear_ring_uses_large_buffer(ring) \ + clear_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) + +#define ring_uses_build_skb(ring) \ + test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) + +static inline unsigned int igc_rx_bufsz(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return IGC_RXBUFFER_3072; + + if (ring_uses_build_skb(ring)) + return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN; +#endif + return IGC_RXBUFFER_2048; +} + +static inline unsigned int igc_rx_pg_order(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return 1; +#endif + return 0; +} + +static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return -EOPNOTSUPP; +} + +void igc_reinit_locked(struct igc_adapter *); +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location); +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); + +void igc_ptp_init(struct igc_adapter *adapter); +void igc_ptp_reset(struct igc_adapter *adapter); +void igc_ptp_suspend(struct igc_adapter *adapter); +void igc_ptp_stop(struct igc_adapter *adapter); +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf); +int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); +int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +void igc_ptp_tx_hang(struct igc_adapter *adapter); +void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts); +void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter); + +#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) + +#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS) + +#define IGC_RX_DESC(R, i) \ + (&(((union igc_adv_rx_desc *)((R)->desc))[i])) +#define IGC_TX_DESC(R, i) \ + (&(((union igc_adv_tx_desc *)((R)->desc))[i])) +#define IGC_TX_CTXTDESC(R, i) \ + (&(((struct igc_adv_tx_context_desc *)((R)->desc))[i])) + +#endif /* _IGC_H_ */ diff --git a/devices/igc/igc-6.6-orig.h b/devices/igc/igc-6.6-orig.h new file mode 100644 index 00000000..f48f82d5 --- /dev/null +++ b/devices/igc/igc-6.6-orig.h @@ -0,0 +1,714 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_H_ +#define _IGC_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "igc_hw.h" + +void 
igc_ethtool_set_ops(struct net_device *); + +/* Transmit and receive queues */ +#define IGC_MAX_RX_QUEUES 4 +#define IGC_MAX_TX_QUEUES 4 + +#define MAX_Q_VECTORS 8 +#define MAX_STD_JUMBO_FRAME_SIZE 9216 + +#define MAX_ETYPE_FILTER 8 +#define IGC_RETA_SIZE 128 + +/* SDP support */ +#define IGC_N_EXTTS 2 +#define IGC_N_PEROUT 2 +#define IGC_N_SDP 4 + +#define MAX_FLEX_FILTER 32 + +#define IGC_MAX_TX_TSTAMP_REGS 4 + +enum igc_mac_filter_type { + IGC_MAC_FILTER_TYPE_DST = 0, + IGC_MAC_FILTER_TYPE_SRC +}; + +struct igc_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igc_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igc_rx_packet_stats { + u64 ipv4_packets; /* IPv4 headers processed */ + u64 ipv4e_packets; /* IPv4E headers with extensions processed */ + u64 ipv6_packets; /* IPv6 headers processed */ + u64 ipv6e_packets; /* IPv6E headers with extensions processed */ + u64 tcp_packets; /* TCP headers processed */ + u64 udp_packets; /* UDP headers processed */ + u64 sctp_packets; /* SCTP headers processed */ + u64 nfs_packets; /* NFS headers processe */ + u64 other_packets; +}; + +struct igc_tx_timestamp_request { + struct sk_buff *skb; /* reference to the packet being timestamped */ + unsigned long start; /* when the tstamp request started (jiffies) */ + u32 mask; /* _TSYNCTXCTL_TXTT_{X} bit for this request */ + u32 regl; /* which TXSTMPL_{X} register should be used */ + u32 regh; /* which TXSTMPH_{X} register should be used */ + u32 flags; /* flags that should be added to the tx_buffer */ +}; + +struct igc_ring_container { + struct igc_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct igc_ring { + struct igc_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct device *dev; /* device for dma mapping */ + union { /* array of buffer info structs */ + struct igc_tx_buffer *tx_buffer_info; + struct igc_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ + ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */ + ktime_t last_ff_cycle; /* Last cycle with an active first flag */ + + u32 start_time; + u32 end_time; + u32 max_sdu; + bool oper_gate_closed; /* Operating gate. True if the TX Queue is closed */ + bool admin_gate_closed; /* Future gate. 
True if the TX Queue will be closed */ + + /* CBS parameters */ + bool cbs_enable; /* indicates if CBS is enabled */ + s32 idleslope; /* idleSlope in kbps */ + s32 sendslope; /* sendSlope in kbps */ + s32 hicredit; /* hiCredit in bytes */ + s32 locredit; /* loCredit in bytes */ + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igc_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + /* RX */ + struct { + struct igc_rx_queue_stats rx_stats; + struct igc_rx_packet_stats pkt_stats; + struct u64_stats_sync rx_syncp; + struct sk_buff *skb; + }; + }; + + struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; +} ____cacheline_internodealigned_in_smp; + +/* Board specific private data structure */ +struct igc_adapter { + struct net_device *netdev; + + struct ethtool_eee eee; + u16 eee_advert; + + unsigned long state; + unsigned int flags; + unsigned int num_q_vectors; + + struct msix_entry *msix_entries; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES]; + + /* RX */ + int num_rx_queues; + struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES]; + + struct timer_list watchdog_timer; + struct timer_list dma_err_timer; + struct timer_list phy_info_timer; + struct hrtimer hrtimer; + + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + + u8 port_num; + + u8 __iomem *io_addr; + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + + struct work_struct reset_task; + struct work_struct watchdog_task; + struct work_struct dma_err_task; + bool fc_autoneg; + + u8 tx_timeout_factor; + + int msg_enable; + u32 max_frame_size; + u32 min_frame_size; + + int tc_setup_type; + ktime_t base_time; + ktime_t cycle_time; + bool taprio_offload_enable; + u32 qbv_config_change_errors; + bool qbv_transition; + unsigned int qbv_count; + /* Access to oper_gate_closed, admin_gate_closed and qbv_transition + * are protected by the qbv_tx_lock. + */ + spinlock_t qbv_tx_lock; + + /* OS defined structs */ + struct pci_dev *pdev; + /* lock for statistics */ + spinlock_t stats64_lock; + struct rtnl_link_stats64 stats64; + + /* structs defined in igc_hw.h */ + struct igc_hw hw; + struct igc_hw_stats stats; + + struct igc_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + u16 tx_ring_count; + u16 rx_ring_count; + + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; + + u32 rss_queues; + u32 rss_indir_tbl_init; + + /* Any access to elements in nfc_rule_list is protected by the + * nfc_rule_lock. + */ + struct mutex nfc_rule_lock; + struct list_head nfc_rule_list; + unsigned int nfc_rule_count; + + u8 rss_indir_tbl[IGC_RETA_SIZE]; + + unsigned long link_check_timeout; + struct igc_info ei; + + u32 test_icr; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + /* Access to ptp_tx_skb and ptp_tx_start are protected by the + * ptp_tx_lock. 
+ */ + spinlock_t ptp_tx_lock; + struct igc_tx_timestamp_request tx_tstamp[IGC_MAX_TX_TSTAMP_REGS]; + struct hwtstamp_config tstamp_config; + unsigned int ptp_flags; + /* System time value lock */ + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */ + ktime_t ptp_reset_start; /* Reset time in clock mono */ + struct system_time_snapshot snapshot; + + char fw_version[32]; + + struct bpf_prog *xdp_prog; + + bool pps_sys_wrap_on; + + struct ptp_pin_desc sdp_config[IGC_N_SDP]; + struct { + struct timespec64 start; + struct timespec64 period; + } perout[IGC_N_PEROUT]; +}; + +void igc_up(struct igc_adapter *adapter); +void igc_down(struct igc_adapter *adapter); +int igc_open(struct net_device *netdev); +int igc_close(struct net_device *netdev); +int igc_setup_tx_resources(struct igc_ring *ring); +int igc_setup_rx_resources(struct igc_ring *ring); +void igc_free_tx_resources(struct igc_ring *ring); +void igc_free_rx_resources(struct igc_ring *ring); +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter); +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues); +int igc_reinit_queues(struct igc_adapter *adapter); +void igc_write_rss_indir_tbl(struct igc_adapter *adapter); +bool igc_has_link(struct igc_adapter *adapter); +void igc_reset(struct igc_adapter *adapter); +void igc_update_stats(struct igc_adapter *adapter); +void igc_disable_rx_ring(struct igc_ring *ring); +void igc_enable_rx_ring(struct igc_ring *ring); +void igc_disable_tx_ring(struct igc_ring *ring); +void igc_enable_tx_ring(struct igc_ring *ring); +int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); + +/* igc_dump declarations */ +void igc_rings_dump(struct igc_adapter *adapter); +void igc_regs_dump(struct igc_adapter *adapter); + +extern char igc_driver_name[]; + +#define IGC_REGS_LEN 740 + +/* flags controlling PTP/1588 function */ +#define IGC_PTP_ENABLED BIT(0) + +/* Flags definitions */ +#define IGC_FLAG_HAS_MSI BIT(0) +#define IGC_FLAG_QUEUE_PAIRS BIT(3) +#define IGC_FLAG_DMAC BIT(4) +#define IGC_FLAG_PTP BIT(8) +#define IGC_FLAG_WOL_SUPPORTED BIT(8) +#define IGC_FLAG_NEED_LINK_UPDATE BIT(9) +#define IGC_FLAG_HAS_MSIX BIT(13) +#define IGC_FLAG_EEE BIT(14) +#define IGC_FLAG_VLAN_PROMISC BIT(15) +#define IGC_FLAG_RX_LEGACY BIT(16) +#define IGC_FLAG_TSN_QBV_ENABLED BIT(17) +#define IGC_FLAG_TSN_QAV_ENABLED BIT(18) + +#define IGC_FLAG_TSN_ANY_ENABLED \ + (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED) + +#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6) +#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7) + +#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002 +#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 + +/* RX-desc Write-Back format RSS Type's */ +enum igc_rss_type_num { + IGC_RSS_TYPE_NO_HASH = 0, + IGC_RSS_TYPE_HASH_TCP_IPV4 = 1, + IGC_RSS_TYPE_HASH_IPV4 = 2, + IGC_RSS_TYPE_HASH_TCP_IPV6 = 3, + IGC_RSS_TYPE_HASH_IPV6_EX = 4, + IGC_RSS_TYPE_HASH_IPV6 = 5, + IGC_RSS_TYPE_HASH_TCP_IPV6_EX = 6, + IGC_RSS_TYPE_HASH_UDP_IPV4 = 7, + IGC_RSS_TYPE_HASH_UDP_IPV6 = 8, + IGC_RSS_TYPE_HASH_UDP_IPV6_EX = 9, + IGC_RSS_TYPE_MAX = 10, +}; +#define IGC_RSS_TYPE_MAX_TABLE 16 +#define IGC_RSS_TYPE_MASK GENMASK(3,0) /* 4-bits (3:0) = mask 0x0F */ + +/* igc_rss_type - Rx descriptor RSS type field */ +static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc) +{ + /* RSS Type 4-bits (3:0) number: 0-9 (above 9 is reserved) + * Accessing the same bits via u16 
(wb.lower.lo_dword.hs_rss.pkt_info) + * is slightly slower than via u32 (wb.lower.lo_dword.data) + */ + return le32_get_bits(rx_desc->wb.lower.lo_dword.data, IGC_RSS_TYPE_MASK); +} + +/* Interrupt defines */ +#define IGC_START_ITR 648 /* ~6000 ints/sec */ +#define IGC_4K_ITR 980 +#define IGC_20K_ITR 196 +#define IGC_70K_ITR 56 + +#define IGC_DEFAULT_ITR 3 /* dynamic */ +#define IGC_MAX_ITR_USECS 10000 +#define IGC_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_MSIX_ENTRIES 10 + +/* TX/RX descriptor defines */ +#define IGC_DEFAULT_TXD 256 +#define IGC_DEFAULT_TX_WORK 128 +#define IGC_MIN_TXD 64 +#define IGC_MAX_TXD 4096 + +#define IGC_DEFAULT_RXD 256 +#define IGC_MIN_RXD 64 +#define IGC_MAX_RXD 4096 + +/* Supported Rx Buffer Sizes */ +#define IGC_RXBUFFER_256 256 +#define IGC_RXBUFFER_2048 2048 +#define IGC_RXBUFFER_3072 3072 + +#define AUTO_ALL_MODES 0 +#define IGC_RX_HDR_LEN IGC_RXBUFFER_256 + +/* Transmit and receive latency (for PTP timestamps) */ +#define IGC_I225_TX_LATENCY_10 240 +#define IGC_I225_TX_LATENCY_100 58 +#define IGC_I225_TX_LATENCY_1000 80 +#define IGC_I225_TX_LATENCY_2500 1325 +#define IGC_I225_RX_LATENCY_10 6450 +#define IGC_I225_RX_LATENCY_100 185 +#define IGC_I225_RX_LATENCY_1000 300 +#define IGC_I225_RX_LATENCY_2500 1485 + +/* RX and TX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGC_RX_PTHRESH 8 +#define IGC_RX_HTHRESH 8 +#define IGC_TX_PTHRESH 8 +#define IGC_TX_HTHRESH 1 +#define IGC_RX_WTHRESH 4 +#define IGC_TX_WTHRESH 16 + +#define IGC_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define IGC_TS_HDR_LEN 16 + +#define IGC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) + +#if (PAGE_SIZE < 8192) +#define IGC_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN) +#else +#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN) +#endif + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +/* VLAN info */ +#define IGC_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGC_TX_FLAGS_VLAN_SHIFT 16 + +/* igc_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +enum igc_state_t { + __IGC_TESTING, + __IGC_RESETTING, + __IGC_DOWN, +}; + +enum igc_tx_flags { + /* cmd_type flags */ + IGC_TX_FLAGS_VLAN = 0x01, + IGC_TX_FLAGS_TSO = 0x02, + IGC_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGC_TX_FLAGS_IPV4 = 0x10, + IGC_TX_FLAGS_CSUM = 0x20, + + IGC_TX_FLAGS_TSTAMP_1 = 0x100, + IGC_TX_FLAGS_TSTAMP_2 = 0x200, + IGC_TX_FLAGS_TSTAMP_3 = 0x400, +}; + +enum igc_boards { + board_base, +}; + +/* The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. 
+ */ +#define IGC_MAX_TXD_PWR 15 +#define IGC_MAX_DATA_PER_TXD BIT(IGC_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +enum igc_tx_buffer_type { + IGC_TX_BUFFER_TYPE_SKB, + IGC_TX_BUFFER_TYPE_XDP, + IGC_TX_BUFFER_TYPE_XSK, +}; + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igc_tx_buffer { + union igc_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + enum igc_tx_buffer_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct igc_rx_buffer { + union { + struct { + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; + }; + struct xdp_buff *xdp; + }; +}; + +/* context wrapper around xdp_buff to provide access to descriptor metadata */ +struct igc_xdp_buff { + struct xdp_buff xdp; + union igc_adv_rx_desc *rx_desc; + ktime_t rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */ +}; + +struct igc_q_vector { + struct igc_adapter *adapter; /* backlink */ + void __iomem *itr_register; + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + + struct igc_ring_container rx, tx; + + struct napi_struct napi; + + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + struct net_device poll_dev; + + /* for dynamic allocation of rings associated with this q_vector */ + struct igc_ring ring[] ____cacheline_internodealigned_in_smp; +}; + +enum igc_filter_match_flags { + IGC_FILTER_FLAG_ETHER_TYPE = BIT(0), + IGC_FILTER_FLAG_VLAN_TCI = BIT(1), + IGC_FILTER_FLAG_SRC_MAC_ADDR = BIT(2), + IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3), + IGC_FILTER_FLAG_USER_DATA = BIT(4), + IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5), +}; + +struct igc_nfc_filter { + u8 match_flags; + u16 etype; + __be16 vlan_etype; + u16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; + u8 user_data[8]; + u8 user_mask[8]; + u8 flex_index; + u8 rx_queue; + u8 prio; + u8 immediate_irq; + u8 drop; +}; + +struct igc_nfc_rule { + struct list_head list; + struct igc_nfc_filter filter; + u32 location; + u16 action; + bool flex; +}; + +/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority + * based, 8 ethertype based and 32 Flex filter based rules. + */ +#define IGC_MAX_RXNFC_RULES 64 + +struct igc_flex_filter { + u8 index; + u8 data[128]; + u8 mask[16]; + u8 length; + u8 rx_queue; + u8 prio; + u8 immediate_irq; + u8 drop; +}; + +/* igc_desc_unused - calculate if we have unused descriptors */ +static inline u16 igc_desc_unused(const struct igc_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; +} + +static inline s32 igc_get_phy_info(struct igc_hw *hw) +{ + if (hw->phy.ops.get_phy_info) + return hw->phy.ops.get_phy_info(hw); + + return 0; +} + +static inline s32 igc_reset_phy(struct igc_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return 0; +} + +static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} + +enum igc_ring_flags_t { + IGC_RING_FLAG_RX_3K_BUFFER, + IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, + IGC_RING_FLAG_RX_SCTP_CSUM, + IGC_RING_FLAG_RX_LB_VLAN_BSWAP, + IGC_RING_FLAG_TX_CTX_IDX, + IGC_RING_FLAG_TX_DETECT_HANG, + IGC_RING_FLAG_AF_XDP_ZC, + IGC_RING_FLAG_TX_HWTSTAMP, +}; + +#define ring_uses_large_buffer(ring) \ + test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define set_ring_uses_large_buffer(ring) \ + set_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) +#define clear_ring_uses_large_buffer(ring) \ + clear_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags) + +#define ring_uses_build_skb(ring) \ + test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) + +static inline unsigned int igc_rx_bufsz(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return IGC_RXBUFFER_3072; + + if (ring_uses_build_skb(ring)) + return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN; +#endif + return IGC_RXBUFFER_2048; +} + +static inline unsigned int igc_rx_pg_order(struct igc_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring_uses_large_buffer(ring)) + return 1; +#endif + return 0; +} + +static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return -EOPNOTSUPP; +} + +void igc_reinit_locked(struct igc_adapter *); +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location); +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); + +void igc_ptp_init(struct igc_adapter *adapter); +void igc_ptp_reset(struct igc_adapter *adapter); +void igc_ptp_suspend(struct igc_adapter *adapter); +void igc_ptp_stop(struct igc_adapter *adapter); +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf); +int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); +int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +void igc_ptp_tx_hang(struct igc_adapter *adapter); +void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts); +void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter); + +#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) + +#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS) + +#define IGC_RX_DESC(R, i) \ + (&(((union igc_adv_rx_desc *)((R)->desc))[i])) +#define IGC_TX_DESC(R, i) \ + (&(((union igc_adv_tx_desc *)((R)->desc))[i])) +#define IGC_TX_CTXTDESC(R, i) \ + (&(((struct igc_adv_tx_context_desc *)((R)->desc))[i])) + +#endif /* _IGC_H_ */ diff --git a/devices/igc/igc_base-6.4-ethercat.c b/devices/igc/igc_base-6.4-ethercat.c new file mode 100644 index 00000000..c8a84787 --- /dev/null +++ b/devices/igc/igc_base-6.4-ethercat.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include + +#include "igc_hw-6.4-ethercat.h" +#include "igc_i225-6.4-ethercat.h" +#include "igc_mac-6.4-ethercat.h" +#include "igc_base-6.4-ethercat.h" +#include 
"igc-6.4-ethercat.h" + +/** + * igc_reset_hw_base - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_reset_hw_base(struct igc_hw *hw) +{ + s32 ret_val; + u32 ctrl; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igc_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(IGC_IMC, 0xffffffff); + + wr32(IGC_RCTL, 0); + wr32(IGC_TCTL, IGC_TCTL_PSP); + wrfl(); + + usleep_range(10000, 20000); + + ctrl = rd32(IGC_CTRL); + + hw_dbg("Issuing a global reset to MAC\n"); + wr32(IGC_CTRL, ctrl | IGC_CTRL_RST); + + ret_val = igc_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* Clear any pending interrupt events. */ + wr32(IGC_IMC, 0xffffffff); + rd32(IGC_ICR); + + return ret_val; +} + +/** + * igc_init_nvm_params_base - Init NVM func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_nvm_params_base(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(IGC_EECD); + u16 size; + + size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >> + IGC_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->type = igc_nvm_eeprom_spi; + nvm->word_size = BIT(size); + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ? + 16 : 8; + + if (nvm->word_size == BIT(15)) + nvm->page_size = 128; + + return 0; +} + +/** + * igc_setup_copper_link_base - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + */ +static s32 igc_setup_copper_link_base(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_SLU; + ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX); + wr32(IGC_CTRL, ctrl); + + ret_val = igc_setup_copper_link(hw); + + return ret_val; +} + +/** + * igc_init_mac_params_base - Init MAC func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_mac_params_base(struct igc_hw *hw) +{ + struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base; + struct igc_mac_info *mac = &hw->mac; + + /* Set mta register count */ + mac->mta_reg_count = 128; + mac->rar_entry_count = IGC_RAR_ENTRIES; + + /* reset */ + mac->ops.reset_hw = igc_reset_hw_base; + + mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225; + mac->ops.release_swfw_sync = igc_release_swfw_sync_i225; + + /* Allow a single clear of the SW semaphore on I225 */ + if (mac->type == igc_i225) + dev_spec->clear_semaphore_once = true; + + /* physical interface link setup */ + mac->ops.setup_physical_interface = igc_setup_copper_link_base; + + return 0; +} + +/** + * igc_init_phy_params_base - Init PHY func ptrs. 
+ * @hw: pointer to the HW structure + */ +static s32 igc_init_phy_params_base(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500; + phy->reset_delay_us = 100; + + /* set lan id */ + hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >> + IGC_STATUS_FUNC_SHIFT; + + /* Make sure the PHY is in a good state. Several people have reported + * firmware leaving the PHY's page select register set to something + * other than the default of zero, which causes the PHY ID read to + * access something other than the intended register. + */ + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY\n"); + goto out; + } + + ret_val = igc_get_phy_id(hw); + if (ret_val) + return ret_val; + + igc_check_for_copper_link(hw); + +out: + return ret_val; +} + +static s32 igc_get_invariants_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + s32 ret_val = 0; + + switch (hw->device_id) { + case IGC_DEV_ID_I225_LM: + case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I220_V: + case IGC_DEV_ID_I225_K: + case IGC_DEV_ID_I225_K2: + case IGC_DEV_ID_I226_K: + case IGC_DEV_ID_I225_LMVP: + case IGC_DEV_ID_I226_LMVP: + case IGC_DEV_ID_I225_IT: + case IGC_DEV_ID_I226_LM: + case IGC_DEV_ID_I226_V: + case IGC_DEV_ID_I226_IT: + case IGC_DEV_ID_I221_V: + case IGC_DEV_ID_I226_BLANK_NVM: + case IGC_DEV_ID_I225_BLANK_NVM: + mac->type = igc_i225; + break; + default: + return -IGC_ERR_MAC_INIT; + } + + hw->phy.media_type = igc_media_type_copper; + + /* mac initialization and operations */ + ret_val = igc_init_mac_params_base(hw); + if (ret_val) + goto out; + + /* NVM initialization */ + ret_val = igc_init_nvm_params_base(hw); + switch (hw->mac.type) { + case igc_i225: + ret_val = igc_init_nvm_params_i225(hw); + break; + default: + break; + } + + /* setup PHY parameters */ + ret_val = igc_init_phy_params_base(hw); + if (ret_val) + goto out; + +out: + return ret_val; +} + +/** + * igc_acquire_phy_base - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_acquire_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * igc_release_phy_base - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + */ +static void igc_release_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * igc_init_hw_base - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + */ +static s32 igc_init_hw_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + u16 i, rar_count = mac->rar_entry_count; + s32 ret_val = 0; + + /* Setup the receive address */ + igc_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + array_wr32(IGC_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(IGC_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = igc_setup_link(hw); + + /* Clear all of the statistics registers (clear on read). 
It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + igc_clear_hw_cntrs_base(hw); + + return ret_val; +} + +/** + * igc_power_down_phy_copper_base - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + */ +void igc_power_down_phy_copper_base(struct igc_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw))) + igc_power_down_phy_copper(hw); +} + +/** + * igc_rx_fifo_flush_base - Clean rx fifo after Rx enable + * @hw: pointer to the HW structure + * + * After Rx enable, if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + */ +void igc_rx_fifo_flush_base(struct igc_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + /* disable IPv6 options as per hardware errata */ + rfctl = rd32(IGC_RFCTL); + rfctl |= IGC_RFCTL_IPV6_EX_DIS; + wr32(IGC_RFCTL, rfctl); + + if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN)) + return; + + /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = rd32(IGC_RXDCTL(i)); + wr32(IGC_RXDCTL(i), + rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + usleep_range(1000, 2000); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= rd32(IGC_RXDCTL(i)); + if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + hw_dbg("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF); + + rlpml = rd32(IGC_RLPML); + wr32(IGC_RLPML, 0); + + rctl = rd32(IGC_RCTL); + temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP); + temp_rctl |= IGC_RCTL_LPE; + + wr32(IGC_RCTL, temp_rctl); + wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN); + wrfl(); + usleep_range(2000, 3000); + + /* Enable Rx queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + wr32(IGC_RXDCTL(i), rxdctl[i]); + wr32(IGC_RCTL, rctl); + wrfl(); + + wr32(IGC_RLPML, rlpml); + wr32(IGC_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + rd32(IGC_ROC); + rd32(IGC_RNBC); + rd32(IGC_MPC); +} + +bool igc_is_device_id_i225(struct igc_hw *hw) +{ + switch (hw->device_id) { + case IGC_DEV_ID_I225_LM: + case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I225_K: + case IGC_DEV_ID_I225_K2: + case IGC_DEV_ID_I225_LMVP: + case IGC_DEV_ID_I225_IT: + return true; + default: + return false; + } +} + +bool igc_is_device_id_i226(struct igc_hw *hw) +{ + switch (hw->device_id) { + case IGC_DEV_ID_I226_LM: + case IGC_DEV_ID_I226_V: + case IGC_DEV_ID_I226_K: + case IGC_DEV_ID_I226_IT: + return true; + default: + return false; + } +} + +static struct igc_mac_operations igc_mac_ops_base = { + .init_hw = igc_init_hw_base, + .check_for_link = igc_check_for_copper_link, + .rar_set = igc_rar_set, + .read_mac_addr = igc_read_mac_addr, + .get_speed_and_duplex = igc_get_speed_and_duplex_copper, +}; + +static const struct igc_phy_operations igc_phy_ops_base = { + .acquire = igc_acquire_phy_base, + .release = igc_release_phy_base, + .reset = igc_phy_hw_reset, + .read_reg = igc_read_phy_reg_gpy, + .write_reg = igc_write_phy_reg_gpy, +}; + +const struct igc_info igc_base_info = { + .get_invariants = igc_get_invariants_base, + .mac_ops = &igc_mac_ops_base, + .phy_ops = &igc_phy_ops_base, +}; diff --git a/devices/igc/igc_base-6.4-ethercat.h b/devices/igc/igc_base-6.4-ethercat.h new file mode 100644 index 00000000..9f3827ed --- /dev/null +++ b/devices/igc/igc_base-6.4-ethercat.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_BASE_H_ +#define _IGC_BASE_H_ + +/* forward declaration */ +void igc_rx_fifo_flush_base(struct igc_hw *hw); +void igc_power_down_phy_copper_base(struct igc_hw *hw); +bool igc_is_device_id_i225(struct igc_hw *hw); +bool igc_is_device_id_i226(struct igc_hw *hw); + +/* Transmit Descriptor - Advanced */ +union igc_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Context descriptors */ +struct igc_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 launch_time; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define IGC_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IGC_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define 
IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +#define IGC_RAR_ENTRIES 16 + +/* Receive Descriptor - Advanced */ +union igc_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /*RSS type, Pkt type*/ + /* Split Header, header buffer len */ + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Additional Transmit Descriptor Control definitions */ +#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define IGC_TXDCTL_SWFLUSH 0x04000000 /* Transmit Software Flush */ + +/* Additional Receive Descriptor Control definitions */ +#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */ + +/* SRRCTL bit definitions */ +#define IGC_SRRCTL_BSIZEPKT_MASK GENMASK(6, 0) +#define IGC_SRRCTL_BSIZEPKT(x) FIELD_PREP(IGC_SRRCTL_BSIZEPKT_MASK, \ + (x) / 1024) /* in 1 KB resolution */ +#define IGC_SRRCTL_BSIZEHDR_MASK GENMASK(13, 8) +#define IGC_SRRCTL_BSIZEHDR(x) FIELD_PREP(IGC_SRRCTL_BSIZEHDR_MASK, \ + (x) / 64) /* in 64 bytes resolution */ +#define IGC_SRRCTL_DESCTYPE_MASK GENMASK(27, 25) +#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF FIELD_PREP(IGC_SRRCTL_DESCTYPE_MASK, 1) + +#endif /* _IGC_BASE_H */ diff --git a/devices/igc/igc_base-6.4-orig.c b/devices/igc/igc_base-6.4-orig.c new file mode 100644 index 00000000..a1d815af --- /dev/null +++ b/devices/igc/igc_base-6.4-orig.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include + +#include "igc_hw.h" +#include "igc_i225.h" +#include "igc_mac.h" +#include "igc_base.h" +#include "igc.h" + +/** + * igc_reset_hw_base - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_reset_hw_base(struct igc_hw *hw) +{ + s32 ret_val; + u32 ctrl; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igc_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(IGC_IMC, 0xffffffff); + + wr32(IGC_RCTL, 0); + wr32(IGC_TCTL, IGC_TCTL_PSP); + wrfl(); + + usleep_range(10000, 20000); + + ctrl = rd32(IGC_CTRL); + + hw_dbg("Issuing a global reset to MAC\n"); + wr32(IGC_CTRL, ctrl | IGC_CTRL_RST); + + ret_val = igc_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* Clear any pending interrupt events. */ + wr32(IGC_IMC, 0xffffffff); + rd32(IGC_ICR); + + return ret_val; +} + +/** + * igc_init_nvm_params_base - Init NVM func ptrs. 
+ * @hw: pointer to the HW structure + */ +static s32 igc_init_nvm_params_base(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(IGC_EECD); + u16 size; + + size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >> + IGC_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->type = igc_nvm_eeprom_spi; + nvm->word_size = BIT(size); + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ? + 16 : 8; + + if (nvm->word_size == BIT(15)) + nvm->page_size = 128; + + return 0; +} + +/** + * igc_setup_copper_link_base - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + */ +static s32 igc_setup_copper_link_base(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_SLU; + ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX); + wr32(IGC_CTRL, ctrl); + + ret_val = igc_setup_copper_link(hw); + + return ret_val; +} + +/** + * igc_init_mac_params_base - Init MAC func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_mac_params_base(struct igc_hw *hw) +{ + struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base; + struct igc_mac_info *mac = &hw->mac; + + /* Set mta register count */ + mac->mta_reg_count = 128; + mac->rar_entry_count = IGC_RAR_ENTRIES; + + /* reset */ + mac->ops.reset_hw = igc_reset_hw_base; + + mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225; + mac->ops.release_swfw_sync = igc_release_swfw_sync_i225; + + /* Allow a single clear of the SW semaphore on I225 */ + if (mac->type == igc_i225) + dev_spec->clear_semaphore_once = true; + + /* physical interface link setup */ + mac->ops.setup_physical_interface = igc_setup_copper_link_base; + + return 0; +} + +/** + * igc_init_phy_params_base - Init PHY func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_phy_params_base(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500; + phy->reset_delay_us = 100; + + /* set lan id */ + hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >> + IGC_STATUS_FUNC_SHIFT; + + /* Make sure the PHY is in a good state. Several people have reported + * firmware leaving the PHY's page select register set to something + * other than the default of zero, which causes the PHY ID read to + * access something other than the intended register. 
+ */ + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY\n"); + goto out; + } + + ret_val = igc_get_phy_id(hw); + if (ret_val) + return ret_val; + + igc_check_for_copper_link(hw); + +out: + return ret_val; +} + +static s32 igc_get_invariants_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + s32 ret_val = 0; + + switch (hw->device_id) { + case IGC_DEV_ID_I225_LM: + case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I220_V: + case IGC_DEV_ID_I225_K: + case IGC_DEV_ID_I225_K2: + case IGC_DEV_ID_I226_K: + case IGC_DEV_ID_I225_LMVP: + case IGC_DEV_ID_I226_LMVP: + case IGC_DEV_ID_I225_IT: + case IGC_DEV_ID_I226_LM: + case IGC_DEV_ID_I226_V: + case IGC_DEV_ID_I226_IT: + case IGC_DEV_ID_I221_V: + case IGC_DEV_ID_I226_BLANK_NVM: + case IGC_DEV_ID_I225_BLANK_NVM: + mac->type = igc_i225; + break; + default: + return -IGC_ERR_MAC_INIT; + } + + hw->phy.media_type = igc_media_type_copper; + + /* mac initialization and operations */ + ret_val = igc_init_mac_params_base(hw); + if (ret_val) + goto out; + + /* NVM initialization */ + ret_val = igc_init_nvm_params_base(hw); + switch (hw->mac.type) { + case igc_i225: + ret_val = igc_init_nvm_params_i225(hw); + break; + default: + break; + } + + /* setup PHY parameters */ + ret_val = igc_init_phy_params_base(hw); + if (ret_val) + goto out; + +out: + return ret_val; +} + +/** + * igc_acquire_phy_base - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_acquire_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * igc_release_phy_base - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + */ +static void igc_release_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * igc_init_hw_base - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + */ +static s32 igc_init_hw_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + u16 i, rar_count = mac->rar_entry_count; + s32 ret_val = 0; + + /* Setup the receive address */ + igc_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + array_wr32(IGC_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(IGC_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = igc_setup_link(hw); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + igc_clear_hw_cntrs_base(hw); + + return ret_val; +} + +/** + * igc_power_down_phy_copper_base - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. 
+ */ +void igc_power_down_phy_copper_base(struct igc_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw))) + igc_power_down_phy_copper(hw); +} + +/** + * igc_rx_fifo_flush_base - Clean rx fifo after Rx enable + * @hw: pointer to the HW structure + * + * After Rx enable, if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + */ +void igc_rx_fifo_flush_base(struct igc_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + /* disable IPv6 options as per hardware errata */ + rfctl = rd32(IGC_RFCTL); + rfctl |= IGC_RFCTL_IPV6_EX_DIS; + wr32(IGC_RFCTL, rfctl); + + if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN)) + return; + + /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = rd32(IGC_RXDCTL(i)); + wr32(IGC_RXDCTL(i), + rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + usleep_range(1000, 2000); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= rd32(IGC_RXDCTL(i)); + if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + hw_dbg("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF); + + rlpml = rd32(IGC_RLPML); + wr32(IGC_RLPML, 0); + + rctl = rd32(IGC_RCTL); + temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP); + temp_rctl |= IGC_RCTL_LPE; + + wr32(IGC_RCTL, temp_rctl); + wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN); + wrfl(); + usleep_range(2000, 3000); + + /* Enable Rx queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + wr32(IGC_RXDCTL(i), rxdctl[i]); + wr32(IGC_RCTL, rctl); + wrfl(); + + wr32(IGC_RLPML, rlpml); + wr32(IGC_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + rd32(IGC_ROC); + rd32(IGC_RNBC); + rd32(IGC_MPC); +} + +bool igc_is_device_id_i225(struct igc_hw *hw) +{ + switch (hw->device_id) { + case IGC_DEV_ID_I225_LM: + case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I225_K: + case IGC_DEV_ID_I225_K2: + case IGC_DEV_ID_I225_LMVP: + case IGC_DEV_ID_I225_IT: + return true; + default: + return false; + } +} + +bool igc_is_device_id_i226(struct igc_hw *hw) +{ + switch (hw->device_id) { + case IGC_DEV_ID_I226_LM: + case IGC_DEV_ID_I226_V: + case IGC_DEV_ID_I226_K: + case IGC_DEV_ID_I226_IT: + return true; + default: + return false; + } +} + +static struct igc_mac_operations igc_mac_ops_base = { + .init_hw = igc_init_hw_base, + .check_for_link = igc_check_for_copper_link, + .rar_set = igc_rar_set, + .read_mac_addr = igc_read_mac_addr, + .get_speed_and_duplex = igc_get_speed_and_duplex_copper, +}; + +static const struct igc_phy_operations igc_phy_ops_base = { + .acquire = igc_acquire_phy_base, + .release = igc_release_phy_base, + .reset = igc_phy_hw_reset, + .read_reg = igc_read_phy_reg_gpy, + .write_reg = igc_write_phy_reg_gpy, +}; + +const struct igc_info igc_base_info = { + .get_invariants = igc_get_invariants_base, + .mac_ops = &igc_mac_ops_base, + .phy_ops = &igc_phy_ops_base, +}; diff --git a/devices/igc/igc_base-6.4-orig.h 
b/devices/igc/igc_base-6.4-orig.h new file mode 100644 index 00000000..9f3827ed --- /dev/null +++ b/devices/igc/igc_base-6.4-orig.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_BASE_H_ +#define _IGC_BASE_H_ + +/* forward declaration */ +void igc_rx_fifo_flush_base(struct igc_hw *hw); +void igc_power_down_phy_copper_base(struct igc_hw *hw); +bool igc_is_device_id_i225(struct igc_hw *hw); +bool igc_is_device_id_i226(struct igc_hw *hw); + +/* Transmit Descriptor - Advanced */ +union igc_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Context descriptors */ +struct igc_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 launch_time; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define IGC_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IGC_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +#define IGC_RAR_ENTRIES 16 + +/* Receive Descriptor - Advanced */ +union igc_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /*RSS type, Pkt type*/ + /* Split Header, header buffer len */ + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Additional Transmit Descriptor Control definitions */ +#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define IGC_TXDCTL_SWFLUSH 0x04000000 /* Transmit Software Flush */ + +/* Additional Receive Descriptor Control definitions */ +#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */ + +/* SRRCTL bit definitions */ +#define IGC_SRRCTL_BSIZEPKT_MASK GENMASK(6, 0) +#define IGC_SRRCTL_BSIZEPKT(x) FIELD_PREP(IGC_SRRCTL_BSIZEPKT_MASK, \ + (x) / 1024) /* in 1 KB resolution */ +#define IGC_SRRCTL_BSIZEHDR_MASK GENMASK(13, 8) +#define IGC_SRRCTL_BSIZEHDR(x) FIELD_PREP(IGC_SRRCTL_BSIZEHDR_MASK, \ + (x) / 64) /* in 64 bytes resolution */ +#define IGC_SRRCTL_DESCTYPE_MASK GENMASK(27, 25) +#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF FIELD_PREP(IGC_SRRCTL_DESCTYPE_MASK, 1) + +#endif /* _IGC_BASE_H */ diff --git a/devices/igc/igc_base-6.6-ethercat.c b/devices/igc/igc_base-6.6-ethercat.c new file mode 100644 index 00000000..a04284e3 --- /dev/null +++ b/devices/igc/igc_base-6.6-ethercat.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0 +/* 
Copyright (c) 2018 Intel Corporation */ + +#include + +#include "igc_hw-6.6-ethercat.h" +#include "igc_i225-6.6-ethercat.h" +#include "igc_mac-6.6-ethercat.h" +#include "igc_base-6.6-ethercat.h" +#include "igc-6.6-ethercat.h" + +/** + * igc_reset_hw_base - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_reset_hw_base(struct igc_hw *hw) +{ + s32 ret_val; + u32 ctrl; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igc_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(IGC_IMC, 0xffffffff); + + wr32(IGC_RCTL, 0); + wr32(IGC_TCTL, IGC_TCTL_PSP); + wrfl(); + + usleep_range(10000, 20000); + + ctrl = rd32(IGC_CTRL); + + hw_dbg("Issuing a global reset to MAC\n"); + wr32(IGC_CTRL, ctrl | IGC_CTRL_RST); + + ret_val = igc_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* Clear any pending interrupt events. */ + wr32(IGC_IMC, 0xffffffff); + rd32(IGC_ICR); + + return ret_val; +} + +/** + * igc_init_nvm_params_base - Init NVM func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_nvm_params_base(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(IGC_EECD); + u16 size; + + size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >> + IGC_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->type = igc_nvm_eeprom_spi; + nvm->word_size = BIT(size); + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ? + 16 : 8; + + if (nvm->word_size == BIT(15)) + nvm->page_size = 128; + + return 0; +} + +/** + * igc_setup_copper_link_base - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + */ +static s32 igc_setup_copper_link_base(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_SLU; + ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX); + wr32(IGC_CTRL, ctrl); + + ret_val = igc_setup_copper_link(hw); + + return ret_val; +} + +/** + * igc_init_mac_params_base - Init MAC func ptrs. 
+ * @hw: pointer to the HW structure + */ +static s32 igc_init_mac_params_base(struct igc_hw *hw) +{ + struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base; + struct igc_mac_info *mac = &hw->mac; + + /* Set mta register count */ + mac->mta_reg_count = 128; + mac->rar_entry_count = IGC_RAR_ENTRIES; + + /* reset */ + mac->ops.reset_hw = igc_reset_hw_base; + + mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225; + mac->ops.release_swfw_sync = igc_release_swfw_sync_i225; + + /* Allow a single clear of the SW semaphore on I225 */ + if (mac->type == igc_i225) + dev_spec->clear_semaphore_once = true; + + /* physical interface link setup */ + mac->ops.setup_physical_interface = igc_setup_copper_link_base; + + return 0; +} + +/** + * igc_init_phy_params_base - Init PHY func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_phy_params_base(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500; + phy->reset_delay_us = 100; + + /* set lan id */ + hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >> + IGC_STATUS_FUNC_SHIFT; + + /* Make sure the PHY is in a good state. Several people have reported + * firmware leaving the PHY's page select register set to something + * other than the default of zero, which causes the PHY ID read to + * access something other than the intended register. + */ + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY\n"); + goto out; + } + + ret_val = igc_get_phy_id(hw); + if (ret_val) + return ret_val; + + igc_check_for_copper_link(hw); + +out: + return ret_val; +} + +static s32 igc_get_invariants_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + s32 ret_val = 0; + + switch (hw->device_id) { + case IGC_DEV_ID_I225_LM: + case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I220_V: + case IGC_DEV_ID_I225_K: + case IGC_DEV_ID_I225_K2: + case IGC_DEV_ID_I226_K: + case IGC_DEV_ID_I225_LMVP: + case IGC_DEV_ID_I226_LMVP: + case IGC_DEV_ID_I225_IT: + case IGC_DEV_ID_I226_LM: + case IGC_DEV_ID_I226_V: + case IGC_DEV_ID_I226_IT: + case IGC_DEV_ID_I221_V: + case IGC_DEV_ID_I226_BLANK_NVM: + case IGC_DEV_ID_I225_BLANK_NVM: + mac->type = igc_i225; + break; + default: + return -IGC_ERR_MAC_INIT; + } + + hw->phy.media_type = igc_media_type_copper; + + /* mac initialization and operations */ + ret_val = igc_init_mac_params_base(hw); + if (ret_val) + goto out; + + /* NVM initialization */ + ret_val = igc_init_nvm_params_base(hw); + switch (hw->mac.type) { + case igc_i225: + ret_val = igc_init_nvm_params_i225(hw); + break; + default: + break; + } + + /* setup PHY parameters */ + ret_val = igc_init_phy_params_base(hw); + if (ret_val) + goto out; + +out: + return ret_val; +} + +/** + * igc_acquire_phy_base - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_acquire_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * igc_release_phy_base - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. 
+ */ +static void igc_release_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * igc_init_hw_base - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + */ +static s32 igc_init_hw_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + u16 i, rar_count = mac->rar_entry_count; + s32 ret_val = 0; + + /* Setup the receive address */ + igc_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + array_wr32(IGC_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(IGC_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = igc_setup_link(hw); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + igc_clear_hw_cntrs_base(hw); + + return ret_val; +} + +/** + * igc_power_down_phy_copper_base - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + */ +void igc_power_down_phy_copper_base(struct igc_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw))) + igc_power_down_phy_copper(hw); +} + +/** + * igc_rx_fifo_flush_base - Clean rx fifo after Rx enable + * @hw: pointer to the HW structure + * + * After Rx enable, if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + */ +void igc_rx_fifo_flush_base(struct igc_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + /* disable IPv6 options as per hardware errata */ + rfctl = rd32(IGC_RFCTL); + rfctl |= IGC_RFCTL_IPV6_EX_DIS; + wr32(IGC_RFCTL, rfctl); + + if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN)) + return; + + /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = rd32(IGC_RXDCTL(i)); + wr32(IGC_RXDCTL(i), + rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + usleep_range(1000, 2000); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= rd32(IGC_RXDCTL(i)); + if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + hw_dbg("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF); + + rlpml = rd32(IGC_RLPML); + wr32(IGC_RLPML, 0); + + rctl = rd32(IGC_RCTL); + temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP); + temp_rctl |= IGC_RCTL_LPE; + + wr32(IGC_RCTL, temp_rctl); + wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN); + wrfl(); + usleep_range(2000, 3000); + + /* Enable Rx queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + wr32(IGC_RXDCTL(i), rxdctl[i]); + wr32(IGC_RCTL, rctl); + wrfl(); + + wr32(IGC_RLPML, rlpml); + wr32(IGC_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + rd32(IGC_ROC); + rd32(IGC_RNBC); + rd32(IGC_MPC); +} + +bool igc_is_device_id_i225(struct igc_hw *hw) +{ + switch (hw->device_id) { + case IGC_DEV_ID_I225_LM: + case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I225_K: + case IGC_DEV_ID_I225_K2: + case IGC_DEV_ID_I225_LMVP: + case IGC_DEV_ID_I225_IT: + return true; + default: + return false; + } +} + +bool igc_is_device_id_i226(struct igc_hw *hw) +{ + switch (hw->device_id) { + case IGC_DEV_ID_I226_LM: + case IGC_DEV_ID_I226_V: + case IGC_DEV_ID_I226_K: + case IGC_DEV_ID_I226_IT: + return true; + default: + return false; + } +} + +static struct igc_mac_operations igc_mac_ops_base = { + .init_hw = igc_init_hw_base, + .check_for_link = igc_check_for_copper_link, + .rar_set = igc_rar_set, + .read_mac_addr = igc_read_mac_addr, + .get_speed_and_duplex = igc_get_speed_and_duplex_copper, +}; + +static const struct igc_phy_operations igc_phy_ops_base = { + .acquire = igc_acquire_phy_base, + .release = igc_release_phy_base, + .reset = igc_phy_hw_reset, + .read_reg = igc_read_phy_reg_gpy, + .write_reg = igc_write_phy_reg_gpy, +}; + +const struct igc_info igc_base_info = { + .get_invariants = igc_get_invariants_base, + .mac_ops = &igc_mac_ops_base, + .phy_ops = &igc_phy_ops_base, +}; diff --git a/devices/igc/igc_base-6.6-ethercat.h b/devices/igc/igc_base-6.6-ethercat.h new file mode 100644 index 00000000..f7d6491d --- /dev/null +++ b/devices/igc/igc_base-6.6-ethercat.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_BASE_H_ +#define _IGC_BASE_H_ + +/* forward declaration */ +void igc_rx_fifo_flush_base(struct igc_hw *hw); +void igc_power_down_phy_copper_base(struct igc_hw *hw); +bool igc_is_device_id_i225(struct igc_hw *hw); +bool igc_is_device_id_i226(struct igc_hw *hw); + +/* Transmit Descriptor - Advanced */ +union igc_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Context descriptors */ +struct igc_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 launch_time; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define IGC_ADVTXD_TSTAMP_REG_1 0x00010000 /* Select register 1 for timestamp */ +#define IGC_ADVTXD_TSTAMP_REG_2 0x00020000 /* Select register 2 for timestamp */ +#define IGC_ADVTXD_TSTAMP_REG_3 0x00030000 /* Select register 3 for timestamp */ +#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End 
of Packet */ +#define IGC_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IGC_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +#define IGC_RAR_ENTRIES 16 + +/* Receive Descriptor - Advanced */ +union igc_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /*RSS type, Pkt type*/ + /* Split Header, header buffer len */ + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Additional Transmit Descriptor Control definitions */ +#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define IGC_TXDCTL_SWFLUSH 0x04000000 /* Transmit Software Flush */ + +/* Additional Receive Descriptor Control definitions */ +#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */ + +/* SRRCTL bit definitions */ +#define IGC_SRRCTL_BSIZEPKT_MASK GENMASK(6, 0) +#define IGC_SRRCTL_BSIZEPKT(x) FIELD_PREP(IGC_SRRCTL_BSIZEPKT_MASK, \ + (x) / 1024) /* in 1 KB resolution */ +#define IGC_SRRCTL_BSIZEHDR_MASK GENMASK(13, 8) +#define IGC_SRRCTL_BSIZEHDR(x) FIELD_PREP(IGC_SRRCTL_BSIZEHDR_MASK, \ + (x) / 64) /* in 64 bytes resolution */ +#define IGC_SRRCTL_DESCTYPE_MASK GENMASK(27, 25) +#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF FIELD_PREP(IGC_SRRCTL_DESCTYPE_MASK, 1) + +#endif /* _IGC_BASE_H */ diff --git a/devices/igc/igc_base-6.6-orig.c b/devices/igc/igc_base-6.6-orig.c new file mode 100644 index 00000000..a1d815af --- /dev/null +++ b/devices/igc/igc_base-6.6-orig.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include + +#include "igc_hw.h" +#include "igc_i225.h" +#include "igc_mac.h" +#include "igc_base.h" +#include "igc.h" + +/** + * igc_reset_hw_base - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_reset_hw_base(struct igc_hw *hw) +{ + s32 ret_val; + u32 ctrl; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igc_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(IGC_IMC, 0xffffffff); + + wr32(IGC_RCTL, 0); + wr32(IGC_TCTL, IGC_TCTL_PSP); + wrfl(); + + usleep_range(10000, 20000); + + ctrl = rd32(IGC_CTRL); + + hw_dbg("Issuing a global reset to MAC\n"); + wr32(IGC_CTRL, ctrl | IGC_CTRL_RST); + + ret_val = igc_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. 
+ */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* Clear any pending interrupt events. */ + wr32(IGC_IMC, 0xffffffff); + rd32(IGC_ICR); + + return ret_val; +} + +/** + * igc_init_nvm_params_base - Init NVM func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_nvm_params_base(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(IGC_EECD); + u16 size; + + size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >> + IGC_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->type = igc_nvm_eeprom_spi; + nvm->word_size = BIT(size); + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ? + 16 : 8; + + if (nvm->word_size == BIT(15)) + nvm->page_size = 128; + + return 0; +} + +/** + * igc_setup_copper_link_base - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + */ +static s32 igc_setup_copper_link_base(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_SLU; + ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX); + wr32(IGC_CTRL, ctrl); + + ret_val = igc_setup_copper_link(hw); + + return ret_val; +} + +/** + * igc_init_mac_params_base - Init MAC func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_mac_params_base(struct igc_hw *hw) +{ + struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base; + struct igc_mac_info *mac = &hw->mac; + + /* Set mta register count */ + mac->mta_reg_count = 128; + mac->rar_entry_count = IGC_RAR_ENTRIES; + + /* reset */ + mac->ops.reset_hw = igc_reset_hw_base; + + mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225; + mac->ops.release_swfw_sync = igc_release_swfw_sync_i225; + + /* Allow a single clear of the SW semaphore on I225 */ + if (mac->type == igc_i225) + dev_spec->clear_semaphore_once = true; + + /* physical interface link setup */ + mac->ops.setup_physical_interface = igc_setup_copper_link_base; + + return 0; +} + +/** + * igc_init_phy_params_base - Init PHY func ptrs. + * @hw: pointer to the HW structure + */ +static s32 igc_init_phy_params_base(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500; + phy->reset_delay_us = 100; + + /* set lan id */ + hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >> + IGC_STATUS_FUNC_SHIFT; + + /* Make sure the PHY is in a good state. Several people have reported + * firmware leaving the PHY's page select register set to something + * other than the default of zero, which causes the PHY ID read to + * access something other than the intended register. 
+ */ + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY\n"); + goto out; + } + + ret_val = igc_get_phy_id(hw); + if (ret_val) + return ret_val; + + igc_check_for_copper_link(hw); + +out: + return ret_val; +} + +static s32 igc_get_invariants_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + s32 ret_val = 0; + + switch (hw->device_id) { + case IGC_DEV_ID_I225_LM: + case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I220_V: + case IGC_DEV_ID_I225_K: + case IGC_DEV_ID_I225_K2: + case IGC_DEV_ID_I226_K: + case IGC_DEV_ID_I225_LMVP: + case IGC_DEV_ID_I226_LMVP: + case IGC_DEV_ID_I225_IT: + case IGC_DEV_ID_I226_LM: + case IGC_DEV_ID_I226_V: + case IGC_DEV_ID_I226_IT: + case IGC_DEV_ID_I221_V: + case IGC_DEV_ID_I226_BLANK_NVM: + case IGC_DEV_ID_I225_BLANK_NVM: + mac->type = igc_i225; + break; + default: + return -IGC_ERR_MAC_INIT; + } + + hw->phy.media_type = igc_media_type_copper; + + /* mac initialization and operations */ + ret_val = igc_init_mac_params_base(hw); + if (ret_val) + goto out; + + /* NVM initialization */ + ret_val = igc_init_nvm_params_base(hw); + switch (hw->mac.type) { + case igc_i225: + ret_val = igc_init_nvm_params_i225(hw); + break; + default: + break; + } + + /* setup PHY parameters */ + ret_val = igc_init_phy_params_base(hw); + if (ret_val) + goto out; + +out: + return ret_val; +} + +/** + * igc_acquire_phy_base - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + */ +static s32 igc_acquire_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * igc_release_phy_base - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + */ +static void igc_release_phy_base(struct igc_hw *hw) +{ + u16 mask = IGC_SWFW_PHY0_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * igc_init_hw_base - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + */ +static s32 igc_init_hw_base(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + u16 i, rar_count = mac->rar_entry_count; + s32 ret_val = 0; + + /* Setup the receive address */ + igc_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + array_wr32(IGC_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(IGC_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = igc_setup_link(hw); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + igc_clear_hw_cntrs_base(hw); + + return ret_val; +} + +/** + * igc_power_down_phy_copper_base - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. 
+ */ +void igc_power_down_phy_copper_base(struct igc_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw))) + igc_power_down_phy_copper(hw); +} + +/** + * igc_rx_fifo_flush_base - Clean rx fifo after Rx enable + * @hw: pointer to the HW structure + * + * After Rx enable, if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + */ +void igc_rx_fifo_flush_base(struct igc_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + /* disable IPv6 options as per hardware errata */ + rfctl = rd32(IGC_RFCTL); + rfctl |= IGC_RFCTL_IPV6_EX_DIS; + wr32(IGC_RFCTL, rfctl); + + if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN)) + return; + + /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = rd32(IGC_RXDCTL(i)); + wr32(IGC_RXDCTL(i), + rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + usleep_range(1000, 2000); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= rd32(IGC_RXDCTL(i)); + if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + hw_dbg("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF); + + rlpml = rd32(IGC_RLPML); + wr32(IGC_RLPML, 0); + + rctl = rd32(IGC_RCTL); + temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP); + temp_rctl |= IGC_RCTL_LPE; + + wr32(IGC_RCTL, temp_rctl); + wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN); + wrfl(); + usleep_range(2000, 3000); + + /* Enable Rx queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + wr32(IGC_RXDCTL(i), rxdctl[i]); + wr32(IGC_RCTL, rctl); + wrfl(); + + wr32(IGC_RLPML, rlpml); + wr32(IGC_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + rd32(IGC_ROC); + rd32(IGC_RNBC); + rd32(IGC_MPC); +} + +bool igc_is_device_id_i225(struct igc_hw *hw) +{ + switch (hw->device_id) { + case IGC_DEV_ID_I225_LM: + case IGC_DEV_ID_I225_V: + case IGC_DEV_ID_I225_I: + case IGC_DEV_ID_I225_K: + case IGC_DEV_ID_I225_K2: + case IGC_DEV_ID_I225_LMVP: + case IGC_DEV_ID_I225_IT: + return true; + default: + return false; + } +} + +bool igc_is_device_id_i226(struct igc_hw *hw) +{ + switch (hw->device_id) { + case IGC_DEV_ID_I226_LM: + case IGC_DEV_ID_I226_V: + case IGC_DEV_ID_I226_K: + case IGC_DEV_ID_I226_IT: + return true; + default: + return false; + } +} + +static struct igc_mac_operations igc_mac_ops_base = { + .init_hw = igc_init_hw_base, + .check_for_link = igc_check_for_copper_link, + .rar_set = igc_rar_set, + .read_mac_addr = igc_read_mac_addr, + .get_speed_and_duplex = igc_get_speed_and_duplex_copper, +}; + +static const struct igc_phy_operations igc_phy_ops_base = { + .acquire = igc_acquire_phy_base, + .release = igc_release_phy_base, + .reset = igc_phy_hw_reset, + .read_reg = igc_read_phy_reg_gpy, + .write_reg = igc_write_phy_reg_gpy, +}; + +const struct igc_info igc_base_info = { + .get_invariants = igc_get_invariants_base, + .mac_ops = &igc_mac_ops_base, + .phy_ops = &igc_phy_ops_base, +}; diff --git a/devices/igc/igc_base-6.6-orig.h 
b/devices/igc/igc_base-6.6-orig.h new file mode 100644 index 00000000..f7d6491d --- /dev/null +++ b/devices/igc/igc_base-6.6-orig.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_BASE_H_ +#define _IGC_BASE_H_ + +/* forward declaration */ +void igc_rx_fifo_flush_base(struct igc_hw *hw); +void igc_power_down_phy_copper_base(struct igc_hw *hw); +bool igc_is_device_id_i225(struct igc_hw *hw); +bool igc_is_device_id_i226(struct igc_hw *hw); + +/* Transmit Descriptor - Advanced */ +union igc_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Context descriptors */ +struct igc_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 launch_time; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define IGC_ADVTXD_TSTAMP_REG_1 0x00010000 /* Select register 1 for timestamp */ +#define IGC_ADVTXD_TSTAMP_REG_2 0x00020000 /* Select register 2 for timestamp */ +#define IGC_ADVTXD_TSTAMP_REG_3 0x00030000 /* Select register 3 for timestamp */ +#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define IGC_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IGC_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +#define IGC_RAR_ENTRIES 16 + +/* Receive Descriptor - Advanced */ +union igc_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /*RSS type, Pkt type*/ + /* Split Header, header buffer len */ + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Additional Transmit Descriptor Control definitions */ +#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define IGC_TXDCTL_SWFLUSH 0x04000000 /* Transmit Software Flush */ + +/* Additional Receive Descriptor Control definitions */ +#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */ + +/* SRRCTL bit definitions */ +#define IGC_SRRCTL_BSIZEPKT_MASK GENMASK(6, 0) +#define IGC_SRRCTL_BSIZEPKT(x) FIELD_PREP(IGC_SRRCTL_BSIZEPKT_MASK, \ + (x) / 1024) /* in 1 KB resolution */ +#define IGC_SRRCTL_BSIZEHDR_MASK GENMASK(13, 8) +#define IGC_SRRCTL_BSIZEHDR(x) FIELD_PREP(IGC_SRRCTL_BSIZEHDR_MASK, \ + (x) / 64) /* in 64 bytes resolution */ +#define IGC_SRRCTL_DESCTYPE_MASK GENMASK(27, 25) +#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF FIELD_PREP(IGC_SRRCTL_DESCTYPE_MASK, 1) + +#endif /* _IGC_BASE_H */ diff 
--git a/devices/igc/igc_defines-6.4-ethercat.h b/devices/igc/igc_defines-6.4-ethercat.h new file mode 100644 index 00000000..44a50702 --- /dev/null +++ b/devices/igc/igc_defines-6.4-ethercat.h @@ -0,0 +1,680 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_DEFINES_H_ +#define _IGC_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +#define IGC_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define IGC_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ +#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define IGC_WUC_PME_EN 0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IGC_WUFC_FLEX_HQ BIT(14) /* Flex Filters Host Queuing */ +#define IGC_WUFC_FLX0 BIT(16) /* Flexible Filter 0 Enable */ +#define IGC_WUFC_FLX1 BIT(17) /* Flexible Filter 1 Enable */ +#define IGC_WUFC_FLX2 BIT(18) /* Flexible Filter 2 Enable */ +#define IGC_WUFC_FLX3 BIT(19) /* Flexible Filter 3 Enable */ +#define IGC_WUFC_FLX4 BIT(20) /* Flexible Filter 4 Enable */ +#define IGC_WUFC_FLX5 BIT(21) /* Flexible Filter 5 Enable */ +#define IGC_WUFC_FLX6 BIT(22) /* Flexible Filter 6 Enable */ +#define IGC_WUFC_FLX7 BIT(23) /* Flexible Filter 7 Enable */ + +#define IGC_WUFC_FILTER_MASK GENMASK(23, 14) + +#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ + +/* Wake Up Status */ +#define IGC_WUS_EX 0x00000004 /* Directed Exact */ +#define IGC_WUS_ARPD 0x00000020 /* Directed ARP Request */ +#define IGC_WUS_IPV4 0x00000040 /* Directed IPv4 */ +#define IGC_WUS_IPV6 0x00000080 /* Directed IPv6 */ +#define IGC_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */ + +/* Packet types that are enabled for wake packet delivery */ +#define WAKE_PKT_WUS ( \ + IGC_WUS_EX | \ + IGC_WUS_ARPD | \ + IGC_WUS_IPV4 | \ + IGC_WUS_IPV6 | \ + IGC_WUS_NSD) + +/* Wake Up Packet Length */ +#define IGC_WUPL_MASK 0x00000FFF + +/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ +#define IGC_WUPM_BYTES 128 + +/* Wakeup Filter Control Extended */ +#define IGC_WUFC_EXT_FLX8 BIT(8) /* Flexible Filter 8 Enable */ +#define IGC_WUFC_EXT_FLX9 BIT(9) /* Flexible Filter 9 Enable */ +#define IGC_WUFC_EXT_FLX10 BIT(10) /* Flexible Filter 10 Enable */ +#define IGC_WUFC_EXT_FLX11 BIT(11) /* Flexible Filter 11 Enable */ +#define IGC_WUFC_EXT_FLX12 BIT(12) /* Flexible Filter 12 Enable */ +#define IGC_WUFC_EXT_FLX13 BIT(13) /* Flexible Filter 13 Enable */ +#define IGC_WUFC_EXT_FLX14 BIT(14) /* Flexible Filter 14 Enable */ +#define IGC_WUFC_EXT_FLX15 BIT(15) /* Flexible Filter 15 Enable */ +#define IGC_WUFC_EXT_FLX16 BIT(16) /* Flexible Filter 16 Enable */ +#define IGC_WUFC_EXT_FLX17 BIT(17) /* Flexible Filter 17 Enable */ +#define IGC_WUFC_EXT_FLX18 BIT(18) /* Flexible Filter 18 Enable */ +#define IGC_WUFC_EXT_FLX19 BIT(19) /* Flexible Filter 19 Enable */ +#define IGC_WUFC_EXT_FLX20 BIT(20) /* Flexible Filter 20 Enable */ +#define IGC_WUFC_EXT_FLX21 BIT(21) /* Flexible Filter 21 Enable */ +#define 
IGC_WUFC_EXT_FLX22 BIT(22) /* Flexible Filter 22 Enable */ +#define IGC_WUFC_EXT_FLX23 BIT(23) /* Flexible Filter 23 Enable */ +#define IGC_WUFC_EXT_FLX24 BIT(24) /* Flexible Filter 24 Enable */ +#define IGC_WUFC_EXT_FLX25 BIT(25) /* Flexible Filter 25 Enable */ +#define IGC_WUFC_EXT_FLX26 BIT(26) /* Flexible Filter 26 Enable */ +#define IGC_WUFC_EXT_FLX27 BIT(27) /* Flexible Filter 27 Enable */ +#define IGC_WUFC_EXT_FLX28 BIT(28) /* Flexible Filter 28 Enable */ +#define IGC_WUFC_EXT_FLX29 BIT(29) /* Flexible Filter 29 Enable */ +#define IGC_WUFC_EXT_FLX30 BIT(30) /* Flexible Filter 30 Enable */ +#define IGC_WUFC_EXT_FLX31 BIT(31) /* Flexible Filter 31 Enable */ + +#define IGC_WUFC_EXT_FILTER_MASK GENMASK(31, 8) + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/*Blocks new Master requests */ +#define IGC_CTRL_GIO_MASTER_DISABLE 0x00000004 +/* Status of Master requests. */ +#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000 + +/* Receive Address + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define IGC_RAH_RAH_MASK 0x0000FFFF +#define IGC_RAH_ASEL_MASK 0x00030000 +#define IGC_RAH_ASEL_SRC_ADDR BIT(16) +#define IGC_RAH_QSEL_MASK 0x000C0000 +#define IGC_RAH_QSEL_SHIFT 18 +#define IGC_RAH_QSEL_ENABLE BIT(28) +#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */ + +#define IGC_RAL_MAC_ADDR_LEN 4 +#define IGC_RAH_MAC_ADDR_LEN 2 + +/* Error Codes */ +#define IGC_SUCCESS 0 +#define IGC_ERR_NVM 1 +#define IGC_ERR_PHY 2 +#define IGC_ERR_CONFIG 3 +#define IGC_ERR_PARAM 4 +#define IGC_ERR_MAC_INIT 5 +#define IGC_ERR_RESET 9 +#define IGC_ERR_MASTER_REQUESTS_PENDING 10 +#define IGC_ERR_BLK_PHY_RESET 12 +#define IGC_ERR_SWFW_SYNC 13 + +/* Device Control */ +#define IGC_CTRL_RST 0x04000000 /* Global reset */ + +#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define IGC_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ + +#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ + +#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ + +/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ +#define MAX_JUMBO_FRAME_SIZE 0x2600 + +/* PBA constants */ +#define IGC_PBA_34K 0x0022 + +/* SW Semaphore Register */ +#define IGC_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IGC_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ + +/* SWFW_SYNC Definitions */ +#define IGC_SWFW_EEP_SM 0x1 +#define IGC_SWFW_PHY0_SM 0x2 + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* 
Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ + +/* PHY GPY 211 registers */ +#define STANDARD_AN_REG_MASK 0x0007 /* MMD */ +#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */ +#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */ +#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */ + +/* NVM Control */ +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 +#define IGC_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define IGC_EECD_REQ 0x00000040 /* NVM Access Request */ +#define IGC_EECD_GNT 0x00000080 /* NVM Access Grant */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define IGC_EECD_ADDR_BITS 0x00000400 +#define IGC_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define IGC_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define IGC_EECD_SIZE_EX_SHIFT 11 +#define IGC_EECD_FLUPD_I225 0x00800000 /* Update FLASH */ +#define IGC_EECD_FLUDONE_I225 0x04000000 /* Update FLASH done*/ +#define IGC_EECD_FLASH_DETECTED_I225 0x00080000 /* FLASH detected */ +#define IGC_FLUDONE_ATTEMPTS 20000 +#define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ + +/* Offset to data in NVM read/write registers */ +#define IGC_NVM_RW_REG_DATA 16 +#define IGC_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define IGC_NVM_RW_REG_START 1 /* Start operation */ +#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define IGC_NVM_DEV_STARTER 5 /* Dev_starter Version */ + +/* NVM Word Offsets */ +#define NVM_CHECKSUM_REG 0x003F + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* Collision related configuration parameters */ +#define IGC_COLLISION_THRESHOLD 15 +#define IGC_CT_SHIFT 4 +#define IGC_COLLISION_DISTANCE 63 +#define IGC_COLD_SHIFT 12 + +/* Device Status */ +#define IGC_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define IGC_STATUS_FUNC_SHIFT 2 +#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define IGC_STATUS_SPEED_2500 0x00400000 /* Speed 2.5Gb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* 1Gbps and 2.5Gbps half duplex is not supported, nor spec-compliant. 
*/ +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 +#define ADVERTISE_2500_HALF 0x0040 /* Not used, just FYI */ +#define ADVERTISE_2500_FULL 0x0080 + +#define IGC_ALL_SPEED_DUPLEX_2500 ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500 IGC_ALL_SPEED_DUPLEX_2500 + +/* Interrupt Cause Read */ +#define IGC_ICR_TXDW BIT(0) /* Transmit desc written back */ +#define IGC_ICR_TXQE BIT(1) /* Transmit Queue empty */ +#define IGC_ICR_LSC BIT(2) /* Link Status Change */ +#define IGC_ICR_RXSEQ BIT(3) /* Rx sequence error */ +#define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. threshold (0) */ +#define IGC_ICR_RXO BIT(6) /* Rx overrun */ +#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */ +#define IGC_ICR_TS BIT(19) /* Time Sync Interrupt */ +#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */ + +/* If this bit asserted, the driver should claim the interrupt */ +#define IGC_ICR_INT_ASSERTED BIT(31) + +#define IGC_ICS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ + +#define IMS_ENABLE_MASK ( \ + IGC_IMS_RXT0 | \ + IGC_IMS_TXDW | \ + IGC_IMS_RXDMT0 | \ + IGC_IMS_RXSEQ | \ + IGC_IMS_LSC) + +/* Interrupt Mask Set */ +#define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */ +#define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */ +#define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */ +#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ +#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */ +#define IGC_IMS_TS IGC_ICR_TS /* Time Sync Interrupt */ + +#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */ +#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */ + +/* Interrupt Cause Set */ +#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* rx desc min. 
threshold */ + +#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ +#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ +#define IGC_IVAR_VALID 0x80 +#define IGC_GPIE_NSICR 0x00000001 +#define IGC_GPIE_MSIX_MODE 0x00000010 +#define IGC_GPIE_EIAME 0x40000000 +#define IGC_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */ + +/* Transmit Descriptor bit definitions */ +#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */ +#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* IPSec Encrypt Enable */ +#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define IGC_ADVTXD_TSN_CNTX_FIRST 0x00000080 + +/* Transmit Control */ +#define IGC_TCTL_EN 0x00000002 /* enable Tx */ +#define IGC_TCTL_PSP 0x00000008 /* pad short packets */ +#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */ +#define IGC_TCTL_COLD 0x003ff000 /* collision distance */ +#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 +/* Enable XON frame transmission */ +#define IGC_FCRTL_XONE 0x80000000 + +/* Management Control */ +#define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define IGC_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ + +/* Receive Control */ +#define IGC_RCTL_RST 0x00000001 /* Software reset */ +#define IGC_RCTL_EN 0x00000002 /* enable */ +#define IGC_RCTL_SBP 0x00000004 /* store bad packet */ +#define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */ +#define IGC_RCTL_LPE 0x00000020 /* long packet enable */ +#define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ + +#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */ + +/* Split Replication Receive Control */ +#define IGC_SRRCTL_TIMESTAMP 0x40000000 +#define IGC_SRRCTL_TIMER1SEL(timer) (((timer) & 0x3) << 14) +#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17) + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define IGC_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ + +#define IGC_RXDEXT_STATERR_LB 0x00040000 + +/* Advanced Receive Descriptor bit definitions */ +#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ + +#define IGC_RXDEXT_STATERR_L4E 0x20000000 +#define IGC_RXDEXT_STATERR_IPE 0x40000000 +#define 
IGC_RXDEXT_STATERR_RXE 0x80000000 + +#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Header split receive */ +#define IGC_RFCTL_IPV6_EX_DIS 0x00010000 +#define IGC_RFCTL_LEF 0x00040000 + +#define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ + +#define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define IGC_RCTL_DPF 0x00400000 /* discard pause frames */ +#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ + +#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */ + +#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */ +#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */ + +/* Transmit Scheduling Latency */ +/* Latency between transmission scheduling (LaunchTime) and the time + * the packet is transmitted to the network in nanosecond. + */ +#define IGC_TXOFFSET_SPEED_10 0x000034BC +#define IGC_TXOFFSET_SPEED_100 0x00000578 +#define IGC_TXOFFSET_SPEED_1000 0x0000012C +#define IGC_TXOFFSET_SPEED_2500 0x00000578 + +/* Time Sync Interrupt Causes */ +#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */ +#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */ +#define IGC_TSICR_TT0 BIT(3) /* Target Time 0 Trigger. */ +#define IGC_TSICR_TT1 BIT(4) /* Target Time 1 Trigger. */ +#define IGC_TSICR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */ +#define IGC_TSICR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. 
*/ + +#define IGC_TSICR_INTERRUPTS IGC_TSICR_TXTS + +#define IGC_FTQF_VF_BP 0x00008000 +#define IGC_FTQF_1588_TIME_STAMP 0x08000000 +#define IGC_FTQF_MASK 0xF0000000 +#define IGC_FTQF_MASK_PROTO_BP 0x10000000 + +/* Time Sync Receive Control bit definitions */ +#define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define IGC_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IGC_TSYNCRXCTL_TYPE_ALL 0x08 +#define IGC_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define IGC_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define IGC_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ +#define IGC_TSYNCRXCTL_RXSYNSIG 0x00000400 /* Sample RX tstamp in PHY sop */ + +/* Time Sync Receive Configuration */ +#define IGC_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 + +/* Immediate Interrupt Receive */ +#define IGC_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ +#define IGC_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ +#define IGC_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ +#define IGC_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ + +/* Immediate Interrupt Receive Extended */ +#define IGC_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ +#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ + +/* Time Sync Transmit Control bit definitions */ +#define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */ +#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ +#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ +#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */ +#define IGC_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */ +#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ +#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */ + +/* Timer selection bits */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for auxiliary time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for target time stamp */ + +/* TSAUXC Configuration Bits */ +#define IGC_TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ +#define IGC_TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ +#define IGC_TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ +#define IGC_TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */ +#define IGC_TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */ +#define IGC_TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */ +#define IGC_TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_PLSG BIT(17) /* Generate a pulse. 
*/ +#define IGC_TSAUXC_DISABLE1 BIT(27) /* Disable SYSTIM0 Count Operation. */ +#define IGC_TSAUXC_DISABLE2 BIT(28) /* Disable SYSTIM1 Count Operation. */ +#define IGC_TSAUXC_DISABLE3 BIT(29) /* Disable SYSTIM2 Count Operation. */ +#define IGC_TSAUXC_DIS_TS_CLEAR BIT(30) /* Disable EN_TT0/1 auto clear. */ +#define IGC_TSAUXC_DISABLE0 BIT(31) /* Disable SYSTIM0 Count Operation. */ + +/* SDP Configuration Bits */ +#define IGC_AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define IGC_AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define IGC_AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define IGC_AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ +#define IGC_TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ +#define IGC_TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define IGC_TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define IGC_TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define IGC_TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define IGC_TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define IGC_TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ +#define IGC_TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. 
*/ + +/* Transmit Scheduling */ +#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001 +#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008 +#define IGC_TQAVCTRL_FUTSCDDIS 0x00000080 + +#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001 +#define IGC_TXQCTL_STRICT_CYCLE 0x00000002 +#define IGC_TXQCTL_STRICT_END 0x00000004 +#define IGC_TXQCTL_QAV_SEL_MASK 0x000000C0 +#define IGC_TXQCTL_QAV_SEL_CBS0 0x00000080 +#define IGC_TXQCTL_QAV_SEL_CBS1 0x000000C0 + +#define IGC_TQAVCC_IDLESLOPE_MASK 0xFFFF +#define IGC_TQAVCC_KEEP_CREDITS BIT(30) + +#define IGC_MAX_SR_QUEUES 2 + +/* Receive Checksum Control */ +#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* PCIe PTM Control */ +#define IGC_PTM_CTRL_START_NOW BIT(29) /* Start PTM Now */ +#define IGC_PTM_CTRL_EN BIT(30) /* Enable PTM */ +#define IGC_PTM_CTRL_TRIG BIT(31) /* PTM Cycle trigger */ +#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x2f) << 2) +#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8) + +#define IGC_PTM_SHORT_CYC_DEFAULT 10 /* Default Short/interrupted cycle interval */ +#define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */ +#define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */ + +/* PCIe Digital Delay */ +#define IGC_PCIE_DIG_DELAY_DEFAULT 0x01440000 + +/* PCIe PHY Delay */ +#define IGC_PCIE_PHY_DELAY_DEFAULT 0x40900000 + +#define IGC_TIMADJ_ADJUST_METH 0x40000000 + +/* PCIe PTM Status */ +#define IGC_PTM_STAT_VALID BIT(0) /* PTM Status */ +#define IGC_PTM_STAT_RET_ERR BIT(1) /* Root port timeout */ +#define IGC_PTM_STAT_BAD_PTM_RES BIT(2) /* PTM Response msg instead of PTM Response Data */ +#define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */ +#define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */ +#define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */ + +/* PCIe PTM Cycle Control */ +#define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */ +#define IGC_PTM_CYCLE_CTRL_AUTO_CYC_EN BIT(31) /* PTM Cycle Control */ + +/* GPY211 - I225 defines */ +#define GPY_MMD_MASK 0xFFFF0000 +#define GPY_MMD_SHIFT 16 +#define GPY_REG_MASK 0x0000FFFF + +#define IGC_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ + +/* MAC definitions */ +#define IGC_FACTPS_MNGCG 0x20000000 +#define IGC_FWSM_MODE_MASK 0xE +#define IGC_FWSM_MODE_SHIFT 1 + +/* Management Control */ +#define IGC_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define IGC_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ + +/* PHY */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define IGC_GEN_POLL_TIMEOUT 1920 + +/* PHY Control Register */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define IGC_PHY_RST_COMP 0x0100 /* Internal PHY reset completion */ + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner 
Ability (Base Page) */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* MDI Control */ +#define IGC_MDIC_DATA_MASK 0x0000FFFF +#define IGC_MDIC_REG_MASK 0x001F0000 +#define IGC_MDIC_REG_SHIFT 16 +#define IGC_MDIC_PHY_MASK 0x03E00000 +#define IGC_MDIC_PHY_SHIFT 21 +#define IGC_MDIC_OP_WRITE 0x04000000 +#define IGC_MDIC_OP_READ 0x08000000 +#define IGC_MDIC_READY 0x10000000 +#define IGC_MDIC_ERROR 0x40000000 + +#define IGC_N0_QUEUE -1 + +#define IGC_MAX_MAC_HDR_LEN 127 +#define IGC_MAX_NETWORK_HDR_LEN 511 + +#define IGC_VLANPQF_QSEL(_n, q_idx) ((q_idx) << ((_n) * 4)) +#define IGC_VLANPQF_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define IGC_VLANPQF_QUEUE_MASK 0x03 + +#define IGC_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IGC_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type:1=IPv4 */ +#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet Type of TCP */ +#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ + +/* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + +/* EEE defines */ +#define IGC_IPCNFG_EEE_2_5G_AN 0x00000010 /* IPCNFG EEE Ena 2.5G AN */ +#define IGC_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ +#define IGC_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ +#define IGC_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define IGC_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ +#define IGC_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ +#define IGC_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ +#define IGC_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ + +/* LTR defines */ +#define IGC_LTRC_EEEMS_EN 0x00000020 /* Enable EEE LTR max send */ +#define IGC_RXPBS_SIZE_I225_MASK 0x0000003F /* Rx packet buffer size */ +#define IGC_TW_SYSTEM_1000_MASK 0x000000FF +/* Minimum time for 100BASE-T where no data will be transmit following move out + * of EEE LPI Tx state + */ +#define IGC_TW_SYSTEM_100_MASK 0x0000FF00 +#define IGC_TW_SYSTEM_100_SHIFT 8 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMINV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMINV_SCALE_32768 3 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMAXV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMAXV_SCALE_32768 3 +#define IGC_LTRMINV_LTRV_MASK 0x000003FF /* LTR minimum value */ +#define IGC_LTRMAXV_LTRV_MASK 0x000003FF /* LTR maximum value */ +#define IGC_LTRMINV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMINV_SCALE_SHIFT 10 +#define IGC_LTRMAXV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMAXV_SCALE_SHIFT 10 + +#endif /* _IGC_DEFINES_H_ */ diff --git a/devices/igc/igc_defines-6.4-orig.h b/devices/igc/igc_defines-6.4-orig.h new file mode 100644 index 00000000..44a50702 --- /dev/null +++ b/devices/igc/igc_defines-6.4-orig.h @@ -0,0 +1,680 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_DEFINES_H_ +#define _IGC_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +#define IGC_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define IGC_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ +#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define 
IGC_WUC_PME_EN 0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IGC_WUFC_FLEX_HQ BIT(14) /* Flex Filters Host Queuing */ +#define IGC_WUFC_FLX0 BIT(16) /* Flexible Filter 0 Enable */ +#define IGC_WUFC_FLX1 BIT(17) /* Flexible Filter 1 Enable */ +#define IGC_WUFC_FLX2 BIT(18) /* Flexible Filter 2 Enable */ +#define IGC_WUFC_FLX3 BIT(19) /* Flexible Filter 3 Enable */ +#define IGC_WUFC_FLX4 BIT(20) /* Flexible Filter 4 Enable */ +#define IGC_WUFC_FLX5 BIT(21) /* Flexible Filter 5 Enable */ +#define IGC_WUFC_FLX6 BIT(22) /* Flexible Filter 6 Enable */ +#define IGC_WUFC_FLX7 BIT(23) /* Flexible Filter 7 Enable */ + +#define IGC_WUFC_FILTER_MASK GENMASK(23, 14) + +#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ + +/* Wake Up Status */ +#define IGC_WUS_EX 0x00000004 /* Directed Exact */ +#define IGC_WUS_ARPD 0x00000020 /* Directed ARP Request */ +#define IGC_WUS_IPV4 0x00000040 /* Directed IPv4 */ +#define IGC_WUS_IPV6 0x00000080 /* Directed IPv6 */ +#define IGC_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */ + +/* Packet types that are enabled for wake packet delivery */ +#define WAKE_PKT_WUS ( \ + IGC_WUS_EX | \ + IGC_WUS_ARPD | \ + IGC_WUS_IPV4 | \ + IGC_WUS_IPV6 | \ + IGC_WUS_NSD) + +/* Wake Up Packet Length */ +#define IGC_WUPL_MASK 0x00000FFF + +/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ +#define IGC_WUPM_BYTES 128 + +/* Wakeup Filter Control Extended */ +#define IGC_WUFC_EXT_FLX8 BIT(8) /* Flexible Filter 8 Enable */ +#define IGC_WUFC_EXT_FLX9 BIT(9) /* Flexible Filter 9 Enable */ +#define IGC_WUFC_EXT_FLX10 BIT(10) /* Flexible Filter 10 Enable */ +#define IGC_WUFC_EXT_FLX11 BIT(11) /* Flexible Filter 11 Enable */ +#define IGC_WUFC_EXT_FLX12 BIT(12) /* Flexible Filter 12 Enable */ +#define IGC_WUFC_EXT_FLX13 BIT(13) /* Flexible Filter 13 Enable */ +#define IGC_WUFC_EXT_FLX14 BIT(14) /* Flexible Filter 14 Enable */ +#define IGC_WUFC_EXT_FLX15 BIT(15) /* Flexible Filter 15 Enable */ +#define IGC_WUFC_EXT_FLX16 BIT(16) /* Flexible Filter 16 Enable */ +#define IGC_WUFC_EXT_FLX17 BIT(17) /* Flexible Filter 17 Enable */ +#define IGC_WUFC_EXT_FLX18 BIT(18) /* Flexible Filter 18 Enable */ +#define IGC_WUFC_EXT_FLX19 BIT(19) /* Flexible Filter 19 Enable */ +#define IGC_WUFC_EXT_FLX20 BIT(20) /* Flexible Filter 20 Enable */ +#define IGC_WUFC_EXT_FLX21 BIT(21) /* Flexible Filter 21 Enable */ +#define IGC_WUFC_EXT_FLX22 BIT(22) /* Flexible Filter 22 Enable */ +#define IGC_WUFC_EXT_FLX23 BIT(23) /* Flexible Filter 23 Enable */ +#define IGC_WUFC_EXT_FLX24 BIT(24) /* Flexible Filter 24 Enable */ +#define IGC_WUFC_EXT_FLX25 BIT(25) /* Flexible Filter 25 Enable */ +#define IGC_WUFC_EXT_FLX26 BIT(26) /* Flexible Filter 26 Enable */ +#define IGC_WUFC_EXT_FLX27 BIT(27) /* Flexible Filter 27 Enable */ +#define IGC_WUFC_EXT_FLX28 BIT(28) /* Flexible Filter 28 Enable */ +#define IGC_WUFC_EXT_FLX29 BIT(29) /* Flexible Filter 29 Enable */ +#define IGC_WUFC_EXT_FLX30 BIT(30) /* Flexible Filter 30 Enable */ +#define IGC_WUFC_EXT_FLX31 BIT(31) /* Flexible Filter 31 Enable */ + +#define IGC_WUFC_EXT_FILTER_MASK GENMASK(31, 8) + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define 
COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/*Blocks new Master requests */ +#define IGC_CTRL_GIO_MASTER_DISABLE 0x00000004 +/* Status of Master requests. */ +#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000 + +/* Receive Address + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define IGC_RAH_RAH_MASK 0x0000FFFF +#define IGC_RAH_ASEL_MASK 0x00030000 +#define IGC_RAH_ASEL_SRC_ADDR BIT(16) +#define IGC_RAH_QSEL_MASK 0x000C0000 +#define IGC_RAH_QSEL_SHIFT 18 +#define IGC_RAH_QSEL_ENABLE BIT(28) +#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */ + +#define IGC_RAL_MAC_ADDR_LEN 4 +#define IGC_RAH_MAC_ADDR_LEN 2 + +/* Error Codes */ +#define IGC_SUCCESS 0 +#define IGC_ERR_NVM 1 +#define IGC_ERR_PHY 2 +#define IGC_ERR_CONFIG 3 +#define IGC_ERR_PARAM 4 +#define IGC_ERR_MAC_INIT 5 +#define IGC_ERR_RESET 9 +#define IGC_ERR_MASTER_REQUESTS_PENDING 10 +#define IGC_ERR_BLK_PHY_RESET 12 +#define IGC_ERR_SWFW_SYNC 13 + +/* Device Control */ +#define IGC_CTRL_RST 0x04000000 /* Global reset */ + +#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define IGC_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ + +#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ + +#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ + +/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ +#define MAX_JUMBO_FRAME_SIZE 0x2600 + +/* PBA constants */ +#define IGC_PBA_34K 0x0022 + +/* SW Semaphore Register */ +#define IGC_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IGC_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ + +/* SWFW_SYNC Definitions */ +#define IGC_SWFW_EEP_SM 0x1 +#define IGC_SWFW_PHY0_SM 0x2 + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ + +/* PHY GPY 211 registers */ +#define STANDARD_AN_REG_MASK 0x0007 /* MMD */ +#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */ +#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */ +#define CR_2500T_FD_CAPS 
0x0080 /* Advertise 2500T FD capability */ + +/* NVM Control */ +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 +#define IGC_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define IGC_EECD_REQ 0x00000040 /* NVM Access Request */ +#define IGC_EECD_GNT 0x00000080 /* NVM Access Grant */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define IGC_EECD_ADDR_BITS 0x00000400 +#define IGC_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define IGC_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define IGC_EECD_SIZE_EX_SHIFT 11 +#define IGC_EECD_FLUPD_I225 0x00800000 /* Update FLASH */ +#define IGC_EECD_FLUDONE_I225 0x04000000 /* Update FLASH done*/ +#define IGC_EECD_FLASH_DETECTED_I225 0x00080000 /* FLASH detected */ +#define IGC_FLUDONE_ATTEMPTS 20000 +#define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ + +/* Offset to data in NVM read/write registers */ +#define IGC_NVM_RW_REG_DATA 16 +#define IGC_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define IGC_NVM_RW_REG_START 1 /* Start operation */ +#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define IGC_NVM_DEV_STARTER 5 /* Dev_starter Version */ + +/* NVM Word Offsets */ +#define NVM_CHECKSUM_REG 0x003F + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* Collision related configuration parameters */ +#define IGC_COLLISION_THRESHOLD 15 +#define IGC_CT_SHIFT 4 +#define IGC_COLLISION_DISTANCE 63 +#define IGC_COLD_SHIFT 12 + +/* Device Status */ +#define IGC_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define IGC_STATUS_FUNC_SHIFT 2 +#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define IGC_STATUS_SPEED_2500 0x00400000 /* Speed 2.5Gb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* 1Gbps and 2.5Gbps half duplex is not supported, nor spec-compliant. */ +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 +#define ADVERTISE_2500_HALF 0x0040 /* Not used, just FYI */ +#define ADVERTISE_2500_FULL 0x0080 + +#define IGC_ALL_SPEED_DUPLEX_2500 ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500 IGC_ALL_SPEED_DUPLEX_2500 + +/* Interrupt Cause Read */ +#define IGC_ICR_TXDW BIT(0) /* Transmit desc written back */ +#define IGC_ICR_TXQE BIT(1) /* Transmit Queue empty */ +#define IGC_ICR_LSC BIT(2) /* Link Status Change */ +#define IGC_ICR_RXSEQ BIT(3) /* Rx sequence error */ +#define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. 
threshold (0) */ +#define IGC_ICR_RXO BIT(6) /* Rx overrun */ +#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */ +#define IGC_ICR_TS BIT(19) /* Time Sync Interrupt */ +#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */ + +/* If this bit asserted, the driver should claim the interrupt */ +#define IGC_ICR_INT_ASSERTED BIT(31) + +#define IGC_ICS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ + +#define IMS_ENABLE_MASK ( \ + IGC_IMS_RXT0 | \ + IGC_IMS_TXDW | \ + IGC_IMS_RXDMT0 | \ + IGC_IMS_RXSEQ | \ + IGC_IMS_LSC) + +/* Interrupt Mask Set */ +#define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */ +#define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */ +#define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */ +#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ +#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */ +#define IGC_IMS_TS IGC_ICR_TS /* Time Sync Interrupt */ + +#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */ +#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */ + +/* Interrupt Cause Set */ +#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* rx desc min. threshold */ + +#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ +#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ +#define IGC_IVAR_VALID 0x80 +#define IGC_GPIE_NSICR 0x00000001 +#define IGC_GPIE_MSIX_MODE 0x00000010 +#define IGC_GPIE_EIAME 0x40000000 +#define IGC_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */ + +/* Transmit Descriptor bit definitions */ +#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */ +#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* IPSec Encrypt Enable */ +#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define IGC_ADVTXD_TSN_CNTX_FIRST 0x00000080 + +/* Transmit Control */ +#define IGC_TCTL_EN 0x00000002 /* enable Tx */ +#define IGC_TCTL_PSP 0x00000008 /* pad short packets */ +#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */ +#define IGC_TCTL_COLD 0x003ff000 /* collision distance */ +#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 +/* Enable XON frame transmission */ +#define IGC_FCRTL_XONE 0x80000000 + +/* Management Control */ +#define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define IGC_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ + +/* Receive Control */ +#define IGC_RCTL_RST 0x00000001 /* Software reset */ +#define IGC_RCTL_EN 
0x00000002 /* enable */ +#define IGC_RCTL_SBP 0x00000004 /* store bad packet */ +#define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */ +#define IGC_RCTL_LPE 0x00000020 /* long packet enable */ +#define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ + +#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */ + +/* Split Replication Receive Control */ +#define IGC_SRRCTL_TIMESTAMP 0x40000000 +#define IGC_SRRCTL_TIMER1SEL(timer) (((timer) & 0x3) << 14) +#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17) + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define IGC_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ + +#define IGC_RXDEXT_STATERR_LB 0x00040000 + +/* Advanced Receive Descriptor bit definitions */ +#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ + +#define IGC_RXDEXT_STATERR_L4E 0x20000000 +#define IGC_RXDEXT_STATERR_IPE 0x40000000 +#define IGC_RXDEXT_STATERR_RXE 0x80000000 + +#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Header split receive */ +#define IGC_RFCTL_IPV6_EX_DIS 0x00010000 +#define IGC_RFCTL_LEF 0x00040000 + +#define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ + +#define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define IGC_RCTL_DPF 0x00400000 /* discard pause frames */ +#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ + +#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */ + +#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */ +#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */ + +/* Transmit Scheduling Latency */ +/* Latency between transmission scheduling (LaunchTime) and the time + * the packet is transmitted to the network in nanosecond. + */ +#define IGC_TXOFFSET_SPEED_10 0x000034BC +#define IGC_TXOFFSET_SPEED_100 0x00000578 +#define IGC_TXOFFSET_SPEED_1000 0x0000012C +#define IGC_TXOFFSET_SPEED_2500 0x00000578 + +/* Time Sync Interrupt Causes */ +#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */ +#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */ +#define IGC_TSICR_TT0 BIT(3) /* Target Time 0 Trigger. */ +#define IGC_TSICR_TT1 BIT(4) /* Target Time 1 Trigger. */ +#define IGC_TSICR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */ +#define IGC_TSICR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. 
*/ + +#define IGC_TSICR_INTERRUPTS IGC_TSICR_TXTS + +#define IGC_FTQF_VF_BP 0x00008000 +#define IGC_FTQF_1588_TIME_STAMP 0x08000000 +#define IGC_FTQF_MASK 0xF0000000 +#define IGC_FTQF_MASK_PROTO_BP 0x10000000 + +/* Time Sync Receive Control bit definitions */ +#define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define IGC_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IGC_TSYNCRXCTL_TYPE_ALL 0x08 +#define IGC_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define IGC_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define IGC_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ +#define IGC_TSYNCRXCTL_RXSYNSIG 0x00000400 /* Sample RX tstamp in PHY sop */ + +/* Time Sync Receive Configuration */ +#define IGC_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 + +/* Immediate Interrupt Receive */ +#define IGC_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ +#define IGC_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ +#define IGC_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ +#define IGC_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ + +/* Immediate Interrupt Receive Extended */ +#define IGC_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ +#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ + +/* Time Sync Transmit Control bit definitions */ +#define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */ +#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ +#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ +#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */ +#define IGC_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */ +#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ +#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */ + +/* Timer selection bits */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for auxiliary time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for target time stamp */ + +/* TSAUXC Configuration Bits */ +#define IGC_TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ +#define IGC_TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ +#define IGC_TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ +#define IGC_TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */ +#define IGC_TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */ +#define IGC_TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */ +#define IGC_TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_PLSG BIT(17) /* Generate a pulse. 
*/ +#define IGC_TSAUXC_DISABLE1 BIT(27) /* Disable SYSTIM0 Count Operation. */ +#define IGC_TSAUXC_DISABLE2 BIT(28) /* Disable SYSTIM1 Count Operation. */ +#define IGC_TSAUXC_DISABLE3 BIT(29) /* Disable SYSTIM2 Count Operation. */ +#define IGC_TSAUXC_DIS_TS_CLEAR BIT(30) /* Disable EN_TT0/1 auto clear. */ +#define IGC_TSAUXC_DISABLE0 BIT(31) /* Disable SYSTIM0 Count Operation. */ + +/* SDP Configuration Bits */ +#define IGC_AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define IGC_AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define IGC_AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define IGC_AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ +#define IGC_TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ +#define IGC_TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define IGC_TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define IGC_TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define IGC_TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define IGC_TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define IGC_TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ +#define IGC_TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. 
*/ + +/* Transmit Scheduling */ +#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001 +#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008 +#define IGC_TQAVCTRL_FUTSCDDIS 0x00000080 + +#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001 +#define IGC_TXQCTL_STRICT_CYCLE 0x00000002 +#define IGC_TXQCTL_STRICT_END 0x00000004 +#define IGC_TXQCTL_QAV_SEL_MASK 0x000000C0 +#define IGC_TXQCTL_QAV_SEL_CBS0 0x00000080 +#define IGC_TXQCTL_QAV_SEL_CBS1 0x000000C0 + +#define IGC_TQAVCC_IDLESLOPE_MASK 0xFFFF +#define IGC_TQAVCC_KEEP_CREDITS BIT(30) + +#define IGC_MAX_SR_QUEUES 2 + +/* Receive Checksum Control */ +#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* PCIe PTM Control */ +#define IGC_PTM_CTRL_START_NOW BIT(29) /* Start PTM Now */ +#define IGC_PTM_CTRL_EN BIT(30) /* Enable PTM */ +#define IGC_PTM_CTRL_TRIG BIT(31) /* PTM Cycle trigger */ +#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x2f) << 2) +#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8) + +#define IGC_PTM_SHORT_CYC_DEFAULT 10 /* Default Short/interrupted cycle interval */ +#define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */ +#define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */ + +/* PCIe Digital Delay */ +#define IGC_PCIE_DIG_DELAY_DEFAULT 0x01440000 + +/* PCIe PHY Delay */ +#define IGC_PCIE_PHY_DELAY_DEFAULT 0x40900000 + +#define IGC_TIMADJ_ADJUST_METH 0x40000000 + +/* PCIe PTM Status */ +#define IGC_PTM_STAT_VALID BIT(0) /* PTM Status */ +#define IGC_PTM_STAT_RET_ERR BIT(1) /* Root port timeout */ +#define IGC_PTM_STAT_BAD_PTM_RES BIT(2) /* PTM Response msg instead of PTM Response Data */ +#define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */ +#define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */ +#define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */ + +/* PCIe PTM Cycle Control */ +#define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */ +#define IGC_PTM_CYCLE_CTRL_AUTO_CYC_EN BIT(31) /* PTM Cycle Control */ + +/* GPY211 - I225 defines */ +#define GPY_MMD_MASK 0xFFFF0000 +#define GPY_MMD_SHIFT 16 +#define GPY_REG_MASK 0x0000FFFF + +#define IGC_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ + +/* MAC definitions */ +#define IGC_FACTPS_MNGCG 0x20000000 +#define IGC_FWSM_MODE_MASK 0xE +#define IGC_FWSM_MODE_SHIFT 1 + +/* Management Control */ +#define IGC_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define IGC_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ + +/* PHY */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define IGC_GEN_POLL_TIMEOUT 1920 + +/* PHY Control Register */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define IGC_PHY_RST_COMP 0x0100 /* Internal PHY reset completion */ + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner 
Ability (Base Page) */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* MDI Control */ +#define IGC_MDIC_DATA_MASK 0x0000FFFF +#define IGC_MDIC_REG_MASK 0x001F0000 +#define IGC_MDIC_REG_SHIFT 16 +#define IGC_MDIC_PHY_MASK 0x03E00000 +#define IGC_MDIC_PHY_SHIFT 21 +#define IGC_MDIC_OP_WRITE 0x04000000 +#define IGC_MDIC_OP_READ 0x08000000 +#define IGC_MDIC_READY 0x10000000 +#define IGC_MDIC_ERROR 0x40000000 + +#define IGC_N0_QUEUE -1 + +#define IGC_MAX_MAC_HDR_LEN 127 +#define IGC_MAX_NETWORK_HDR_LEN 511 + +#define IGC_VLANPQF_QSEL(_n, q_idx) ((q_idx) << ((_n) * 4)) +#define IGC_VLANPQF_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define IGC_VLANPQF_QUEUE_MASK 0x03 + +#define IGC_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IGC_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type:1=IPv4 */ +#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet Type of TCP */ +#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ + +/* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + +/* EEE defines */ +#define IGC_IPCNFG_EEE_2_5G_AN 0x00000010 /* IPCNFG EEE Ena 2.5G AN */ +#define IGC_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ +#define IGC_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ +#define IGC_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define IGC_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ +#define IGC_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ +#define IGC_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ +#define IGC_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ + +/* LTR defines */ +#define IGC_LTRC_EEEMS_EN 0x00000020 /* Enable EEE LTR max send */ +#define IGC_RXPBS_SIZE_I225_MASK 0x0000003F /* Rx packet buffer size */ +#define IGC_TW_SYSTEM_1000_MASK 0x000000FF +/* Minimum time for 100BASE-T where no data will be transmit following move out + * of EEE LPI Tx state + */ +#define IGC_TW_SYSTEM_100_MASK 0x0000FF00 +#define IGC_TW_SYSTEM_100_SHIFT 8 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMINV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMINV_SCALE_32768 3 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMAXV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMAXV_SCALE_32768 3 +#define IGC_LTRMINV_LTRV_MASK 0x000003FF /* LTR minimum value */ +#define IGC_LTRMAXV_LTRV_MASK 0x000003FF /* LTR maximum value */ +#define IGC_LTRMINV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMINV_SCALE_SHIFT 10 +#define IGC_LTRMAXV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMAXV_SCALE_SHIFT 10 + +#endif /* _IGC_DEFINES_H_ */ diff --git a/devices/igc/igc_defines-6.6-ethercat.h b/devices/igc/igc_defines-6.6-ethercat.h new file mode 100644 index 00000000..b3037016 --- /dev/null +++ b/devices/igc/igc_defines-6.6-ethercat.h @@ -0,0 +1,687 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_DEFINES_H_ +#define _IGC_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +#define IGC_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define IGC_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ +#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ 
+#define IGC_WUC_PME_EN 0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IGC_WUFC_FLEX_HQ BIT(14) /* Flex Filters Host Queuing */ +#define IGC_WUFC_FLX0 BIT(16) /* Flexible Filter 0 Enable */ +#define IGC_WUFC_FLX1 BIT(17) /* Flexible Filter 1 Enable */ +#define IGC_WUFC_FLX2 BIT(18) /* Flexible Filter 2 Enable */ +#define IGC_WUFC_FLX3 BIT(19) /* Flexible Filter 3 Enable */ +#define IGC_WUFC_FLX4 BIT(20) /* Flexible Filter 4 Enable */ +#define IGC_WUFC_FLX5 BIT(21) /* Flexible Filter 5 Enable */ +#define IGC_WUFC_FLX6 BIT(22) /* Flexible Filter 6 Enable */ +#define IGC_WUFC_FLX7 BIT(23) /* Flexible Filter 7 Enable */ + +#define IGC_WUFC_FILTER_MASK GENMASK(23, 14) + +#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ + +/* Wake Up Status */ +#define IGC_WUS_EX 0x00000004 /* Directed Exact */ +#define IGC_WUS_ARPD 0x00000020 /* Directed ARP Request */ +#define IGC_WUS_IPV4 0x00000040 /* Directed IPv4 */ +#define IGC_WUS_IPV6 0x00000080 /* Directed IPv6 */ +#define IGC_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */ + +/* Packet types that are enabled for wake packet delivery */ +#define WAKE_PKT_WUS ( \ + IGC_WUS_EX | \ + IGC_WUS_ARPD | \ + IGC_WUS_IPV4 | \ + IGC_WUS_IPV6 | \ + IGC_WUS_NSD) + +/* Wake Up Packet Length */ +#define IGC_WUPL_MASK 0x00000FFF + +/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ +#define IGC_WUPM_BYTES 128 + +/* Wakeup Filter Control Extended */ +#define IGC_WUFC_EXT_FLX8 BIT(8) /* Flexible Filter 8 Enable */ +#define IGC_WUFC_EXT_FLX9 BIT(9) /* Flexible Filter 9 Enable */ +#define IGC_WUFC_EXT_FLX10 BIT(10) /* Flexible Filter 10 Enable */ +#define IGC_WUFC_EXT_FLX11 BIT(11) /* Flexible Filter 11 Enable */ +#define IGC_WUFC_EXT_FLX12 BIT(12) /* Flexible Filter 12 Enable */ +#define IGC_WUFC_EXT_FLX13 BIT(13) /* Flexible Filter 13 Enable */ +#define IGC_WUFC_EXT_FLX14 BIT(14) /* Flexible Filter 14 Enable */ +#define IGC_WUFC_EXT_FLX15 BIT(15) /* Flexible Filter 15 Enable */ +#define IGC_WUFC_EXT_FLX16 BIT(16) /* Flexible Filter 16 Enable */ +#define IGC_WUFC_EXT_FLX17 BIT(17) /* Flexible Filter 17 Enable */ +#define IGC_WUFC_EXT_FLX18 BIT(18) /* Flexible Filter 18 Enable */ +#define IGC_WUFC_EXT_FLX19 BIT(19) /* Flexible Filter 19 Enable */ +#define IGC_WUFC_EXT_FLX20 BIT(20) /* Flexible Filter 20 Enable */ +#define IGC_WUFC_EXT_FLX21 BIT(21) /* Flexible Filter 21 Enable */ +#define IGC_WUFC_EXT_FLX22 BIT(22) /* Flexible Filter 22 Enable */ +#define IGC_WUFC_EXT_FLX23 BIT(23) /* Flexible Filter 23 Enable */ +#define IGC_WUFC_EXT_FLX24 BIT(24) /* Flexible Filter 24 Enable */ +#define IGC_WUFC_EXT_FLX25 BIT(25) /* Flexible Filter 25 Enable */ +#define IGC_WUFC_EXT_FLX26 BIT(26) /* Flexible Filter 26 Enable */ +#define IGC_WUFC_EXT_FLX27 BIT(27) /* Flexible Filter 27 Enable */ +#define IGC_WUFC_EXT_FLX28 BIT(28) /* Flexible Filter 28 Enable */ +#define IGC_WUFC_EXT_FLX29 BIT(29) /* Flexible Filter 29 Enable */ +#define IGC_WUFC_EXT_FLX30 BIT(30) /* Flexible Filter 30 Enable */ +#define IGC_WUFC_EXT_FLX31 BIT(31) /* Flexible Filter 31 Enable */ + +#define IGC_WUFC_EXT_FILTER_MASK GENMASK(31, 8) + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define 
COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/*Blocks new Master requests */ +#define IGC_CTRL_GIO_MASTER_DISABLE 0x00000004 +/* Status of Master requests. */ +#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000 + +/* Receive Address + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define IGC_RAH_RAH_MASK 0x0000FFFF +#define IGC_RAH_ASEL_MASK 0x00030000 +#define IGC_RAH_ASEL_SRC_ADDR BIT(16) +#define IGC_RAH_QSEL_MASK 0x000C0000 +#define IGC_RAH_QSEL_SHIFT 18 +#define IGC_RAH_QSEL_ENABLE BIT(28) +#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */ + +#define IGC_RAL_MAC_ADDR_LEN 4 +#define IGC_RAH_MAC_ADDR_LEN 2 + +/* Error Codes */ +#define IGC_SUCCESS 0 +#define IGC_ERR_NVM 1 +#define IGC_ERR_PHY 2 +#define IGC_ERR_CONFIG 3 +#define IGC_ERR_PARAM 4 +#define IGC_ERR_MAC_INIT 5 +#define IGC_ERR_RESET 9 +#define IGC_ERR_MASTER_REQUESTS_PENDING 10 +#define IGC_ERR_BLK_PHY_RESET 12 +#define IGC_ERR_SWFW_SYNC 13 + +/* Device Control */ +#define IGC_CTRL_RST 0x04000000 /* Global reset */ + +#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define IGC_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ + +#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ + +#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ + +/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ +#define MAX_JUMBO_FRAME_SIZE 0x2600 + +/* PBA constants */ +#define IGC_PBA_34K 0x0022 + +/* SW Semaphore Register */ +#define IGC_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IGC_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ + +/* SWFW_SYNC Definitions */ +#define IGC_SWFW_EEP_SM 0x1 +#define IGC_SWFW_PHY0_SM 0x2 + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ + +/* PHY GPY 211 registers */ +#define STANDARD_AN_REG_MASK 0x0007 /* MMD */ +#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */ +#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */ +#define CR_2500T_FD_CAPS 
0x0080 /* Advertise 2500T FD capability */ + +/* NVM Control */ +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 +#define IGC_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define IGC_EECD_REQ 0x00000040 /* NVM Access Request */ +#define IGC_EECD_GNT 0x00000080 /* NVM Access Grant */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define IGC_EECD_ADDR_BITS 0x00000400 +#define IGC_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define IGC_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define IGC_EECD_SIZE_EX_SHIFT 11 +#define IGC_EECD_FLUPD_I225 0x00800000 /* Update FLASH */ +#define IGC_EECD_FLUDONE_I225 0x04000000 /* Update FLASH done*/ +#define IGC_EECD_FLASH_DETECTED_I225 0x00080000 /* FLASH detected */ +#define IGC_FLUDONE_ATTEMPTS 20000 +#define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ + +/* Offset to data in NVM read/write registers */ +#define IGC_NVM_RW_REG_DATA 16 +#define IGC_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define IGC_NVM_RW_REG_START 1 /* Start operation */ +#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define IGC_NVM_DEV_STARTER 5 /* Dev_starter Version */ + +/* NVM Word Offsets */ +#define NVM_CHECKSUM_REG 0x003F + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* Collision related configuration parameters */ +#define IGC_COLLISION_THRESHOLD 15 +#define IGC_CT_SHIFT 4 +#define IGC_COLLISION_DISTANCE 63 +#define IGC_COLD_SHIFT 12 + +/* Device Status */ +#define IGC_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define IGC_STATUS_FUNC_SHIFT 2 +#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define IGC_STATUS_SPEED_2500 0x00400000 /* Speed 2.5Gb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* 1Gbps and 2.5Gbps half duplex is not supported, nor spec-compliant. */ +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 +#define ADVERTISE_2500_HALF 0x0040 /* Not used, just FYI */ +#define ADVERTISE_2500_FULL 0x0080 + +#define IGC_ALL_SPEED_DUPLEX_2500 ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500 IGC_ALL_SPEED_DUPLEX_2500 + +/* Interrupt Cause Read */ +#define IGC_ICR_TXDW BIT(0) /* Transmit desc written back */ +#define IGC_ICR_TXQE BIT(1) /* Transmit Queue empty */ +#define IGC_ICR_LSC BIT(2) /* Link Status Change */ +#define IGC_ICR_RXSEQ BIT(3) /* Rx sequence error */ +#define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. 
threshold (0) */ +#define IGC_ICR_RXO BIT(6) /* Rx overrun */ +#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */ +#define IGC_ICR_TS BIT(19) /* Time Sync Interrupt */ +#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */ + +/* If this bit asserted, the driver should claim the interrupt */ +#define IGC_ICR_INT_ASSERTED BIT(31) + +#define IGC_ICS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ + +#define IMS_ENABLE_MASK ( \ + IGC_IMS_RXT0 | \ + IGC_IMS_TXDW | \ + IGC_IMS_RXDMT0 | \ + IGC_IMS_RXSEQ | \ + IGC_IMS_LSC) + +/* Interrupt Mask Set */ +#define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */ +#define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */ +#define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */ +#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ +#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */ +#define IGC_IMS_TS IGC_ICR_TS /* Time Sync Interrupt */ + +#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */ +#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */ + +/* Interrupt Cause Set */ +#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* rx desc min. threshold */ + +#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ +#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ +#define IGC_IVAR_VALID 0x80 +#define IGC_GPIE_NSICR 0x00000001 +#define IGC_GPIE_MSIX_MODE 0x00000010 +#define IGC_GPIE_EIAME 0x40000000 +#define IGC_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */ + +/* Transmit Descriptor bit definitions */ +#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */ +#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* IPSec Encrypt Enable */ +#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define IGC_ADVTXD_TSN_CNTX_FIRST 0x00000080 + +/* Transmit Control */ +#define IGC_TCTL_EN 0x00000002 /* enable Tx */ +#define IGC_TCTL_PSP 0x00000008 /* pad short packets */ +#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */ +#define IGC_TCTL_COLD 0x003ff000 /* collision distance */ +#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 +/* Enable XON frame transmission */ +#define IGC_FCRTL_XONE 0x80000000 + +/* Management Control */ +#define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define IGC_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ + +/* Receive Control */ +#define IGC_RCTL_RST 0x00000001 /* Software reset */ +#define IGC_RCTL_EN 
0x00000002 /* enable */ +#define IGC_RCTL_SBP 0x00000004 /* store bad packet */ +#define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */ +#define IGC_RCTL_LPE 0x00000020 /* long packet enable */ +#define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ + +#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */ + +/* Split Replication Receive Control */ +#define IGC_SRRCTL_TIMESTAMP 0x40000000 +#define IGC_SRRCTL_TIMER1SEL(timer) (((timer) & 0x3) << 14) +#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17) + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define IGC_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ + +#define IGC_RXDEXT_STATERR_LB 0x00040000 + +/* Advanced Receive Descriptor bit definitions */ +#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ + +#define IGC_RXDEXT_STATERR_L4E 0x20000000 +#define IGC_RXDEXT_STATERR_IPE 0x40000000 +#define IGC_RXDEXT_STATERR_RXE 0x80000000 + +#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Header split receive */ +#define IGC_RFCTL_IPV6_EX_DIS 0x00010000 +#define IGC_RFCTL_LEF 0x00040000 + +#define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ + +#define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define IGC_RCTL_DPF 0x00400000 /* discard pause frames */ +#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ + +#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */ + +#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */ +#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */ + +/* Transmit Scheduling Latency */ +/* Latency between transmission scheduling (LaunchTime) and the time + * the packet is transmitted to the network in nanosecond. + */ +#define IGC_TXOFFSET_SPEED_10 0x000034BC +#define IGC_TXOFFSET_SPEED_100 0x00000578 +#define IGC_TXOFFSET_SPEED_1000 0x0000012C +#define IGC_TXOFFSET_SPEED_2500 0x00000578 + +/* Time Sync Interrupt Causes */ +#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */ +#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */ +#define IGC_TSICR_TT0 BIT(3) /* Target Time 0 Trigger. */ +#define IGC_TSICR_TT1 BIT(4) /* Target Time 1 Trigger. */ +#define IGC_TSICR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */ +#define IGC_TSICR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. 
*/ + +#define IGC_TSICR_INTERRUPTS IGC_TSICR_TXTS + +#define IGC_FTQF_VF_BP 0x00008000 +#define IGC_FTQF_1588_TIME_STAMP 0x08000000 +#define IGC_FTQF_MASK 0xF0000000 +#define IGC_FTQF_MASK_PROTO_BP 0x10000000 + +/* Time Sync Receive Control bit definitions */ +#define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define IGC_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IGC_TSYNCRXCTL_TYPE_ALL 0x08 +#define IGC_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define IGC_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define IGC_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ +#define IGC_TSYNCRXCTL_RXSYNSIG 0x00000400 /* Sample RX tstamp in PHY sop */ + +/* Time Sync Receive Configuration */ +#define IGC_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 + +/* Immediate Interrupt Receive */ +#define IGC_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ +#define IGC_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ +#define IGC_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ +#define IGC_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ + +/* Immediate Interrupt Receive Extended */ +#define IGC_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ +#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ + +/* Time Sync Transmit Control bit definitions */ +#define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */ +#define IGC_TSYNCTXCTL_TXTT_1 0x00000002 /* Tx timestamp reg 1 valid */ +#define IGC_TSYNCTXCTL_TXTT_2 0x00000004 /* Tx timestamp reg 2 valid */ +#define IGC_TSYNCTXCTL_TXTT_3 0x00000008 /* Tx timestamp reg 3 valid */ +#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ +#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ +#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */ +#define IGC_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */ +#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ +#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */ + +#define IGC_TSYNCTXCTL_TXTT_ANY ( \ + IGC_TSYNCTXCTL_TXTT_0 | IGC_TSYNCTXCTL_TXTT_1 | \ + IGC_TSYNCTXCTL_TXTT_2 | IGC_TSYNCTXCTL_TXTT_3) + +/* Timer selection bits */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for auxiliary time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for target time stamp */ + +/* TSAUXC Configuration Bits */ +#define IGC_TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ +#define IGC_TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ +#define IGC_TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ +#define IGC_TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */ +#define IGC_TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. 
*/ +#define IGC_TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */ +#define IGC_TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_PLSG BIT(17) /* Generate a pulse. */ +#define IGC_TSAUXC_DISABLE1 BIT(27) /* Disable SYSTIM0 Count Operation. */ +#define IGC_TSAUXC_DISABLE2 BIT(28) /* Disable SYSTIM1 Count Operation. */ +#define IGC_TSAUXC_DISABLE3 BIT(29) /* Disable SYSTIM2 Count Operation. */ +#define IGC_TSAUXC_DIS_TS_CLEAR BIT(30) /* Disable EN_TT0/1 auto clear. */ +#define IGC_TSAUXC_DISABLE0 BIT(31) /* Disable SYSTIM0 Count Operation. */ + +/* SDP Configuration Bits */ +#define IGC_AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define IGC_AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define IGC_AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define IGC_AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ +#define IGC_TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ +#define IGC_TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define IGC_TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define IGC_TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define IGC_TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define IGC_TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define IGC_TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ +#define IGC_TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. 
*/ + +/* Transmit Scheduling */ +#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001 +#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008 +#define IGC_TQAVCTRL_FUTSCDDIS 0x00000080 + +#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001 +#define IGC_TXQCTL_STRICT_CYCLE 0x00000002 +#define IGC_TXQCTL_STRICT_END 0x00000004 +#define IGC_TXQCTL_QAV_SEL_MASK 0x000000C0 +#define IGC_TXQCTL_QAV_SEL_CBS0 0x00000080 +#define IGC_TXQCTL_QAV_SEL_CBS1 0x000000C0 + +#define IGC_TQAVCC_IDLESLOPE_MASK 0xFFFF +#define IGC_TQAVCC_KEEP_CREDITS BIT(30) + +#define IGC_MAX_SR_QUEUES 2 + +/* Receive Checksum Control */ +#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* PCIe PTM Control */ +#define IGC_PTM_CTRL_START_NOW BIT(29) /* Start PTM Now */ +#define IGC_PTM_CTRL_EN BIT(30) /* Enable PTM */ +#define IGC_PTM_CTRL_TRIG BIT(31) /* PTM Cycle trigger */ +#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2) +#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8) + +#define IGC_PTM_SHORT_CYC_DEFAULT 1 /* Default short cycle interval */ +#define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */ +#define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */ + +/* PCIe Digital Delay */ +#define IGC_PCIE_DIG_DELAY_DEFAULT 0x01440000 + +/* PCIe PHY Delay */ +#define IGC_PCIE_PHY_DELAY_DEFAULT 0x40900000 + +#define IGC_TIMADJ_ADJUST_METH 0x40000000 + +/* PCIe PTM Status */ +#define IGC_PTM_STAT_VALID BIT(0) /* PTM Status */ +#define IGC_PTM_STAT_RET_ERR BIT(1) /* Root port timeout */ +#define IGC_PTM_STAT_BAD_PTM_RES BIT(2) /* PTM Response msg instead of PTM Response Data */ +#define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */ +#define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */ +#define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */ + +/* PCIe PTM Cycle Control */ +#define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */ +#define IGC_PTM_CYCLE_CTRL_AUTO_CYC_EN BIT(31) /* PTM Cycle Control */ + +/* GPY211 - I225 defines */ +#define GPY_MMD_MASK 0xFFFF0000 +#define GPY_MMD_SHIFT 16 +#define GPY_REG_MASK 0x0000FFFF + +#define IGC_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ + +/* MAC definitions */ +#define IGC_FACTPS_MNGCG 0x20000000 +#define IGC_FWSM_MODE_MASK 0xE +#define IGC_FWSM_MODE_SHIFT 1 + +/* Management Control */ +#define IGC_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define IGC_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ + +/* PHY */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define IGC_GEN_POLL_TIMEOUT 1920 + +/* PHY Control Register */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define IGC_PHY_RST_COMP 0x0100 /* Internal PHY reset completion */ + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base 
Page) */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* MDI Control */ +#define IGC_MDIC_DATA_MASK 0x0000FFFF +#define IGC_MDIC_REG_MASK 0x001F0000 +#define IGC_MDIC_REG_SHIFT 16 +#define IGC_MDIC_PHY_MASK 0x03E00000 +#define IGC_MDIC_PHY_SHIFT 21 +#define IGC_MDIC_OP_WRITE 0x04000000 +#define IGC_MDIC_OP_READ 0x08000000 +#define IGC_MDIC_READY 0x10000000 +#define IGC_MDIC_ERROR 0x40000000 + +#define IGC_N0_QUEUE -1 + +#define IGC_MAX_MAC_HDR_LEN 127 +#define IGC_MAX_NETWORK_HDR_LEN 511 + +#define IGC_VLANPQF_QSEL(_n, q_idx) ((q_idx) << ((_n) * 4)) +#define IGC_VLANPQF_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define IGC_VLANPQF_QUEUE_MASK 0x03 + +#define IGC_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IGC_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type:1=IPv4 */ +#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet Type of TCP */ +#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ + +/* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + +/* EEE defines */ +#define IGC_IPCNFG_EEE_2_5G_AN 0x00000010 /* IPCNFG EEE Ena 2.5G AN */ +#define IGC_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ +#define IGC_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ +#define IGC_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define IGC_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ +#define IGC_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ +#define IGC_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ +#define IGC_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ + +/* LTR defines */ +#define IGC_LTRC_EEEMS_EN 0x00000020 /* Enable EEE LTR max send */ +#define IGC_RXPBS_SIZE_I225_MASK 0x0000003F /* Rx packet buffer size */ +#define IGC_TW_SYSTEM_1000_MASK 0x000000FF +/* Minimum time for 100BASE-T where no data will be transmit following move out + * of EEE LPI Tx state + */ +#define IGC_TW_SYSTEM_100_MASK 0x0000FF00 +#define IGC_TW_SYSTEM_100_SHIFT 8 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMINV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMINV_SCALE_32768 3 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMAXV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMAXV_SCALE_32768 3 +#define IGC_LTRMINV_LTRV_MASK 0x000003FF /* LTR minimum value */ +#define IGC_LTRMAXV_LTRV_MASK 0x000003FF /* LTR maximum value */ +#define IGC_LTRMINV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMINV_SCALE_SHIFT 10 +#define IGC_LTRMAXV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMAXV_SCALE_SHIFT 10 + +#endif /* _IGC_DEFINES_H_ */ diff --git a/devices/igc/igc_defines-6.6-orig.h b/devices/igc/igc_defines-6.6-orig.h new file mode 100644 index 00000000..b3037016 --- /dev/null +++ b/devices/igc/igc_defines-6.6-orig.h @@ -0,0 +1,687 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_DEFINES_H_ +#define _IGC_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +#define IGC_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define IGC_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ +#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define IGC_WUC_PME_EN 
0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IGC_WUFC_FLEX_HQ BIT(14) /* Flex Filters Host Queuing */ +#define IGC_WUFC_FLX0 BIT(16) /* Flexible Filter 0 Enable */ +#define IGC_WUFC_FLX1 BIT(17) /* Flexible Filter 1 Enable */ +#define IGC_WUFC_FLX2 BIT(18) /* Flexible Filter 2 Enable */ +#define IGC_WUFC_FLX3 BIT(19) /* Flexible Filter 3 Enable */ +#define IGC_WUFC_FLX4 BIT(20) /* Flexible Filter 4 Enable */ +#define IGC_WUFC_FLX5 BIT(21) /* Flexible Filter 5 Enable */ +#define IGC_WUFC_FLX6 BIT(22) /* Flexible Filter 6 Enable */ +#define IGC_WUFC_FLX7 BIT(23) /* Flexible Filter 7 Enable */ + +#define IGC_WUFC_FILTER_MASK GENMASK(23, 14) + +#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ + +/* Wake Up Status */ +#define IGC_WUS_EX 0x00000004 /* Directed Exact */ +#define IGC_WUS_ARPD 0x00000020 /* Directed ARP Request */ +#define IGC_WUS_IPV4 0x00000040 /* Directed IPv4 */ +#define IGC_WUS_IPV6 0x00000080 /* Directed IPv6 */ +#define IGC_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */ + +/* Packet types that are enabled for wake packet delivery */ +#define WAKE_PKT_WUS ( \ + IGC_WUS_EX | \ + IGC_WUS_ARPD | \ + IGC_WUS_IPV4 | \ + IGC_WUS_IPV6 | \ + IGC_WUS_NSD) + +/* Wake Up Packet Length */ +#define IGC_WUPL_MASK 0x00000FFF + +/* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ +#define IGC_WUPM_BYTES 128 + +/* Wakeup Filter Control Extended */ +#define IGC_WUFC_EXT_FLX8 BIT(8) /* Flexible Filter 8 Enable */ +#define IGC_WUFC_EXT_FLX9 BIT(9) /* Flexible Filter 9 Enable */ +#define IGC_WUFC_EXT_FLX10 BIT(10) /* Flexible Filter 10 Enable */ +#define IGC_WUFC_EXT_FLX11 BIT(11) /* Flexible Filter 11 Enable */ +#define IGC_WUFC_EXT_FLX12 BIT(12) /* Flexible Filter 12 Enable */ +#define IGC_WUFC_EXT_FLX13 BIT(13) /* Flexible Filter 13 Enable */ +#define IGC_WUFC_EXT_FLX14 BIT(14) /* Flexible Filter 14 Enable */ +#define IGC_WUFC_EXT_FLX15 BIT(15) /* Flexible Filter 15 Enable */ +#define IGC_WUFC_EXT_FLX16 BIT(16) /* Flexible Filter 16 Enable */ +#define IGC_WUFC_EXT_FLX17 BIT(17) /* Flexible Filter 17 Enable */ +#define IGC_WUFC_EXT_FLX18 BIT(18) /* Flexible Filter 18 Enable */ +#define IGC_WUFC_EXT_FLX19 BIT(19) /* Flexible Filter 19 Enable */ +#define IGC_WUFC_EXT_FLX20 BIT(20) /* Flexible Filter 20 Enable */ +#define IGC_WUFC_EXT_FLX21 BIT(21) /* Flexible Filter 21 Enable */ +#define IGC_WUFC_EXT_FLX22 BIT(22) /* Flexible Filter 22 Enable */ +#define IGC_WUFC_EXT_FLX23 BIT(23) /* Flexible Filter 23 Enable */ +#define IGC_WUFC_EXT_FLX24 BIT(24) /* Flexible Filter 24 Enable */ +#define IGC_WUFC_EXT_FLX25 BIT(25) /* Flexible Filter 25 Enable */ +#define IGC_WUFC_EXT_FLX26 BIT(26) /* Flexible Filter 26 Enable */ +#define IGC_WUFC_EXT_FLX27 BIT(27) /* Flexible Filter 27 Enable */ +#define IGC_WUFC_EXT_FLX28 BIT(28) /* Flexible Filter 28 Enable */ +#define IGC_WUFC_EXT_FLX29 BIT(29) /* Flexible Filter 29 Enable */ +#define IGC_WUFC_EXT_FLX30 BIT(30) /* Flexible Filter 30 Enable */ +#define IGC_WUFC_EXT_FLX31 BIT(31) /* Flexible Filter 31 Enable */ + +#define IGC_WUFC_EXT_FILTER_MASK GENMASK(31, 8) + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define COPPER_LINK_UP_LIMIT 10 
+#define PHY_AUTO_NEG_LIMIT 45 + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/*Blocks new Master requests */ +#define IGC_CTRL_GIO_MASTER_DISABLE 0x00000004 +/* Status of Master requests. */ +#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000 + +/* Receive Address + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define IGC_RAH_RAH_MASK 0x0000FFFF +#define IGC_RAH_ASEL_MASK 0x00030000 +#define IGC_RAH_ASEL_SRC_ADDR BIT(16) +#define IGC_RAH_QSEL_MASK 0x000C0000 +#define IGC_RAH_QSEL_SHIFT 18 +#define IGC_RAH_QSEL_ENABLE BIT(28) +#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */ + +#define IGC_RAL_MAC_ADDR_LEN 4 +#define IGC_RAH_MAC_ADDR_LEN 2 + +/* Error Codes */ +#define IGC_SUCCESS 0 +#define IGC_ERR_NVM 1 +#define IGC_ERR_PHY 2 +#define IGC_ERR_CONFIG 3 +#define IGC_ERR_PARAM 4 +#define IGC_ERR_MAC_INIT 5 +#define IGC_ERR_RESET 9 +#define IGC_ERR_MASTER_REQUESTS_PENDING 10 +#define IGC_ERR_BLK_PHY_RESET 12 +#define IGC_ERR_SWFW_SYNC 13 + +/* Device Control */ +#define IGC_CTRL_RST 0x04000000 /* Global reset */ + +#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define IGC_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ + +#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ + +#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ + +/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ +#define MAX_JUMBO_FRAME_SIZE 0x2600 + +/* PBA constants */ +#define IGC_PBA_34K 0x0022 + +/* SW Semaphore Register */ +#define IGC_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IGC_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ + +/* SWFW_SYNC Definitions */ +#define IGC_SWFW_EEP_SM 0x1 +#define IGC_SWFW_PHY0_SM 0x2 + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ + +/* PHY GPY 211 registers */ +#define STANDARD_AN_REG_MASK 0x0007 /* MMD */ +#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */ +#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */ +#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T 
FD capability */ + +/* NVM Control */ +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 +#define IGC_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define IGC_EECD_REQ 0x00000040 /* NVM Access Request */ +#define IGC_EECD_GNT 0x00000080 /* NVM Access Grant */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define IGC_EECD_ADDR_BITS 0x00000400 +#define IGC_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define IGC_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define IGC_EECD_SIZE_EX_SHIFT 11 +#define IGC_EECD_FLUPD_I225 0x00800000 /* Update FLASH */ +#define IGC_EECD_FLUDONE_I225 0x04000000 /* Update FLASH done*/ +#define IGC_EECD_FLASH_DETECTED_I225 0x00080000 /* FLASH detected */ +#define IGC_FLUDONE_ATTEMPTS 20000 +#define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ + +/* Offset to data in NVM read/write registers */ +#define IGC_NVM_RW_REG_DATA 16 +#define IGC_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define IGC_NVM_RW_REG_START 1 /* Start operation */ +#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define IGC_NVM_DEV_STARTER 5 /* Dev_starter Version */ + +/* NVM Word Offsets */ +#define NVM_CHECKSUM_REG 0x003F + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* Collision related configuration parameters */ +#define IGC_COLLISION_THRESHOLD 15 +#define IGC_CT_SHIFT 4 +#define IGC_COLLISION_DISTANCE 63 +#define IGC_COLD_SHIFT 12 + +/* Device Status */ +#define IGC_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define IGC_STATUS_FUNC_SHIFT 2 +#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define IGC_STATUS_SPEED_2500 0x00400000 /* Speed 2.5Gb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* 1Gbps and 2.5Gbps half duplex is not supported, nor spec-compliant. */ +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 +#define ADVERTISE_2500_HALF 0x0040 /* Not used, just FYI */ +#define ADVERTISE_2500_FULL 0x0080 + +#define IGC_ALL_SPEED_DUPLEX_2500 ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500 IGC_ALL_SPEED_DUPLEX_2500 + +/* Interrupt Cause Read */ +#define IGC_ICR_TXDW BIT(0) /* Transmit desc written back */ +#define IGC_ICR_TXQE BIT(1) /* Transmit Queue empty */ +#define IGC_ICR_LSC BIT(2) /* Link Status Change */ +#define IGC_ICR_RXSEQ BIT(3) /* Rx sequence error */ +#define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. 
threshold (0) */ +#define IGC_ICR_RXO BIT(6) /* Rx overrun */ +#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */ +#define IGC_ICR_TS BIT(19) /* Time Sync Interrupt */ +#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */ + +/* If this bit asserted, the driver should claim the interrupt */ +#define IGC_ICR_INT_ASSERTED BIT(31) + +#define IGC_ICS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ + +#define IMS_ENABLE_MASK ( \ + IGC_IMS_RXT0 | \ + IGC_IMS_TXDW | \ + IGC_IMS_RXDMT0 | \ + IGC_IMS_RXSEQ | \ + IGC_IMS_LSC) + +/* Interrupt Mask Set */ +#define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */ +#define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */ +#define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */ +#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ +#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */ +#define IGC_IMS_TS IGC_ICR_TS /* Time Sync Interrupt */ + +#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */ +#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */ + +/* Interrupt Cause Set */ +#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */ +#define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* rx desc min. threshold */ + +#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ +#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ +#define IGC_IVAR_VALID 0x80 +#define IGC_GPIE_NSICR 0x00000001 +#define IGC_GPIE_MSIX_MODE 0x00000010 +#define IGC_GPIE_EIAME 0x40000000 +#define IGC_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */ + +/* Transmit Descriptor bit definitions */ +#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */ +#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* IPSec Encrypt Enable */ +#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define IGC_ADVTXD_TSN_CNTX_FIRST 0x00000080 + +/* Transmit Control */ +#define IGC_TCTL_EN 0x00000002 /* enable Tx */ +#define IGC_TCTL_PSP 0x00000008 /* pad short packets */ +#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */ +#define IGC_TCTL_COLD 0x003ff000 /* collision distance */ +#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 +/* Enable XON frame transmission */ +#define IGC_FCRTL_XONE 0x80000000 + +/* Management Control */ +#define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define IGC_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ + +/* Receive Control */ +#define IGC_RCTL_RST 0x00000001 /* Software reset */ +#define IGC_RCTL_EN 
0x00000002 /* enable */ +#define IGC_RCTL_SBP 0x00000004 /* store bad packet */ +#define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */ +#define IGC_RCTL_LPE 0x00000020 /* long packet enable */ +#define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ + +#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */ + +/* Split Replication Receive Control */ +#define IGC_SRRCTL_TIMESTAMP 0x40000000 +#define IGC_SRRCTL_TIMER1SEL(timer) (((timer) & 0x3) << 14) +#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17) + +/* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define IGC_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ + +#define IGC_RXDEXT_STATERR_LB 0x00040000 + +/* Advanced Receive Descriptor bit definitions */ +#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ + +#define IGC_RXDEXT_STATERR_L4E 0x20000000 +#define IGC_RXDEXT_STATERR_IPE 0x40000000 +#define IGC_RXDEXT_STATERR_RXE 0x80000000 + +#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Header split receive */ +#define IGC_RFCTL_IPV6_EX_DIS 0x00010000 +#define IGC_RFCTL_LEF 0x00040000 + +#define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ + +#define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define IGC_RCTL_DPF 0x00400000 /* discard pause frames */ +#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ + +#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */ + +#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */ +#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */ + +/* Transmit Scheduling Latency */ +/* Latency between transmission scheduling (LaunchTime) and the time + * the packet is transmitted to the network in nanosecond. + */ +#define IGC_TXOFFSET_SPEED_10 0x000034BC +#define IGC_TXOFFSET_SPEED_100 0x00000578 +#define IGC_TXOFFSET_SPEED_1000 0x0000012C +#define IGC_TXOFFSET_SPEED_2500 0x00000578 + +/* Time Sync Interrupt Causes */ +#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */ +#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */ +#define IGC_TSICR_TT0 BIT(3) /* Target Time 0 Trigger. */ +#define IGC_TSICR_TT1 BIT(4) /* Target Time 1 Trigger. */ +#define IGC_TSICR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */ +#define IGC_TSICR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. 
*/ + +#define IGC_TSICR_INTERRUPTS IGC_TSICR_TXTS + +#define IGC_FTQF_VF_BP 0x00008000 +#define IGC_FTQF_1588_TIME_STAMP 0x08000000 +#define IGC_FTQF_MASK 0xF0000000 +#define IGC_FTQF_MASK_PROTO_BP 0x10000000 + +/* Time Sync Receive Control bit definitions */ +#define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define IGC_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IGC_TSYNCRXCTL_TYPE_ALL 0x08 +#define IGC_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define IGC_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define IGC_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ +#define IGC_TSYNCRXCTL_RXSYNSIG 0x00000400 /* Sample RX tstamp in PHY sop */ + +/* Time Sync Receive Configuration */ +#define IGC_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 + +/* Immediate Interrupt Receive */ +#define IGC_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ +#define IGC_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ +#define IGC_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ +#define IGC_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ + +/* Immediate Interrupt Receive Extended */ +#define IGC_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ +#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ + +/* Time Sync Transmit Control bit definitions */ +#define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */ +#define IGC_TSYNCTXCTL_TXTT_1 0x00000002 /* Tx timestamp reg 1 valid */ +#define IGC_TSYNCTXCTL_TXTT_2 0x00000004 /* Tx timestamp reg 2 valid */ +#define IGC_TSYNCTXCTL_TXTT_3 0x00000008 /* Tx timestamp reg 3 valid */ +#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ +#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ +#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */ +#define IGC_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */ +#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ +#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */ + +#define IGC_TSYNCTXCTL_TXTT_ANY ( \ + IGC_TSYNCTXCTL_TXTT_0 | IGC_TSYNCTXCTL_TXTT_1 | \ + IGC_TSYNCTXCTL_TXTT_2 | IGC_TSYNCTXCTL_TXTT_3) + +/* Timer selection bits */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for auxiliary time stamp */ +#define IGC_AUX_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for auxiliary time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM2 (2u << 30) /* Select SYSTIM2 for target time stamp */ +#define IGC_TT_IO_TIMER_SEL_SYSTIM3 (3u << 30) /* Select SYSTIM3 for target time stamp */ + +/* TSAUXC Configuration Bits */ +#define IGC_TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ +#define IGC_TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ +#define IGC_TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ +#define IGC_TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */ +#define IGC_TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. 
*/ +#define IGC_TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */ +#define IGC_TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */ +#define IGC_TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ +#define IGC_TSAUXC_PLSG BIT(17) /* Generate a pulse. */ +#define IGC_TSAUXC_DISABLE1 BIT(27) /* Disable SYSTIM0 Count Operation. */ +#define IGC_TSAUXC_DISABLE2 BIT(28) /* Disable SYSTIM1 Count Operation. */ +#define IGC_TSAUXC_DISABLE3 BIT(29) /* Disable SYSTIM2 Count Operation. */ +#define IGC_TSAUXC_DIS_TS_CLEAR BIT(30) /* Disable EN_TT0/1 auto clear. */ +#define IGC_TSAUXC_DISABLE0 BIT(31) /* Disable SYSTIM0 Count Operation. */ + +/* SDP Configuration Bits */ +#define IGC_AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define IGC_AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define IGC_AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define IGC_AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define IGC_AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define IGC_AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ +#define IGC_TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define IGC_TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ +#define IGC_TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define IGC_TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define IGC_TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define IGC_TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define IGC_TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ +#define IGC_TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define IGC_TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define IGC_TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define IGC_TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ +#define IGC_TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. 
*/ + +/* Transmit Scheduling */ +#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001 +#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008 +#define IGC_TQAVCTRL_FUTSCDDIS 0x00000080 + +#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001 +#define IGC_TXQCTL_STRICT_CYCLE 0x00000002 +#define IGC_TXQCTL_STRICT_END 0x00000004 +#define IGC_TXQCTL_QAV_SEL_MASK 0x000000C0 +#define IGC_TXQCTL_QAV_SEL_CBS0 0x00000080 +#define IGC_TXQCTL_QAV_SEL_CBS1 0x000000C0 + +#define IGC_TQAVCC_IDLESLOPE_MASK 0xFFFF +#define IGC_TQAVCC_KEEP_CREDITS BIT(30) + +#define IGC_MAX_SR_QUEUES 2 + +/* Receive Checksum Control */ +#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* PCIe PTM Control */ +#define IGC_PTM_CTRL_START_NOW BIT(29) /* Start PTM Now */ +#define IGC_PTM_CTRL_EN BIT(30) /* Enable PTM */ +#define IGC_PTM_CTRL_TRIG BIT(31) /* PTM Cycle trigger */ +#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2) +#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8) + +#define IGC_PTM_SHORT_CYC_DEFAULT 1 /* Default short cycle interval */ +#define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */ +#define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */ + +/* PCIe Digital Delay */ +#define IGC_PCIE_DIG_DELAY_DEFAULT 0x01440000 + +/* PCIe PHY Delay */ +#define IGC_PCIE_PHY_DELAY_DEFAULT 0x40900000 + +#define IGC_TIMADJ_ADJUST_METH 0x40000000 + +/* PCIe PTM Status */ +#define IGC_PTM_STAT_VALID BIT(0) /* PTM Status */ +#define IGC_PTM_STAT_RET_ERR BIT(1) /* Root port timeout */ +#define IGC_PTM_STAT_BAD_PTM_RES BIT(2) /* PTM Response msg instead of PTM Response Data */ +#define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */ +#define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */ +#define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */ + +/* PCIe PTM Cycle Control */ +#define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */ +#define IGC_PTM_CYCLE_CTRL_AUTO_CYC_EN BIT(31) /* PTM Cycle Control */ + +/* GPY211 - I225 defines */ +#define GPY_MMD_MASK 0xFFFF0000 +#define GPY_MMD_SHIFT 16 +#define GPY_REG_MASK 0x0000FFFF + +#define IGC_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ + +/* MAC definitions */ +#define IGC_FACTPS_MNGCG 0x20000000 +#define IGC_FWSM_MODE_MASK 0xE +#define IGC_FWSM_MODE_SHIFT 1 + +/* Management Control */ +#define IGC_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define IGC_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ + +/* PHY */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define IGC_GEN_POLL_TIMEOUT 1920 + +/* PHY Control Register */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define IGC_PHY_RST_COMP 0x0100 /* Internal PHY reset completion */ + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base 
Page) */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* MDI Control */ +#define IGC_MDIC_DATA_MASK 0x0000FFFF +#define IGC_MDIC_REG_MASK 0x001F0000 +#define IGC_MDIC_REG_SHIFT 16 +#define IGC_MDIC_PHY_MASK 0x03E00000 +#define IGC_MDIC_PHY_SHIFT 21 +#define IGC_MDIC_OP_WRITE 0x04000000 +#define IGC_MDIC_OP_READ 0x08000000 +#define IGC_MDIC_READY 0x10000000 +#define IGC_MDIC_ERROR 0x40000000 + +#define IGC_N0_QUEUE -1 + +#define IGC_MAX_MAC_HDR_LEN 127 +#define IGC_MAX_NETWORK_HDR_LEN 511 + +#define IGC_VLANPQF_QSEL(_n, q_idx) ((q_idx) << ((_n) * 4)) +#define IGC_VLANPQF_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define IGC_VLANPQF_QUEUE_MASK 0x03 + +#define IGC_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IGC_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type:1=IPv4 */ +#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet Type of TCP */ +#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ + +/* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + +/* EEE defines */ +#define IGC_IPCNFG_EEE_2_5G_AN 0x00000010 /* IPCNFG EEE Ena 2.5G AN */ +#define IGC_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ +#define IGC_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ +#define IGC_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define IGC_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ +#define IGC_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ +#define IGC_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ +#define IGC_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ + +/* LTR defines */ +#define IGC_LTRC_EEEMS_EN 0x00000020 /* Enable EEE LTR max send */ +#define IGC_RXPBS_SIZE_I225_MASK 0x0000003F /* Rx packet buffer size */ +#define IGC_TW_SYSTEM_1000_MASK 0x000000FF +/* Minimum time for 100BASE-T where no data will be transmit following move out + * of EEE LPI Tx state + */ +#define IGC_TW_SYSTEM_100_MASK 0x0000FF00 +#define IGC_TW_SYSTEM_100_SHIFT 8 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMINV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMINV_SCALE_32768 3 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMAXV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMAXV_SCALE_32768 3 +#define IGC_LTRMINV_LTRV_MASK 0x000003FF /* LTR minimum value */ +#define IGC_LTRMAXV_LTRV_MASK 0x000003FF /* LTR maximum value */ +#define IGC_LTRMINV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMINV_SCALE_SHIFT 10 +#define IGC_LTRMAXV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMAXV_SCALE_SHIFT 10 + +#endif /* _IGC_DEFINES_H_ */ diff --git a/devices/igc/igc_diag-6.4-ethercat.c b/devices/igc/igc_diag-6.4-ethercat.c new file mode 100644 index 00000000..028ba55f --- /dev/null +++ b/devices/igc/igc_diag-6.4-ethercat.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Intel Corporation */ + +#include "igc-6.4-ethercat.h" +#include "igc_diag-6.4-ethercat.h" + +static struct igc_reg_test reg_test[] = { + { IGC_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 
+ { IGC_FCRTH, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 }, + { IGC_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { IGC_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_TDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0x003FFFFB }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0xFFFFFFFF }, + { IGC_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RA, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RA, 16, TABLE64_TEST_HI, + 0x900FFFFF, 0xFFFFFFFF }, + { IGC_MTA, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0} +}; + +static bool reg_pattern_test(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = rd32(reg); + wr32(reg, test_pattern[pat] & write); + val = rd32(reg); + if (val != (test_pattern[pat] & write & mask)) { + netdev_err(adapter->netdev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + } + return true; +} + +static bool reg_set_and_check(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 val, before; + + before = rd32(reg); + wr32(reg, write & mask); + val = rd32(reg); + if ((write & mask) != (val & mask)) { + netdev_err(adapter->netdev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X", + reg, (val & mask), (write & mask)); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + return true; +} + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_reg_test *test = reg_test; + struct igc_hw *hw = &adapter->hw; + u32 value, before, after; + u32 i, toggle, b = false; + + /* Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writeable. + */ + toggle = 0x6800D3; + before = rd32(IGC_STATUS); + value = before & toggle; + wr32(IGC_STATUS, toggle); + after = rd32(IGC_STATUS) & toggle; + if (value != after) { + netdev_err(adapter->netdev, + "failed STATUS register test got: 0x%08X expected: 0x%08X", + after, value); + *data = 1; + return false; + } + /* restore previous status */ + wr32(IGC_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. 
+ */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + test->reg + 4 + (i * 8), + test->mask, + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + } + if (!b) + return false; + } + test++; + } + *data = 0; + return true; +} + +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_hw *hw = &adapter->hw; + + *data = 0; + + if (hw->nvm.ops.validate(hw) != IGC_SUCCESS) { + *data = 1; + return false; + } + + return true; +} + +bool igc_link_test(struct igc_adapter *adapter, u64 *data) +{ + bool link_up; + + *data = 0; + + /* add delay to give enough time for autonegotioation to finish */ + if (adapter->hw.mac.autoneg) + ssleep(5); + + link_up = igc_has_link(adapter); + if (!link_up) { + *data = 1; + return false; + } + + return true; +} diff --git a/devices/igc/igc_diag-6.4-ethercat.h b/devices/igc/igc_diag-6.4-ethercat.h new file mode 100644 index 00000000..600658e3 --- /dev/null +++ b/devices/igc/igc_diag-6.4-ethercat.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data); +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data); +bool igc_link_test(struct igc_adapter *adapter, u64 *data); + +struct igc_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define TABLE32_TEST 3 +#define TABLE64_TEST_LO 4 +#define TABLE64_TEST_HI 5 diff --git a/devices/igc/igc_diag-6.4-orig.c b/devices/igc/igc_diag-6.4-orig.c new file mode 100644 index 00000000..cc621970 --- /dev/null +++ b/devices/igc/igc_diag-6.4-orig.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Intel Corporation */ + +#include "igc.h" +#include "igc_diag.h" + +static struct igc_reg_test reg_test[] = { + { IGC_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_FCRTH, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 }, + { IGC_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { IGC_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_TDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0x003FFFFB }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0xFFFFFFFF }, + { IGC_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RA, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RA, 16, TABLE64_TEST_HI, + 0x900FFFFF, 0xFFFFFFFF }, + { IGC_MTA, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0} +}; + +static bool reg_pattern_test(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = rd32(reg); + wr32(reg, test_pattern[pat] & write); + val = rd32(reg); + if (val != (test_pattern[pat] & write & mask)) { + netdev_err(adapter->netdev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + } + return true; +} + +static bool reg_set_and_check(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 val, before; + + before = rd32(reg); + wr32(reg, write & mask); + val = rd32(reg); + if ((write & mask) != (val & mask)) { + netdev_err(adapter->netdev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X", + reg, (val & mask), (write & mask)); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + return true; +} + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_reg_test *test = reg_test; + struct igc_hw *hw = &adapter->hw; + u32 value, before, after; + u32 i, toggle, b = false; + + /* Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writeable. 
+ */ + toggle = 0x6800D3; + before = rd32(IGC_STATUS); + value = before & toggle; + wr32(IGC_STATUS, toggle); + after = rd32(IGC_STATUS) & toggle; + if (value != after) { + netdev_err(adapter->netdev, + "failed STATUS register test got: 0x%08X expected: 0x%08X", + after, value); + *data = 1; + return false; + } + /* restore previous status */ + wr32(IGC_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + test->reg + 4 + (i * 8), + test->mask, + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + } + if (!b) + return false; + } + test++; + } + *data = 0; + return true; +} + +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_hw *hw = &adapter->hw; + + *data = 0; + + if (hw->nvm.ops.validate(hw) != IGC_SUCCESS) { + *data = 1; + return false; + } + + return true; +} + +bool igc_link_test(struct igc_adapter *adapter, u64 *data) +{ + bool link_up; + + *data = 0; + + /* add delay to give enough time for autonegotioation to finish */ + if (adapter->hw.mac.autoneg) + ssleep(5); + + link_up = igc_has_link(adapter); + if (!link_up) { + *data = 1; + return false; + } + + return true; +} diff --git a/devices/igc/igc_diag-6.4-orig.h b/devices/igc/igc_diag-6.4-orig.h new file mode 100644 index 00000000..600658e3 --- /dev/null +++ b/devices/igc/igc_diag-6.4-orig.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data); +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data); +bool igc_link_test(struct igc_adapter *adapter, u64 *data); + +struct igc_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define TABLE32_TEST 3 +#define TABLE64_TEST_LO 4 +#define TABLE64_TEST_HI 5 diff --git a/devices/igc/igc_diag-6.6-ethercat.c b/devices/igc/igc_diag-6.6-ethercat.c new file mode 100644 index 00000000..ed19e433 --- /dev/null +++ b/devices/igc/igc_diag-6.6-ethercat.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Intel Corporation */ + +#include "igc-6.6-ethercat.h" +#include "igc_diag-6.6-ethercat.h" + +static struct igc_reg_test reg_test[] = { + { IGC_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_FCRTH, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 }, + { IGC_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { IGC_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_TDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0x003FFFFB }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0xFFFFFFFF }, + { IGC_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RA, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RA, 16, TABLE64_TEST_HI, + 0x900FFFFF, 0xFFFFFFFF }, + { IGC_MTA, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0} +}; + +static bool reg_pattern_test(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = rd32(reg); + wr32(reg, test_pattern[pat] & write); + val = rd32(reg); + if (val != (test_pattern[pat] & write & mask)) { + netdev_err(adapter->netdev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + } + return true; +} + +static bool reg_set_and_check(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 val, before; + + before = rd32(reg); + wr32(reg, write & mask); + val = rd32(reg); + if ((write & mask) != (val & mask)) { + netdev_err(adapter->netdev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X", + reg, (val & mask), (write & mask)); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + return true; +} + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_reg_test *test = reg_test; + struct igc_hw *hw = &adapter->hw; + u32 value, before, after; + u32 i, toggle, b = false; + + /* Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writeable. 
+ */ + toggle = 0x6800D3; + before = rd32(IGC_STATUS); + value = before & toggle; + wr32(IGC_STATUS, toggle); + after = rd32(IGC_STATUS) & toggle; + if (value != after) { + netdev_err(adapter->netdev, + "failed STATUS register test got: 0x%08X expected: 0x%08X", + after, value); + *data = 1; + return false; + } + /* restore previous status */ + wr32(IGC_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + test->reg + 4 + (i * 8), + test->mask, + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + } + if (!b) + return false; + } + test++; + } + *data = 0; + return true; +} + +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_hw *hw = &adapter->hw; + + *data = 0; + + if (hw->nvm.ops.validate(hw) != IGC_SUCCESS) { + *data = 1; + return false; + } + + return true; +} + +bool igc_link_test(struct igc_adapter *adapter, u64 *data) +{ + bool link_up; + + *data = 0; + + /* add delay to give enough time for autonegotioation to finish */ + if (adapter->hw.mac.autoneg) + ssleep(5); + + link_up = igc_has_link(adapter); + if (!link_up) { + *data = 1; + return false; + } + + return true; +} diff --git a/devices/igc/igc_diag-6.6-ethercat.h b/devices/igc/igc_diag-6.6-ethercat.h new file mode 100644 index 00000000..600658e3 --- /dev/null +++ b/devices/igc/igc_diag-6.6-ethercat.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data); +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data); +bool igc_link_test(struct igc_adapter *adapter, u64 *data); + +struct igc_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define TABLE32_TEST 3 +#define TABLE64_TEST_LO 4 +#define TABLE64_TEST_HI 5 diff --git a/devices/igc/igc_diag-6.6-orig.c b/devices/igc/igc_diag-6.6-orig.c new file mode 100644 index 00000000..cc621970 --- /dev/null +++ b/devices/igc/igc_diag-6.6-orig.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Intel Corporation */ + +#include "igc.h" +#include "igc_diag.h" + +static struct igc_reg_test reg_test[] = { + { IGC_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { IGC_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_FCRTH, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 }, + { IGC_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { IGC_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IGC_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IGC_TDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IGC_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0x003FFFFB }, + { IGC_RCTL, 1, SET_READ_TEST, 0x04CFB2FE, 0xFFFFFFFF }, + { IGC_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { IGC_RA, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { IGC_RA, 16, TABLE64_TEST_HI, + 0x900FFFFF, 0xFFFFFFFF }, + { IGC_MTA, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0} +}; + +static bool reg_pattern_test(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = rd32(reg); + wr32(reg, test_pattern[pat] & write); + val = rd32(reg); + if (val != (test_pattern[pat] & write & mask)) { + netdev_err(adapter->netdev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X", + reg, val, test_pattern[pat] & write & mask); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + } + return true; +} + +static bool reg_set_and_check(struct igc_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct igc_hw *hw = &adapter->hw; + u32 val, before; + + before = rd32(reg); + wr32(reg, write & mask); + val = rd32(reg); + if ((write & mask) != (val & mask)) { + netdev_err(adapter->netdev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X", + reg, (val & mask), (write & mask)); + *data = reg; + wr32(reg, before); + return false; + } + wr32(reg, before); + return true; +} + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_reg_test *test = reg_test; + struct igc_hw *hw = &adapter->hw; + u32 value, before, after; + u32 i, toggle, b = false; + + /* Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writeable. 
+ */ + toggle = 0x6800D3; + before = rd32(IGC_STATUS); + value = before & toggle; + wr32(IGC_STATUS, toggle); + after = rd32(IGC_STATUS) & toggle; + if (value != after) { + netdev_err(adapter->netdev, + "failed STATUS register test got: 0x%08X expected: 0x%08X", + after, value); + *data = 1; + return false; + } + /* restore previous status */ + wr32(IGC_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + test->reg + 4 + (i * 8), + test->mask, + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + } + if (!b) + return false; + } + test++; + } + *data = 0; + return true; +} + +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data) +{ + struct igc_hw *hw = &adapter->hw; + + *data = 0; + + if (hw->nvm.ops.validate(hw) != IGC_SUCCESS) { + *data = 1; + return false; + } + + return true; +} + +bool igc_link_test(struct igc_adapter *adapter, u64 *data) +{ + bool link_up; + + *data = 0; + + /* add delay to give enough time for autonegotioation to finish */ + if (adapter->hw.mac.autoneg) + ssleep(5); + + link_up = igc_has_link(adapter); + if (!link_up) { + *data = 1; + return false; + } + + return true; +} diff --git a/devices/igc/igc_diag-6.6-orig.h b/devices/igc/igc_diag-6.6-orig.h new file mode 100644 index 00000000..600658e3 --- /dev/null +++ b/devices/igc/igc_diag-6.6-orig.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +bool igc_reg_test(struct igc_adapter *adapter, u64 *data); +bool igc_eeprom_test(struct igc_adapter *adapter, u64 *data); +bool igc_link_test(struct igc_adapter *adapter, u64 *data); + +struct igc_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define TABLE32_TEST 3 +#define TABLE64_TEST_LO 4 +#define TABLE64_TEST_HI 5 diff --git a/devices/igc/igc_dump-6.4-ethercat.c b/devices/igc/igc_dump-6.4-ethercat.c new file mode 100644 index 00000000..f2987242 --- /dev/null +++ b/devices/igc/igc_dump-6.4-ethercat.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc-6.4-ethercat.h" + +struct igc_reg_info { + u32 ofs; + char *name; +}; + +static const struct igc_reg_info igc_reg_info_tbl[] = { + /* General Registers */ + {IGC_CTRL, "CTRL"}, + {IGC_STATUS, "STATUS"}, + {IGC_CTRL_EXT, "CTRL_EXT"}, + {IGC_MDIC, "MDIC"}, + + /* Interrupt Registers */ + {IGC_ICR, "ICR"}, + + /* RX Registers */ + {IGC_RCTL, "RCTL"}, + {IGC_RDLEN(0), "RDLEN"}, + {IGC_RDH(0), "RDH"}, + {IGC_RDT(0), "RDT"}, + {IGC_RXDCTL(0), "RXDCTL"}, + {IGC_RDBAL(0), "RDBAL"}, + {IGC_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {IGC_TCTL, "TCTL"}, + {IGC_TDBAL(0), "TDBAL"}, + {IGC_TDBAH(0), "TDBAH"}, + {IGC_TDLEN(0), "TDLEN"}, + {IGC_TDH(0), "TDH"}, + {IGC_TDT(0), "TDT"}, + {IGC_TXDCTL(0), "TXDCTL"}, + + /* List Terminator */ + {} +}; + +/* igc_regdump - register printout routine */ +static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo) +{ + struct net_device *dev = igc_get_hw_dev(hw); + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case IGC_RDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDLEN(n)); + break; + case IGC_RDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDH(n)); + break; + case IGC_RDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDT(n)); + break; + case IGC_RXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RXDCTL(n)); + break; + case IGC_RDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAL(n)); + break; + case IGC_RDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAH(n)); + break; + case IGC_TDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAL(n)); + break; + case IGC_TDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAH(n)); + break; + case IGC_TDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDLEN(n)); + break; + case IGC_TDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDH(n)); + break; + case IGC_TDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDT(n)); + break; + case IGC_TXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TXDCTL(n)); + break; + default: + netdev_info(dev, "%-15s %08x\n", reginfo->name, + rd32(reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); + netdev_info(dev, "%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], + regs[2], regs[3]); +} + +/* igc_rings_dump - Tx-rings and Rx-rings */ +void igc_rings_dump(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct my_u0 { __le64 a; __le64 b; } *u0; + union igc_adv_tx_desc *tx_desc; + union igc_adv_rx_desc *rx_desc; + struct igc_ring *tx_ring; + struct igc_ring *rx_ring; + u32 staterr; + u16 i, n; + + if (!netif_msg_hw(adapter)) + return; + + netdev_info(netdev, "Device info: state %016lX trans_start %016lX\n", + netdev->state, dev_trans_start(netdev)); + + /* Print TX Ring Summary */ + if (!netif_running(netdev)) + goto exit; + + netdev_info(netdev, "TX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + for (n = 0; n < adapter->num_tx_queues; n++) { + struct igc_tx_buffer *buffer_info; + + tx_ring = adapter->tx_ring[n]; + 
buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + + netdev_info(netdev, "%5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + netdev_info(netdev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 15 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "TX QUEUE INDEX = %d\n", + tx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + struct igc_tx_buffer *buffer_info; + + tx_desc = IGC_TX_DESC(tx_ring, i); + buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + netdev_info(netdev, "T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", + i, le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + dma_unmap_len(buffer_info, len), + true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + netdev_info(netdev, "RX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + netdev_info(netdev, "%5d %5X %5X\n", n, rx_ring->next_to_use, + rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + netdev_info(netdev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = 
adapter->rx_ring[n]; + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "RX QUEUE INDEX = %d\n", + rx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + netdev_info(netdev, "RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + struct igc_rx_buffer *buffer_info; + + buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IGC_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & IGC_RXD_STAT_DD) { + /* Descriptor Done */ + netdev_info(netdev, "%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + next_desc); + } else { + netdev_info(netdev, "%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)buffer_info->dma, + next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->page) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + page_address + (buffer_info->page) + + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + true); + } + } + } + } + +exit: + return; +} + +/* igc_regs_dump - registers dump */ +void igc_regs_dump(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + struct igc_reg_info *reginfo; + + /* Print Registers */ + netdev_info(adapter->netdev, "Register Dump\n"); + netdev_info(adapter->netdev, "Register Name Value\n"); + for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl; + reginfo->name; reginfo++) { + igc_regdump(hw, reginfo); + } +} diff --git a/devices/igc/igc_dump-6.4-orig.c b/devices/igc/igc_dump-6.4-orig.c new file mode 100644 index 00000000..c09c95cc --- /dev/null +++ b/devices/igc/igc_dump-6.4-orig.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc.h" + +struct igc_reg_info { + u32 ofs; + char *name; +}; + +static const struct igc_reg_info igc_reg_info_tbl[] = { + /* General Registers */ + {IGC_CTRL, "CTRL"}, + {IGC_STATUS, "STATUS"}, + {IGC_CTRL_EXT, "CTRL_EXT"}, + {IGC_MDIC, "MDIC"}, + + /* Interrupt Registers */ + {IGC_ICR, "ICR"}, + + /* RX Registers */ + {IGC_RCTL, "RCTL"}, + {IGC_RDLEN(0), "RDLEN"}, + {IGC_RDH(0), "RDH"}, + {IGC_RDT(0), "RDT"}, + {IGC_RXDCTL(0), "RXDCTL"}, + {IGC_RDBAL(0), "RDBAL"}, + {IGC_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {IGC_TCTL, "TCTL"}, + {IGC_TDBAL(0), "TDBAL"}, + {IGC_TDBAH(0), "TDBAH"}, + {IGC_TDLEN(0), "TDLEN"}, + {IGC_TDH(0), "TDH"}, + {IGC_TDT(0), "TDT"}, + {IGC_TXDCTL(0), "TXDCTL"}, + + /* List Terminator */ + {} +}; + +/* igc_regdump - register printout routine */ +static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo) +{ + struct net_device *dev = igc_get_hw_dev(hw); + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case IGC_RDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDLEN(n)); + break; + case IGC_RDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDH(n)); + break; + case IGC_RDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDT(n)); + break; + case IGC_RXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = 
rd32(IGC_RXDCTL(n)); + break; + case IGC_RDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAL(n)); + break; + case IGC_RDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAH(n)); + break; + case IGC_TDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAL(n)); + break; + case IGC_TDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAH(n)); + break; + case IGC_TDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDLEN(n)); + break; + case IGC_TDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDH(n)); + break; + case IGC_TDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDT(n)); + break; + case IGC_TXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TXDCTL(n)); + break; + default: + netdev_info(dev, "%-15s %08x\n", reginfo->name, + rd32(reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); + netdev_info(dev, "%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], + regs[2], regs[3]); +} + +/* igc_rings_dump - Tx-rings and Rx-rings */ +void igc_rings_dump(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct my_u0 { __le64 a; __le64 b; } *u0; + union igc_adv_tx_desc *tx_desc; + union igc_adv_rx_desc *rx_desc; + struct igc_ring *tx_ring; + struct igc_ring *rx_ring; + u32 staterr; + u16 i, n; + + if (!netif_msg_hw(adapter)) + return; + + netdev_info(netdev, "Device info: state %016lX trans_start %016lX\n", + netdev->state, dev_trans_start(netdev)); + + /* Print TX Ring Summary */ + if (!netif_running(netdev)) + goto exit; + + netdev_info(netdev, "TX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + for (n = 0; n < adapter->num_tx_queues; n++) { + struct igc_tx_buffer *buffer_info; + + tx_ring = adapter->tx_ring[n]; + buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + + netdev_info(netdev, "%5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + netdev_info(netdev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 15 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "TX QUEUE INDEX = %d\n", + tx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + struct igc_tx_buffer *buffer_info; + + tx_desc = IGC_TX_DESC(tx_ring, i); + buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = 
""; + + netdev_info(netdev, "T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", + i, le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + dma_unmap_len(buffer_info, len), + true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + netdev_info(netdev, "RX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + netdev_info(netdev, "%5d %5X %5X\n", n, rx_ring->next_to_use, + rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + netdev_info(netdev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "RX QUEUE INDEX = %d\n", + rx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + netdev_info(netdev, "RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + struct igc_rx_buffer *buffer_info; + + buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IGC_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & IGC_RXD_STAT_DD) { + /* Descriptor Done */ + netdev_info(netdev, "%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + next_desc); + } else { + netdev_info(netdev, "%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)buffer_info->dma, + next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->page) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + page_address + (buffer_info->page) + + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + true); + } + } + } + } + +exit: + return; +} + +/* igc_regs_dump - registers dump */ +void igc_regs_dump(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + struct igc_reg_info *reginfo; + + /* Print Registers */ + 
netdev_info(adapter->netdev, "Register Dump\n"); + netdev_info(adapter->netdev, "Register Name Value\n"); + for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl; + reginfo->name; reginfo++) { + igc_regdump(hw, reginfo); + } +} diff --git a/devices/igc/igc_dump-6.6-ethercat.c b/devices/igc/igc_dump-6.6-ethercat.c new file mode 100644 index 00000000..85de2cbd --- /dev/null +++ b/devices/igc/igc_dump-6.6-ethercat.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc-6.6-ethercat.h" + +struct igc_reg_info { + u32 ofs; + char *name; +}; + +static const struct igc_reg_info igc_reg_info_tbl[] = { + /* General Registers */ + {IGC_CTRL, "CTRL"}, + {IGC_STATUS, "STATUS"}, + {IGC_CTRL_EXT, "CTRL_EXT"}, + {IGC_MDIC, "MDIC"}, + + /* Interrupt Registers */ + {IGC_ICR, "ICR"}, + + /* RX Registers */ + {IGC_RCTL, "RCTL"}, + {IGC_RDLEN(0), "RDLEN"}, + {IGC_RDH(0), "RDH"}, + {IGC_RDT(0), "RDT"}, + {IGC_RXDCTL(0), "RXDCTL"}, + {IGC_RDBAL(0), "RDBAL"}, + {IGC_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {IGC_TCTL, "TCTL"}, + {IGC_TDBAL(0), "TDBAL"}, + {IGC_TDBAH(0), "TDBAH"}, + {IGC_TDLEN(0), "TDLEN"}, + {IGC_TDH(0), "TDH"}, + {IGC_TDT(0), "TDT"}, + {IGC_TXDCTL(0), "TXDCTL"}, + + /* List Terminator */ + {} +}; + +/* igc_regdump - register printout routine */ +static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo) +{ + struct net_device *dev = igc_get_hw_dev(hw); + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case IGC_RDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDLEN(n)); + break; + case IGC_RDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDH(n)); + break; + case IGC_RDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDT(n)); + break; + case IGC_RXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RXDCTL(n)); + break; + case IGC_RDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAL(n)); + break; + case IGC_RDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAH(n)); + break; + case IGC_TDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAL(n)); + break; + case IGC_TDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAH(n)); + break; + case IGC_TDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDLEN(n)); + break; + case IGC_TDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDH(n)); + break; + case IGC_TDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDT(n)); + break; + case IGC_TXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TXDCTL(n)); + break; + default: + netdev_info(dev, "%-15s %08x\n", reginfo->name, + rd32(reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); + netdev_info(dev, "%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], + regs[2], regs[3]); +} + +/* igc_rings_dump - Tx-rings and Rx-rings */ +void igc_rings_dump(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct my_u0 { __le64 a; __le64 b; } *u0; + union igc_adv_tx_desc *tx_desc; + union igc_adv_rx_desc *rx_desc; + struct igc_ring *tx_ring; + struct igc_ring *rx_ring; + u32 staterr; + u16 i, n; + + if (!netif_msg_hw(adapter)) + return; + + netdev_info(netdev, "Device info: state %016lX trans_start %016lX\n", + netdev->state, dev_trans_start(netdev)); + + /* Print TX Ring Summary */ + if (!netif_running(netdev)) + goto exit; + + netdev_info(netdev, "TX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + for (n = 0; n < 
adapter->num_tx_queues; n++) { + struct igc_tx_buffer *buffer_info; + + tx_ring = adapter->tx_ring[n]; + buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + + netdev_info(netdev, "%5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + netdev_info(netdev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 15 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "TX QUEUE INDEX = %d\n", + tx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + struct igc_tx_buffer *buffer_info; + + tx_desc = IGC_TX_DESC(tx_ring, i); + buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + netdev_info(netdev, "T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", + i, le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + dma_unmap_len(buffer_info, len), + true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + netdev_info(netdev, "RX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + netdev_info(netdev, "%5d %5X %5X\n", n, rx_ring->next_to_use, + rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + netdev_info(netdev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * 
+------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "RX QUEUE INDEX = %d\n", + rx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + netdev_info(netdev, "RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + struct igc_rx_buffer *buffer_info; + + buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IGC_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & IGC_RXD_STAT_DD) { + /* Descriptor Done */ + netdev_info(netdev, "%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + next_desc); + } else { + netdev_info(netdev, "%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)buffer_info->dma, + next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->page) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + page_address + (buffer_info->page) + + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + true); + } + } + } + } + +exit: + return; +} + +/* igc_regs_dump - registers dump */ +void igc_regs_dump(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + struct igc_reg_info *reginfo; + + /* Print Registers */ + netdev_info(adapter->netdev, "Register Dump\n"); + netdev_info(adapter->netdev, "Register Name Value\n"); + for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl; + reginfo->name; reginfo++) { + igc_regdump(hw, reginfo); + } +} diff --git a/devices/igc/igc_dump-6.6-orig.c b/devices/igc/igc_dump-6.6-orig.c new file mode 100644 index 00000000..c09c95cc --- /dev/null +++ b/devices/igc/igc_dump-6.6-orig.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc.h" + +struct igc_reg_info { + u32 ofs; + char *name; +}; + +static const struct igc_reg_info igc_reg_info_tbl[] = { + /* General Registers */ + {IGC_CTRL, "CTRL"}, + {IGC_STATUS, "STATUS"}, + {IGC_CTRL_EXT, "CTRL_EXT"}, + {IGC_MDIC, "MDIC"}, + + /* Interrupt Registers */ + {IGC_ICR, "ICR"}, + + /* RX Registers */ + {IGC_RCTL, "RCTL"}, + {IGC_RDLEN(0), "RDLEN"}, + {IGC_RDH(0), "RDH"}, + {IGC_RDT(0), "RDT"}, + {IGC_RXDCTL(0), "RXDCTL"}, + {IGC_RDBAL(0), "RDBAL"}, + {IGC_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {IGC_TCTL, "TCTL"}, + {IGC_TDBAL(0), "TDBAL"}, + {IGC_TDBAH(0), "TDBAH"}, + {IGC_TDLEN(0), "TDLEN"}, + {IGC_TDH(0), "TDH"}, + {IGC_TDT(0), "TDT"}, + {IGC_TXDCTL(0), "TXDCTL"}, + + /* List Terminator */ + {} +}; + +/* igc_regdump - register printout routine */ +static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo) +{ + struct net_device *dev = igc_get_hw_dev(hw); + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case IGC_RDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDLEN(n)); + break; + case IGC_RDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDH(n)); + break; + 
case IGC_RDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDT(n)); + break; + case IGC_RXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RXDCTL(n)); + break; + case IGC_RDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAL(n)); + break; + case IGC_RDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_RDBAH(n)); + break; + case IGC_TDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAL(n)); + break; + case IGC_TDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDBAH(n)); + break; + case IGC_TDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDLEN(n)); + break; + case IGC_TDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDH(n)); + break; + case IGC_TDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TDT(n)); + break; + case IGC_TXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(IGC_TXDCTL(n)); + break; + default: + netdev_info(dev, "%-15s %08x\n", reginfo->name, + rd32(reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); + netdev_info(dev, "%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], + regs[2], regs[3]); +} + +/* igc_rings_dump - Tx-rings and Rx-rings */ +void igc_rings_dump(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct my_u0 { __le64 a; __le64 b; } *u0; + union igc_adv_tx_desc *tx_desc; + union igc_adv_rx_desc *rx_desc; + struct igc_ring *tx_ring; + struct igc_ring *rx_ring; + u32 staterr; + u16 i, n; + + if (!netif_msg_hw(adapter)) + return; + + netdev_info(netdev, "Device info: state %016lX trans_start %016lX\n", + netdev->state, dev_trans_start(netdev)); + + /* Print TX Ring Summary */ + if (!netif_running(netdev)) + goto exit; + + netdev_info(netdev, "TX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + for (n = 0; n < adapter->num_tx_queues; n++) { + struct igc_tx_buffer *buffer_info; + + tx_ring = adapter->tx_ring[n]; + buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + + netdev_info(netdev, "%5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + netdev_info(netdev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 15 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "TX QUEUE INDEX = %d\n", + tx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + struct igc_tx_buffer *buffer_info; + + tx_desc = IGC_TX_DESC(tx_ring, i); + buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + 
else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + netdev_info(netdev, "T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", + i, le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + dma_unmap_len(buffer_info, len), + true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + netdev_info(netdev, "RX Rings Summary\n"); + netdev_info(netdev, "Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + netdev_info(netdev, "%5d %5X %5X\n", n, rx_ring->next_to_use, + rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + netdev_info(netdev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "RX QUEUE INDEX = %d\n", + rx_ring->queue_index); + netdev_info(netdev, "------------------------------------\n"); + netdev_info(netdev, "R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + netdev_info(netdev, "RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + struct igc_rx_buffer *buffer_info; + + buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IGC_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & IGC_RXD_STAT_DD) { + /* Descriptor Done */ + netdev_info(netdev, "%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + next_desc); + } else { + netdev_info(netdev, "%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)buffer_info->dma, + next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->page) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + page_address + (buffer_info->page) + + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + true); + } + } + } + } + +exit: + return; +} + +/* igc_regs_dump - registers dump */ +void 
igc_regs_dump(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ struct igc_reg_info *reginfo;
+
+ /* Print Registers */
+ netdev_info(adapter->netdev, "Register Dump\n");
+ netdev_info(adapter->netdev, "Register Name Value\n");
+ for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ igc_regdump(hw, reginfo);
+ }
+}
diff --git a/devices/igc/igc_ethtool-6.4-ethercat.c b/devices/igc/igc_ethtool-6.4-ethercat.c
new file mode 100644
index 00000000..3e6c7af6
--- /dev/null
+++ b/devices/igc/igc_ethtool-6.4-ethercat.c
@@ -0,0 +1,1983 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+/* ethtool support for igc */
+#include <linux/if_vlan.h>
+#include <linux/pm_runtime.h>
+#include <linux/mdio.h>
+
+#include "igc-6.4-ethercat.h"
+#include "igc_diag-6.4-ethercat.h"
+
+/* forward declaration */
+struct igc_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define IGC_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = sizeof_field(struct igc_adapter, _stat), \
+ .stat_offset = offsetof(struct igc_adapter, _stat) \
+}
+
+static const struct igc_stats igc_gstrings_stats[] = {
+ IGC_STAT("rx_packets", stats.gprc),
+ IGC_STAT("tx_packets", stats.gptc),
+ IGC_STAT("rx_bytes", stats.gorc),
+ IGC_STAT("tx_bytes", stats.gotc),
+ IGC_STAT("rx_broadcast", stats.bprc),
+ IGC_STAT("tx_broadcast", stats.bptc),
+ IGC_STAT("rx_multicast", stats.mprc),
+ IGC_STAT("tx_multicast", stats.mptc),
+ IGC_STAT("multicast", stats.mprc),
+ IGC_STAT("collisions", stats.colc),
+ IGC_STAT("rx_crc_errors", stats.crcerrs),
+ IGC_STAT("rx_no_buffer_count", stats.rnbc),
+ IGC_STAT("rx_missed_errors", stats.mpc),
+ IGC_STAT("tx_aborted_errors", stats.ecol),
+ IGC_STAT("tx_carrier_errors", stats.tncrs),
+ IGC_STAT("tx_window_errors", stats.latecol),
+ IGC_STAT("tx_abort_late_coll", stats.latecol),
+ IGC_STAT("tx_deferred_ok", stats.dc),
+ IGC_STAT("tx_single_coll_ok", stats.scc),
+ IGC_STAT("tx_multi_coll_ok", stats.mcc),
+ IGC_STAT("tx_timeout_count", tx_timeout_count),
+ IGC_STAT("rx_long_length_errors", stats.roc),
+ IGC_STAT("rx_short_length_errors", stats.ruc),
+ IGC_STAT("rx_align_errors", stats.algnerrc),
+ IGC_STAT("tx_tcp_seg_good", stats.tsctc),
+ IGC_STAT("tx_tcp_seg_failed", stats.tsctfc),
+ IGC_STAT("rx_flow_control_xon", stats.xonrxc),
+ IGC_STAT("rx_flow_control_xoff", stats.xoffrxc),
+ IGC_STAT("tx_flow_control_xon", stats.xontxc),
+ IGC_STAT("tx_flow_control_xoff", stats.xofftxc),
+ IGC_STAT("rx_long_byte_count", stats.gorc),
+ IGC_STAT("tx_dma_out_of_sync", stats.doosync),
+ IGC_STAT("tx_smbus", stats.mgptc),
+ IGC_STAT("rx_smbus", stats.mgprc),
+ IGC_STAT("dropped_smbus", stats.mgpdc),
+ IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
+ IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
+ IGC_STAT("os2bmc_tx_by_host", stats.o2bspc),
+ IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+ IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
+ IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ IGC_STAT("tx_lpi_counter", stats.tlpic),
+ IGC_STAT("rx_lpi_counter", stats.rlpic),
+ IGC_STAT("qbv_config_change_errors", qbv_config_change_errors),
+};
+
+#define IGC_NETDEV_STAT(_net_stat) { \
+ .stat_string = __stringify(_net_stat), \
+ .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
+}
+
+static const struct igc_stats igc_gstrings_net_stats[] = {
+ IGC_NETDEV_STAT(rx_errors),
+ IGC_NETDEV_STAT(tx_errors),
+ IGC_NETDEV_STAT(tx_dropped), + IGC_NETDEV_STAT(rx_length_errors), + IGC_NETDEV_STAT(rx_over_errors), + IGC_NETDEV_STAT(rx_frame_errors), + IGC_NETDEV_STAT(rx_fifo_errors), + IGC_NETDEV_STAT(tx_fifo_errors), + IGC_NETDEV_STAT(tx_heartbeat_errors) +}; + +enum igc_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + +static const char igc_gstrings_test[][ETH_GSTRING_LEN] = { + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" +}; + +#define IGC_TEST_LEN (sizeof(igc_gstrings_test) / ETH_GSTRING_LEN) + +#define IGC_GLOBAL_STATS_LEN \ + (sizeof(igc_gstrings_stats) / sizeof(struct igc_stats)) +#define IGC_NETDEV_STATS_LEN \ + (sizeof(igc_gstrings_net_stats) / sizeof(struct igc_stats)) +#define IGC_RX_QUEUE_STATS_LEN \ + (sizeof(struct igc_rx_queue_stats) / sizeof(u64)) +#define IGC_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ +#define IGC_QUEUE_STATS_LEN \ + ((((struct igc_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGC_RX_QUEUE_STATS_LEN) + \ + (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \ + IGC_TX_QUEUE_STATS_LEN)) +#define IGC_STATS_LEN \ + (IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN) + +static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings) + +static void igc_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u16 nvm_version = 0; + u16 gphy_version; + + strscpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver)); + + /* NVM image version is reported as firmware version for i225 device */ + hw->nvm.ops.read(hw, IGC_NVM_DEV_STARTER, 1, &nvm_version); + + /* gPHY firmware version is reported as PHY FW version */ + gphy_version = igc_read_phy_fw_version(hw); + + scnprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%x:%x", + nvm_version, + gphy_version); + + strscpy(drvinfo->fw_version, adapter->fw_version, + sizeof(drvinfo->fw_version)); + + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN; +} + +static int igc_ethtool_get_regs_len(struct net_device *netdev) +{ + return IGC_REGS_LEN * sizeof(u32); +} + +static void igc_ethtool_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IGC_REGS_LEN * sizeof(u32)); + + regs->version = (2u << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ + regs_buff[0] = rd32(IGC_CTRL); + regs_buff[1] = rd32(IGC_STATUS); + regs_buff[2] = rd32(IGC_CTRL_EXT); + regs_buff[3] = rd32(IGC_MDIC); + regs_buff[4] = rd32(IGC_CONNSW); + + /* NVM Register */ + regs_buff[5] = rd32(IGC_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read the + * same but EICS does not clear on read + */ + regs_buff[6] = rd32(IGC_EICS); + regs_buff[7] = rd32(IGC_EICS); + regs_buff[8] = rd32(IGC_EIMS); + regs_buff[9] = rd32(IGC_EIMC); + regs_buff[10] = rd32(IGC_EIAC); + regs_buff[11] = rd32(IGC_EIAM); + /* Reading ICS for ICR because they read the + * same but ICS does not clear on read 
+ */ + regs_buff[12] = rd32(IGC_ICS); + regs_buff[13] = rd32(IGC_ICS); + regs_buff[14] = rd32(IGC_IMS); + regs_buff[15] = rd32(IGC_IMC); + regs_buff[16] = rd32(IGC_IAC); + regs_buff[17] = rd32(IGC_IAM); + + /* Flow Control */ + regs_buff[18] = rd32(IGC_FCAL); + regs_buff[19] = rd32(IGC_FCAH); + regs_buff[20] = rd32(IGC_FCTTV); + regs_buff[21] = rd32(IGC_FCRTL); + regs_buff[22] = rd32(IGC_FCRTH); + regs_buff[23] = rd32(IGC_FCRTV); + + /* Receive */ + regs_buff[24] = rd32(IGC_RCTL); + regs_buff[25] = rd32(IGC_RXCSUM); + regs_buff[26] = rd32(IGC_RLPML); + regs_buff[27] = rd32(IGC_RFCTL); + + /* Transmit */ + regs_buff[28] = rd32(IGC_TCTL); + regs_buff[29] = rd32(IGC_TIPG); + + /* Wake Up */ + + /* MAC */ + + /* Statistics */ + regs_buff[30] = adapter->stats.crcerrs; + regs_buff[31] = adapter->stats.algnerrc; + regs_buff[32] = adapter->stats.symerrs; + regs_buff[33] = adapter->stats.rxerrc; + regs_buff[34] = adapter->stats.mpc; + regs_buff[35] = adapter->stats.scc; + regs_buff[36] = adapter->stats.ecol; + regs_buff[37] = adapter->stats.mcc; + regs_buff[38] = adapter->stats.latecol; + regs_buff[39] = adapter->stats.colc; + regs_buff[40] = adapter->stats.dc; + regs_buff[41] = adapter->stats.tncrs; + regs_buff[42] = adapter->stats.sec; + regs_buff[43] = adapter->stats.htdpmc; + regs_buff[44] = adapter->stats.rlec; + regs_buff[45] = adapter->stats.xonrxc; + regs_buff[46] = adapter->stats.xontxc; + regs_buff[47] = adapter->stats.xoffrxc; + regs_buff[48] = adapter->stats.xofftxc; + regs_buff[49] = adapter->stats.fcruc; + regs_buff[50] = adapter->stats.prc64; + regs_buff[51] = adapter->stats.prc127; + regs_buff[52] = adapter->stats.prc255; + regs_buff[53] = adapter->stats.prc511; + regs_buff[54] = adapter->stats.prc1023; + regs_buff[55] = adapter->stats.prc1522; + regs_buff[56] = adapter->stats.gprc; + regs_buff[57] = adapter->stats.bprc; + regs_buff[58] = adapter->stats.mprc; + regs_buff[59] = adapter->stats.gptc; + regs_buff[60] = adapter->stats.gorc; + regs_buff[61] = adapter->stats.gotc; + regs_buff[62] = adapter->stats.rnbc; + regs_buff[63] = adapter->stats.ruc; + regs_buff[64] = adapter->stats.rfc; + regs_buff[65] = adapter->stats.roc; + regs_buff[66] = adapter->stats.rjc; + regs_buff[67] = adapter->stats.mgprc; + regs_buff[68] = adapter->stats.mgpdc; + regs_buff[69] = adapter->stats.mgptc; + regs_buff[70] = adapter->stats.tor; + regs_buff[71] = adapter->stats.tot; + regs_buff[72] = adapter->stats.tpr; + regs_buff[73] = adapter->stats.tpt; + regs_buff[74] = adapter->stats.ptc64; + regs_buff[75] = adapter->stats.ptc127; + regs_buff[76] = adapter->stats.ptc255; + regs_buff[77] = adapter->stats.ptc511; + regs_buff[78] = adapter->stats.ptc1023; + regs_buff[79] = adapter->stats.ptc1522; + regs_buff[80] = adapter->stats.mptc; + regs_buff[81] = adapter->stats.bptc; + regs_buff[82] = adapter->stats.tsctc; + regs_buff[83] = adapter->stats.iac; + regs_buff[84] = adapter->stats.rpthc; + regs_buff[85] = adapter->stats.hgptc; + regs_buff[86] = adapter->stats.hgorc; + regs_buff[87] = adapter->stats.hgotc; + regs_buff[88] = adapter->stats.lenerrs; + regs_buff[89] = adapter->stats.scvpc; + regs_buff[90] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) + regs_buff[91 + i] = rd32(IGC_SRRCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[95 + i] = rd32(IGC_PSRTYPE(i)); + for (i = 0; i < 4; i++) + regs_buff[99 + i] = rd32(IGC_RDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[103 + i] = rd32(IGC_RDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[107 + i] = rd32(IGC_RDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[111 + 
i] = rd32(IGC_RDH(i)); + for (i = 0; i < 4; i++) + regs_buff[115 + i] = rd32(IGC_RDT(i)); + for (i = 0; i < 4; i++) + regs_buff[119 + i] = rd32(IGC_RXDCTL(i)); + + for (i = 0; i < 10; i++) + regs_buff[123 + i] = rd32(IGC_EITR(i)); + for (i = 0; i < 16; i++) + regs_buff[139 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[145 + i] = rd32(IGC_RAH(i)); + + for (i = 0; i < 4; i++) + regs_buff[149 + i] = rd32(IGC_TDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[152 + i] = rd32(IGC_TDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[156 + i] = rd32(IGC_TDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[160 + i] = rd32(IGC_TDH(i)); + for (i = 0; i < 4; i++) + regs_buff[164 + i] = rd32(IGC_TDT(i)); + for (i = 0; i < 4; i++) + regs_buff[168 + i] = rd32(IGC_TXDCTL(i)); + + /* XXX: Due to a bug few lines above, RAL and RAH registers are + * overwritten. To preserve the ABI, we write these registers again in + * regs_buff. + */ + for (i = 0; i < 16; i++) + regs_buff[172 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[188 + i] = rd32(IGC_RAH(i)); + + regs_buff[204] = rd32(IGC_VLANPQF); + + for (i = 0; i < 8; i++) + regs_buff[205 + i] = rd32(IGC_ETQF(i)); + + regs_buff[213] = adapter->stats.tlpic; + regs_buff[214] = adapter->stats.rlpic; +} + +static void igc_ethtool_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + wol->wolopts = 0; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + + /* apply any specific unsupported masks here */ + switch (adapter->hw.device_id) { + default: + break; + } + + if (adapter->wol & IGC_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & IGC_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & IGC_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & IGC_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & IGC_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int igc_ethtool_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EOPNOTSUPP; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= IGC_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= IGC_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= IGC_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= IGC_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= IGC_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static u32 igc_ethtool_get_msglevel(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void igc_ethtool_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int igc_ethtool_nway_reset(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + return 0; +} + +static u32 igc_ethtool_get_link(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_mac_info *mac = &adapter->hw.mac; + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + mac->get_link_status = 1; + + return igc_has_link(adapter); +} + +static int igc_ethtool_get_eeprom_len(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->hw.nvm.word_size * 2; +} + +static int igc_ethtool_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int first_word, last_word; + u16 *eeprom_buff; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == igc_nvm_eeprom_spi) { + ret_val = hw->nvm.ops.read(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = hw->nvm.ops.read(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int igc_ethtool_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int max_len, first_word, last_word, ret_val = 0; + u16 *eeprom_buff; + void *ptr; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (hw->mac.type >= igc_i225 && + !igc_get_flash_presence_i225(hw)) { + return -EOPNOTSUPP; + } + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; 
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->nvm.ops.write(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) + hw->nvm.ops.update(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void +igc_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IGC_MAX_RXD; + ring->tx_max_pending = IGC_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int +igc_ethtool_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_ring *temp_ring; + u16 new_rx_count, new_tx_count; + int i, err = 0; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + new_rx_count = min_t(u32, ring->rx_pending, IGC_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGC_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = min_t(u32, ring->tx_pending, IGC_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGC_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == adapter->tx_ring_count && + new_rx_count == adapter->rx_ring_count) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_tx_queues)); + else + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_rx_queues)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igc_down(adapter); + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. 
+ */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_tx_count; + err = igc_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + igc_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_rx_count; + err = igc_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + igc_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } +err_setup: + igc_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGC_RESETTING, &adapter->state); + return err; +} + +static void igc_ethtool_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == igc_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int igc_ethtool_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = igc_fc_default; + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + retval = ((hw->phy.media_type == igc_media_type_copper) ? 
+ igc_force_mac_fc(hw) : igc_setup_link(hw)); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + return retval; +} + +static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igc_gstrings_test, + IGC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, igc_gstrings_stats[i].stat_string); + for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) + ethtool_sprintf(&p, + igc_gstrings_net_stats[i].stat_string); + for (i = 0; i < adapter->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "tx_queue_%u_restart", i); + } + for (i = 0; i < adapter->num_rx_queues; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_drops", i); + ethtool_sprintf(&p, "rx_queue_%u_csum_err", i); + ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i); + } + /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, igc_priv_flags_strings, + IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IGC_STATS_LEN; + case ETH_SS_TEST: + return IGC_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return IGC_PRIV_FLAGS_STR_LEN; + default: + return -ENOTSUPP; + } +} + +static void igc_ethtool_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igc_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igc_gstrings_stats[i].stat_offset; + data[i] = (igc_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset; + data[i] = (igc_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i + 1] = ring->tx_stats.bytes; + data[i + 2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry(&ring->tx_syncp2, start)); + data[i + 2] += restart2; + + i += IGC_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i + 1] = ring->rx_stats.bytes; + data[i + 2] = ring->rx_stats.drops; + data[i + 3] = ring->rx_stats.csum_err; + data[i + 4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); + i += IGC_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + +static int igc_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (adapter->rx_itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) { + if (adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + } + + return 0; +} + +static int igc_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->rx_coalesce_usecs > 3 && + ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->rx_coalesce_usecs == 2) + return -EINVAL; + + if (ec->tx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->tx_coalesce_usecs > 3 && + ec->tx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->tx_coalesce_usecs == 2) + return -EINVAL; + + if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGC_FLAG_DMAC) + adapter->flags &= ~IGC_FLAG_DMAC; + } + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + + /* convert to rate of irq's per second */ + if (adapter->flags & IGC_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->tx.work_limit = adapter->tx_work_limit; + if (q_vector->rx.ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGC_START_ITR; + q_vector->set_itr = 1; + } + + return 0; +} + +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int 
igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igc_nfc_rule *rule = NULL; + + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) + goto out; + + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = htons(rule->filter.etype); + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci); + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) { + fsp->flow_type |= FLOW_EXT; + memcpy(fsp->h_ext.data, rule->filter.user_data, sizeof(fsp->h_ext.data)); + memcpy(fsp->m_ext.data, rule->filter.user_mask, sizeof(fsp->m_ext.data)); + } + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +out: + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; +} + +static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igc_nfc_rule *rule; + int cnt = 0; + + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (cnt == cmd->rule_cnt) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EMSGSIZE; + } + rule_locs[cnt] = rule->location; + cnt++; + } + + mutex_unlock(&adapter->nfc_rule_lock); + + cmd->rule_cnt = cnt; + + return 0; +} + +static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on igc */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igc_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + return 0; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_rule_count; + return 0; + case ETHTOOL_GRXCLSRULE: + return igc_ethtool_get_nfc_rule(adapter, cmd); + case ETHTOOL_GRXCLSRLALL: + return igc_ethtool_get_nfc_rules(adapter, cmd, rule_locs); + case 
ETHTOOL_GRXFH: + return igc_ethtool_get_rss_hash_opts(adapter, cmd); + default: + return -EOPNOTSUPP; + } +} + +#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \ + IGC_FLAG_RSS_FIELD_IPV6_UDP) +static int igc_ethtool_set_rss_hash_opt(struct igc_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct igc_hw *hw = &adapter->hw; + u32 mrqc = rd32(IGC_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + netdev_err(adapter->netdev, + "Enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + mrqc |= IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP | + IGC_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + wr32(IGC_MRQC, mrqc); + } + + return 0; +} + +static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, + const struct ethtool_rx_flow_spec *fsp) +{ + INIT_LIST_HEAD(&rule->list); + + rule->action = fsp->ring_cookie; + rule->location = fsp->location; + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci); + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; + } + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + rule->filter.etype = ntohs(fsp->h_u.ether_spec.h_proto); + rule->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE; + } + + /* Both source and destination address filters only support the full + * mask. 
+ */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(rule->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(rule->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } + + /* VLAN etype matching */ + if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) { + rule->filter.vlan_etype = fsp->h_ext.vlan_etype; + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE; + } + + /* Check for user defined data */ + if ((fsp->flow_type & FLOW_EXT) && + (fsp->h_ext.data[0] || fsp->h_ext.data[1])) { + rule->filter.match_flags |= IGC_FILTER_FLAG_USER_DATA; + memcpy(rule->filter.user_data, fsp->h_ext.data, sizeof(fsp->h_ext.data)); + memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data)); + } + + /* When multiple filter options or user data or vlan etype is set, use a + * flex filter. + */ + if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) || + (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) || + (rule->filter.match_flags & (rule->filter.match_flags - 1))) + rule->flex = true; + else + rule->flex = false; +} + +/** + * igc_ethtool_check_nfc_rule() - Check if NFC rule is valid + * @adapter: Pointer to adapter + * @rule: Rule under evaluation + * + * The driver doesn't support rules with multiple matches so if more than + * one bit in filter flags is set, @rule is considered invalid. + * + * Also, if there is already another rule with the same filter in a different + * location, @rule is considered invalid. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct net_device *dev = adapter->netdev; + u8 flags = rule->filter.match_flags; + struct igc_nfc_rule *tmp; + + if (!flags) { + netdev_dbg(dev, "Rule with no match\n"); + return -EINVAL; + } + + list_for_each_entry(tmp, &adapter->nfc_rule_list, list) { + if (!memcmp(&rule->filter, &tmp->filter, + sizeof(rule->filter)) && + tmp->location != rule->location) { + netdev_dbg(dev, "Rule already exists\n"); + return -EEXIST; + } + } + + return 0; +} + +static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_rule *rule, *old_rule; + int err; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) { + netdev_dbg(netdev, "N-tuple filters disabled\n"); + return -EOPNOTSUPP; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) { + netdev_dbg(netdev, "Only ethernet flow type is supported\n"); + return -EOPNOTSUPP; + } + + if (fsp->ring_cookie >= adapter->num_rx_queues) { + netdev_dbg(netdev, "Invalid action\n"); + return -EINVAL; + } + + if (fsp->location >= IGC_MAX_RXNFC_RULES) { + netdev_dbg(netdev, "Invalid location\n"); + return -EINVAL; + } + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + igc_ethtool_init_nfc_rule(rule, fsp); + + mutex_lock(&adapter->nfc_rule_lock); + + err = igc_ethtool_check_nfc_rule(adapter, rule); + if (err) + goto err; + + old_rule = igc_get_nfc_rule(adapter, fsp->location); + if (old_rule) + igc_del_nfc_rule(adapter, old_rule); + + err = igc_add_nfc_rule(adapter, rule); + if (err) + goto err; + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +err: + mutex_unlock(&adapter->nfc_rule_lock); + kfree(rule); + return err; +} + +static int igc_ethtool_del_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; + } + + igc_del_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; +} + +static int igc_ethtool_set_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + return igc_ethtool_set_rss_hash_opt(adapter, cmd); + case ETHTOOL_SRXCLSRLINS: + return igc_ethtool_add_nfc_rule(adapter, cmd); + case ETHTOOL_SRXCLSRLDEL: + return igc_ethtool_del_nfc_rule(adapter, cmd); + default: + return -EOPNOTSUPP; + } +} + +void igc_write_rss_indir_tbl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 reg = IGC_RETA(0); + u32 shift = 0; + int i = 0; + + while (i < IGC_RETA_SIZE) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= adapter->rss_indir_tbl[i + j]; + } + + wr32(reg, val << shift); + reg += 4; + i += 4; + } +} + +static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev) +{ + return IGC_RETA_SIZE; +} + +static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < IGC_RETA_SIZE; i++) + indir[i] = 
adapter->rss_indir_tbl[i]; + + return 0; +} + +static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 num_queues; + int i; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + num_queues = adapter->rss_queues; + + /* Verify user input. */ + for (i = 0; i < IGC_RETA_SIZE; i++) + if (indir[i] >= num_queues) + return -EINVAL; + + for (i = 0; i < IGC_RETA_SIZE; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + igc_write_rss_indir_tbl(adapter); + + return 0; +} + +static void igc_ethtool_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = igc_get_max_rss_queues(adapter); + + /* Report info for other vector */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + ch->combined_count = adapter->rss_queues; +} + +static int igc_ethtool_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; + unsigned int max_combined = 0; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* Verify other_count is valid and has not been changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ + max_combined = igc_get_max_rss_queues(adapter); + if (count > max_combined) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; + igc_set_flag_queue_pairs(adapter, max_combined); + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. 
+ */ + return igc_reinit_queues(adapter); + } + + return 0; +} + +static int igc_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + switch (adapter->hw.mac.type) { + case igc_i225: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); + + return 0; + default: + return -EOPNOTSUPP; + } +} + +static u32 igc_ethtool_get_priv_flags(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IGC_FLAG_RX_LEGACY) + priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IGC_FLAG_RX_LEGACY; + if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX) + flags |= IGC_FLAG_RX_LEGACY; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + } + + return 0; +} + +static int igc_ethtool_get_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 eeer; + + if (hw->dev_spec._base.eee_enable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + *edata = adapter->eee; + edata->supported = SUPPORTED_Autoneg; + + eeer = rd32(IGC_EEER); + + /* EEE status on negotiated link */ + if (eeer & IGC_EEER_EEE_NEG) + edata->eee_active = true; + + if (eeer & IGC_EEER_TX_LPI_EN) + edata->tx_lpi_enabled = true; + + edata->eee_enabled = hw->dev_spec._base.eee_enable; + + edata->advertised = SUPPORTED_Autoneg; + edata->lp_advertised = SUPPORTED_Autoneg; + + /* Report correct negotiated EEE status for devices that + * wrongly report EEE at half-duplex + */ + if (adapter->link_duplex == HALF_DUPLEX) { + edata->eee_enabled = false; + edata->eee_active = false; + edata->tx_lpi_enabled = false; + edata->advertised &= ~edata->advertised; + } + + return 0; +} + +static int igc_ethtool_set_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + s32 ret_val; + + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + + ret_val = igc_ethtool_get_eee(netdev, &eee_curr); + if (ret_val) { + netdev_err(netdev, + "Problem setting EEE advertisement options\n"); + return -EINVAL; + } + + if (eee_curr.eee_enabled) { + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + netdev_err(netdev, + "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + /* Tx LPI timer is not implemented currently */ + if (edata->tx_lpi_timer) { + netdev_err(netdev, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + } else if (!edata->eee_enabled) { + netdev_err(netdev, + "Setting EEE options are not supported with EEE disabled\n"); + return -EINVAL; + } + + adapter->eee_advert = 
ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + if (hw->dev_spec._base.eee_enable != edata->eee_enabled) { + hw->dev_spec._base.eee_enable = edata->eee_enabled; + adapter->flags |= IGC_FLAG_EEE; + + /* reset link */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + } + + return 0; +} + +static int igc_ethtool_begin(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; +} + +static void igc_ethtool_complete(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_put(&adapter->pdev->dev); +} + +static int igc_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 status; + u32 speed; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + /* supported link modes */ + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full); + + /* twisted pair */ + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy.addr; + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + + /* advertising link modes */ + if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); + + /* set autoneg settings */ + if (hw->mac.autoneg == 1) { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Autoneg); + } + + /* Set pause flow control settings */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + + switch (hw->fc.requested_mode) { + case igc_fc_full: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + break; + case igc_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + case igc_fc_tx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + default: + break; + } + + status = pm_runtime_suspended(&adapter->pdev->dev) ? + 0 : rd32(IGC_STATUS); + + if (status & IGC_STATUS_LU) { + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both + * 1 Gbps and 2.5 Gbps link modes. + * An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. 
+ */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + speed = SPEED_2500; + } else { + speed = SPEED_1000; + } + } else if (status & IGC_STATUS_SPEED_100) { + speed = SPEED_100; + } else { + speed = SPEED_10; + } + if ((status & IGC_STATUS_FD) || + hw->phy.media_type != igc_media_type_copper) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.speed = speed; + if (hw->mac.autoneg) + cmd->base.autoneg = AUTONEG_ENABLE; + else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == igc_media_type_copper) + cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + return 0; +} + +static int +igc_ethtool_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 advertising; + + /* When adapter in resetting mode, autoneg/speed/duplex + * cannot be changed + */ + if (igc_check_reset_block(hw)) { + netdev_err(dev, "Cannot change link characteristics when reset is active\n"); + return -EINVAL; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO && + cmd->base.autoneg != AUTONEG_ENABLE) { + netdev_err(dev, "Forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT. + * We have to check this and convert it to ADVERTISE_2500_FULL + * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly. 
+ */ + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full)) + advertising |= ADVERTISE_2500_FULL; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + hw->phy.autoneg_advertised = advertising; + if (adapter->fc_autoneg) + hw->fc.requested_mode = igc_fc_default; + } else { + netdev_info(dev, "Force mode currently not supported\n"); + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +static void igc_ethtool_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + netdev_info(adapter->netdev, "Offline testing starting"); + set_bit(__IGC_TESTING, &adapter->state); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + igc_close(netdev); + else + igc_reset(adapter); + + netdev_info(adapter->netdev, "Register testing starting"); + if (!igc_reg_test(adapter, &data[TEST_REG])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + netdev_info(adapter->netdev, "EEPROM testing starting"); + if (!igc_eeprom_test(adapter, &data[TEST_EEP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + /* loopback and interrupt tests + * will be implemented in the future + */ + data[TEST_LOOP] = 0; + data[TEST_IRQ] = 0; + + clear_bit(__IGC_TESTING, &adapter->state); + if (if_running) + igc_open(netdev); + } else { + netdev_info(adapter->netdev, "Online testing starting"); + + /* register, eeprom, intr and loopback tests not run online */ + data[TEST_REG] = 0; + data[TEST_EEP] = 0; + data[TEST_IRQ] = 0; + data[TEST_LOOP] = 0; + + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + msleep_interruptible(4 * 1000); +} + +static const struct ethtool_ops igc_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_drvinfo = igc_ethtool_get_drvinfo, + .get_regs_len = igc_ethtool_get_regs_len, + .get_regs = igc_ethtool_get_regs, + .get_wol = igc_ethtool_get_wol, + .set_wol = igc_ethtool_set_wol, + .get_msglevel = igc_ethtool_get_msglevel, + .set_msglevel = igc_ethtool_set_msglevel, + .nway_reset = igc_ethtool_nway_reset, + .get_link = igc_ethtool_get_link, + .get_eeprom_len = igc_ethtool_get_eeprom_len, + .get_eeprom = igc_ethtool_get_eeprom, + .set_eeprom = igc_ethtool_set_eeprom, + .get_ringparam = igc_ethtool_get_ringparam, + .set_ringparam = igc_ethtool_set_ringparam, + .get_pauseparam = igc_ethtool_get_pauseparam, + .set_pauseparam = igc_ethtool_set_pauseparam, + .get_strings = igc_ethtool_get_strings, + .get_sset_count = igc_ethtool_get_sset_count, + .get_ethtool_stats = igc_ethtool_get_stats, + .get_coalesce = igc_ethtool_get_coalesce, + .set_coalesce = igc_ethtool_set_coalesce, + .get_rxnfc = igc_ethtool_get_rxnfc, + .set_rxnfc = igc_ethtool_set_rxnfc, + .get_rxfh_indir_size = 
igc_ethtool_get_rxfh_indir_size, + .get_rxfh = igc_ethtool_get_rxfh, + .set_rxfh = igc_ethtool_set_rxfh, + .get_ts_info = igc_ethtool_get_ts_info, + .get_channels = igc_ethtool_get_channels, + .set_channels = igc_ethtool_set_channels, + .get_priv_flags = igc_ethtool_get_priv_flags, + .set_priv_flags = igc_ethtool_set_priv_flags, + .get_eee = igc_ethtool_get_eee, + .set_eee = igc_ethtool_set_eee, + .begin = igc_ethtool_begin, + .complete = igc_ethtool_complete, + .get_link_ksettings = igc_ethtool_get_link_ksettings, + .set_link_ksettings = igc_ethtool_set_link_ksettings, + .self_test = igc_ethtool_diag_test, +}; + +void igc_ethtool_set_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &igc_ethtool_ops; +} diff --git a/devices/igc/igc_ethtool-6.4-orig.c b/devices/igc/igc_ethtool-6.4-orig.c new file mode 100644 index 00000000..93bce729 --- /dev/null +++ b/devices/igc/igc_ethtool-6.4-orig.c @@ -0,0 +1,1983 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +/* ethtool support for igc */ +#include <linux/if_vlan.h> +#include <linux/pm_runtime.h> +#include <linux/mdio.h> + +#include "igc.h" +#include "igc_diag.h" + +/* forward declaration */ +struct igc_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IGC_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct igc_adapter, _stat), \ + .stat_offset = offsetof(struct igc_adapter, _stat) \ +} + +static const struct igc_stats igc_gstrings_stats[] = { + IGC_STAT("rx_packets", stats.gprc), + IGC_STAT("tx_packets", stats.gptc), + IGC_STAT("rx_bytes", stats.gorc), + IGC_STAT("tx_bytes", stats.gotc), + IGC_STAT("rx_broadcast", stats.bprc), + IGC_STAT("tx_broadcast", stats.bptc), + IGC_STAT("rx_multicast", stats.mprc), + IGC_STAT("tx_multicast", stats.mptc), + IGC_STAT("multicast", stats.mprc), + IGC_STAT("collisions", stats.colc), + IGC_STAT("rx_crc_errors", stats.crcerrs), + IGC_STAT("rx_no_buffer_count", stats.rnbc), + IGC_STAT("rx_missed_errors", stats.mpc), + IGC_STAT("tx_aborted_errors", stats.ecol), + IGC_STAT("tx_carrier_errors", stats.tncrs), + IGC_STAT("tx_window_errors", stats.latecol), + IGC_STAT("tx_abort_late_coll", stats.latecol), + IGC_STAT("tx_deferred_ok", stats.dc), + IGC_STAT("tx_single_coll_ok", stats.scc), + IGC_STAT("tx_multi_coll_ok", stats.mcc), + IGC_STAT("tx_timeout_count", tx_timeout_count), + IGC_STAT("rx_long_length_errors", stats.roc), + IGC_STAT("rx_short_length_errors", stats.ruc), + IGC_STAT("rx_align_errors", stats.algnerrc), + IGC_STAT("tx_tcp_seg_good", stats.tsctc), + IGC_STAT("tx_tcp_seg_failed", stats.tsctfc), + IGC_STAT("rx_flow_control_xon", stats.xonrxc), + IGC_STAT("rx_flow_control_xoff", stats.xoffrxc), + IGC_STAT("tx_flow_control_xon", stats.xontxc), + IGC_STAT("tx_flow_control_xoff", stats.xofftxc), + IGC_STAT("rx_long_byte_count", stats.gorc), + IGC_STAT("tx_dma_out_of_sync", stats.doosync), + IGC_STAT("tx_smbus", stats.mgptc), + IGC_STAT("rx_smbus", stats.mgprc), + IGC_STAT("dropped_smbus", stats.mgpdc), + IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGC_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc), + IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + IGC_STAT("tx_lpi_counter", stats.tlpic), + IGC_STAT("rx_lpi_counter", stats.rlpic), + IGC_STAT("qbv_config_change_errors", qbv_config_change_errors), +}; + +#define
IGC_NETDEV_STAT(_net_stat) { \ + .stat_string = __stringify(_net_stat), \ + .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ +} + +static const struct igc_stats igc_gstrings_net_stats[] = { + IGC_NETDEV_STAT(rx_errors), + IGC_NETDEV_STAT(tx_errors), + IGC_NETDEV_STAT(tx_dropped), + IGC_NETDEV_STAT(rx_length_errors), + IGC_NETDEV_STAT(rx_over_errors), + IGC_NETDEV_STAT(rx_frame_errors), + IGC_NETDEV_STAT(rx_fifo_errors), + IGC_NETDEV_STAT(tx_fifo_errors), + IGC_NETDEV_STAT(tx_heartbeat_errors) +}; + +enum igc_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + +static const char igc_gstrings_test[][ETH_GSTRING_LEN] = { + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" +}; + +#define IGC_TEST_LEN (sizeof(igc_gstrings_test) / ETH_GSTRING_LEN) + +#define IGC_GLOBAL_STATS_LEN \ + (sizeof(igc_gstrings_stats) / sizeof(struct igc_stats)) +#define IGC_NETDEV_STATS_LEN \ + (sizeof(igc_gstrings_net_stats) / sizeof(struct igc_stats)) +#define IGC_RX_QUEUE_STATS_LEN \ + (sizeof(struct igc_rx_queue_stats) / sizeof(u64)) +#define IGC_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ +#define IGC_QUEUE_STATS_LEN \ + ((((struct igc_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGC_RX_QUEUE_STATS_LEN) + \ + (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \ + IGC_TX_QUEUE_STATS_LEN)) +#define IGC_STATS_LEN \ + (IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN) + +static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings) + +static void igc_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u16 nvm_version = 0; + u16 gphy_version; + + strscpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver)); + + /* NVM image version is reported as firmware version for i225 device */ + hw->nvm.ops.read(hw, IGC_NVM_DEV_STARTER, 1, &nvm_version); + + /* gPHY firmware version is reported as PHY FW version */ + gphy_version = igc_read_phy_fw_version(hw); + + scnprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%x:%x", + nvm_version, + gphy_version); + + strscpy(drvinfo->fw_version, adapter->fw_version, + sizeof(drvinfo->fw_version)); + + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN; +} + +static int igc_ethtool_get_regs_len(struct net_device *netdev) +{ + return IGC_REGS_LEN * sizeof(u32); +} + +static void igc_ethtool_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IGC_REGS_LEN * sizeof(u32)); + + regs->version = (2u << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ + regs_buff[0] = rd32(IGC_CTRL); + regs_buff[1] = rd32(IGC_STATUS); + regs_buff[2] = rd32(IGC_CTRL_EXT); + regs_buff[3] = rd32(IGC_MDIC); + regs_buff[4] = rd32(IGC_CONNSW); + + /* NVM Register */ + regs_buff[5] = rd32(IGC_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read 
the + * same but EICS does not clear on read + */ + regs_buff[6] = rd32(IGC_EICS); + regs_buff[7] = rd32(IGC_EICS); + regs_buff[8] = rd32(IGC_EIMS); + regs_buff[9] = rd32(IGC_EIMC); + regs_buff[10] = rd32(IGC_EIAC); + regs_buff[11] = rd32(IGC_EIAM); + /* Reading ICS for ICR because they read the + * same but ICS does not clear on read + */ + regs_buff[12] = rd32(IGC_ICS); + regs_buff[13] = rd32(IGC_ICS); + regs_buff[14] = rd32(IGC_IMS); + regs_buff[15] = rd32(IGC_IMC); + regs_buff[16] = rd32(IGC_IAC); + regs_buff[17] = rd32(IGC_IAM); + + /* Flow Control */ + regs_buff[18] = rd32(IGC_FCAL); + regs_buff[19] = rd32(IGC_FCAH); + regs_buff[20] = rd32(IGC_FCTTV); + regs_buff[21] = rd32(IGC_FCRTL); + regs_buff[22] = rd32(IGC_FCRTH); + regs_buff[23] = rd32(IGC_FCRTV); + + /* Receive */ + regs_buff[24] = rd32(IGC_RCTL); + regs_buff[25] = rd32(IGC_RXCSUM); + regs_buff[26] = rd32(IGC_RLPML); + regs_buff[27] = rd32(IGC_RFCTL); + + /* Transmit */ + regs_buff[28] = rd32(IGC_TCTL); + regs_buff[29] = rd32(IGC_TIPG); + + /* Wake Up */ + + /* MAC */ + + /* Statistics */ + regs_buff[30] = adapter->stats.crcerrs; + regs_buff[31] = adapter->stats.algnerrc; + regs_buff[32] = adapter->stats.symerrs; + regs_buff[33] = adapter->stats.rxerrc; + regs_buff[34] = adapter->stats.mpc; + regs_buff[35] = adapter->stats.scc; + regs_buff[36] = adapter->stats.ecol; + regs_buff[37] = adapter->stats.mcc; + regs_buff[38] = adapter->stats.latecol; + regs_buff[39] = adapter->stats.colc; + regs_buff[40] = adapter->stats.dc; + regs_buff[41] = adapter->stats.tncrs; + regs_buff[42] = adapter->stats.sec; + regs_buff[43] = adapter->stats.htdpmc; + regs_buff[44] = adapter->stats.rlec; + regs_buff[45] = adapter->stats.xonrxc; + regs_buff[46] = adapter->stats.xontxc; + regs_buff[47] = adapter->stats.xoffrxc; + regs_buff[48] = adapter->stats.xofftxc; + regs_buff[49] = adapter->stats.fcruc; + regs_buff[50] = adapter->stats.prc64; + regs_buff[51] = adapter->stats.prc127; + regs_buff[52] = adapter->stats.prc255; + regs_buff[53] = adapter->stats.prc511; + regs_buff[54] = adapter->stats.prc1023; + regs_buff[55] = adapter->stats.prc1522; + regs_buff[56] = adapter->stats.gprc; + regs_buff[57] = adapter->stats.bprc; + regs_buff[58] = adapter->stats.mprc; + regs_buff[59] = adapter->stats.gptc; + regs_buff[60] = adapter->stats.gorc; + regs_buff[61] = adapter->stats.gotc; + regs_buff[62] = adapter->stats.rnbc; + regs_buff[63] = adapter->stats.ruc; + regs_buff[64] = adapter->stats.rfc; + regs_buff[65] = adapter->stats.roc; + regs_buff[66] = adapter->stats.rjc; + regs_buff[67] = adapter->stats.mgprc; + regs_buff[68] = adapter->stats.mgpdc; + regs_buff[69] = adapter->stats.mgptc; + regs_buff[70] = adapter->stats.tor; + regs_buff[71] = adapter->stats.tot; + regs_buff[72] = adapter->stats.tpr; + regs_buff[73] = adapter->stats.tpt; + regs_buff[74] = adapter->stats.ptc64; + regs_buff[75] = adapter->stats.ptc127; + regs_buff[76] = adapter->stats.ptc255; + regs_buff[77] = adapter->stats.ptc511; + regs_buff[78] = adapter->stats.ptc1023; + regs_buff[79] = adapter->stats.ptc1522; + regs_buff[80] = adapter->stats.mptc; + regs_buff[81] = adapter->stats.bptc; + regs_buff[82] = adapter->stats.tsctc; + regs_buff[83] = adapter->stats.iac; + regs_buff[84] = adapter->stats.rpthc; + regs_buff[85] = adapter->stats.hgptc; + regs_buff[86] = adapter->stats.hgorc; + regs_buff[87] = adapter->stats.hgotc; + regs_buff[88] = adapter->stats.lenerrs; + regs_buff[89] = adapter->stats.scvpc; + regs_buff[90] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) + regs_buff[91 + i] = 
rd32(IGC_SRRCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[95 + i] = rd32(IGC_PSRTYPE(i)); + for (i = 0; i < 4; i++) + regs_buff[99 + i] = rd32(IGC_RDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[103 + i] = rd32(IGC_RDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[107 + i] = rd32(IGC_RDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[111 + i] = rd32(IGC_RDH(i)); + for (i = 0; i < 4; i++) + regs_buff[115 + i] = rd32(IGC_RDT(i)); + for (i = 0; i < 4; i++) + regs_buff[119 + i] = rd32(IGC_RXDCTL(i)); + + for (i = 0; i < 10; i++) + regs_buff[123 + i] = rd32(IGC_EITR(i)); + for (i = 0; i < 16; i++) + regs_buff[139 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[145 + i] = rd32(IGC_RAH(i)); + + for (i = 0; i < 4; i++) + regs_buff[149 + i] = rd32(IGC_TDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[152 + i] = rd32(IGC_TDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[156 + i] = rd32(IGC_TDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[160 + i] = rd32(IGC_TDH(i)); + for (i = 0; i < 4; i++) + regs_buff[164 + i] = rd32(IGC_TDT(i)); + for (i = 0; i < 4; i++) + regs_buff[168 + i] = rd32(IGC_TXDCTL(i)); + + /* XXX: Due to a bug few lines above, RAL and RAH registers are + * overwritten. To preserve the ABI, we write these registers again in + * regs_buff. + */ + for (i = 0; i < 16; i++) + regs_buff[172 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[188 + i] = rd32(IGC_RAH(i)); + + regs_buff[204] = rd32(IGC_VLANPQF); + + for (i = 0; i < 8; i++) + regs_buff[205 + i] = rd32(IGC_ETQF(i)); + + regs_buff[213] = adapter->stats.tlpic; + regs_buff[214] = adapter->stats.rlpic; +} + +static void igc_ethtool_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + wol->wolopts = 0; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + + /* apply any specific unsupported masks here */ + switch (adapter->hw.device_id) { + default: + break; + } + + if (adapter->wol & IGC_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & IGC_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & IGC_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & IGC_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & IGC_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int igc_ethtool_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EOPNOTSUPP; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= IGC_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= IGC_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= IGC_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= IGC_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= IGC_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static u32 igc_ethtool_get_msglevel(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void igc_ethtool_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int igc_ethtool_nway_reset(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + return 0; +} + +static u32 igc_ethtool_get_link(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_mac_info *mac = &adapter->hw.mac; + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + mac->get_link_status = 1; + + return igc_has_link(adapter); +} + +static int igc_ethtool_get_eeprom_len(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->hw.nvm.word_size * 2; +} + +static int igc_ethtool_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int first_word, last_word; + u16 *eeprom_buff; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == igc_nvm_eeprom_spi) { + ret_val = hw->nvm.ops.read(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = hw->nvm.ops.read(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int igc_ethtool_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int max_len, first_word, last_word, ret_val = 0; + u16 *eeprom_buff; + void *ptr; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (hw->mac.type >= igc_i225 && + !igc_get_flash_presence_i225(hw)) { + return -EOPNOTSUPP; + } + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; 
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->nvm.ops.write(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) + hw->nvm.ops.update(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void +igc_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IGC_MAX_RXD; + ring->tx_max_pending = IGC_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int +igc_ethtool_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_ring *temp_ring; + u16 new_rx_count, new_tx_count; + int i, err = 0; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + new_rx_count = min_t(u32, ring->rx_pending, IGC_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGC_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = min_t(u32, ring->tx_pending, IGC_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGC_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == adapter->tx_ring_count && + new_rx_count == adapter->rx_ring_count) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_tx_queues)); + else + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_rx_queues)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igc_down(adapter); + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. 
+ */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_tx_count; + err = igc_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + igc_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_rx_count; + err = igc_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + igc_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } +err_setup: + igc_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGC_RESETTING, &adapter->state); + return err; +} + +static void igc_ethtool_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == igc_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int igc_ethtool_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = igc_fc_default; + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + retval = ((hw->phy.media_type == igc_media_type_copper) ? 
+ igc_force_mac_fc(hw) : igc_setup_link(hw)); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + return retval; +} + +static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igc_gstrings_test, + IGC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, igc_gstrings_stats[i].stat_string); + for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) + ethtool_sprintf(&p, + igc_gstrings_net_stats[i].stat_string); + for (i = 0; i < adapter->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "tx_queue_%u_restart", i); + } + for (i = 0; i < adapter->num_rx_queues; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_drops", i); + ethtool_sprintf(&p, "rx_queue_%u_csum_err", i); + ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i); + } + /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, igc_priv_flags_strings, + IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IGC_STATS_LEN; + case ETH_SS_TEST: + return IGC_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return IGC_PRIV_FLAGS_STR_LEN; + default: + return -ENOTSUPP; + } +} + +static void igc_ethtool_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igc_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igc_gstrings_stats[i].stat_offset; + data[i] = (igc_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset; + data[i] = (igc_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i + 1] = ring->tx_stats.bytes; + data[i + 2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry(&ring->tx_syncp2, start)); + data[i + 2] += restart2; + + i += IGC_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i + 1] = ring->rx_stats.bytes; + data[i + 2] = ring->rx_stats.drops; + data[i + 3] = ring->rx_stats.csum_err; + data[i + 4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); + i += IGC_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + +static int igc_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (adapter->rx_itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) { + if (adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + } + + return 0; +} + +static int igc_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->rx_coalesce_usecs > 3 && + ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->rx_coalesce_usecs == 2) + return -EINVAL; + + if (ec->tx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->tx_coalesce_usecs > 3 && + ec->tx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->tx_coalesce_usecs == 2) + return -EINVAL; + + if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGC_FLAG_DMAC) + adapter->flags &= ~IGC_FLAG_DMAC; + } + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + + /* convert to rate of irq's per second */ + if (adapter->flags & IGC_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->tx.work_limit = adapter->tx_work_limit; + if (q_vector->rx.ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGC_START_ITR; + q_vector->set_itr = 1; + } + + return 0; +} + +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int 
igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igc_nfc_rule *rule = NULL; + + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) + goto out; + + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = htons(rule->filter.etype); + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci); + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) { + fsp->flow_type |= FLOW_EXT; + memcpy(fsp->h_ext.data, rule->filter.user_data, sizeof(fsp->h_ext.data)); + memcpy(fsp->m_ext.data, rule->filter.user_mask, sizeof(fsp->m_ext.data)); + } + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +out: + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; +} + +static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igc_nfc_rule *rule; + int cnt = 0; + + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (cnt == cmd->rule_cnt) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EMSGSIZE; + } + rule_locs[cnt] = rule->location; + cnt++; + } + + mutex_unlock(&adapter->nfc_rule_lock); + + cmd->rule_cnt = cnt; + + return 0; +} + +static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on igc */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igc_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + return 0; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_rule_count; + return 0; + case ETHTOOL_GRXCLSRULE: + return igc_ethtool_get_nfc_rule(adapter, cmd); + case ETHTOOL_GRXCLSRLALL: + return igc_ethtool_get_nfc_rules(adapter, cmd, rule_locs); + case 
ETHTOOL_GRXFH: + return igc_ethtool_get_rss_hash_opts(adapter, cmd); + default: + return -EOPNOTSUPP; + } +} + +#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \ + IGC_FLAG_RSS_FIELD_IPV6_UDP) +static int igc_ethtool_set_rss_hash_opt(struct igc_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct igc_hw *hw = &adapter->hw; + u32 mrqc = rd32(IGC_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + netdev_err(adapter->netdev, + "Enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + mrqc |= IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP | + IGC_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + wr32(IGC_MRQC, mrqc); + } + + return 0; +} + +static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, + const struct ethtool_rx_flow_spec *fsp) +{ + INIT_LIST_HEAD(&rule->list); + + rule->action = fsp->ring_cookie; + rule->location = fsp->location; + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci); + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; + } + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + rule->filter.etype = ntohs(fsp->h_u.ether_spec.h_proto); + rule->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE; + } + + /* Both source and destination address filters only support the full + * mask. 
+ */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(rule->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(rule->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } + + /* VLAN etype matching */ + if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) { + rule->filter.vlan_etype = fsp->h_ext.vlan_etype; + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE; + } + + /* Check for user defined data */ + if ((fsp->flow_type & FLOW_EXT) && + (fsp->h_ext.data[0] || fsp->h_ext.data[1])) { + rule->filter.match_flags |= IGC_FILTER_FLAG_USER_DATA; + memcpy(rule->filter.user_data, fsp->h_ext.data, sizeof(fsp->h_ext.data)); + memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data)); + } + + /* When multiple filter options or user data or vlan etype is set, use a + * flex filter. + */ + if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) || + (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) || + (rule->filter.match_flags & (rule->filter.match_flags - 1))) + rule->flex = true; + else + rule->flex = false; +} + +/** + * igc_ethtool_check_nfc_rule() - Check if NFC rule is valid + * @adapter: Pointer to adapter + * @rule: Rule under evaluation + * + * The driver doesn't support rules with multiple matches so if more than + * one bit in filter flags is set, @rule is considered invalid. + * + * Also, if there is already another rule with the same filter in a different + * location, @rule is considered invalid. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct net_device *dev = adapter->netdev; + u8 flags = rule->filter.match_flags; + struct igc_nfc_rule *tmp; + + if (!flags) { + netdev_dbg(dev, "Rule with no match\n"); + return -EINVAL; + } + + list_for_each_entry(tmp, &adapter->nfc_rule_list, list) { + if (!memcmp(&rule->filter, &tmp->filter, + sizeof(rule->filter)) && + tmp->location != rule->location) { + netdev_dbg(dev, "Rule already exists\n"); + return -EEXIST; + } + } + + return 0; +} + +static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_rule *rule, *old_rule; + int err; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) { + netdev_dbg(netdev, "N-tuple filters disabled\n"); + return -EOPNOTSUPP; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) { + netdev_dbg(netdev, "Only ethernet flow type is supported\n"); + return -EOPNOTSUPP; + } + + if (fsp->ring_cookie >= adapter->num_rx_queues) { + netdev_dbg(netdev, "Invalid action\n"); + return -EINVAL; + } + + if (fsp->location >= IGC_MAX_RXNFC_RULES) { + netdev_dbg(netdev, "Invalid location\n"); + return -EINVAL; + } + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + igc_ethtool_init_nfc_rule(rule, fsp); + + mutex_lock(&adapter->nfc_rule_lock); + + err = igc_ethtool_check_nfc_rule(adapter, rule); + if (err) + goto err; + + old_rule = igc_get_nfc_rule(adapter, fsp->location); + if (old_rule) + igc_del_nfc_rule(adapter, old_rule); + + err = igc_add_nfc_rule(adapter, rule); + if (err) + goto err; + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +err: + mutex_unlock(&adapter->nfc_rule_lock); + kfree(rule); + return err; +} + +static int igc_ethtool_del_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; + } + + igc_del_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; +} + +static int igc_ethtool_set_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + return igc_ethtool_set_rss_hash_opt(adapter, cmd); + case ETHTOOL_SRXCLSRLINS: + return igc_ethtool_add_nfc_rule(adapter, cmd); + case ETHTOOL_SRXCLSRLDEL: + return igc_ethtool_del_nfc_rule(adapter, cmd); + default: + return -EOPNOTSUPP; + } +} + +void igc_write_rss_indir_tbl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 reg = IGC_RETA(0); + u32 shift = 0; + int i = 0; + + while (i < IGC_RETA_SIZE) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= adapter->rss_indir_tbl[i + j]; + } + + wr32(reg, val << shift); + reg += 4; + i += 4; + } +} + +static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev) +{ + return IGC_RETA_SIZE; +} + +static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < IGC_RETA_SIZE; i++) + indir[i] = 
adapter->rss_indir_tbl[i]; + + return 0; +} + +static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 num_queues; + int i; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + num_queues = adapter->rss_queues; + + /* Verify user input. */ + for (i = 0; i < IGC_RETA_SIZE; i++) + if (indir[i] >= num_queues) + return -EINVAL; + + for (i = 0; i < IGC_RETA_SIZE; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + igc_write_rss_indir_tbl(adapter); + + return 0; +} + +static void igc_ethtool_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = igc_get_max_rss_queues(adapter); + + /* Report info for other vector */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + ch->combined_count = adapter->rss_queues; +} + +static int igc_ethtool_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; + unsigned int max_combined = 0; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* Verify other_count is valid and has not been changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ + max_combined = igc_get_max_rss_queues(adapter); + if (count > max_combined) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; + igc_set_flag_queue_pairs(adapter, max_combined); + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. 
+ */ + return igc_reinit_queues(adapter); + } + + return 0; +} + +static int igc_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + switch (adapter->hw.mac.type) { + case igc_i225: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); + + return 0; + default: + return -EOPNOTSUPP; + } +} + +static u32 igc_ethtool_get_priv_flags(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IGC_FLAG_RX_LEGACY) + priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IGC_FLAG_RX_LEGACY; + if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX) + flags |= IGC_FLAG_RX_LEGACY; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + } + + return 0; +} + +static int igc_ethtool_get_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 eeer; + + if (hw->dev_spec._base.eee_enable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + *edata = adapter->eee; + edata->supported = SUPPORTED_Autoneg; + + eeer = rd32(IGC_EEER); + + /* EEE status on negotiated link */ + if (eeer & IGC_EEER_EEE_NEG) + edata->eee_active = true; + + if (eeer & IGC_EEER_TX_LPI_EN) + edata->tx_lpi_enabled = true; + + edata->eee_enabled = hw->dev_spec._base.eee_enable; + + edata->advertised = SUPPORTED_Autoneg; + edata->lp_advertised = SUPPORTED_Autoneg; + + /* Report correct negotiated EEE status for devices that + * wrongly report EEE at half-duplex + */ + if (adapter->link_duplex == HALF_DUPLEX) { + edata->eee_enabled = false; + edata->eee_active = false; + edata->tx_lpi_enabled = false; + edata->advertised &= ~edata->advertised; + } + + return 0; +} + +static int igc_ethtool_set_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + s32 ret_val; + + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + + ret_val = igc_ethtool_get_eee(netdev, &eee_curr); + if (ret_val) { + netdev_err(netdev, + "Problem setting EEE advertisement options\n"); + return -EINVAL; + } + + if (eee_curr.eee_enabled) { + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + netdev_err(netdev, + "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + /* Tx LPI timer is not implemented currently */ + if (edata->tx_lpi_timer) { + netdev_err(netdev, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + } else if (!edata->eee_enabled) { + netdev_err(netdev, + "Setting EEE options are not supported with EEE disabled\n"); + return -EINVAL; + } + + adapter->eee_advert = 
ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + if (hw->dev_spec._base.eee_enable != edata->eee_enabled) { + hw->dev_spec._base.eee_enable = edata->eee_enabled; + adapter->flags |= IGC_FLAG_EEE; + + /* reset link */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + } + + return 0; +} + +static int igc_ethtool_begin(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; +} + +static void igc_ethtool_complete(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_put(&adapter->pdev->dev); +} + +static int igc_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 status; + u32 speed; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + /* supported link modes */ + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full); + + /* twisted pair */ + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy.addr; + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + + /* advertising link modes */ + if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); + + /* set autoneg settings */ + if (hw->mac.autoneg == 1) { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Autoneg); + } + + /* Set pause flow control settings */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + + switch (hw->fc.requested_mode) { + case igc_fc_full: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + break; + case igc_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + case igc_fc_tx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + default: + break; + } + + status = pm_runtime_suspended(&adapter->pdev->dev) ? + 0 : rd32(IGC_STATUS); + + if (status & IGC_STATUS_LU) { + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both + * 1 Gbps and 2.5 Gbps link modes. + * An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. 
+ */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + speed = SPEED_2500; + } else { + speed = SPEED_1000; + } + } else if (status & IGC_STATUS_SPEED_100) { + speed = SPEED_100; + } else { + speed = SPEED_10; + } + if ((status & IGC_STATUS_FD) || + hw->phy.media_type != igc_media_type_copper) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.speed = speed; + if (hw->mac.autoneg) + cmd->base.autoneg = AUTONEG_ENABLE; + else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == igc_media_type_copper) + cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + return 0; +} + +static int +igc_ethtool_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 advertising; + + /* When adapter in resetting mode, autoneg/speed/duplex + * cannot be changed + */ + if (igc_check_reset_block(hw)) { + netdev_err(dev, "Cannot change link characteristics when reset is active\n"); + return -EINVAL; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO && + cmd->base.autoneg != AUTONEG_ENABLE) { + netdev_err(dev, "Forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT. + * We have to check this and convert it to ADVERTISE_2500_FULL + * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly. 
+ */ + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full)) + advertising |= ADVERTISE_2500_FULL; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + hw->phy.autoneg_advertised = advertising; + if (adapter->fc_autoneg) + hw->fc.requested_mode = igc_fc_default; + } else { + netdev_info(dev, "Force mode currently not supported\n"); + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +static void igc_ethtool_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + netdev_info(adapter->netdev, "Offline testing starting"); + set_bit(__IGC_TESTING, &adapter->state); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + igc_close(netdev); + else + igc_reset(adapter); + + netdev_info(adapter->netdev, "Register testing starting"); + if (!igc_reg_test(adapter, &data[TEST_REG])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + netdev_info(adapter->netdev, "EEPROM testing starting"); + if (!igc_eeprom_test(adapter, &data[TEST_EEP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + /* loopback and interrupt tests + * will be implemented in the future + */ + data[TEST_LOOP] = 0; + data[TEST_IRQ] = 0; + + clear_bit(__IGC_TESTING, &adapter->state); + if (if_running) + igc_open(netdev); + } else { + netdev_info(adapter->netdev, "Online testing starting"); + + /* register, eeprom, intr and loopback tests not run online */ + data[TEST_REG] = 0; + data[TEST_EEP] = 0; + data[TEST_IRQ] = 0; + data[TEST_LOOP] = 0; + + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + msleep_interruptible(4 * 1000); +} + +static const struct ethtool_ops igc_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_drvinfo = igc_ethtool_get_drvinfo, + .get_regs_len = igc_ethtool_get_regs_len, + .get_regs = igc_ethtool_get_regs, + .get_wol = igc_ethtool_get_wol, + .set_wol = igc_ethtool_set_wol, + .get_msglevel = igc_ethtool_get_msglevel, + .set_msglevel = igc_ethtool_set_msglevel, + .nway_reset = igc_ethtool_nway_reset, + .get_link = igc_ethtool_get_link, + .get_eeprom_len = igc_ethtool_get_eeprom_len, + .get_eeprom = igc_ethtool_get_eeprom, + .set_eeprom = igc_ethtool_set_eeprom, + .get_ringparam = igc_ethtool_get_ringparam, + .set_ringparam = igc_ethtool_set_ringparam, + .get_pauseparam = igc_ethtool_get_pauseparam, + .set_pauseparam = igc_ethtool_set_pauseparam, + .get_strings = igc_ethtool_get_strings, + .get_sset_count = igc_ethtool_get_sset_count, + .get_ethtool_stats = igc_ethtool_get_stats, + .get_coalesce = igc_ethtool_get_coalesce, + .set_coalesce = igc_ethtool_set_coalesce, + .get_rxnfc = igc_ethtool_get_rxnfc, + .set_rxnfc = igc_ethtool_set_rxnfc, + .get_rxfh_indir_size = 
igc_ethtool_get_rxfh_indir_size,
+ .get_rxfh = igc_ethtool_get_rxfh,
+ .set_rxfh = igc_ethtool_set_rxfh,
+ .get_ts_info = igc_ethtool_get_ts_info,
+ .get_channels = igc_ethtool_get_channels,
+ .set_channels = igc_ethtool_set_channels,
+ .get_priv_flags = igc_ethtool_get_priv_flags,
+ .set_priv_flags = igc_ethtool_set_priv_flags,
+ .get_eee = igc_ethtool_get_eee,
+ .set_eee = igc_ethtool_set_eee,
+ .begin = igc_ethtool_begin,
+ .complete = igc_ethtool_complete,
+ .get_link_ksettings = igc_ethtool_get_link_ksettings,
+ .set_link_ksettings = igc_ethtool_set_link_ksettings,
+ .self_test = igc_ethtool_diag_test,
+};
+
+void igc_ethtool_set_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &igc_ethtool_ops;
+}
diff --git a/devices/igc/igc_ethtool-6.6-ethercat.c b/devices/igc/igc_ethtool-6.6-ethercat.c
new file mode 100644
index 00000000..9d11b6af
--- /dev/null
+++ b/devices/igc/igc_ethtool-6.6-ethercat.c
@@ -0,0 +1,1990 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+/* ethtool support for igc */
+#include <linux/if_vlan.h>
+#include <linux/pm_runtime.h>
+#include <linux/mdio.h>
+
+#include "igc-6.6-ethercat.h"
+#include "igc_diag-6.6-ethercat.h"
+
+/* forward declaration */
+struct igc_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define IGC_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = sizeof_field(struct igc_adapter, _stat), \
+ .stat_offset = offsetof(struct igc_adapter, _stat) \
+}
+
+static const struct igc_stats igc_gstrings_stats[] = {
+ IGC_STAT("rx_packets", stats.gprc),
+ IGC_STAT("tx_packets", stats.gptc),
+ IGC_STAT("rx_bytes", stats.gorc),
+ IGC_STAT("tx_bytes", stats.gotc),
+ IGC_STAT("rx_broadcast", stats.bprc),
+ IGC_STAT("tx_broadcast", stats.bptc),
+ IGC_STAT("rx_multicast", stats.mprc),
+ IGC_STAT("tx_multicast", stats.mptc),
+ IGC_STAT("multicast", stats.mprc),
+ IGC_STAT("collisions", stats.colc),
+ IGC_STAT("rx_crc_errors", stats.crcerrs),
+ IGC_STAT("rx_no_buffer_count", stats.rnbc),
+ IGC_STAT("rx_missed_errors", stats.mpc),
+ IGC_STAT("tx_aborted_errors", stats.ecol),
+ IGC_STAT("tx_carrier_errors", stats.tncrs),
+ IGC_STAT("tx_window_errors", stats.latecol),
+ IGC_STAT("tx_abort_late_coll", stats.latecol),
+ IGC_STAT("tx_deferred_ok", stats.dc),
+ IGC_STAT("tx_single_coll_ok", stats.scc),
+ IGC_STAT("tx_multi_coll_ok", stats.mcc),
+ IGC_STAT("tx_timeout_count", tx_timeout_count),
+ IGC_STAT("rx_long_length_errors", stats.roc),
+ IGC_STAT("rx_short_length_errors", stats.ruc),
+ IGC_STAT("rx_align_errors", stats.algnerrc),
+ IGC_STAT("tx_tcp_seg_good", stats.tsctc),
+ IGC_STAT("tx_tcp_seg_failed", stats.tsctfc),
+ IGC_STAT("rx_flow_control_xon", stats.xonrxc),
+ IGC_STAT("rx_flow_control_xoff", stats.xoffrxc),
+ IGC_STAT("tx_flow_control_xon", stats.xontxc),
+ IGC_STAT("tx_flow_control_xoff", stats.xofftxc),
+ IGC_STAT("rx_long_byte_count", stats.gorc),
+ IGC_STAT("tx_dma_out_of_sync", stats.doosync),
+ IGC_STAT("tx_smbus", stats.mgptc),
+ IGC_STAT("rx_smbus", stats.mgprc),
+ IGC_STAT("dropped_smbus", stats.mgpdc),
+ IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
+ IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
+ IGC_STAT("os2bmc_tx_by_host", stats.o2bspc),
+ IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+ IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
+ IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ IGC_STAT("tx_lpi_counter", stats.tlpic),
+ IGC_STAT("rx_lpi_counter", stats.rlpic),
+ IGC_STAT("qbv_config_change_errors", 
qbv_config_change_errors), +}; + +#define IGC_NETDEV_STAT(_net_stat) { \ + .stat_string = __stringify(_net_stat), \ + .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ +} + +static const struct igc_stats igc_gstrings_net_stats[] = { + IGC_NETDEV_STAT(rx_errors), + IGC_NETDEV_STAT(tx_errors), + IGC_NETDEV_STAT(tx_dropped), + IGC_NETDEV_STAT(rx_length_errors), + IGC_NETDEV_STAT(rx_over_errors), + IGC_NETDEV_STAT(rx_frame_errors), + IGC_NETDEV_STAT(rx_fifo_errors), + IGC_NETDEV_STAT(tx_fifo_errors), + IGC_NETDEV_STAT(tx_heartbeat_errors) +}; + +enum igc_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + +static const char igc_gstrings_test[][ETH_GSTRING_LEN] = { + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" +}; + +#define IGC_TEST_LEN (sizeof(igc_gstrings_test) / ETH_GSTRING_LEN) + +#define IGC_GLOBAL_STATS_LEN \ + (sizeof(igc_gstrings_stats) / sizeof(struct igc_stats)) +#define IGC_NETDEV_STATS_LEN \ + (sizeof(igc_gstrings_net_stats) / sizeof(struct igc_stats)) +#define IGC_RX_QUEUE_STATS_LEN \ + (sizeof(struct igc_rx_queue_stats) / sizeof(u64)) +#define IGC_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ +#define IGC_QUEUE_STATS_LEN \ + ((((struct igc_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGC_RX_QUEUE_STATS_LEN) + \ + (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \ + IGC_TX_QUEUE_STATS_LEN)) +#define IGC_STATS_LEN \ + (IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN) + +static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings) + +static void igc_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u16 nvm_version = 0; + u16 gphy_version; + + strscpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver)); + + /* NVM image version is reported as firmware version for i225 device */ + hw->nvm.ops.read(hw, IGC_NVM_DEV_STARTER, 1, &nvm_version); + + /* gPHY firmware version is reported as PHY FW version */ + gphy_version = igc_read_phy_fw_version(hw); + + scnprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%x:%x", + nvm_version, + gphy_version); + + strscpy(drvinfo->fw_version, adapter->fw_version, + sizeof(drvinfo->fw_version)); + + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN; +} + +static int igc_ethtool_get_regs_len(struct net_device *netdev) +{ + return IGC_REGS_LEN * sizeof(u32); +} + +static void igc_ethtool_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IGC_REGS_LEN * sizeof(u32)); + + regs->version = (2u << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ + regs_buff[0] = rd32(IGC_CTRL); + regs_buff[1] = rd32(IGC_STATUS); + regs_buff[2] = rd32(IGC_CTRL_EXT); + regs_buff[3] = rd32(IGC_MDIC); + regs_buff[4] = rd32(IGC_CONNSW); + + /* NVM Register */ + regs_buff[5] = rd32(IGC_EECD); + + /* Interrupt */ + /* 
Reading EICS for EICR because they read the + * same but EICS does not clear on read + */ + regs_buff[6] = rd32(IGC_EICS); + regs_buff[7] = rd32(IGC_EICS); + regs_buff[8] = rd32(IGC_EIMS); + regs_buff[9] = rd32(IGC_EIMC); + regs_buff[10] = rd32(IGC_EIAC); + regs_buff[11] = rd32(IGC_EIAM); + /* Reading ICS for ICR because they read the + * same but ICS does not clear on read + */ + regs_buff[12] = rd32(IGC_ICS); + regs_buff[13] = rd32(IGC_ICS); + regs_buff[14] = rd32(IGC_IMS); + regs_buff[15] = rd32(IGC_IMC); + regs_buff[16] = rd32(IGC_IAC); + regs_buff[17] = rd32(IGC_IAM); + + /* Flow Control */ + regs_buff[18] = rd32(IGC_FCAL); + regs_buff[19] = rd32(IGC_FCAH); + regs_buff[20] = rd32(IGC_FCTTV); + regs_buff[21] = rd32(IGC_FCRTL); + regs_buff[22] = rd32(IGC_FCRTH); + regs_buff[23] = rd32(IGC_FCRTV); + + /* Receive */ + regs_buff[24] = rd32(IGC_RCTL); + regs_buff[25] = rd32(IGC_RXCSUM); + regs_buff[26] = rd32(IGC_RLPML); + regs_buff[27] = rd32(IGC_RFCTL); + + /* Transmit */ + regs_buff[28] = rd32(IGC_TCTL); + regs_buff[29] = rd32(IGC_TIPG); + + /* Wake Up */ + + /* MAC */ + + /* Statistics */ + regs_buff[30] = adapter->stats.crcerrs; + regs_buff[31] = adapter->stats.algnerrc; + regs_buff[32] = adapter->stats.symerrs; + regs_buff[33] = adapter->stats.rxerrc; + regs_buff[34] = adapter->stats.mpc; + regs_buff[35] = adapter->stats.scc; + regs_buff[36] = adapter->stats.ecol; + regs_buff[37] = adapter->stats.mcc; + regs_buff[38] = adapter->stats.latecol; + regs_buff[39] = adapter->stats.colc; + regs_buff[40] = adapter->stats.dc; + regs_buff[41] = adapter->stats.tncrs; + regs_buff[42] = adapter->stats.sec; + regs_buff[43] = adapter->stats.htdpmc; + regs_buff[44] = adapter->stats.rlec; + regs_buff[45] = adapter->stats.xonrxc; + regs_buff[46] = adapter->stats.xontxc; + regs_buff[47] = adapter->stats.xoffrxc; + regs_buff[48] = adapter->stats.xofftxc; + regs_buff[49] = adapter->stats.fcruc; + regs_buff[50] = adapter->stats.prc64; + regs_buff[51] = adapter->stats.prc127; + regs_buff[52] = adapter->stats.prc255; + regs_buff[53] = adapter->stats.prc511; + regs_buff[54] = adapter->stats.prc1023; + regs_buff[55] = adapter->stats.prc1522; + regs_buff[56] = adapter->stats.gprc; + regs_buff[57] = adapter->stats.bprc; + regs_buff[58] = adapter->stats.mprc; + regs_buff[59] = adapter->stats.gptc; + regs_buff[60] = adapter->stats.gorc; + regs_buff[61] = adapter->stats.gotc; + regs_buff[62] = adapter->stats.rnbc; + regs_buff[63] = adapter->stats.ruc; + regs_buff[64] = adapter->stats.rfc; + regs_buff[65] = adapter->stats.roc; + regs_buff[66] = adapter->stats.rjc; + regs_buff[67] = adapter->stats.mgprc; + regs_buff[68] = adapter->stats.mgpdc; + regs_buff[69] = adapter->stats.mgptc; + regs_buff[70] = adapter->stats.tor; + regs_buff[71] = adapter->stats.tot; + regs_buff[72] = adapter->stats.tpr; + regs_buff[73] = adapter->stats.tpt; + regs_buff[74] = adapter->stats.ptc64; + regs_buff[75] = adapter->stats.ptc127; + regs_buff[76] = adapter->stats.ptc255; + regs_buff[77] = adapter->stats.ptc511; + regs_buff[78] = adapter->stats.ptc1023; + regs_buff[79] = adapter->stats.ptc1522; + regs_buff[80] = adapter->stats.mptc; + regs_buff[81] = adapter->stats.bptc; + regs_buff[82] = adapter->stats.tsctc; + regs_buff[83] = adapter->stats.iac; + regs_buff[84] = adapter->stats.rpthc; + regs_buff[85] = adapter->stats.hgptc; + regs_buff[86] = adapter->stats.hgorc; + regs_buff[87] = adapter->stats.hgotc; + regs_buff[88] = adapter->stats.lenerrs; + regs_buff[89] = adapter->stats.scvpc; + regs_buff[90] = adapter->stats.hrmpc; + + for (i = 
0; i < 4; i++) + regs_buff[91 + i] = rd32(IGC_SRRCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[95 + i] = rd32(IGC_PSRTYPE(i)); + for (i = 0; i < 4; i++) + regs_buff[99 + i] = rd32(IGC_RDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[103 + i] = rd32(IGC_RDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[107 + i] = rd32(IGC_RDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[111 + i] = rd32(IGC_RDH(i)); + for (i = 0; i < 4; i++) + regs_buff[115 + i] = rd32(IGC_RDT(i)); + for (i = 0; i < 4; i++) + regs_buff[119 + i] = rd32(IGC_RXDCTL(i)); + + for (i = 0; i < 10; i++) + regs_buff[123 + i] = rd32(IGC_EITR(i)); + for (i = 0; i < 16; i++) + regs_buff[139 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[145 + i] = rd32(IGC_RAH(i)); + + for (i = 0; i < 4; i++) + regs_buff[149 + i] = rd32(IGC_TDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[152 + i] = rd32(IGC_TDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[156 + i] = rd32(IGC_TDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[160 + i] = rd32(IGC_TDH(i)); + for (i = 0; i < 4; i++) + regs_buff[164 + i] = rd32(IGC_TDT(i)); + for (i = 0; i < 4; i++) + regs_buff[168 + i] = rd32(IGC_TXDCTL(i)); + + /* XXX: Due to a bug few lines above, RAL and RAH registers are + * overwritten. To preserve the ABI, we write these registers again in + * regs_buff. + */ + for (i = 0; i < 16; i++) + regs_buff[172 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[188 + i] = rd32(IGC_RAH(i)); + + regs_buff[204] = rd32(IGC_VLANPQF); + + for (i = 0; i < 8; i++) + regs_buff[205 + i] = rd32(IGC_ETQF(i)); + + regs_buff[213] = adapter->stats.tlpic; + regs_buff[214] = adapter->stats.rlpic; +} + +static void igc_ethtool_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + wol->wolopts = 0; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + + /* apply any specific unsupported masks here */ + switch (adapter->hw.device_id) { + default: + break; + } + + if (adapter->wol & IGC_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & IGC_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & IGC_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & IGC_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & IGC_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int igc_ethtool_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EOPNOTSUPP; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= IGC_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= IGC_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= IGC_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= IGC_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= IGC_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static u32 igc_ethtool_get_msglevel(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void igc_ethtool_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int igc_ethtool_nway_reset(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + return 0; +} + +static u32 igc_ethtool_get_link(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_mac_info *mac = &adapter->hw.mac; + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + mac->get_link_status = 1; + + return igc_has_link(adapter); +} + +static int igc_ethtool_get_eeprom_len(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->hw.nvm.word_size * 2; +} + +static int igc_ethtool_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int first_word, last_word; + u16 *eeprom_buff; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == igc_nvm_eeprom_spi) { + ret_val = hw->nvm.ops.read(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = hw->nvm.ops.read(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int igc_ethtool_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int max_len, first_word, last_word, ret_val = 0; + u16 *eeprom_buff; + void *ptr; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (hw->mac.type >= igc_i225 && + !igc_get_flash_presence_i225(hw)) { + return -EOPNOTSUPP; + } + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; 
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->nvm.ops.write(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) + hw->nvm.ops.update(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void +igc_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IGC_MAX_RXD; + ring->tx_max_pending = IGC_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int +igc_ethtool_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_ring *temp_ring; + u16 new_rx_count, new_tx_count; + int i, err = 0; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + new_rx_count = min_t(u32, ring->rx_pending, IGC_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGC_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = min_t(u32, ring->tx_pending, IGC_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGC_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == adapter->tx_ring_count && + new_rx_count == adapter->rx_ring_count) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_tx_queues)); + else + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_rx_queues)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igc_down(adapter); + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. 
+ */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_tx_count; + err = igc_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + igc_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_rx_count; + err = igc_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + igc_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } +err_setup: + igc_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGC_RESETTING, &adapter->state); + return err; +} + +static void igc_ethtool_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == igc_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int igc_ethtool_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = igc_fc_default; + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + retval = ((hw->phy.media_type == igc_media_type_copper) ? 
+ igc_force_mac_fc(hw) : igc_setup_link(hw)); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + return retval; +} + +static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igc_gstrings_test, + IGC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, igc_gstrings_stats[i].stat_string); + for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) + ethtool_sprintf(&p, + igc_gstrings_net_stats[i].stat_string); + for (i = 0; i < adapter->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "tx_queue_%u_restart", i); + } + for (i = 0; i < adapter->num_rx_queues; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_drops", i); + ethtool_sprintf(&p, "rx_queue_%u_csum_err", i); + ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i); + } + /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, igc_priv_flags_strings, + IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IGC_STATS_LEN; + case ETH_SS_TEST: + return IGC_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return IGC_PRIV_FLAGS_STR_LEN; + default: + return -ENOTSUPP; + } +} + +static void igc_ethtool_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igc_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igc_gstrings_stats[i].stat_offset; + data[i] = (igc_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset; + data[i] = (igc_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i + 1] = ring->tx_stats.bytes; + data[i + 2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry(&ring->tx_syncp2, start)); + data[i + 2] += restart2; + + i += IGC_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i + 1] = ring->rx_stats.bytes; + data[i + 2] = ring->rx_stats.drops; + data[i + 3] = ring->rx_stats.csum_err; + data[i + 4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); + i += IGC_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + +static int igc_ethtool_get_previous_rx_coalesce(struct igc_adapter *adapter) +{ + return (adapter->rx_itr_setting <= 3) ? + adapter->rx_itr_setting : adapter->rx_itr_setting >> 2; +} + +static int igc_ethtool_get_previous_tx_coalesce(struct igc_adapter *adapter) +{ + return (adapter->tx_itr_setting <= 3) ? + adapter->tx_itr_setting : adapter->tx_itr_setting >> 2; +} + +static int igc_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + ec->rx_coalesce_usecs = igc_ethtool_get_previous_rx_coalesce(adapter); + ec->tx_coalesce_usecs = igc_ethtool_get_previous_tx_coalesce(adapter); + + return 0; +} + +static int igc_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->rx_coalesce_usecs > 3 && + ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->rx_coalesce_usecs == 2) + return -EINVAL; + + if (ec->tx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->tx_coalesce_usecs > 3 && + ec->tx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->tx_coalesce_usecs == 2) + return -EINVAL; + + if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && + ec->tx_coalesce_usecs != igc_ethtool_get_previous_tx_coalesce(adapter)) { + NL_SET_ERR_MSG_MOD(extack, + "Queue Pair mode enabled, both Rx and Tx coalescing controlled by rx-usecs"); + return -EINVAL; + } + + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGC_FLAG_DMAC) + adapter->flags &= ~IGC_FLAG_DMAC; + } + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + + /* convert to rate of irq's per second */ + if (adapter->flags & IGC_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->tx.work_limit = adapter->tx_work_limit; + if 
(q_vector->rx.ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGC_START_ITR; + q_vector->set_itr = 1; + } + + return 0; +} + +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igc_nfc_rule *rule = NULL; + + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) + goto out; + + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = htons(rule->filter.etype); + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci); + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) { + fsp->flow_type |= FLOW_EXT; + memcpy(fsp->h_ext.data, rule->filter.user_data, sizeof(fsp->h_ext.data)); + memcpy(fsp->m_ext.data, rule->filter.user_mask, sizeof(fsp->m_ext.data)); + } + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +out: + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; +} + +static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igc_nfc_rule *rule; + int cnt = 0; + + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (cnt == cmd->rule_cnt) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EMSGSIZE; + } + rule_locs[cnt] = rule->location; + cnt++; + } + + mutex_unlock(&adapter->nfc_rule_lock); + + cmd->rule_cnt = cnt; + + return 0; +} + +static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on igc */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igc_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (cmd->cmd) { + case 
ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + return 0; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_rule_count; + return 0; + case ETHTOOL_GRXCLSRULE: + return igc_ethtool_get_nfc_rule(adapter, cmd); + case ETHTOOL_GRXCLSRLALL: + return igc_ethtool_get_nfc_rules(adapter, cmd, rule_locs); + case ETHTOOL_GRXFH: + return igc_ethtool_get_rss_hash_opts(adapter, cmd); + default: + return -EOPNOTSUPP; + } +} + +#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \ + IGC_FLAG_RSS_FIELD_IPV6_UDP) +static int igc_ethtool_set_rss_hash_opt(struct igc_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct igc_hw *hw = &adapter->hw; + u32 mrqc = rd32(IGC_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + netdev_err(adapter->netdev, + "Enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + mrqc |= IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP | + IGC_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + wr32(IGC_MRQC, mrqc); + } + + return 0; +} + +static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, + const struct ethtool_rx_flow_spec *fsp) +{ + INIT_LIST_HEAD(&rule->list); + + rule->action = fsp->ring_cookie; + rule->location = fsp->location; + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci); + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; + } + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + rule->filter.etype = ntohs(fsp->h_u.ether_spec.h_proto); + rule->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE; + } + + /* Both 
source and destination address filters only support the full + * mask. + */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(rule->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(rule->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } + + /* VLAN etype matching */ + if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) { + rule->filter.vlan_etype = fsp->h_ext.vlan_etype; + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE; + } + + /* Check for user defined data */ + if ((fsp->flow_type & FLOW_EXT) && + (fsp->h_ext.data[0] || fsp->h_ext.data[1])) { + rule->filter.match_flags |= IGC_FILTER_FLAG_USER_DATA; + memcpy(rule->filter.user_data, fsp->h_ext.data, sizeof(fsp->h_ext.data)); + memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data)); + } + + /* When multiple filter options or user data or vlan etype is set, use a + * flex filter. + */ + if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) || + (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) || + (rule->filter.match_flags & (rule->filter.match_flags - 1))) + rule->flex = true; + else + rule->flex = false; +} + +/** + * igc_ethtool_check_nfc_rule() - Check if NFC rule is valid + * @adapter: Pointer to adapter + * @rule: Rule under evaluation + * + * The driver doesn't support rules with multiple matches so if more than + * one bit in filter flags is set, @rule is considered invalid. + * + * Also, if there is already another rule with the same filter in a different + * location, @rule is considered invalid. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct net_device *dev = adapter->netdev; + u8 flags = rule->filter.match_flags; + struct igc_nfc_rule *tmp; + + if (!flags) { + netdev_dbg(dev, "Rule with no match\n"); + return -EINVAL; + } + + list_for_each_entry(tmp, &adapter->nfc_rule_list, list) { + if (!memcmp(&rule->filter, &tmp->filter, + sizeof(rule->filter)) && + tmp->location != rule->location) { + netdev_dbg(dev, "Rule already exists\n"); + return -EEXIST; + } + } + + return 0; +} + +static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_rule *rule, *old_rule; + int err; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) { + netdev_dbg(netdev, "N-tuple filters disabled\n"); + return -EOPNOTSUPP; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) { + netdev_dbg(netdev, "Only ethernet flow type is supported\n"); + return -EOPNOTSUPP; + } + + if (fsp->ring_cookie >= adapter->num_rx_queues) { + netdev_dbg(netdev, "Invalid action\n"); + return -EINVAL; + } + + if (fsp->location >= IGC_MAX_RXNFC_RULES) { + netdev_dbg(netdev, "Invalid location\n"); + return -EINVAL; + } + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + igc_ethtool_init_nfc_rule(rule, fsp); + + mutex_lock(&adapter->nfc_rule_lock); + + err = igc_ethtool_check_nfc_rule(adapter, rule); + if (err) + goto err; + + old_rule = igc_get_nfc_rule(adapter, fsp->location); + if (old_rule) + igc_del_nfc_rule(adapter, old_rule); + + err = igc_add_nfc_rule(adapter, rule); + if (err) + goto err; + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +err: + mutex_unlock(&adapter->nfc_rule_lock); + kfree(rule); + return err; +} + +static int igc_ethtool_del_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; + } + + igc_del_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; +} + +static int igc_ethtool_set_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + return igc_ethtool_set_rss_hash_opt(adapter, cmd); + case ETHTOOL_SRXCLSRLINS: + return igc_ethtool_add_nfc_rule(adapter, cmd); + case ETHTOOL_SRXCLSRLDEL: + return igc_ethtool_del_nfc_rule(adapter, cmd); + default: + return -EOPNOTSUPP; + } +} + +void igc_write_rss_indir_tbl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 reg = IGC_RETA(0); + u32 shift = 0; + int i = 0; + + while (i < IGC_RETA_SIZE) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= adapter->rss_indir_tbl[i + j]; + } + + wr32(reg, val << shift); + reg += 4; + i += 4; + } +} + +static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev) +{ + return IGC_RETA_SIZE; +} + +static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < IGC_RETA_SIZE; i++) + indir[i] = 
adapter->rss_indir_tbl[i]; + + return 0; +} + +static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 num_queues; + int i; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + num_queues = adapter->rss_queues; + + /* Verify user input. */ + for (i = 0; i < IGC_RETA_SIZE; i++) + if (indir[i] >= num_queues) + return -EINVAL; + + for (i = 0; i < IGC_RETA_SIZE; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + igc_write_rss_indir_tbl(adapter); + + return 0; +} + +static void igc_ethtool_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = igc_get_max_rss_queues(adapter); + + /* Report info for other vector */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + ch->combined_count = adapter->rss_queues; +} + +static int igc_ethtool_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; + unsigned int max_combined = 0; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* Verify other_count is valid and has not been changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ + max_combined = igc_get_max_rss_queues(adapter); + if (count > max_combined) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; + igc_set_flag_queue_pairs(adapter, max_combined); + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. 
+ */ + return igc_reinit_queues(adapter); + } + + return 0; +} + +static int igc_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + switch (adapter->hw.mac.type) { + case igc_i225: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); + + return 0; + default: + return -EOPNOTSUPP; + } +} + +static u32 igc_ethtool_get_priv_flags(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IGC_FLAG_RX_LEGACY) + priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IGC_FLAG_RX_LEGACY; + if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX) + flags |= IGC_FLAG_RX_LEGACY; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + } + + return 0; +} + +static int igc_ethtool_get_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 eeer; + + if (hw->dev_spec._base.eee_enable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + *edata = adapter->eee; + edata->supported = SUPPORTED_Autoneg; + + eeer = rd32(IGC_EEER); + + /* EEE status on negotiated link */ + if (eeer & IGC_EEER_EEE_NEG) + edata->eee_active = true; + + if (eeer & IGC_EEER_TX_LPI_EN) + edata->tx_lpi_enabled = true; + + edata->eee_enabled = hw->dev_spec._base.eee_enable; + + edata->advertised = SUPPORTED_Autoneg; + edata->lp_advertised = SUPPORTED_Autoneg; + + /* Report correct negotiated EEE status for devices that + * wrongly report EEE at half-duplex + */ + if (adapter->link_duplex == HALF_DUPLEX) { + edata->eee_enabled = false; + edata->eee_active = false; + edata->tx_lpi_enabled = false; + edata->advertised &= ~edata->advertised; + } + + return 0; +} + +static int igc_ethtool_set_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + s32 ret_val; + + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + + ret_val = igc_ethtool_get_eee(netdev, &eee_curr); + if (ret_val) { + netdev_err(netdev, + "Problem setting EEE advertisement options\n"); + return -EINVAL; + } + + if (eee_curr.eee_enabled) { + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + netdev_err(netdev, + "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + /* Tx LPI timer is not implemented currently */ + if (edata->tx_lpi_timer) { + netdev_err(netdev, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + } else if (!edata->eee_enabled) { + netdev_err(netdev, + "Setting EEE options are not supported with EEE disabled\n"); + return -EINVAL; + } + + adapter->eee_advert = 
ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + if (hw->dev_spec._base.eee_enable != edata->eee_enabled) { + hw->dev_spec._base.eee_enable = edata->eee_enabled; + adapter->flags |= IGC_FLAG_EEE; + + /* reset link */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + } + + return 0; +} + +static int igc_ethtool_begin(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; +} + +static void igc_ethtool_complete(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_put(&adapter->pdev->dev); +} + +static int igc_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 status; + u32 speed; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + /* supported link modes */ + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full); + + /* twisted pair */ + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy.addr; + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + + /* advertising link modes */ + if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); + + /* set autoneg settings */ + if (hw->mac.autoneg == 1) { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Autoneg); + } + + /* Set pause flow control settings */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + + switch (hw->fc.requested_mode) { + case igc_fc_full: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + break; + case igc_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + case igc_fc_tx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + default: + break; + } + + status = pm_runtime_suspended(&adapter->pdev->dev) ? + 0 : rd32(IGC_STATUS); + + if (status & IGC_STATUS_LU) { + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both + * 1 Gbps and 2.5 Gbps link modes. + * An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. 
+ */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + speed = SPEED_2500; + } else { + speed = SPEED_1000; + } + } else if (status & IGC_STATUS_SPEED_100) { + speed = SPEED_100; + } else { + speed = SPEED_10; + } + if ((status & IGC_STATUS_FD) || + hw->phy.media_type != igc_media_type_copper) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.speed = speed; + if (hw->mac.autoneg) + cmd->base.autoneg = AUTONEG_ENABLE; + else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == igc_media_type_copper) + cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + return 0; +} + +static int +igc_ethtool_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 advertising; + + /* When adapter in resetting mode, autoneg/speed/duplex + * cannot be changed + */ + if (igc_check_reset_block(hw)) { + netdev_err(dev, "Cannot change link characteristics when reset is active\n"); + return -EINVAL; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO && + cmd->base.autoneg != AUTONEG_ENABLE) { + netdev_err(dev, "Forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT. + * We have to check this and convert it to ADVERTISE_2500_FULL + * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly. 
+ */ + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full)) + advertising |= ADVERTISE_2500_FULL; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + hw->phy.autoneg_advertised = advertising; + if (adapter->fc_autoneg) + hw->fc.requested_mode = igc_fc_default; + } else { + netdev_info(dev, "Force mode currently not supported\n"); + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +static void igc_ethtool_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + netdev_info(adapter->netdev, "Offline testing starting"); + set_bit(__IGC_TESTING, &adapter->state); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + igc_close(netdev); + else + igc_reset(adapter); + + netdev_info(adapter->netdev, "Register testing starting"); + if (!igc_reg_test(adapter, &data[TEST_REG])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + netdev_info(adapter->netdev, "EEPROM testing starting"); + if (!igc_eeprom_test(adapter, &data[TEST_EEP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + /* loopback and interrupt tests + * will be implemented in the future + */ + data[TEST_LOOP] = 0; + data[TEST_IRQ] = 0; + + clear_bit(__IGC_TESTING, &adapter->state); + if (if_running) + igc_open(netdev); + } else { + netdev_info(adapter->netdev, "Online testing starting"); + + /* register, eeprom, intr and loopback tests not run online */ + data[TEST_REG] = 0; + data[TEST_EEP] = 0; + data[TEST_IRQ] = 0; + data[TEST_LOOP] = 0; + + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + msleep_interruptible(4 * 1000); +} + +static const struct ethtool_ops igc_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_drvinfo = igc_ethtool_get_drvinfo, + .get_regs_len = igc_ethtool_get_regs_len, + .get_regs = igc_ethtool_get_regs, + .get_wol = igc_ethtool_get_wol, + .set_wol = igc_ethtool_set_wol, + .get_msglevel = igc_ethtool_get_msglevel, + .set_msglevel = igc_ethtool_set_msglevel, + .nway_reset = igc_ethtool_nway_reset, + .get_link = igc_ethtool_get_link, + .get_eeprom_len = igc_ethtool_get_eeprom_len, + .get_eeprom = igc_ethtool_get_eeprom, + .set_eeprom = igc_ethtool_set_eeprom, + .get_ringparam = igc_ethtool_get_ringparam, + .set_ringparam = igc_ethtool_set_ringparam, + .get_pauseparam = igc_ethtool_get_pauseparam, + .set_pauseparam = igc_ethtool_set_pauseparam, + .get_strings = igc_ethtool_get_strings, + .get_sset_count = igc_ethtool_get_sset_count, + .get_ethtool_stats = igc_ethtool_get_stats, + .get_coalesce = igc_ethtool_get_coalesce, + .set_coalesce = igc_ethtool_set_coalesce, + .get_rxnfc = igc_ethtool_get_rxnfc, + .set_rxnfc = igc_ethtool_set_rxnfc, + .get_rxfh_indir_size = 
igc_ethtool_get_rxfh_indir_size, + .get_rxfh = igc_ethtool_get_rxfh, + .set_rxfh = igc_ethtool_set_rxfh, + .get_ts_info = igc_ethtool_get_ts_info, + .get_channels = igc_ethtool_get_channels, + .set_channels = igc_ethtool_set_channels, + .get_priv_flags = igc_ethtool_get_priv_flags, + .set_priv_flags = igc_ethtool_set_priv_flags, + .get_eee = igc_ethtool_get_eee, + .set_eee = igc_ethtool_set_eee, + .begin = igc_ethtool_begin, + .complete = igc_ethtool_complete, + .get_link_ksettings = igc_ethtool_get_link_ksettings, + .set_link_ksettings = igc_ethtool_set_link_ksettings, + .self_test = igc_ethtool_diag_test, +}; + +void igc_ethtool_set_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &igc_ethtool_ops; +} diff --git a/devices/igc/igc_ethtool-6.6-orig.c b/devices/igc/igc_ethtool-6.6-orig.c new file mode 100644 index 00000000..7ab6dd58 --- /dev/null +++ b/devices/igc/igc_ethtool-6.6-orig.c @@ -0,0 +1,1990 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +/* ethtool support for igc */ +#include <linux/if_vlan.h> +#include <linux/pm_runtime.h> +#include <linux/mdio.h> + +#include "igc.h" +#include "igc_diag.h" + +/* forward declaration */ +struct igc_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IGC_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct igc_adapter, _stat), \ + .stat_offset = offsetof(struct igc_adapter, _stat) \ +} + +static const struct igc_stats igc_gstrings_stats[] = { + IGC_STAT("rx_packets", stats.gprc), + IGC_STAT("tx_packets", stats.gptc), + IGC_STAT("rx_bytes", stats.gorc), + IGC_STAT("tx_bytes", stats.gotc), + IGC_STAT("rx_broadcast", stats.bprc), + IGC_STAT("tx_broadcast", stats.bptc), + IGC_STAT("rx_multicast", stats.mprc), + IGC_STAT("tx_multicast", stats.mptc), + IGC_STAT("multicast", stats.mprc), + IGC_STAT("collisions", stats.colc), + IGC_STAT("rx_crc_errors", stats.crcerrs), + IGC_STAT("rx_no_buffer_count", stats.rnbc), + IGC_STAT("rx_missed_errors", stats.mpc), + IGC_STAT("tx_aborted_errors", stats.ecol), + IGC_STAT("tx_carrier_errors", stats.tncrs), + IGC_STAT("tx_window_errors", stats.latecol), + IGC_STAT("tx_abort_late_coll", stats.latecol), + IGC_STAT("tx_deferred_ok", stats.dc), + IGC_STAT("tx_single_coll_ok", stats.scc), + IGC_STAT("tx_multi_coll_ok", stats.mcc), + IGC_STAT("tx_timeout_count", tx_timeout_count), + IGC_STAT("rx_long_length_errors", stats.roc), + IGC_STAT("rx_short_length_errors", stats.ruc), + IGC_STAT("rx_align_errors", stats.algnerrc), + IGC_STAT("tx_tcp_seg_good", stats.tsctc), + IGC_STAT("tx_tcp_seg_failed", stats.tsctfc), + IGC_STAT("rx_flow_control_xon", stats.xonrxc), + IGC_STAT("rx_flow_control_xoff", stats.xoffrxc), + IGC_STAT("tx_flow_control_xon", stats.xontxc), + IGC_STAT("tx_flow_control_xoff", stats.xofftxc), + IGC_STAT("rx_long_byte_count", stats.gorc), + IGC_STAT("tx_dma_out_of_sync", stats.doosync), + IGC_STAT("tx_smbus", stats.mgptc), + IGC_STAT("rx_smbus", stats.mgprc), + IGC_STAT("dropped_smbus", stats.mgpdc), + IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGC_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc), + IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + IGC_STAT("tx_lpi_counter", stats.tlpic), + IGC_STAT("rx_lpi_counter", stats.rlpic), + IGC_STAT("qbv_config_change_errors", qbv_config_change_errors), +}; + +#define
IGC_NETDEV_STAT(_net_stat) { \ + .stat_string = __stringify(_net_stat), \ + .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ +} + +static const struct igc_stats igc_gstrings_net_stats[] = { + IGC_NETDEV_STAT(rx_errors), + IGC_NETDEV_STAT(tx_errors), + IGC_NETDEV_STAT(tx_dropped), + IGC_NETDEV_STAT(rx_length_errors), + IGC_NETDEV_STAT(rx_over_errors), + IGC_NETDEV_STAT(rx_frame_errors), + IGC_NETDEV_STAT(rx_fifo_errors), + IGC_NETDEV_STAT(tx_fifo_errors), + IGC_NETDEV_STAT(tx_heartbeat_errors) +}; + +enum igc_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + +static const char igc_gstrings_test[][ETH_GSTRING_LEN] = { + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" +}; + +#define IGC_TEST_LEN (sizeof(igc_gstrings_test) / ETH_GSTRING_LEN) + +#define IGC_GLOBAL_STATS_LEN \ + (sizeof(igc_gstrings_stats) / sizeof(struct igc_stats)) +#define IGC_NETDEV_STATS_LEN \ + (sizeof(igc_gstrings_net_stats) / sizeof(struct igc_stats)) +#define IGC_RX_QUEUE_STATS_LEN \ + (sizeof(struct igc_rx_queue_stats) / sizeof(u64)) +#define IGC_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ +#define IGC_QUEUE_STATS_LEN \ + ((((struct igc_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGC_RX_QUEUE_STATS_LEN) + \ + (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \ + IGC_TX_QUEUE_STATS_LEN)) +#define IGC_STATS_LEN \ + (IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN) + +static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings) + +static void igc_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u16 nvm_version = 0; + u16 gphy_version; + + strscpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver)); + + /* NVM image version is reported as firmware version for i225 device */ + hw->nvm.ops.read(hw, IGC_NVM_DEV_STARTER, 1, &nvm_version); + + /* gPHY firmware version is reported as PHY FW version */ + gphy_version = igc_read_phy_fw_version(hw); + + scnprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%x:%x", + nvm_version, + gphy_version); + + strscpy(drvinfo->fw_version, adapter->fw_version, + sizeof(drvinfo->fw_version)); + + strscpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN; +} + +static int igc_ethtool_get_regs_len(struct net_device *netdev) +{ + return IGC_REGS_LEN * sizeof(u32); +} + +static void igc_ethtool_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IGC_REGS_LEN * sizeof(u32)); + + regs->version = (2u << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ + regs_buff[0] = rd32(IGC_CTRL); + regs_buff[1] = rd32(IGC_STATUS); + regs_buff[2] = rd32(IGC_CTRL_EXT); + regs_buff[3] = rd32(IGC_MDIC); + regs_buff[4] = rd32(IGC_CONNSW); + + /* NVM Register */ + regs_buff[5] = rd32(IGC_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read 
the + * same but EICS does not clear on read + */ + regs_buff[6] = rd32(IGC_EICS); + regs_buff[7] = rd32(IGC_EICS); + regs_buff[8] = rd32(IGC_EIMS); + regs_buff[9] = rd32(IGC_EIMC); + regs_buff[10] = rd32(IGC_EIAC); + regs_buff[11] = rd32(IGC_EIAM); + /* Reading ICS for ICR because they read the + * same but ICS does not clear on read + */ + regs_buff[12] = rd32(IGC_ICS); + regs_buff[13] = rd32(IGC_ICS); + regs_buff[14] = rd32(IGC_IMS); + regs_buff[15] = rd32(IGC_IMC); + regs_buff[16] = rd32(IGC_IAC); + regs_buff[17] = rd32(IGC_IAM); + + /* Flow Control */ + regs_buff[18] = rd32(IGC_FCAL); + regs_buff[19] = rd32(IGC_FCAH); + regs_buff[20] = rd32(IGC_FCTTV); + regs_buff[21] = rd32(IGC_FCRTL); + regs_buff[22] = rd32(IGC_FCRTH); + regs_buff[23] = rd32(IGC_FCRTV); + + /* Receive */ + regs_buff[24] = rd32(IGC_RCTL); + regs_buff[25] = rd32(IGC_RXCSUM); + regs_buff[26] = rd32(IGC_RLPML); + regs_buff[27] = rd32(IGC_RFCTL); + + /* Transmit */ + regs_buff[28] = rd32(IGC_TCTL); + regs_buff[29] = rd32(IGC_TIPG); + + /* Wake Up */ + + /* MAC */ + + /* Statistics */ + regs_buff[30] = adapter->stats.crcerrs; + regs_buff[31] = adapter->stats.algnerrc; + regs_buff[32] = adapter->stats.symerrs; + regs_buff[33] = adapter->stats.rxerrc; + regs_buff[34] = adapter->stats.mpc; + regs_buff[35] = adapter->stats.scc; + regs_buff[36] = adapter->stats.ecol; + regs_buff[37] = adapter->stats.mcc; + regs_buff[38] = adapter->stats.latecol; + regs_buff[39] = adapter->stats.colc; + regs_buff[40] = adapter->stats.dc; + regs_buff[41] = adapter->stats.tncrs; + regs_buff[42] = adapter->stats.sec; + regs_buff[43] = adapter->stats.htdpmc; + regs_buff[44] = adapter->stats.rlec; + regs_buff[45] = adapter->stats.xonrxc; + regs_buff[46] = adapter->stats.xontxc; + regs_buff[47] = adapter->stats.xoffrxc; + regs_buff[48] = adapter->stats.xofftxc; + regs_buff[49] = adapter->stats.fcruc; + regs_buff[50] = adapter->stats.prc64; + regs_buff[51] = adapter->stats.prc127; + regs_buff[52] = adapter->stats.prc255; + regs_buff[53] = adapter->stats.prc511; + regs_buff[54] = adapter->stats.prc1023; + regs_buff[55] = adapter->stats.prc1522; + regs_buff[56] = adapter->stats.gprc; + regs_buff[57] = adapter->stats.bprc; + regs_buff[58] = adapter->stats.mprc; + regs_buff[59] = adapter->stats.gptc; + regs_buff[60] = adapter->stats.gorc; + regs_buff[61] = adapter->stats.gotc; + regs_buff[62] = adapter->stats.rnbc; + regs_buff[63] = adapter->stats.ruc; + regs_buff[64] = adapter->stats.rfc; + regs_buff[65] = adapter->stats.roc; + regs_buff[66] = adapter->stats.rjc; + regs_buff[67] = adapter->stats.mgprc; + regs_buff[68] = adapter->stats.mgpdc; + regs_buff[69] = adapter->stats.mgptc; + regs_buff[70] = adapter->stats.tor; + regs_buff[71] = adapter->stats.tot; + regs_buff[72] = adapter->stats.tpr; + regs_buff[73] = adapter->stats.tpt; + regs_buff[74] = adapter->stats.ptc64; + regs_buff[75] = adapter->stats.ptc127; + regs_buff[76] = adapter->stats.ptc255; + regs_buff[77] = adapter->stats.ptc511; + regs_buff[78] = adapter->stats.ptc1023; + regs_buff[79] = adapter->stats.ptc1522; + regs_buff[80] = adapter->stats.mptc; + regs_buff[81] = adapter->stats.bptc; + regs_buff[82] = adapter->stats.tsctc; + regs_buff[83] = adapter->stats.iac; + regs_buff[84] = adapter->stats.rpthc; + regs_buff[85] = adapter->stats.hgptc; + regs_buff[86] = adapter->stats.hgorc; + regs_buff[87] = adapter->stats.hgotc; + regs_buff[88] = adapter->stats.lenerrs; + regs_buff[89] = adapter->stats.scvpc; + regs_buff[90] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) + regs_buff[91 + i] = 
rd32(IGC_SRRCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[95 + i] = rd32(IGC_PSRTYPE(i)); + for (i = 0; i < 4; i++) + regs_buff[99 + i] = rd32(IGC_RDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[103 + i] = rd32(IGC_RDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[107 + i] = rd32(IGC_RDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[111 + i] = rd32(IGC_RDH(i)); + for (i = 0; i < 4; i++) + regs_buff[115 + i] = rd32(IGC_RDT(i)); + for (i = 0; i < 4; i++) + regs_buff[119 + i] = rd32(IGC_RXDCTL(i)); + + for (i = 0; i < 10; i++) + regs_buff[123 + i] = rd32(IGC_EITR(i)); + for (i = 0; i < 16; i++) + regs_buff[139 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[145 + i] = rd32(IGC_RAH(i)); + + for (i = 0; i < 4; i++) + regs_buff[149 + i] = rd32(IGC_TDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[152 + i] = rd32(IGC_TDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[156 + i] = rd32(IGC_TDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[160 + i] = rd32(IGC_TDH(i)); + for (i = 0; i < 4; i++) + regs_buff[164 + i] = rd32(IGC_TDT(i)); + for (i = 0; i < 4; i++) + regs_buff[168 + i] = rd32(IGC_TXDCTL(i)); + + /* XXX: Due to a bug few lines above, RAL and RAH registers are + * overwritten. To preserve the ABI, we write these registers again in + * regs_buff. + */ + for (i = 0; i < 16; i++) + regs_buff[172 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[188 + i] = rd32(IGC_RAH(i)); + + regs_buff[204] = rd32(IGC_VLANPQF); + + for (i = 0; i < 8; i++) + regs_buff[205 + i] = rd32(IGC_ETQF(i)); + + regs_buff[213] = adapter->stats.tlpic; + regs_buff[214] = adapter->stats.rlpic; +} + +static void igc_ethtool_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + wol->wolopts = 0; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + + /* apply any specific unsupported masks here */ + switch (adapter->hw.device_id) { + default: + break; + } + + if (adapter->wol & IGC_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & IGC_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & IGC_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & IGC_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & IGC_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int igc_ethtool_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) + return -EOPNOTSUPP; + + if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= IGC_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= IGC_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= IGC_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= IGC_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= IGC_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static u32 igc_ethtool_get_msglevel(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void igc_ethtool_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int igc_ethtool_nway_reset(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + return 0; +} + +static u32 igc_ethtool_get_link(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_mac_info *mac = &adapter->hw.mac; + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + mac->get_link_status = 1; + + return igc_has_link(adapter); +} + +static int igc_ethtool_get_eeprom_len(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->hw.nvm.word_size * 2; +} + +static int igc_ethtool_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int first_word, last_word; + u16 *eeprom_buff; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == igc_nvm_eeprom_spi) { + ret_val = hw->nvm.ops.read(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = hw->nvm.ops.read(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int igc_ethtool_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int max_len, first_word, last_word, ret_val = 0; + u16 *eeprom_buff; + void *ptr; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (hw->mac.type >= igc_i225 && + !igc_get_flash_presence_i225(hw)) { + return -EOPNOTSUPP; + } + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; 
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->nvm.ops.write(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) + hw->nvm.ops.update(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void +igc_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IGC_MAX_RXD; + ring->tx_max_pending = IGC_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int +igc_ethtool_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_ring *temp_ring; + u16 new_rx_count, new_tx_count; + int i, err = 0; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + new_rx_count = min_t(u32, ring->rx_pending, IGC_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGC_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = min_t(u32, ring->tx_pending, IGC_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGC_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == adapter->tx_ring_count && + new_rx_count == adapter->rx_ring_count) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_tx_queues)); + else + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_rx_queues)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igc_down(adapter); + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. 
+ */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_tx_count; + err = igc_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + igc_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_rx_count; + err = igc_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + igc_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } +err_setup: + igc_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGC_RESETTING, &adapter->state); + return err; +} + +static void igc_ethtool_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == igc_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int igc_ethtool_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = igc_fc_default; + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + retval = ((hw->phy.media_type == igc_media_type_copper) ? 
+ igc_force_mac_fc(hw) : igc_setup_link(hw)); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + return retval; +} + +static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igc_gstrings_test, + IGC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, igc_gstrings_stats[i].stat_string); + for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) + ethtool_sprintf(&p, + igc_gstrings_net_stats[i].stat_string); + for (i = 0; i < adapter->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "tx_queue_%u_restart", i); + } + for (i = 0; i < adapter->num_rx_queues; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + ethtool_sprintf(&p, "rx_queue_%u_drops", i); + ethtool_sprintf(&p, "rx_queue_%u_csum_err", i); + ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i); + } + /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, igc_priv_flags_strings, + IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IGC_STATS_LEN; + case ETH_SS_TEST: + return IGC_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return IGC_PRIV_FLAGS_STR_LEN; + default: + return -ENOTSUPP; + } +} + +static void igc_ethtool_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igc_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igc_gstrings_stats[i].stat_offset; + data[i] = (igc_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset; + data[i] = (igc_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i + 1] = ring->tx_stats.bytes; + data[i + 2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry(&ring->tx_syncp2, start)); + data[i + 2] += restart2; + + i += IGC_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i + 1] = ring->rx_stats.bytes; + data[i + 2] = ring->rx_stats.drops; + data[i + 3] = ring->rx_stats.csum_err; + data[i + 4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); + i += IGC_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + +static int igc_ethtool_get_previous_rx_coalesce(struct igc_adapter *adapter) +{ + return (adapter->rx_itr_setting <= 3) ? + adapter->rx_itr_setting : adapter->rx_itr_setting >> 2; +} + +static int igc_ethtool_get_previous_tx_coalesce(struct igc_adapter *adapter) +{ + return (adapter->tx_itr_setting <= 3) ? + adapter->tx_itr_setting : adapter->tx_itr_setting >> 2; +} + +static int igc_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + ec->rx_coalesce_usecs = igc_ethtool_get_previous_rx_coalesce(adapter); + ec->tx_coalesce_usecs = igc_ethtool_get_previous_tx_coalesce(adapter); + + return 0; +} + +static int igc_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->rx_coalesce_usecs > 3 && + ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->rx_coalesce_usecs == 2) + return -EINVAL; + + if (ec->tx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->tx_coalesce_usecs > 3 && + ec->tx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->tx_coalesce_usecs == 2) + return -EINVAL; + + if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && + ec->tx_coalesce_usecs != igc_ethtool_get_previous_tx_coalesce(adapter)) { + NL_SET_ERR_MSG_MOD(extack, + "Queue Pair mode enabled, both Rx and Tx coalescing controlled by rx-usecs"); + return -EINVAL; + } + + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGC_FLAG_DMAC) + adapter->flags &= ~IGC_FLAG_DMAC; + } + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + + /* convert to rate of irq's per second */ + if (adapter->flags & IGC_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->tx.work_limit = adapter->tx_work_limit; + if 
(q_vector->rx.ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGC_START_ITR; + q_vector->set_itr = 1; + } + + return 0; +} + +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igc_nfc_rule *rule = NULL; + + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) + goto out; + + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = htons(rule->filter.etype); + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci); + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) { + fsp->flow_type |= FLOW_EXT; + memcpy(fsp->h_ext.data, rule->filter.user_data, sizeof(fsp->h_ext.data)); + memcpy(fsp->m_ext.data, rule->filter.user_mask, sizeof(fsp->m_ext.data)); + } + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +out: + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; +} + +static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igc_nfc_rule *rule; + int cnt = 0; + + cmd->data = IGC_MAX_RXNFC_RULES; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (cnt == cmd->rule_cnt) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EMSGSIZE; + } + rule_locs[cnt] = rule->location; + cnt++; + } + + mutex_unlock(&adapter->nfc_rule_lock); + + cmd->rule_cnt = cnt; + + return 0; +} + +static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on igc */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V6_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igc_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (cmd->cmd) { + case 
ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + return 0; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_rule_count; + return 0; + case ETHTOOL_GRXCLSRULE: + return igc_ethtool_get_nfc_rule(adapter, cmd); + case ETHTOOL_GRXCLSRLALL: + return igc_ethtool_get_nfc_rules(adapter, cmd, rule_locs); + case ETHTOOL_GRXFH: + return igc_ethtool_get_rss_hash_opts(adapter, cmd); + default: + return -EOPNOTSUPP; + } +} + +#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \ + IGC_FLAG_RSS_FIELD_IPV6_UDP) +static int igc_ethtool_set_rss_hash_opt(struct igc_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct igc_hw *hw = &adapter->hw; + u32 mrqc = rd32(IGC_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + netdev_err(adapter->netdev, + "Enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + mrqc |= IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP | + IGC_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + wr32(IGC_MRQC, mrqc); + } + + return 0; +} + +static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, + const struct ethtool_rx_flow_spec *fsp) +{ + INIT_LIST_HEAD(&rule->list); + + rule->action = fsp->ring_cookie; + rule->location = fsp->location; + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci); + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; + } + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + rule->filter.etype = ntohs(fsp->h_u.ether_spec.h_proto); + rule->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE; + } + + /* Both 
source and destination address filters only support the full + * mask. + */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(rule->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + rule->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(rule->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } + + /* VLAN etype matching */ + if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) { + rule->filter.vlan_etype = fsp->h_ext.vlan_etype; + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE; + } + + /* Check for user defined data */ + if ((fsp->flow_type & FLOW_EXT) && + (fsp->h_ext.data[0] || fsp->h_ext.data[1])) { + rule->filter.match_flags |= IGC_FILTER_FLAG_USER_DATA; + memcpy(rule->filter.user_data, fsp->h_ext.data, sizeof(fsp->h_ext.data)); + memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data)); + } + + /* When multiple filter options or user data or vlan etype is set, use a + * flex filter. + */ + if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) || + (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) || + (rule->filter.match_flags & (rule->filter.match_flags - 1))) + rule->flex = true; + else + rule->flex = false; +} + +/** + * igc_ethtool_check_nfc_rule() - Check if NFC rule is valid + * @adapter: Pointer to adapter + * @rule: Rule under evaluation + * + * The driver doesn't support rules with multiple matches so if more than + * one bit in filter flags is set, @rule is considered invalid. + * + * Also, if there is already another rule with the same filter in a different + * location, @rule is considered invalid. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct net_device *dev = adapter->netdev; + u8 flags = rule->filter.match_flags; + struct igc_nfc_rule *tmp; + + if (!flags) { + netdev_dbg(dev, "Rule with no match\n"); + return -EINVAL; + } + + list_for_each_entry(tmp, &adapter->nfc_rule_list, list) { + if (!memcmp(&rule->filter, &tmp->filter, + sizeof(rule->filter)) && + tmp->location != rule->location) { + netdev_dbg(dev, "Rule already exists\n"); + return -EEXIST; + } + } + + return 0; +} + +static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_rule *rule, *old_rule; + int err; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) { + netdev_dbg(netdev, "N-tuple filters disabled\n"); + return -EOPNOTSUPP; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) { + netdev_dbg(netdev, "Only ethernet flow type is supported\n"); + return -EOPNOTSUPP; + } + + if (fsp->ring_cookie >= adapter->num_rx_queues) { + netdev_dbg(netdev, "Invalid action\n"); + return -EINVAL; + } + + if (fsp->location >= IGC_MAX_RXNFC_RULES) { + netdev_dbg(netdev, "Invalid location\n"); + return -EINVAL; + } + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + igc_ethtool_init_nfc_rule(rule, fsp); + + mutex_lock(&adapter->nfc_rule_lock); + + err = igc_ethtool_check_nfc_rule(adapter, rule); + if (err) + goto err; + + old_rule = igc_get_nfc_rule(adapter, fsp->location); + if (old_rule) + igc_del_nfc_rule(adapter, old_rule); + + err = igc_add_nfc_rule(adapter, rule); + if (err) + goto err; + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; + +err: + mutex_unlock(&adapter->nfc_rule_lock); + kfree(rule); + return err; +} + +static int igc_ethtool_del_nfc_rule(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + rule = igc_get_nfc_rule(adapter, fsp->location); + if (!rule) { + mutex_unlock(&adapter->nfc_rule_lock); + return -EINVAL; + } + + igc_del_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); + return 0; +} + +static int igc_ethtool_set_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + return igc_ethtool_set_rss_hash_opt(adapter, cmd); + case ETHTOOL_SRXCLSRLINS: + return igc_ethtool_add_nfc_rule(adapter, cmd); + case ETHTOOL_SRXCLSRLDEL: + return igc_ethtool_del_nfc_rule(adapter, cmd); + default: + return -EOPNOTSUPP; + } +} + +void igc_write_rss_indir_tbl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 reg = IGC_RETA(0); + u32 shift = 0; + int i = 0; + + while (i < IGC_RETA_SIZE) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= adapter->rss_indir_tbl[i + j]; + } + + wr32(reg, val << shift); + reg += 4; + i += 4; + } +} + +static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev) +{ + return IGC_RETA_SIZE; +} + +static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < IGC_RETA_SIZE; i++) + indir[i] = 
adapter->rss_indir_tbl[i]; + + return 0; +} + +static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 num_queues; + int i; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + num_queues = adapter->rss_queues; + + /* Verify user input. */ + for (i = 0; i < IGC_RETA_SIZE; i++) + if (indir[i] >= num_queues) + return -EINVAL; + + for (i = 0; i < IGC_RETA_SIZE; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + igc_write_rss_indir_tbl(adapter); + + return 0; +} + +static void igc_ethtool_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = igc_get_max_rss_queues(adapter); + + /* Report info for other vector */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + ch->combined_count = adapter->rss_queues; +} + +static int igc_ethtool_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; + unsigned int max_combined = 0; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* Verify other_count is valid and has not been changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ + max_combined = igc_get_max_rss_queues(adapter); + if (count > max_combined) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; + igc_set_flag_queue_pairs(adapter, max_combined); + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. 
+ */ + return igc_reinit_queues(adapter); + } + + return 0; +} + +static int igc_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + switch (adapter->hw.mac.type) { + case igc_i225: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); + + return 0; + default: + return -EOPNOTSUPP; + } +} + +static u32 igc_ethtool_get_priv_flags(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IGC_FLAG_RX_LEGACY) + priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IGC_FLAG_RX_LEGACY; + if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX) + flags |= IGC_FLAG_RX_LEGACY; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + } + + return 0; +} + +static int igc_ethtool_get_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 eeer; + + if (hw->dev_spec._base.eee_enable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + *edata = adapter->eee; + edata->supported = SUPPORTED_Autoneg; + + eeer = rd32(IGC_EEER); + + /* EEE status on negotiated link */ + if (eeer & IGC_EEER_EEE_NEG) + edata->eee_active = true; + + if (eeer & IGC_EEER_TX_LPI_EN) + edata->tx_lpi_enabled = true; + + edata->eee_enabled = hw->dev_spec._base.eee_enable; + + edata->advertised = SUPPORTED_Autoneg; + edata->lp_advertised = SUPPORTED_Autoneg; + + /* Report correct negotiated EEE status for devices that + * wrongly report EEE at half-duplex + */ + if (adapter->link_duplex == HALF_DUPLEX) { + edata->eee_enabled = false; + edata->eee_active = false; + edata->tx_lpi_enabled = false; + edata->advertised &= ~edata->advertised; + } + + return 0; +} + +static int igc_ethtool_set_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + s32 ret_val; + + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + + ret_val = igc_ethtool_get_eee(netdev, &eee_curr); + if (ret_val) { + netdev_err(netdev, + "Problem setting EEE advertisement options\n"); + return -EINVAL; + } + + if (eee_curr.eee_enabled) { + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + netdev_err(netdev, + "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + /* Tx LPI timer is not implemented currently */ + if (edata->tx_lpi_timer) { + netdev_err(netdev, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + } else if (!edata->eee_enabled) { + netdev_err(netdev, + "Setting EEE options are not supported with EEE disabled\n"); + return -EINVAL; + } + + adapter->eee_advert = 
ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + if (hw->dev_spec._base.eee_enable != edata->eee_enabled) { + hw->dev_spec._base.eee_enable = edata->eee_enabled; + adapter->flags |= IGC_FLAG_EEE; + + /* reset link */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + } + + return 0; +} + +static int igc_ethtool_begin(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; +} + +static void igc_ethtool_complete(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_put(&adapter->pdev->dev); +} + +static int igc_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 status; + u32 speed; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + /* supported link modes */ + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full); + + /* twisted pair */ + cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy.addr; + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + + /* advertising link modes */ + if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); + + /* set autoneg settings */ + if (hw->mac.autoneg == 1) { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Autoneg); + } + + /* Set pause flow control settings */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + + switch (hw->fc.requested_mode) { + case igc_fc_full: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + break; + case igc_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + case igc_fc_tx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + default: + break; + } + + status = pm_runtime_suspended(&adapter->pdev->dev) ? + 0 : rd32(IGC_STATUS); + + if (status & IGC_STATUS_LU) { + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both + * 1 Gbps and 2.5 Gbps link modes. + * An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. 
+ */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + speed = SPEED_2500; + } else { + speed = SPEED_1000; + } + } else if (status & IGC_STATUS_SPEED_100) { + speed = SPEED_100; + } else { + speed = SPEED_10; + } + if ((status & IGC_STATUS_FD) || + hw->phy.media_type != igc_media_type_copper) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.speed = speed; + if (hw->mac.autoneg) + cmd->base.autoneg = AUTONEG_ENABLE; + else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == igc_media_type_copper) + cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + return 0; +} + +static int +igc_ethtool_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 advertising; + + /* When adapter in resetting mode, autoneg/speed/duplex + * cannot be changed + */ + if (igc_check_reset_block(hw)) { + netdev_err(dev, "Cannot change link characteristics when reset is active\n"); + return -EINVAL; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO && + cmd->base.autoneg != AUTONEG_ENABLE) { + netdev_err(dev, "Forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT. + * We have to check this and convert it to ADVERTISE_2500_FULL + * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly. 
+ */ + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full)) + advertising |= ADVERTISE_2500_FULL; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + hw->phy.autoneg_advertised = advertising; + if (adapter->fc_autoneg) + hw->fc.requested_mode = igc_fc_default; + } else { + netdev_info(dev, "Force mode currently not supported\n"); + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +static void igc_ethtool_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + netdev_info(adapter->netdev, "Offline testing starting"); + set_bit(__IGC_TESTING, &adapter->state); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + igc_close(netdev); + else + igc_reset(adapter); + + netdev_info(adapter->netdev, "Register testing starting"); + if (!igc_reg_test(adapter, &data[TEST_REG])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + netdev_info(adapter->netdev, "EEPROM testing starting"); + if (!igc_eeprom_test(adapter, &data[TEST_EEP])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igc_reset(adapter); + + /* loopback and interrupt tests + * will be implemented in the future + */ + data[TEST_LOOP] = 0; + data[TEST_IRQ] = 0; + + clear_bit(__IGC_TESTING, &adapter->state); + if (if_running) + igc_open(netdev); + } else { + netdev_info(adapter->netdev, "Online testing starting"); + + /* register, eeprom, intr and loopback tests not run online */ + data[TEST_REG] = 0; + data[TEST_EEP] = 0; + data[TEST_IRQ] = 0; + data[TEST_LOOP] = 0; + + if (!igc_link_test(adapter, &data[TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + msleep_interruptible(4 * 1000); +} + +static const struct ethtool_ops igc_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_drvinfo = igc_ethtool_get_drvinfo, + .get_regs_len = igc_ethtool_get_regs_len, + .get_regs = igc_ethtool_get_regs, + .get_wol = igc_ethtool_get_wol, + .set_wol = igc_ethtool_set_wol, + .get_msglevel = igc_ethtool_get_msglevel, + .set_msglevel = igc_ethtool_set_msglevel, + .nway_reset = igc_ethtool_nway_reset, + .get_link = igc_ethtool_get_link, + .get_eeprom_len = igc_ethtool_get_eeprom_len, + .get_eeprom = igc_ethtool_get_eeprom, + .set_eeprom = igc_ethtool_set_eeprom, + .get_ringparam = igc_ethtool_get_ringparam, + .set_ringparam = igc_ethtool_set_ringparam, + .get_pauseparam = igc_ethtool_get_pauseparam, + .set_pauseparam = igc_ethtool_set_pauseparam, + .get_strings = igc_ethtool_get_strings, + .get_sset_count = igc_ethtool_get_sset_count, + .get_ethtool_stats = igc_ethtool_get_stats, + .get_coalesce = igc_ethtool_get_coalesce, + .set_coalesce = igc_ethtool_set_coalesce, + .get_rxnfc = igc_ethtool_get_rxnfc, + .set_rxnfc = igc_ethtool_set_rxnfc, + .get_rxfh_indir_size = 
igc_ethtool_get_rxfh_indir_size,
+ .get_rxfh = igc_ethtool_get_rxfh,
+ .set_rxfh = igc_ethtool_set_rxfh,
+ .get_ts_info = igc_ethtool_get_ts_info,
+ .get_channels = igc_ethtool_get_channels,
+ .set_channels = igc_ethtool_set_channels,
+ .get_priv_flags = igc_ethtool_get_priv_flags,
+ .set_priv_flags = igc_ethtool_set_priv_flags,
+ .get_eee = igc_ethtool_get_eee,
+ .set_eee = igc_ethtool_set_eee,
+ .begin = igc_ethtool_begin,
+ .complete = igc_ethtool_complete,
+ .get_link_ksettings = igc_ethtool_get_link_ksettings,
+ .set_link_ksettings = igc_ethtool_set_link_ksettings,
+ .self_test = igc_ethtool_diag_test,
+};
+
+void igc_ethtool_set_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &igc_ethtool_ops;
+}
diff --git a/devices/igc/igc_hw-6.4-ethercat.h b/devices/igc/igc_hw-6.4-ethercat.h
new file mode 100644
index 00000000..56408bbd
--- /dev/null
+++ b/devices/igc/igc_hw-6.4-ethercat.h
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_HW_H_
+#define _IGC_HW_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#include "igc_regs-6.4-ethercat.h"
+#include "igc_defines-6.4-ethercat.h"
+#include "igc_mac-6.4-ethercat.h"
+#include "igc_phy-6.4-ethercat.h"
+#include "igc_nvm-6.4-ethercat.h"
+#include "igc_i225-6.4-ethercat.h"
+#include "igc_base-6.4-ethercat.h"
+
+#define IGC_DEV_ID_I225_LM 0x15F2
+#define IGC_DEV_ID_I225_V 0x15F3
+#define IGC_DEV_ID_I225_I 0x15F8
+#define IGC_DEV_ID_I220_V 0x15F7
+#define IGC_DEV_ID_I225_K 0x3100
+#define IGC_DEV_ID_I225_K2 0x3101
+#define IGC_DEV_ID_I226_K 0x3102
+#define IGC_DEV_ID_I225_LMVP 0x5502
+#define IGC_DEV_ID_I226_LMVP 0x5503
+#define IGC_DEV_ID_I225_IT 0x0D9F
+#define IGC_DEV_ID_I226_LM 0x125B
+#define IGC_DEV_ID_I226_V 0x125C
+#define IGC_DEV_ID_I226_IT 0x125D
+#define IGC_DEV_ID_I221_V 0x125E
+#define IGC_DEV_ID_I226_BLANK_NVM 0x125F
+#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD
+
+/* Function pointers for the MAC. */
+struct igc_mac_operations {
+ s32 (*check_for_link)(struct igc_hw *hw);
+ s32 (*reset_hw)(struct igc_hw *hw);
+ s32 (*init_hw)(struct igc_hw *hw);
+ s32 (*setup_physical_interface)(struct igc_hw *hw);
+ void (*rar_set)(struct igc_hw *hw, u8 *address, u32 index);
+ s32 (*read_mac_addr)(struct igc_hw *hw);
+ s32 (*get_speed_and_duplex)(struct igc_hw *hw, u16 *speed,
+ u16 *duplex);
+ s32 (*acquire_swfw_sync)(struct igc_hw *hw, u16 mask);
+ void (*release_swfw_sync)(struct igc_hw *hw, u16 mask);
+};
+
+enum igc_mac_type {
+ igc_undefined = 0,
+ igc_i225,
+ igc_num_macs /* List is 1-based, so subtract 1 for true count.
*/ +}; + +enum igc_media_type { + igc_media_type_unknown = 0, + igc_media_type_copper = 1, + igc_num_media_types +}; + +enum igc_nvm_type { + igc_nvm_unknown = 0, + igc_nvm_eeprom_spi, +}; + +struct igc_info { + s32 (*get_invariants)(struct igc_hw *hw); + struct igc_mac_operations *mac_ops; + const struct igc_phy_operations *phy_ops; + struct igc_nvm_operations *nvm_ops; +}; + +extern const struct igc_info igc_base_info; + +struct igc_mac_info { + struct igc_mac_operations ops; + + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + enum igc_mac_type type; + + u32 mc_filter_type; + + u16 mta_reg_count; + u16 uta_reg_count; + + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + bool asf_firmware_present; + bool arc_subsystem_valid; + + bool autoneg; + bool autoneg_failed; + bool get_link_status; +}; + +struct igc_nvm_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*read)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*write)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + s32 (*update)(struct igc_hw *hw); + s32 (*validate)(struct igc_hw *hw); +}; + +struct igc_phy_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*check_reset_block)(struct igc_hw *hw); + s32 (*force_speed_duplex)(struct igc_hw *hw); + s32 (*get_phy_info)(struct igc_hw *hw); + s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*reset)(struct igc_hw *hw); + s32 (*write_reg)(struct igc_hw *hw, u32 address, u16 data); +}; + +struct igc_nvm_info { + struct igc_nvm_operations ops; + enum igc_nvm_type type; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct igc_phy_info { + struct igc_phy_operations ops; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum igc_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + + u8 mdix; + + bool is_mdix; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct igc_bus_info { + u16 func; + u16 pci_cmd_word; +}; + +enum igc_fc_mode { + igc_fc_none = 0, + igc_fc_rx_pause, + igc_fc_tx_pause, + igc_fc_full, + igc_fc_default = 0xFF +}; + +struct igc_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum igc_fc_mode current_mode; /* Type of flow control */ + enum igc_fc_mode requested_mode; +}; + +struct igc_dev_spec_base { + bool clear_semaphore_once; + bool eee_enable; +}; + +struct igc_hw { + void *back; + + u8 __iomem *hw_addr; + unsigned long io_base; + + struct igc_mac_info mac; + struct igc_fc_info fc; + struct igc_nvm_info nvm; + struct igc_phy_info phy; + + struct igc_bus_info bus; + + union { + struct igc_dev_spec_base _base; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +/* Statistics counters collected by the MAC */ +struct igc_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 tlpic; + u64 rlpic; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; 
+ u64 gotc;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 tor;
+ u64 tot;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 iac;
+ u64 htdpmc;
+ u64 rpthc;
+ u64 hgptc;
+ u64 hgorc;
+ u64 hgotc;
+ u64 lenerrs;
+ u64 scvpc;
+ u64 hrmpc;
+ u64 doosync;
+ u64 o2bgptc;
+ u64 o2bspc;
+ u64 b2ospc;
+ u64 b2ogprc;
+ u64 txdrop;
+};
+
+struct net_device *igc_get_hw_dev(struct igc_hw *hw);
+#define hw_dbg(format, arg...) \
+ netdev_dbg(igc_get_hw_dev(hw), format, ##arg)
+
+s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
+s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
+void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
+void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
+
+#endif /* _IGC_HW_H_ */
diff --git a/devices/igc/igc_hw-6.4-orig.h b/devices/igc/igc_hw-6.4-orig.h
new file mode 100644
index 00000000..e1c572e0
--- /dev/null
+++ b/devices/igc/igc_hw-6.4-orig.h
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_HW_H_
+#define _IGC_HW_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#include "igc_regs.h"
+#include "igc_defines.h"
+#include "igc_mac.h"
+#include "igc_phy.h"
+#include "igc_nvm.h"
+#include "igc_i225.h"
+#include "igc_base.h"
+
+#define IGC_DEV_ID_I225_LM 0x15F2
+#define IGC_DEV_ID_I225_V 0x15F3
+#define IGC_DEV_ID_I225_I 0x15F8
+#define IGC_DEV_ID_I220_V 0x15F7
+#define IGC_DEV_ID_I225_K 0x3100
+#define IGC_DEV_ID_I225_K2 0x3101
+#define IGC_DEV_ID_I226_K 0x3102
+#define IGC_DEV_ID_I225_LMVP 0x5502
+#define IGC_DEV_ID_I226_LMVP 0x5503
+#define IGC_DEV_ID_I225_IT 0x0D9F
+#define IGC_DEV_ID_I226_LM 0x125B
+#define IGC_DEV_ID_I226_V 0x125C
+#define IGC_DEV_ID_I226_IT 0x125D
+#define IGC_DEV_ID_I221_V 0x125E
+#define IGC_DEV_ID_I226_BLANK_NVM 0x125F
+#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD
+
+/* Function pointers for the MAC. */
+struct igc_mac_operations {
+ s32 (*check_for_link)(struct igc_hw *hw);
+ s32 (*reset_hw)(struct igc_hw *hw);
+ s32 (*init_hw)(struct igc_hw *hw);
+ s32 (*setup_physical_interface)(struct igc_hw *hw);
+ void (*rar_set)(struct igc_hw *hw, u8 *address, u32 index);
+ s32 (*read_mac_addr)(struct igc_hw *hw);
+ s32 (*get_speed_and_duplex)(struct igc_hw *hw, u16 *speed,
+ u16 *duplex);
+ s32 (*acquire_swfw_sync)(struct igc_hw *hw, u16 mask);
+ void (*release_swfw_sync)(struct igc_hw *hw, u16 mask);
+};
+
+enum igc_mac_type {
+ igc_undefined = 0,
+ igc_i225,
+ igc_num_macs /* List is 1-based, so subtract 1 for true count.
*/ +}; + +enum igc_media_type { + igc_media_type_unknown = 0, + igc_media_type_copper = 1, + igc_num_media_types +}; + +enum igc_nvm_type { + igc_nvm_unknown = 0, + igc_nvm_eeprom_spi, +}; + +struct igc_info { + s32 (*get_invariants)(struct igc_hw *hw); + struct igc_mac_operations *mac_ops; + const struct igc_phy_operations *phy_ops; + struct igc_nvm_operations *nvm_ops; +}; + +extern const struct igc_info igc_base_info; + +struct igc_mac_info { + struct igc_mac_operations ops; + + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + enum igc_mac_type type; + + u32 mc_filter_type; + + u16 mta_reg_count; + u16 uta_reg_count; + + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + bool asf_firmware_present; + bool arc_subsystem_valid; + + bool autoneg; + bool autoneg_failed; + bool get_link_status; +}; + +struct igc_nvm_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*read)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*write)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + s32 (*update)(struct igc_hw *hw); + s32 (*validate)(struct igc_hw *hw); +}; + +struct igc_phy_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*check_reset_block)(struct igc_hw *hw); + s32 (*force_speed_duplex)(struct igc_hw *hw); + s32 (*get_phy_info)(struct igc_hw *hw); + s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*reset)(struct igc_hw *hw); + s32 (*write_reg)(struct igc_hw *hw, u32 address, u16 data); +}; + +struct igc_nvm_info { + struct igc_nvm_operations ops; + enum igc_nvm_type type; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct igc_phy_info { + struct igc_phy_operations ops; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum igc_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + + u8 mdix; + + bool is_mdix; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct igc_bus_info { + u16 func; + u16 pci_cmd_word; +}; + +enum igc_fc_mode { + igc_fc_none = 0, + igc_fc_rx_pause, + igc_fc_tx_pause, + igc_fc_full, + igc_fc_default = 0xFF +}; + +struct igc_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum igc_fc_mode current_mode; /* Type of flow control */ + enum igc_fc_mode requested_mode; +}; + +struct igc_dev_spec_base { + bool clear_semaphore_once; + bool eee_enable; +}; + +struct igc_hw { + void *back; + + u8 __iomem *hw_addr; + unsigned long io_base; + + struct igc_mac_info mac; + struct igc_fc_info fc; + struct igc_nvm_info nvm; + struct igc_phy_info phy; + + struct igc_bus_info bus; + + union { + struct igc_dev_spec_base _base; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +/* Statistics counters collected by the MAC */ +struct igc_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 tlpic; + u64 rlpic; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; 
+ u64 gotc;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 tor;
+ u64 tot;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 iac;
+ u64 htdpmc;
+ u64 rpthc;
+ u64 hgptc;
+ u64 hgorc;
+ u64 hgotc;
+ u64 lenerrs;
+ u64 scvpc;
+ u64 hrmpc;
+ u64 doosync;
+ u64 o2bgptc;
+ u64 o2bspc;
+ u64 b2ospc;
+ u64 b2ogprc;
+ u64 txdrop;
+};
+
+struct net_device *igc_get_hw_dev(struct igc_hw *hw);
+#define hw_dbg(format, arg...) \
+ netdev_dbg(igc_get_hw_dev(hw), format, ##arg)
+
+s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
+s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
+void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
+void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
+
+#endif /* _IGC_HW_H_ */
diff --git a/devices/igc/igc_hw-6.6-ethercat.h b/devices/igc/igc_hw-6.6-ethercat.h
new file mode 100644
index 00000000..72b7dfaf
--- /dev/null
+++ b/devices/igc/igc_hw-6.6-ethercat.h
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_HW_H_
+#define _IGC_HW_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#include "igc_regs-6.6-ethercat.h"
+#include "igc_defines-6.6-ethercat.h"
+#include "igc_mac-6.6-ethercat.h"
+#include "igc_phy-6.6-ethercat.h"
+#include "igc_nvm-6.6-ethercat.h"
+#include "igc_i225-6.6-ethercat.h"
+#include "igc_base-6.6-ethercat.h"
+
+#define IGC_DEV_ID_I225_LM 0x15F2
+#define IGC_DEV_ID_I225_V 0x15F3
+#define IGC_DEV_ID_I225_I 0x15F8
+#define IGC_DEV_ID_I220_V 0x15F7
+#define IGC_DEV_ID_I225_K 0x3100
+#define IGC_DEV_ID_I225_K2 0x3101
+#define IGC_DEV_ID_I226_K 0x3102
+#define IGC_DEV_ID_I225_LMVP 0x5502
+#define IGC_DEV_ID_I226_LMVP 0x5503
+#define IGC_DEV_ID_I225_IT 0x0D9F
+#define IGC_DEV_ID_I226_LM 0x125B
+#define IGC_DEV_ID_I226_V 0x125C
+#define IGC_DEV_ID_I226_IT 0x125D
+#define IGC_DEV_ID_I221_V 0x125E
+#define IGC_DEV_ID_I226_BLANK_NVM 0x125F
+#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD
+
+/* Function pointers for the MAC. */
+struct igc_mac_operations {
+ s32 (*check_for_link)(struct igc_hw *hw);
+ s32 (*reset_hw)(struct igc_hw *hw);
+ s32 (*init_hw)(struct igc_hw *hw);
+ s32 (*setup_physical_interface)(struct igc_hw *hw);
+ void (*rar_set)(struct igc_hw *hw, u8 *address, u32 index);
+ s32 (*read_mac_addr)(struct igc_hw *hw);
+ s32 (*get_speed_and_duplex)(struct igc_hw *hw, u16 *speed,
+ u16 *duplex);
+ s32 (*acquire_swfw_sync)(struct igc_hw *hw, u16 mask);
+ void (*release_swfw_sync)(struct igc_hw *hw, u16 mask);
+};
+
+enum igc_mac_type {
+ igc_undefined = 0,
+ igc_i225,
+ igc_num_macs /* List is 1-based, so subtract 1 for true count.
*/ +}; + +enum igc_media_type { + igc_media_type_unknown = 0, + igc_media_type_copper = 1, + igc_num_media_types +}; + +enum igc_nvm_type { + igc_nvm_unknown = 0, + igc_nvm_eeprom_spi, +}; + +struct igc_info { + s32 (*get_invariants)(struct igc_hw *hw); + struct igc_mac_operations *mac_ops; + const struct igc_phy_operations *phy_ops; + struct igc_nvm_operations *nvm_ops; +}; + +extern const struct igc_info igc_base_info; + +struct igc_mac_info { + struct igc_mac_operations ops; + + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + enum igc_mac_type type; + + u32 mc_filter_type; + + u16 mta_reg_count; + u16 uta_reg_count; + + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + bool asf_firmware_present; + bool arc_subsystem_valid; + + bool autoneg; + bool autoneg_failed; + bool get_link_status; +}; + +struct igc_nvm_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*read)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*write)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + s32 (*update)(struct igc_hw *hw); + s32 (*validate)(struct igc_hw *hw); +}; + +struct igc_phy_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*check_reset_block)(struct igc_hw *hw); + s32 (*force_speed_duplex)(struct igc_hw *hw); + s32 (*get_phy_info)(struct igc_hw *hw); + s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*reset)(struct igc_hw *hw); + s32 (*write_reg)(struct igc_hw *hw, u32 address, u16 data); +}; + +struct igc_nvm_info { + struct igc_nvm_operations ops; + enum igc_nvm_type type; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct igc_phy_info { + struct igc_phy_operations ops; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum igc_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + + u8 mdix; + + bool is_mdix; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct igc_bus_info { + u16 func; + u16 pci_cmd_word; +}; + +enum igc_fc_mode { + igc_fc_none = 0, + igc_fc_rx_pause, + igc_fc_tx_pause, + igc_fc_full, + igc_fc_default = 0xFF +}; + +struct igc_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum igc_fc_mode current_mode; /* Type of flow control */ + enum igc_fc_mode requested_mode; +}; + +struct igc_dev_spec_base { + bool clear_semaphore_once; + bool eee_enable; +}; + +struct igc_hw { + void *back; + + u8 __iomem *hw_addr; + unsigned long io_base; + + struct igc_mac_info mac; + struct igc_fc_info fc; + struct igc_nvm_info nvm; + struct igc_phy_info phy; + + struct igc_bus_info bus; + + union { + struct igc_dev_spec_base _base; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +/* Statistics counters collected by the MAC */ +struct igc_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 tlpic; + u64 rlpic; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; 
+ u64 gotc;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 tor;
+ u64 tot;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 iac;
+ u64 htdpmc;
+ u64 rpthc;
+ u64 hgptc;
+ u64 hgorc;
+ u64 hgotc;
+ u64 lenerrs;
+ u64 scvpc;
+ u64 hrmpc;
+ u64 doosync;
+ u64 o2bgptc;
+ u64 o2bspc;
+ u64 b2ospc;
+ u64 b2ogprc;
+ u64 txdrop;
+};
+
+struct net_device *igc_get_hw_dev(struct igc_hw *hw);
+#define hw_dbg(format, arg...) \
+ netdev_dbg(igc_get_hw_dev(hw), format, ##arg)
+
+s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
+s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
+void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
+void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
+
+#endif /* _IGC_HW_H_ */
diff --git a/devices/igc/igc_hw-6.6-orig.h b/devices/igc/igc_hw-6.6-orig.h
new file mode 100644
index 00000000..e1c572e0
--- /dev/null
+++ b/devices/igc/igc_hw-6.6-orig.h
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_HW_H_
+#define _IGC_HW_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#include "igc_regs.h"
+#include "igc_defines.h"
+#include "igc_mac.h"
+#include "igc_phy.h"
+#include "igc_nvm.h"
+#include "igc_i225.h"
+#include "igc_base.h"
+
+#define IGC_DEV_ID_I225_LM 0x15F2
+#define IGC_DEV_ID_I225_V 0x15F3
+#define IGC_DEV_ID_I225_I 0x15F8
+#define IGC_DEV_ID_I220_V 0x15F7
+#define IGC_DEV_ID_I225_K 0x3100
+#define IGC_DEV_ID_I225_K2 0x3101
+#define IGC_DEV_ID_I226_K 0x3102
+#define IGC_DEV_ID_I225_LMVP 0x5502
+#define IGC_DEV_ID_I226_LMVP 0x5503
+#define IGC_DEV_ID_I225_IT 0x0D9F
+#define IGC_DEV_ID_I226_LM 0x125B
+#define IGC_DEV_ID_I226_V 0x125C
+#define IGC_DEV_ID_I226_IT 0x125D
+#define IGC_DEV_ID_I221_V 0x125E
+#define IGC_DEV_ID_I226_BLANK_NVM 0x125F
+#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD
+
+/* Function pointers for the MAC. */
+struct igc_mac_operations {
+ s32 (*check_for_link)(struct igc_hw *hw);
+ s32 (*reset_hw)(struct igc_hw *hw);
+ s32 (*init_hw)(struct igc_hw *hw);
+ s32 (*setup_physical_interface)(struct igc_hw *hw);
+ void (*rar_set)(struct igc_hw *hw, u8 *address, u32 index);
+ s32 (*read_mac_addr)(struct igc_hw *hw);
+ s32 (*get_speed_and_duplex)(struct igc_hw *hw, u16 *speed,
+ u16 *duplex);
+ s32 (*acquire_swfw_sync)(struct igc_hw *hw, u16 mask);
+ void (*release_swfw_sync)(struct igc_hw *hw, u16 mask);
+};
+
+enum igc_mac_type {
+ igc_undefined = 0,
+ igc_i225,
+ igc_num_macs /* List is 1-based, so subtract 1 for true count.
*/ +}; + +enum igc_media_type { + igc_media_type_unknown = 0, + igc_media_type_copper = 1, + igc_num_media_types +}; + +enum igc_nvm_type { + igc_nvm_unknown = 0, + igc_nvm_eeprom_spi, +}; + +struct igc_info { + s32 (*get_invariants)(struct igc_hw *hw); + struct igc_mac_operations *mac_ops; + const struct igc_phy_operations *phy_ops; + struct igc_nvm_operations *nvm_ops; +}; + +extern const struct igc_info igc_base_info; + +struct igc_mac_info { + struct igc_mac_operations ops; + + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + enum igc_mac_type type; + + u32 mc_filter_type; + + u16 mta_reg_count; + u16 uta_reg_count; + + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + bool asf_firmware_present; + bool arc_subsystem_valid; + + bool autoneg; + bool autoneg_failed; + bool get_link_status; +}; + +struct igc_nvm_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*read)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*write)(struct igc_hw *hw, u16 offset, u16 i, u16 *data); + s32 (*update)(struct igc_hw *hw); + s32 (*validate)(struct igc_hw *hw); +}; + +struct igc_phy_operations { + s32 (*acquire)(struct igc_hw *hw); + s32 (*check_reset_block)(struct igc_hw *hw); + s32 (*force_speed_duplex)(struct igc_hw *hw); + s32 (*get_phy_info)(struct igc_hw *hw); + s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data); + void (*release)(struct igc_hw *hw); + s32 (*reset)(struct igc_hw *hw); + s32 (*write_reg)(struct igc_hw *hw, u32 address, u16 data); +}; + +struct igc_nvm_info { + struct igc_nvm_operations ops; + enum igc_nvm_type type; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct igc_phy_info { + struct igc_phy_operations ops; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum igc_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + + u8 mdix; + + bool is_mdix; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct igc_bus_info { + u16 func; + u16 pci_cmd_word; +}; + +enum igc_fc_mode { + igc_fc_none = 0, + igc_fc_rx_pause, + igc_fc_tx_pause, + igc_fc_full, + igc_fc_default = 0xFF +}; + +struct igc_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum igc_fc_mode current_mode; /* Type of flow control */ + enum igc_fc_mode requested_mode; +}; + +struct igc_dev_spec_base { + bool clear_semaphore_once; + bool eee_enable; +}; + +struct igc_hw { + void *back; + + u8 __iomem *hw_addr; + unsigned long io_base; + + struct igc_mac_info mac; + struct igc_fc_info fc; + struct igc_nvm_info nvm; + struct igc_phy_info phy; + + struct igc_bus_info bus; + + union { + struct igc_dev_spec_base _base; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +/* Statistics counters collected by the MAC */ +struct igc_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 tlpic; + u64 rlpic; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; 
+ u64 gotc;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 tor;
+ u64 tot;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 iac;
+ u64 htdpmc;
+ u64 rpthc;
+ u64 hgptc;
+ u64 hgorc;
+ u64 hgotc;
+ u64 lenerrs;
+ u64 scvpc;
+ u64 hrmpc;
+ u64 doosync;
+ u64 o2bgptc;
+ u64 o2bspc;
+ u64 b2ospc;
+ u64 b2ogprc;
+ u64 txdrop;
+};
+
+struct net_device *igc_get_hw_dev(struct igc_hw *hw);
+#define hw_dbg(format, arg...) \
+ netdev_dbg(igc_get_hw_dev(hw), format, ##arg)
+
+s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
+s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
+void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
+void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
+
+#endif /* _IGC_HW_H_ */
diff --git a/devices/igc/igc_i225-6.4-ethercat.c b/devices/igc/igc_i225-6.4-ethercat.c
new file mode 100644
index 00000000..ad36835f
--- /dev/null
+++ b/devices/igc/igc_i225-6.4-ethercat.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include <linux/delay.h>
+
+#include "igc_hw-6.4-ethercat.h"
+
+/**
+ * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the necessary semaphores for exclusive access to the EEPROM.
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -IGC_ERR_NVM (-1).
+ */
+static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
+{
+ return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
+}
+
+/**
+ * igc_release_nvm_i225 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ * then release the semaphores acquired.
+ */
+static void igc_release_nvm_i225(struct igc_hw *hw)
+{
+ igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
+}
+
+/**
+ * igc_get_hw_semaphore_i225 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ */
+static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw)
+{
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+ u32 swsm;
+
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = rd32(IGC_SWSM);
+ if (!(swsm & IGC_SWSM_SMBI))
+ break;
+
+ usleep_range(500, 600);
+ i++;
+ }
+
+ if (i == timeout) {
+ /* In rare circumstances, the SW semaphore may already be held
+ * unintentionally. Clear the semaphore once before giving up.
+ */
+ if (hw->dev_spec._base.clear_semaphore_once) {
+ hw->dev_spec._base.clear_semaphore_once = false;
+ igc_put_hw_semaphore(hw);
+ for (i = 0; i < timeout; i++) {
+ swsm = rd32(IGC_SWSM);
+ if (!(swsm & IGC_SWSM_SMBI))
+ break;
+
+ usleep_range(500, 600);
+ }
+ }
+
+ /* If we do not have the semaphore here, we have to give up. */
+ if (i == timeout) {
+ hw_dbg("Driver can't access device - SMBI bit is set.\n");
+ return -IGC_ERR_NVM;
+ }
+ }
+
+ /* Get the FW semaphore.
*/ + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI) + break; + + usleep_range(500, 600); + } + + if (i == timeout) { + /* Release semaphores */ + igc_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + return -IGC_ERR_NVM; + } + + return 0; +} + +/** + * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + */ +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + s32 i = 0, timeout = 200; + u32 fwmask = mask << 16; + u32 swmask = mask; + s32 ret_val = 0; + u32 swfw_sync; + + while (i < timeout) { + if (igc_get_hw_semaphore_i225(hw)) { + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(IGC_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) */ + igc_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +out: + return ret_val; +} + +/** + * igc_release_swfw_sync_i225 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + */ +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + u32 swfw_sync; + + /* Releasing the resource requires first getting the HW semaphore. + * If we fail to get the semaphore, there is nothing we can do, + * except log an error and quit. We are not allowed to hang here + * indefinitely, as it may cause denial of service or system crash. + */ + if (igc_get_hw_semaphore_i225(hw)) { + hw_dbg("Failed to release SW_FW_SYNC.\n"); + return; + } + + swfw_sync = rd32(IGC_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +} + +/** + * igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + */ +static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? 
+ IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_read_nvm_eerd(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + */ +static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, k, eewr = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + return ret_val; + } + + for (i = 0; i < words; i++) { + ret_val = -IGC_ERR_NVM; + eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) | + (data[i] << IGC_NVM_RW_REG_DATA) | + IGC_NVM_RW_REG_START; + + wr32(IGC_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (IGC_NVM_RW_REG_DONE & + rd32(IGC_SRWR)) { + ret_val = 0; + break; + } + udelay(5); + } + + if (ret_val) { + hw_dbg("Shadow RAM write EEWR timed out\n"); + break; + } + } + + return ret_val; +} + +/** + * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + */ +static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? + IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_write_nvm_srwr(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw) +{ + s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count, + u16 *data); + s32 status = 0; + + status = hw->nvm.ops.acquire(hw); + if (status) + goto out; + + /* Replace the read function with semaphore grabbing with + * the one that skips this for a while. 
+ * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igc_read_nvm_eerd; + + status = igc_validate_nvm_checksum(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + +out: + return status; +} + +/** + * igc_pool_flash_update_done_i225 - Pool FLUDONE status + * @hw: pointer to the HW structure + */ +static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 i, reg; + + for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) { + reg = rd32(IGC_EECD); + if (reg & IGC_EECD_FLUDONE_I225) { + ret_val = 0; + break; + } + udelay(5); + } + + return ret_val; +} + +/** + * igc_update_flash_i225 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + */ +static s32 igc_update_flash_i225(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 flup; + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val == -IGC_ERR_NVM) { + hw_dbg("Flash update time out\n"); + goto out; + } + + flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225; + wr32(IGC_EECD, flup); + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val) + hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum_i225 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + */ +static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw) +{ + u16 checksum = 0; + s32 ret_val = 0; + u16 i, nvm_data; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val) { + hw_dbg("EEPROM read failed\n"); + goto out; + } + + ret_val = hw->nvm.ops.acquire(hw); + if (ret_val) + goto out; + + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. + */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igc_update_flash_i225(hw); + +out: + return ret_val; +} + +/** + * igc_get_flash_presence_i225 - Check if flash device is detected + * @hw: pointer to the HW structure + */ +bool igc_get_flash_presence_i225(struct igc_hw *hw) +{ + bool ret_val = false; + u32 eec = 0; + + eec = rd32(IGC_EECD); + if (eec & IGC_EECD_FLASH_DETECTED_I225) + ret_val = true; + + return ret_val; +} + +/** + * igc_init_nvm_params_i225 - Init NVM func ptrs. 
+ * @hw: pointer to the HW structure + */ +s32 igc_init_nvm_params_i225(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + + nvm->ops.acquire = igc_acquire_nvm_i225; + nvm->ops.release = igc_release_nvm_i225; + + /* NVM Function Pointers */ + if (igc_get_flash_presence_i225(hw)) { + nvm->ops.read = igc_read_nvm_srrd_i225; + nvm->ops.write = igc_write_nvm_srwr_i225; + nvm->ops.validate = igc_validate_nvm_checksum_i225; + nvm->ops.update = igc_update_nvm_checksum_i225; + } else { + nvm->ops.read = igc_read_nvm_eerd; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return 0; +} + +/** + * igc_set_eee_i225 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv2p5G: boolean flag enabling 2.5G EEE advertisement + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. + **/ +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M) +{ + u32 ipcnfg, eeer; + + ipcnfg = rd32(IGC_IPCNFG); + eeer = rd32(IGC_EEER); + + /* enable or disable per user setting */ + if (hw->dev_spec._base.eee_enable) { + u32 eee_su = rd32(IGC_EEE_SU); + + if (adv100M) + ipcnfg |= IGC_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= IGC_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN; + + if (adv2p5G) + ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN; + + eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & IGC_EEE_SU_LPI_CLK_STP) + hw_dbg("LPI Clock Stop Bit should not be set!\n"); + } else { + ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN | + IGC_IPCNFG_EEE_100M_AN); + eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + } + wr32(IGC_IPCNFG, ipcnfg); + wr32(IGC_EEER, eeer); + rd32(IGC_IPCNFG); + rd32(IGC_EEER); + + return IGC_SUCCESS; +} + +/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds + * @hw: pointer to the HW structure + * @link: bool indicating link status + * + * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC + * settings, otherwise specify that there is no LTR requirement. + */ +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link) +{ + u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max; + u16 speed, duplex; + s32 size; + + /* If we do not have link, LTR thresholds are zero. */ + if (link) { + hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + + /* Check if using copper interface with EEE enabled or if the + * link speed is 10 Mbps. + */ + if (hw->dev_spec._base.eee_enable && + speed != SPEED_10) { + /* EEE enabled, so send LTRMAX threshold. */ + ltrc = rd32(IGC_LTRC) | + IGC_LTRC_EEEMS_EN; + wr32(IGC_LTRC, ltrc); + + /* Calculate tw_system (nsec). */ + if (speed == SPEED_100) { + tw_system = ((rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_100_MASK) >> + IGC_TW_SYSTEM_100_SHIFT) * 500; + } else { + tw_system = (rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_1000_MASK) * 500; + } + } else { + tw_system = 0; + } + + /* Get the Rx packet buffer size. */ + size = rd32(IGC_RXPBS) & + IGC_RXPBS_SIZE_I225_MASK; + + /* Convert size to bytes, subtract the MTU, and then + * convert the size to bits. + */ + size *= 1024; + size *= 8; + + if (size < 0) { + hw_dbg("Invalid effective Rx buffer size %d\n", + size); + return -IGC_ERR_CONFIG; + } + + /* Calculate the thresholds. 
Since speed is in Mbps, simplify
+ * the calculation by multiplying size/speed by 1000 for result
+ * to be in nsec before dividing by the scale in nsec. Set the
+ * scale such that the LTR threshold fits in the register.
+ */
+ ltr_min = (1000 * size) / speed;
+ ltr_max = ltr_min + tw_system;
+ scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 :
+ IGC_LTRMINV_SCALE_32768;
+ scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 :
+ IGC_LTRMAXV_SCALE_32768;
+ ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768;
+ ltr_min -= 1;
+ ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768;
+ ltr_max -= 1;
+
+ /* Only write the LTR thresholds if they differ from before. */
+ ltrv = rd32(IGC_LTRMINV);
+ if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) {
+ ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min |
+ (scale_min << IGC_LTRMINV_SCALE_SHIFT);
+ wr32(IGC_LTRMINV, ltrv);
+ }
+
+ ltrv = rd32(IGC_LTRMAXV);
+ if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
+ ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
+ (scale_max << IGC_LTRMAXV_SCALE_SHIFT);
+ wr32(IGC_LTRMAXV, ltrv);
+ }
+ }
+
+ return IGC_SUCCESS;
+}
diff --git a/devices/igc/igc_i225-6.4-ethercat.h b/devices/igc/igc_i225-6.4-ethercat.h
new file mode 100644
index 00000000..dae47e4f
--- /dev/null
+++ b/devices/igc/igc_i225-6.4-ethercat.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_I225_H_
+#define _IGC_I225_H_
+
+s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask);
+void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask);
+
+s32 igc_init_nvm_params_i225(struct igc_hw *hw);
+bool igc_get_flash_presence_i225(struct igc_hw *hw);
+s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
+ bool adv100M);
+s32 igc_set_ltr_i225(struct igc_hw *hw, bool link);
+
+#endif
diff --git a/devices/igc/igc_i225-6.4-orig.c b/devices/igc/igc_i225-6.4-orig.c
new file mode 100644
index 00000000..17546a03
--- /dev/null
+++ b/devices/igc/igc_i225-6.4-orig.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include <linux/delay.h>
+
+#include "igc_hw.h"
+
+/**
+ * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the necessary semaphores for exclusive access to the EEPROM.
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -IGC_ERR_NVM (-1).
+ */
+static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
+{
+ return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
+}
+
+/**
+ * igc_release_nvm_i225 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ * then release the semaphores acquired.
+ */ +static void igc_release_nvm_i225(struct igc_hw *hw) +{ + igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); +} + +/** + * igc_get_hw_semaphore_i225 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + */ +static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw) +{ + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + u32 swsm; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usleep_range(500, 600); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._base.clear_semaphore_once) { + hw->dev_spec._base.clear_semaphore_once = false; + igc_put_hw_semaphore(hw); + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usleep_range(500, 600); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + return -IGC_ERR_NVM; + } + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI) + break; + + usleep_range(500, 600); + } + + if (i == timeout) { + /* Release semaphores */ + igc_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + return -IGC_ERR_NVM; + } + + return 0; +} + +/** + * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + */ +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + s32 i = 0, timeout = 200; + u32 fwmask = mask << 16; + u32 swmask = mask; + s32 ret_val = 0; + u32 swfw_sync; + + while (i < timeout) { + if (igc_get_hw_semaphore_i225(hw)) { + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(IGC_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) */ + igc_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +out: + return ret_val; +} + +/** + * igc_release_swfw_sync_i225 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + */ +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + u32 swfw_sync; + + /* Releasing the resource requires first getting the HW semaphore. + * If we fail to get the semaphore, there is nothing we can do, + * except log an error and quit. We are not allowed to hang here + * indefinitely, as it may cause denial of service or system crash. 
+ */ + if (igc_get_hw_semaphore_i225(hw)) { + hw_dbg("Failed to release SW_FW_SYNC.\n"); + return; + } + + swfw_sync = rd32(IGC_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +} + +/** + * igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + */ +static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? + IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_read_nvm_eerd(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + */ +static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, k, eewr = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + return ret_val; + } + + for (i = 0; i < words; i++) { + ret_val = -IGC_ERR_NVM; + eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) | + (data[i] << IGC_NVM_RW_REG_DATA) | + IGC_NVM_RW_REG_START; + + wr32(IGC_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (IGC_NVM_RW_REG_DONE & + rd32(IGC_SRWR)) { + ret_val = 0; + break; + } + udelay(5); + } + + if (ret_val) { + hw_dbg("Shadow RAM write EEWR timed out\n"); + break; + } + } + + return ret_val; +} + +/** + * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + */ +static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. 
However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? + IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_write_nvm_srwr(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw) +{ + s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count, + u16 *data); + s32 status = 0; + + status = hw->nvm.ops.acquire(hw); + if (status) + goto out; + + /* Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igc_read_nvm_eerd; + + status = igc_validate_nvm_checksum(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + +out: + return status; +} + +/** + * igc_pool_flash_update_done_i225 - Pool FLUDONE status + * @hw: pointer to the HW structure + */ +static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 i, reg; + + for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) { + reg = rd32(IGC_EECD); + if (reg & IGC_EECD_FLUDONE_I225) { + ret_val = 0; + break; + } + udelay(5); + } + + return ret_val; +} + +/** + * igc_update_flash_i225 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + */ +static s32 igc_update_flash_i225(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 flup; + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val == -IGC_ERR_NVM) { + hw_dbg("Flash update time out\n"); + goto out; + } + + flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225; + wr32(IGC_EECD, flup); + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val) + hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum_i225 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + */ +static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw) +{ + u16 checksum = 0; + s32 ret_val = 0; + u16 i, nvm_data; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val) { + hw_dbg("EEPROM read failed\n"); + goto out; + } + + ret_val = hw->nvm.ops.acquire(hw); + if (ret_val) + goto out; + + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. 
+ */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igc_update_flash_i225(hw); + +out: + return ret_val; +} + +/** + * igc_get_flash_presence_i225 - Check if flash device is detected + * @hw: pointer to the HW structure + */ +bool igc_get_flash_presence_i225(struct igc_hw *hw) +{ + bool ret_val = false; + u32 eec = 0; + + eec = rd32(IGC_EECD); + if (eec & IGC_EECD_FLASH_DETECTED_I225) + ret_val = true; + + return ret_val; +} + +/** + * igc_init_nvm_params_i225 - Init NVM func ptrs. + * @hw: pointer to the HW structure + */ +s32 igc_init_nvm_params_i225(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + + nvm->ops.acquire = igc_acquire_nvm_i225; + nvm->ops.release = igc_release_nvm_i225; + + /* NVM Function Pointers */ + if (igc_get_flash_presence_i225(hw)) { + nvm->ops.read = igc_read_nvm_srrd_i225; + nvm->ops.write = igc_write_nvm_srwr_i225; + nvm->ops.validate = igc_validate_nvm_checksum_i225; + nvm->ops.update = igc_update_nvm_checksum_i225; + } else { + nvm->ops.read = igc_read_nvm_eerd; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return 0; +} + +/** + * igc_set_eee_i225 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv2p5G: boolean flag enabling 2.5G EEE advertisement + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. + **/ +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M) +{ + u32 ipcnfg, eeer; + + ipcnfg = rd32(IGC_IPCNFG); + eeer = rd32(IGC_EEER); + + /* enable or disable per user setting */ + if (hw->dev_spec._base.eee_enable) { + u32 eee_su = rd32(IGC_EEE_SU); + + if (adv100M) + ipcnfg |= IGC_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= IGC_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN; + + if (adv2p5G) + ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN; + + eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & IGC_EEE_SU_LPI_CLK_STP) + hw_dbg("LPI Clock Stop Bit should not be set!\n"); + } else { + ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN | + IGC_IPCNFG_EEE_100M_AN); + eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + } + wr32(IGC_IPCNFG, ipcnfg); + wr32(IGC_EEER, eeer); + rd32(IGC_IPCNFG); + rd32(IGC_EEER); + + return IGC_SUCCESS; +} + +/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds + * @hw: pointer to the HW structure + * @link: bool indicating link status + * + * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC + * settings, otherwise specify that there is no LTR requirement. + */ +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link) +{ + u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max; + u16 speed, duplex; + s32 size; + + /* If we do not have link, LTR thresholds are zero. 
*/ + if (link) { + hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + + /* Check if using copper interface with EEE enabled or if the + * link speed is 10 Mbps. + */ + if (hw->dev_spec._base.eee_enable && + speed != SPEED_10) { + /* EEE enabled, so send LTRMAX threshold. */ + ltrc = rd32(IGC_LTRC) | + IGC_LTRC_EEEMS_EN; + wr32(IGC_LTRC, ltrc); + + /* Calculate tw_system (nsec). */ + if (speed == SPEED_100) { + tw_system = ((rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_100_MASK) >> + IGC_TW_SYSTEM_100_SHIFT) * 500; + } else { + tw_system = (rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_1000_MASK) * 500; + } + } else { + tw_system = 0; + } + + /* Get the Rx packet buffer size. */ + size = rd32(IGC_RXPBS) & + IGC_RXPBS_SIZE_I225_MASK; + + /* Convert size to bytes, subtract the MTU, and then + * convert the size to bits. + */ + size *= 1024; + size *= 8; + + if (size < 0) { + hw_dbg("Invalid effective Rx buffer size %d\n", + size); + return -IGC_ERR_CONFIG; + } + + /* Calculate the thresholds. Since speed is in Mbps, simplify + * the calculation by multiplying size/speed by 1000 for result + * to be in nsec before dividing by the scale in nsec. Set the + * scale such that the LTR threshold fits in the register. + */ + ltr_min = (1000 * size) / speed; + ltr_max = ltr_min + tw_system; + scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 : + IGC_LTRMINV_SCALE_32768; + scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 : + IGC_LTRMAXV_SCALE_32768; + ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768; + ltr_min -= 1; + ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768; + ltr_max -= 1; + + /* Only write the LTR thresholds if they differ from before. */ + ltrv = rd32(IGC_LTRMINV); + if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) { + ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min | + (scale_min << IGC_LTRMINV_SCALE_SHIFT); + wr32(IGC_LTRMINV, ltrv); + } + + ltrv = rd32(IGC_LTRMAXV); + if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) { + ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max | + (scale_max << IGC_LTRMAXV_SCALE_SHIFT); + wr32(IGC_LTRMAXV, ltrv); + } + } + + return IGC_SUCCESS; +} diff --git a/devices/igc/igc_i225-6.4-orig.h b/devices/igc/igc_i225-6.4-orig.h new file mode 100644 index 00000000..dae47e4f --- /dev/null +++ b/devices/igc/igc_i225-6.4-orig.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_I225_H_ +#define _IGC_I225_H_ + +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask); +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask); + +s32 igc_init_nvm_params_i225(struct igc_hw *hw); +bool igc_get_flash_presence_i225(struct igc_hw *hw); +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M); +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link); + +#endif diff --git a/devices/igc/igc_i225-6.6-ethercat.c b/devices/igc/igc_i225-6.6-ethercat.c new file mode 100644 index 00000000..f7a1902c --- /dev/null +++ b/devices/igc/igc_i225-6.6-ethercat.c @@ -0,0 +1,641 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include + +#include "igc_hw-6.6-ethercat.h" + +/** + * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). 
+ */ +static s32 igc_acquire_nvm_i225(struct igc_hw *hw) +{ + return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); +} + +/** + * igc_release_nvm_i225 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + */ +static void igc_release_nvm_i225(struct igc_hw *hw) +{ + igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); +} + +/** + * igc_get_hw_semaphore_i225 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + */ +static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw) +{ + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + u32 swsm; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usleep_range(500, 600); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._base.clear_semaphore_once) { + hw->dev_spec._base.clear_semaphore_once = false; + igc_put_hw_semaphore(hw); + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usleep_range(500, 600); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + return -IGC_ERR_NVM; + } + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI) + break; + + usleep_range(500, 600); + } + + if (i == timeout) { + /* Release semaphores */ + igc_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + return -IGC_ERR_NVM; + } + + return 0; +} + +/** + * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + */ +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + s32 i = 0, timeout = 200; + u32 fwmask = mask << 16; + u32 swmask = mask; + s32 ret_val = 0; + u32 swfw_sync; + + while (i < timeout) { + if (igc_get_hw_semaphore_i225(hw)) { + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(IGC_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) */ + igc_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +out: + return ret_val; +} + +/** + * igc_release_swfw_sync_i225 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + */ +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + u32 swfw_sync; + + /* Releasing the resource requires first getting the HW semaphore. + * If we fail to get the semaphore, there is nothing we can do, + * except log an error and quit. 
We are not allowed to hang here + * indefinitely, as it may cause denial of service or system crash. + */ + if (igc_get_hw_semaphore_i225(hw)) { + hw_dbg("Failed to release SW_FW_SYNC.\n"); + return; + } + + swfw_sync = rd32(IGC_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +} + +/** + * igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + */ +static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? + IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_read_nvm_eerd(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + */ +static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, k, eewr = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + return ret_val; + } + + for (i = 0; i < words; i++) { + ret_val = -IGC_ERR_NVM; + eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) | + (data[i] << IGC_NVM_RW_REG_DATA) | + IGC_NVM_RW_REG_START; + + wr32(IGC_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (IGC_NVM_RW_REG_DONE & + rd32(IGC_SRWR)) { + ret_val = 0; + break; + } + udelay(5); + } + + if (ret_val) { + hw_dbg("Shadow RAM write EEWR timed out\n"); + break; + } + } + + return ret_val; +} + +/** + * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. 
+ */ +static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? + IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_write_nvm_srwr(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw) +{ + s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count, + u16 *data); + s32 status = 0; + + status = hw->nvm.ops.acquire(hw); + if (status) + goto out; + + /* Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igc_read_nvm_eerd; + + status = igc_validate_nvm_checksum(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + +out: + return status; +} + +/** + * igc_pool_flash_update_done_i225 - Pool FLUDONE status + * @hw: pointer to the HW structure + */ +static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 i, reg; + + for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) { + reg = rd32(IGC_EECD); + if (reg & IGC_EECD_FLUDONE_I225) { + ret_val = 0; + break; + } + udelay(5); + } + + return ret_val; +} + +/** + * igc_update_flash_i225 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + */ +static s32 igc_update_flash_i225(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 flup; + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val == -IGC_ERR_NVM) { + hw_dbg("Flash update time out\n"); + goto out; + } + + flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225; + wr32(IGC_EECD, flup); + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val) + hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum_i225 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + */ +static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw) +{ + u16 checksum = 0; + s32 ret_val = 0; + u16 i, nvm_data; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val) { + hw_dbg("EEPROM read failed\n"); + goto out; + } + + ret_val = hw->nvm.ops.acquire(hw); + if (ret_val) + goto out; + + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. 
+ */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igc_update_flash_i225(hw); + +out: + return ret_val; +} + +/** + * igc_get_flash_presence_i225 - Check if flash device is detected + * @hw: pointer to the HW structure + */ +bool igc_get_flash_presence_i225(struct igc_hw *hw) +{ + bool ret_val = false; + u32 eec = 0; + + eec = rd32(IGC_EECD); + if (eec & IGC_EECD_FLASH_DETECTED_I225) + ret_val = true; + + return ret_val; +} + +/** + * igc_init_nvm_params_i225 - Init NVM func ptrs. + * @hw: pointer to the HW structure + */ +s32 igc_init_nvm_params_i225(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + + nvm->ops.acquire = igc_acquire_nvm_i225; + nvm->ops.release = igc_release_nvm_i225; + + /* NVM Function Pointers */ + if (igc_get_flash_presence_i225(hw)) { + nvm->ops.read = igc_read_nvm_srrd_i225; + nvm->ops.write = igc_write_nvm_srwr_i225; + nvm->ops.validate = igc_validate_nvm_checksum_i225; + nvm->ops.update = igc_update_nvm_checksum_i225; + } else { + nvm->ops.read = igc_read_nvm_eerd; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return 0; +} + +/** + * igc_set_eee_i225 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv2p5G: boolean flag enabling 2.5G EEE advertisement + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. + **/ +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M) +{ + u32 ipcnfg, eeer; + + ipcnfg = rd32(IGC_IPCNFG); + eeer = rd32(IGC_EEER); + + /* enable or disable per user setting */ + if (hw->dev_spec._base.eee_enable) { + u32 eee_su = rd32(IGC_EEE_SU); + + if (adv100M) + ipcnfg |= IGC_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= IGC_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN; + + if (adv2p5G) + ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN; + + eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & IGC_EEE_SU_LPI_CLK_STP) + hw_dbg("LPI Clock Stop Bit should not be set!\n"); + } else { + ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN | + IGC_IPCNFG_EEE_100M_AN); + eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + } + wr32(IGC_IPCNFG, ipcnfg); + wr32(IGC_EEER, eeer); + rd32(IGC_IPCNFG); + rd32(IGC_EEER); + + return IGC_SUCCESS; +} + +/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds + * @hw: pointer to the HW structure + * @link: bool indicating link status + * + * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC + * settings, otherwise specify that there is no LTR requirement. + */ +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link) +{ + u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max; + u16 speed, duplex; + s32 size; + + /* If we do not have link, LTR thresholds are zero. 
*/ + if (link) { + hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + + /* Check if using copper interface with EEE enabled or if the + * link speed is 10 Mbps. + */ + if (hw->dev_spec._base.eee_enable && + speed != SPEED_10) { + /* EEE enabled, so send LTRMAX threshold. */ + ltrc = rd32(IGC_LTRC) | + IGC_LTRC_EEEMS_EN; + wr32(IGC_LTRC, ltrc); + + /* Calculate tw_system (nsec). */ + if (speed == SPEED_100) { + tw_system = ((rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_100_MASK) >> + IGC_TW_SYSTEM_100_SHIFT) * 500; + } else { + tw_system = (rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_1000_MASK) * 500; + } + } else { + tw_system = 0; + } + + /* Get the Rx packet buffer size. */ + size = rd32(IGC_RXPBS) & + IGC_RXPBS_SIZE_I225_MASK; + + /* Convert size to bytes, subtract the MTU, and then + * convert the size to bits. + */ + size *= 1024; + size *= 8; + + if (size < 0) { + hw_dbg("Invalid effective Rx buffer size %d\n", + size); + return -IGC_ERR_CONFIG; + } + + /* Calculate the thresholds. Since speed is in Mbps, simplify + * the calculation by multiplying size/speed by 1000 for result + * to be in nsec before dividing by the scale in nsec. Set the + * scale such that the LTR threshold fits in the register. + */ + ltr_min = (1000 * size) / speed; + ltr_max = ltr_min + tw_system; + scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 : + IGC_LTRMINV_SCALE_32768; + scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 : + IGC_LTRMAXV_SCALE_32768; + ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768; + ltr_min -= 1; + ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768; + ltr_max -= 1; + + /* Only write the LTR thresholds if they differ from before. */ + ltrv = rd32(IGC_LTRMINV); + if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) { + ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min | + (scale_min << IGC_LTRMINV_SCALE_SHIFT); + wr32(IGC_LTRMINV, ltrv); + } + + ltrv = rd32(IGC_LTRMAXV); + if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) { + ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max | + (scale_max << IGC_LTRMAXV_SCALE_SHIFT); + wr32(IGC_LTRMAXV, ltrv); + } + } + + return IGC_SUCCESS; +} diff --git a/devices/igc/igc_i225-6.6-ethercat.h b/devices/igc/igc_i225-6.6-ethercat.h new file mode 100644 index 00000000..dae47e4f --- /dev/null +++ b/devices/igc/igc_i225-6.6-ethercat.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_I225_H_ +#define _IGC_I225_H_ + +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask); +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask); + +s32 igc_init_nvm_params_i225(struct igc_hw *hw); +bool igc_get_flash_presence_i225(struct igc_hw *hw); +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M); +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link); + +#endif diff --git a/devices/igc/igc_i225-6.6-orig.c b/devices/igc/igc_i225-6.6-orig.c new file mode 100644 index 00000000..17546a03 --- /dev/null +++ b/devices/igc/igc_i225-6.6-orig.c @@ -0,0 +1,641 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include + +#include "igc_hw.h" + +/** + * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). 
+ */ +static s32 igc_acquire_nvm_i225(struct igc_hw *hw) +{ + return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); +} + +/** + * igc_release_nvm_i225 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + */ +static void igc_release_nvm_i225(struct igc_hw *hw) +{ + igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM); +} + +/** + * igc_get_hw_semaphore_i225 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + */ +static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw) +{ + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + u32 swsm; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usleep_range(500, 600); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._base.clear_semaphore_once) { + hw->dev_spec._base.clear_semaphore_once = false; + igc_put_hw_semaphore(hw); + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + if (!(swsm & IGC_SWSM_SMBI)) + break; + + usleep_range(500, 600); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + return -IGC_ERR_NVM; + } + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(IGC_SWSM); + wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI) + break; + + usleep_range(500, 600); + } + + if (i == timeout) { + /* Release semaphores */ + igc_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + return -IGC_ERR_NVM; + } + + return 0; +} + +/** + * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + */ +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + s32 i = 0, timeout = 200; + u32 fwmask = mask << 16; + u32 swmask = mask; + s32 ret_val = 0; + u32 swfw_sync; + + while (i < timeout) { + if (igc_get_hw_semaphore_i225(hw)) { + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(IGC_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) */ + igc_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -IGC_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +out: + return ret_val; +} + +/** + * igc_release_swfw_sync_i225 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + */ +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask) +{ + u32 swfw_sync; + + /* Releasing the resource requires first getting the HW semaphore. + * If we fail to get the semaphore, there is nothing we can do, + * except log an error and quit. 
We are not allowed to hang here + * indefinitely, as it may cause denial of service or system crash. + */ + if (igc_get_hw_semaphore_i225(hw)) { + hw_dbg("Failed to release SW_FW_SYNC.\n"); + return; + } + + swfw_sync = rd32(IGC_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(IGC_SW_FW_SYNC, swfw_sync); + + igc_put_hw_semaphore(hw); +} + +/** + * igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + */ +static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? + IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_read_nvm_eerd(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + */ +static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, k, eewr = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + return ret_val; + } + + for (i = 0; i < words; i++) { + ret_val = -IGC_ERR_NVM; + eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) | + (data[i] << IGC_NVM_RW_REG_DATA) | + IGC_NVM_RW_REG_START; + + wr32(IGC_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (IGC_NVM_RW_REG_DONE & + rd32(IGC_SRWR)) { + ret_val = 0; + break; + } + udelay(5); + } + + if (ret_val) { + hw_dbg("Shadow RAM write EEWR timed out\n"); + break; + } + } + + return ret_val; +} + +/** + * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If igc_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. 
+ */ +static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) { + count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ? + IGC_EERD_EEWR_MAX_COUNT : (words - i); + + status = hw->nvm.ops.acquire(hw); + if (status) + break; + + status = igc_write_nvm_srwr(hw, offset, count, data + i); + hw->nvm.ops.release(hw); + if (status) + break; + } + + return status; +} + +/** + * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw) +{ + s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count, + u16 *data); + s32 status = 0; + + status = hw->nvm.ops.acquire(hw); + if (status) + goto out; + + /* Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igc_read_nvm_eerd; + + status = igc_validate_nvm_checksum(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + +out: + return status; +} + +/** + * igc_pool_flash_update_done_i225 - Pool FLUDONE status + * @hw: pointer to the HW structure + */ +static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 i, reg; + + for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) { + reg = rd32(IGC_EECD); + if (reg & IGC_EECD_FLUDONE_I225) { + ret_val = 0; + break; + } + udelay(5); + } + + return ret_val; +} + +/** + * igc_update_flash_i225 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + */ +static s32 igc_update_flash_i225(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 flup; + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val == -IGC_ERR_NVM) { + hw_dbg("Flash update time out\n"); + goto out; + } + + flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225; + wr32(IGC_EECD, flup); + + ret_val = igc_pool_flash_update_done_i225(hw); + if (ret_val) + hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum_i225 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + */ +static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw) +{ + u16 checksum = 0; + s32 ret_val = 0; + u16 i, nvm_data; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val) { + hw_dbg("EEPROM read failed\n"); + goto out; + } + + ret_val = hw->nvm.ops.acquire(hw); + if (ret_val) + goto out; + + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. 
+ */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igc_update_flash_i225(hw); + +out: + return ret_val; +} + +/** + * igc_get_flash_presence_i225 - Check if flash device is detected + * @hw: pointer to the HW structure + */ +bool igc_get_flash_presence_i225(struct igc_hw *hw) +{ + bool ret_val = false; + u32 eec = 0; + + eec = rd32(IGC_EECD); + if (eec & IGC_EECD_FLASH_DETECTED_I225) + ret_val = true; + + return ret_val; +} + +/** + * igc_init_nvm_params_i225 - Init NVM func ptrs. + * @hw: pointer to the HW structure + */ +s32 igc_init_nvm_params_i225(struct igc_hw *hw) +{ + struct igc_nvm_info *nvm = &hw->nvm; + + nvm->ops.acquire = igc_acquire_nvm_i225; + nvm->ops.release = igc_release_nvm_i225; + + /* NVM Function Pointers */ + if (igc_get_flash_presence_i225(hw)) { + nvm->ops.read = igc_read_nvm_srrd_i225; + nvm->ops.write = igc_write_nvm_srwr_i225; + nvm->ops.validate = igc_validate_nvm_checksum_i225; + nvm->ops.update = igc_update_nvm_checksum_i225; + } else { + nvm->ops.read = igc_read_nvm_eerd; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return 0; +} + +/** + * igc_set_eee_i225 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv2p5G: boolean flag enabling 2.5G EEE advertisement + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. + **/ +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M) +{ + u32 ipcnfg, eeer; + + ipcnfg = rd32(IGC_IPCNFG); + eeer = rd32(IGC_EEER); + + /* enable or disable per user setting */ + if (hw->dev_spec._base.eee_enable) { + u32 eee_su = rd32(IGC_EEE_SU); + + if (adv100M) + ipcnfg |= IGC_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= IGC_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN; + + if (adv2p5G) + ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN; + + eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & IGC_EEE_SU_LPI_CLK_STP) + hw_dbg("LPI Clock Stop Bit should not be set!\n"); + } else { + ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN | + IGC_IPCNFG_EEE_100M_AN); + eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + } + wr32(IGC_IPCNFG, ipcnfg); + wr32(IGC_EEER, eeer); + rd32(IGC_IPCNFG); + rd32(IGC_EEER); + + return IGC_SUCCESS; +} + +/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds + * @hw: pointer to the HW structure + * @link: bool indicating link status + * + * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC + * settings, otherwise specify that there is no LTR requirement. + */ +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link) +{ + u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max; + u16 speed, duplex; + s32 size; + + /* If we do not have link, LTR thresholds are zero. 
*/ + if (link) { + hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + + /* Check if using copper interface with EEE enabled or if the + * link speed is 10 Mbps. + */ + if (hw->dev_spec._base.eee_enable && + speed != SPEED_10) { + /* EEE enabled, so send LTRMAX threshold. */ + ltrc = rd32(IGC_LTRC) | + IGC_LTRC_EEEMS_EN; + wr32(IGC_LTRC, ltrc); + + /* Calculate tw_system (nsec). */ + if (speed == SPEED_100) { + tw_system = ((rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_100_MASK) >> + IGC_TW_SYSTEM_100_SHIFT) * 500; + } else { + tw_system = (rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_1000_MASK) * 500; + } + } else { + tw_system = 0; + } + + /* Get the Rx packet buffer size. */ + size = rd32(IGC_RXPBS) & + IGC_RXPBS_SIZE_I225_MASK; + + /* Convert size to bytes, subtract the MTU, and then + * convert the size to bits. + */ + size *= 1024; + size *= 8; + + if (size < 0) { + hw_dbg("Invalid effective Rx buffer size %d\n", + size); + return -IGC_ERR_CONFIG; + } + + /* Calculate the thresholds. Since speed is in Mbps, simplify + * the calculation by multiplying size/speed by 1000 for result + * to be in nsec before dividing by the scale in nsec. Set the + * scale such that the LTR threshold fits in the register. + */ + ltr_min = (1000 * size) / speed; + ltr_max = ltr_min + tw_system; + scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 : + IGC_LTRMINV_SCALE_32768; + scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 : + IGC_LTRMAXV_SCALE_32768; + ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768; + ltr_min -= 1; + ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768; + ltr_max -= 1; + + /* Only write the LTR thresholds if they differ from before. */ + ltrv = rd32(IGC_LTRMINV); + if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) { + ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min | + (scale_min << IGC_LTRMINV_SCALE_SHIFT); + wr32(IGC_LTRMINV, ltrv); + } + + ltrv = rd32(IGC_LTRMAXV); + if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) { + ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max | + (scale_max << IGC_LTRMAXV_SCALE_SHIFT); + wr32(IGC_LTRMAXV, ltrv); + } + } + + return IGC_SUCCESS; +} diff --git a/devices/igc/igc_i225-6.6-orig.h b/devices/igc/igc_i225-6.6-orig.h new file mode 100644 index 00000000..dae47e4f --- /dev/null +++ b/devices/igc/igc_i225-6.6-orig.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_I225_H_ +#define _IGC_I225_H_ + +s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask); +void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask); + +s32 igc_init_nvm_params_i225(struct igc_hw *hw); +bool igc_get_flash_presence_i225(struct igc_hw *hw); +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M); +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link); + +#endif diff --git a/devices/igc/igc_mac-6.4-ethercat.c b/devices/igc/igc_mac-6.4-ethercat.c new file mode 100644 index 00000000..3d0ab7fa --- /dev/null +++ b/devices/igc/igc_mac-6.4-ethercat.c @@ -0,0 +1,881 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include +#include + +#include "igc_mac-6.4-ethercat.h" +#include "igc_hw-6.4-ethercat.h" + +/** + * igc_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (0) if successful, else returns -10 + * (-IGC_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. 
+ * + * Disables PCI-Express master access and verifies there are no pending + * requests. + */ +s32 igc_disable_pcie_master(struct igc_hw *hw) +{ + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_GIO_MASTER_DISABLE; + wr32(IGC_CTRL, ctrl); + + while (timeout) { + if (!(rd32(IGC_STATUS) & + IGC_STATUS_GIO_MASTER_ENABLE)) + break; + usleep_range(2000, 3000); + timeout--; + } + + if (!timeout) { + hw_dbg("Master requests are pending.\n"); + ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_init_rx_addrs - Initialize receive addresses + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + */ +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count) +{ + u8 mac_addr[ETH_ALEN] = {0}; + u32 i; + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igc_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + */ +static s32 igc_set_fc_watermarks(struct igc_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & igc_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= IGC_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(IGC_FCRTL, fcrtl); + wr32(IGC_FCRTH, fcrth); + + return 0; +} + +/** + * igc_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + */ +s32 igc_setup_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (igc_check_reset_block(hw)) + goto out; + + /* If requested flow control is set to default, set flow control + * to the both 'rx' and 'tx' pause frames. + */ + if (hw->fc.requested_mode == igc_fc_default) + hw->fc.requested_mode = igc_fc_full; + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. 
+ */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(IGC_FCT, FLOW_CONTROL_TYPE); + wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(IGC_FCTTV, hw->fc.pause_time); + + ret_val = igc_set_fc_watermarks(hw); + +out: + return ret_val; +} + +/** + * igc_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + */ +s32 igc_force_mac_fc(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case igc_fc_none: + ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE)); + break; + case igc_fc_rx_pause: + ctrl &= (~IGC_CTRL_TFCE); + ctrl |= IGC_CTRL_RFCE; + break; + case igc_fc_tx_pause: + ctrl &= (~IGC_CTRL_RFCE); + ctrl |= IGC_CTRL_TFCE; + break; + case igc_fc_full: + ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + wr32(IGC_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * igc_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. 
+ */ +void igc_clear_hw_cntrs_base(struct igc_hw *hw) +{ + rd32(IGC_CRCERRS); + rd32(IGC_MPC); + rd32(IGC_SCC); + rd32(IGC_ECOL); + rd32(IGC_MCC); + rd32(IGC_LATECOL); + rd32(IGC_COLC); + rd32(IGC_RERC); + rd32(IGC_DC); + rd32(IGC_RLEC); + rd32(IGC_XONRXC); + rd32(IGC_XONTXC); + rd32(IGC_XOFFRXC); + rd32(IGC_XOFFTXC); + rd32(IGC_FCRUC); + rd32(IGC_GPRC); + rd32(IGC_BPRC); + rd32(IGC_MPRC); + rd32(IGC_GPTC); + rd32(IGC_GORCL); + rd32(IGC_GORCH); + rd32(IGC_GOTCL); + rd32(IGC_GOTCH); + rd32(IGC_RNBC); + rd32(IGC_RUC); + rd32(IGC_RFC); + rd32(IGC_ROC); + rd32(IGC_RJC); + rd32(IGC_TORL); + rd32(IGC_TORH); + rd32(IGC_TOTL); + rd32(IGC_TOTH); + rd32(IGC_TPR); + rd32(IGC_TPT); + rd32(IGC_MPTC); + rd32(IGC_BPTC); + + rd32(IGC_PRC64); + rd32(IGC_PRC127); + rd32(IGC_PRC255); + rd32(IGC_PRC511); + rd32(IGC_PRC1023); + rd32(IGC_PRC1522); + rd32(IGC_PTC64); + rd32(IGC_PTC127); + rd32(IGC_PTC255); + rd32(IGC_PTC511); + rd32(IGC_PTC1023); + rd32(IGC_PTC1522); + + rd32(IGC_ALGNERRC); + rd32(IGC_RXERRC); + rd32(IGC_TNCRS); + rd32(IGC_HTDPMC); + rd32(IGC_TSCTC); + + rd32(IGC_MGTPRC); + rd32(IGC_MGTPDC); + rd32(IGC_MGTPTC); + + rd32(IGC_IAC); + + rd32(IGC_RPTHC); + rd32(IGC_TLPIC); + rd32(IGC_RLPIC); + rd32(IGC_HGPTC); + rd32(IGC_RXDMTC); + rd32(IGC_HGORCL); + rd32(IGC_HGORCH); + rd32(IGC_HGOTCL); + rd32(IGC_HGOTCH); + rd32(IGC_LENERRS); +} + +/** + * igc_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + */ +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= IGC_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + wr32(IGC_RAL(index), rar_low); + wrfl(); + wr32(IGC_RAH(index), rar_high); + wrfl(); +} + +/** + * igc_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + */ +s32 igc_check_for_copper_link(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + bool link = false; + s32 ret_val; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. 
+ */ + ret_val = igc_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igc_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igc_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igc_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + /* Now that we are aware of our link settings, we can set the LTR + * thresholds. + */ + ret_val = igc_set_ltr_i225(hw, link); + + return ret_val; +} + +/** + * igc_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + */ +void igc_config_collision_dist(struct igc_hw *hw) +{ + u32 tctl; + + tctl = rd32(IGC_TCTL); + + tctl &= ~IGC_TCTL_COLD; + tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT; + + wr32(IGC_TCTL, tctl); + wrfl(); +} + +/** + * igc_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + */ +s32 igc_config_fc_after_link_up(struct igc_hw *hw) +{ + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + struct igc_mac_info *mac = &hw->mac; + u16 speed, duplex; + s32 ret_val = 0; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) + ret_val = igc_force_mac_fc(hw); + + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if (mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. 
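+		 * In particular, the link-status bit is latched low: a lost
+		 * link is held as 0 until the register is read, so the first
+		 * read clears the latched value and the second read reflects
+		 * the current state.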
+ */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); + goto out; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | igc_fc_none + * 0 | 1 | 0 | DC | igc_fc_none + * 0 | 1 | 1 | 0 | igc_fc_none + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + * 1 | 0 | 0 | DC | igc_fc_none + * 1 | DC | 1 | DC | igc_fc_full + * 1 | 1 | 0 | 0 | igc_fc_none + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | IGC_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == igc_fc_full) { + hw->fc.current_mode = igc_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + } + + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == igc_fc_none) || + (hw->fc.requested_mode == igc_fc_tx_pause) || + (hw->fc.strict_ieee)) { + hw->fc.current_mode = igc_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = igc_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igc_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + +out: + return ret_val; +} + +/** + * igc_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + */ +s32 igc_get_auto_rd_done(struct igc_hw *hw) +{ + s32 ret_val = 0; + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -IGC_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. 
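+ * The values come from the MAC's STATUS register rather than from the PHY,
+ * so they are only meaningful once link has been established.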
+ */ +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(IGC_STATUS); + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both 1 Gbps + * and 2.5 Gbps link modes. An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. + */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + *speed = SPEED_2500; + hw_dbg("2500 Mbs, "); + } else { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } + } else if (status & IGC_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & IGC_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igc_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + */ +void igc_put_hw_semaphore(struct igc_hw *hw) +{ + u32 swsm; + + swsm = rd32(IGC_SWSM); + + swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI); + + wr32(IGC_SWSM, swsm); +} + +/** + * igc_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + */ +bool igc_enable_mng_pass_thru(struct igc_hw *hw) +{ + bool ret_val = false; + u32 fwsm, factps; + u32 manc; + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = rd32(IGC_MANC); + + if (!(manc & IGC_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = rd32(IGC_FWSM); + factps = rd32(IGC_FACTPS); + + if (!(factps & IGC_FACTPS_MNGCG) && + ((fwsm & IGC_FWSM_MODE_MASK) == + (igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else { + if ((manc & IGC_MANC_SMBUS_EN) && + !(manc & IGC_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + } + +out: + return ret_val; +} + +/** + * igc_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * igc_mta_set() + **/ +static u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. 
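+	 * (For instance, with 128 MTA registers the mask is 128 * 32 - 1 =
+	 * 0xFFF, and 0xFFF >> 4 == 0xFF, which is why the base bit_shift in
+	 * the example below is 4.)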
+ * + * For example, given the following Destination MAC Address and an + * MTA register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16)mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * igc_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void igc_update_mc_addr_list(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32)i < mc_addr_count; i++) { + hash_value = igc_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); + mc_addr_list += ETH_ALEN; + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + array_wr32(IGC_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); +} diff --git a/devices/igc/igc_mac-6.4-ethercat.h b/devices/igc/igc_mac-6.4-ethercat.h new file mode 100644 index 00000000..262c9116 --- /dev/null +++ b/devices/igc/igc_mac-6.4-ethercat.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_MAC_H_ +#define _IGC_MAC_H_ + +#include "igc_hw-6.4-ethercat.h" +#include "igc_phy-6.4-ethercat.h" +#include "igc_defines-6.4-ethercat.h" + +/* forward declaration */ +s32 igc_disable_pcie_master(struct igc_hw *hw); +s32 igc_check_for_copper_link(struct igc_hw *hw); +s32 igc_config_fc_after_link_up(struct igc_hw *hw); +s32 igc_force_mac_fc(struct igc_hw *hw); +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count); +s32 igc_setup_link(struct igc_hw *hw); +void igc_clear_hw_cntrs_base(struct igc_hw *hw); +s32 igc_get_auto_rd_done(struct igc_hw *hw); +void igc_put_hw_semaphore(struct igc_hw *hw); +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index); +void igc_config_collision_dist(struct igc_hw *hw); + +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex); + +bool igc_enable_mng_pass_thru(struct igc_hw *hw); +void igc_update_mc_addr_list(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); + +enum igc_mng_mode { + igc_mng_mode_none = 0, + igc_mng_mode_asf, + igc_mng_mode_pt, + igc_mng_mode_ipmi, + igc_mng_mode_host_if_only +}; + +#endif diff --git a/devices/igc/igc_mac-6.4-orig.c b/devices/igc/igc_mac-6.4-orig.c new file mode 100644 index 00000000..a5c4b19d --- /dev/null +++ b/devices/igc/igc_mac-6.4-orig.c @@ -0,0 +1,881 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include +#include + +#include "igc_mac.h" +#include "igc_hw.h" + +/** + * igc_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (0) if successful, else returns -10 + * (-IGC_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + */ +s32 igc_disable_pcie_master(struct igc_hw *hw) +{ + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_GIO_MASTER_DISABLE; + wr32(IGC_CTRL, ctrl); + + while (timeout) { + if (!(rd32(IGC_STATUS) & + IGC_STATUS_GIO_MASTER_ENABLE)) + break; + usleep_range(2000, 3000); + timeout--; + } + + if (!timeout) { + hw_dbg("Master requests are pending.\n"); + ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_init_rx_addrs - Initialize receive addresses + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + */ +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count) +{ + u8 mac_addr[ETH_ALEN] = {0}; + u32 i; + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igc_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + */ +static s32 igc_set_fc_watermarks(struct igc_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & igc_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= IGC_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(IGC_FCRTL, fcrtl); + wr32(IGC_FCRTH, fcrth); + + return 0; +} + +/** + * igc_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + */ +s32 igc_setup_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. 
+ */ + if (igc_check_reset_block(hw)) + goto out; + + /* If requested flow control is set to default, set flow control + * to the both 'rx' and 'tx' pause frames. + */ + if (hw->fc.requested_mode == igc_fc_default) + hw->fc.requested_mode = igc_fc_full; + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(IGC_FCT, FLOW_CONTROL_TYPE); + wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(IGC_FCTTV, hw->fc.pause_time); + + ret_val = igc_set_fc_watermarks(hw); + +out: + return ret_val; +} + +/** + * igc_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + */ +s32 igc_force_mac_fc(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case igc_fc_none: + ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE)); + break; + case igc_fc_rx_pause: + ctrl &= (~IGC_CTRL_TFCE); + ctrl |= IGC_CTRL_RFCE; + break; + case igc_fc_tx_pause: + ctrl &= (~IGC_CTRL_RFCE); + ctrl |= IGC_CTRL_TFCE; + break; + case igc_fc_full: + ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + wr32(IGC_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * igc_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. 
+ */ +void igc_clear_hw_cntrs_base(struct igc_hw *hw) +{ + rd32(IGC_CRCERRS); + rd32(IGC_MPC); + rd32(IGC_SCC); + rd32(IGC_ECOL); + rd32(IGC_MCC); + rd32(IGC_LATECOL); + rd32(IGC_COLC); + rd32(IGC_RERC); + rd32(IGC_DC); + rd32(IGC_RLEC); + rd32(IGC_XONRXC); + rd32(IGC_XONTXC); + rd32(IGC_XOFFRXC); + rd32(IGC_XOFFTXC); + rd32(IGC_FCRUC); + rd32(IGC_GPRC); + rd32(IGC_BPRC); + rd32(IGC_MPRC); + rd32(IGC_GPTC); + rd32(IGC_GORCL); + rd32(IGC_GORCH); + rd32(IGC_GOTCL); + rd32(IGC_GOTCH); + rd32(IGC_RNBC); + rd32(IGC_RUC); + rd32(IGC_RFC); + rd32(IGC_ROC); + rd32(IGC_RJC); + rd32(IGC_TORL); + rd32(IGC_TORH); + rd32(IGC_TOTL); + rd32(IGC_TOTH); + rd32(IGC_TPR); + rd32(IGC_TPT); + rd32(IGC_MPTC); + rd32(IGC_BPTC); + + rd32(IGC_PRC64); + rd32(IGC_PRC127); + rd32(IGC_PRC255); + rd32(IGC_PRC511); + rd32(IGC_PRC1023); + rd32(IGC_PRC1522); + rd32(IGC_PTC64); + rd32(IGC_PTC127); + rd32(IGC_PTC255); + rd32(IGC_PTC511); + rd32(IGC_PTC1023); + rd32(IGC_PTC1522); + + rd32(IGC_ALGNERRC); + rd32(IGC_RXERRC); + rd32(IGC_TNCRS); + rd32(IGC_HTDPMC); + rd32(IGC_TSCTC); + + rd32(IGC_MGTPRC); + rd32(IGC_MGTPDC); + rd32(IGC_MGTPTC); + + rd32(IGC_IAC); + + rd32(IGC_RPTHC); + rd32(IGC_TLPIC); + rd32(IGC_RLPIC); + rd32(IGC_HGPTC); + rd32(IGC_RXDMTC); + rd32(IGC_HGORCL); + rd32(IGC_HGORCH); + rd32(IGC_HGOTCL); + rd32(IGC_HGOTCH); + rd32(IGC_LENERRS); +} + +/** + * igc_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + */ +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= IGC_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + wr32(IGC_RAL(index), rar_low); + wrfl(); + wr32(IGC_RAH(index), rar_high); + wrfl(); +} + +/** + * igc_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + */ +s32 igc_check_for_copper_link(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + bool link = false; + s32 ret_val; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. 
+ */ + ret_val = igc_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igc_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igc_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igc_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + /* Now that we are aware of our link settings, we can set the LTR + * thresholds. + */ + ret_val = igc_set_ltr_i225(hw, link); + + return ret_val; +} + +/** + * igc_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + */ +void igc_config_collision_dist(struct igc_hw *hw) +{ + u32 tctl; + + tctl = rd32(IGC_TCTL); + + tctl &= ~IGC_TCTL_COLD; + tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT; + + wr32(IGC_TCTL, tctl); + wrfl(); +} + +/** + * igc_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + */ +s32 igc_config_fc_after_link_up(struct igc_hw *hw) +{ + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + struct igc_mac_info *mac = &hw->mac; + u16 speed, duplex; + s32 ret_val = 0; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) + ret_val = igc_force_mac_fc(hw); + + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if (mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. 
+ */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); + goto out; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | igc_fc_none + * 0 | 1 | 0 | DC | igc_fc_none + * 0 | 1 | 1 | 0 | igc_fc_none + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + * 1 | 0 | 0 | DC | igc_fc_none + * 1 | DC | 1 | DC | igc_fc_full + * 1 | 1 | 0 | 0 | igc_fc_none + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | IGC_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == igc_fc_full) { + hw->fc.current_mode = igc_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + } + + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == igc_fc_none) || + (hw->fc.requested_mode == igc_fc_tx_pause) || + (hw->fc.strict_ieee)) { + hw->fc.current_mode = igc_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = igc_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igc_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + +out: + return ret_val; +} + +/** + * igc_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + */ +s32 igc_get_auto_rd_done(struct igc_hw *hw) +{ + s32 ret_val = 0; + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -IGC_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. 
+ */ +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(IGC_STATUS); + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both 1 Gbps + * and 2.5 Gbps link modes. An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. + */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + *speed = SPEED_2500; + hw_dbg("2500 Mbs, "); + } else { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } + } else if (status & IGC_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & IGC_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igc_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + */ +void igc_put_hw_semaphore(struct igc_hw *hw) +{ + u32 swsm; + + swsm = rd32(IGC_SWSM); + + swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI); + + wr32(IGC_SWSM, swsm); +} + +/** + * igc_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + */ +bool igc_enable_mng_pass_thru(struct igc_hw *hw) +{ + bool ret_val = false; + u32 fwsm, factps; + u32 manc; + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = rd32(IGC_MANC); + + if (!(manc & IGC_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = rd32(IGC_FWSM); + factps = rd32(IGC_FACTPS); + + if (!(factps & IGC_FACTPS_MNGCG) && + ((fwsm & IGC_FWSM_MODE_MASK) == + (igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else { + if ((manc & IGC_MANC_SMBUS_EN) && + !(manc & IGC_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + } + +out: + return ret_val; +} + +/** + * igc_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * igc_mta_set() + **/ +static u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. 
+ * + * For example, given the following Destination MAC Address and an + * MTA register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16)mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * igc_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void igc_update_mc_addr_list(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32)i < mc_addr_count; i++) { + hash_value = igc_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); + mc_addr_list += ETH_ALEN; + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + array_wr32(IGC_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); +} diff --git a/devices/igc/igc_mac-6.4-orig.h b/devices/igc/igc_mac-6.4-orig.h new file mode 100644 index 00000000..b5963f86 --- /dev/null +++ b/devices/igc/igc_mac-6.4-orig.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_MAC_H_ +#define _IGC_MAC_H_ + +#include "igc_hw.h" +#include "igc_phy.h" +#include "igc_defines.h" + +/* forward declaration */ +s32 igc_disable_pcie_master(struct igc_hw *hw); +s32 igc_check_for_copper_link(struct igc_hw *hw); +s32 igc_config_fc_after_link_up(struct igc_hw *hw); +s32 igc_force_mac_fc(struct igc_hw *hw); +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count); +s32 igc_setup_link(struct igc_hw *hw); +void igc_clear_hw_cntrs_base(struct igc_hw *hw); +s32 igc_get_auto_rd_done(struct igc_hw *hw); +void igc_put_hw_semaphore(struct igc_hw *hw); +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index); +void igc_config_collision_dist(struct igc_hw *hw); + +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex); + +bool igc_enable_mng_pass_thru(struct igc_hw *hw); +void igc_update_mc_addr_list(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); + +enum igc_mng_mode { + igc_mng_mode_none = 0, + igc_mng_mode_asf, + igc_mng_mode_pt, + igc_mng_mode_ipmi, + igc_mng_mode_host_if_only +}; + +#endif diff --git a/devices/igc/igc_mac-6.6-ethercat.c b/devices/igc/igc_mac-6.6-ethercat.c new file mode 100644 index 00000000..08007ca8 --- /dev/null +++ b/devices/igc/igc_mac-6.6-ethercat.c @@ -0,0 +1,881 @@ +// SPDX-License-Identifier: GPL-2.0 +/* 
Copyright (c) 2018 Intel Corporation */ + +#include +#include + +#include "igc_mac-6.6-ethercat.h" +#include "igc_hw-6.6-ethercat.h" + +/** + * igc_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (0) if successful, else returns -10 + * (-IGC_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + */ +s32 igc_disable_pcie_master(struct igc_hw *hw) +{ + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_GIO_MASTER_DISABLE; + wr32(IGC_CTRL, ctrl); + + while (timeout) { + if (!(rd32(IGC_STATUS) & + IGC_STATUS_GIO_MASTER_ENABLE)) + break; + usleep_range(2000, 3000); + timeout--; + } + + if (!timeout) { + hw_dbg("Master requests are pending.\n"); + ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_init_rx_addrs - Initialize receive addresses + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + */ +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count) +{ + u8 mac_addr[ETH_ALEN] = {0}; + u32 i; + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igc_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + */ +static s32 igc_set_fc_watermarks(struct igc_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & igc_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= IGC_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(IGC_FCRTL, fcrtl); + wr32(IGC_FCRTH, fcrth); + + return 0; +} + +/** + * igc_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + */ +s32 igc_setup_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. 
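+	 * (igc_check_reset_block() reports whether PHY resets are currently
+	 * blocked, typically by manageability firmware; in that case the PHY
+	 * keeps its existing configuration and link setup is skipped here.)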
+ */ + if (igc_check_reset_block(hw)) + goto out; + + /* If requested flow control is set to default, set flow control + * to the both 'rx' and 'tx' pause frames. + */ + if (hw->fc.requested_mode == igc_fc_default) + hw->fc.requested_mode = igc_fc_full; + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(IGC_FCT, FLOW_CONTROL_TYPE); + wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(IGC_FCTTV, hw->fc.pause_time); + + ret_val = igc_set_fc_watermarks(hw); + +out: + return ret_val; +} + +/** + * igc_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + */ +s32 igc_force_mac_fc(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case igc_fc_none: + ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE)); + break; + case igc_fc_rx_pause: + ctrl &= (~IGC_CTRL_TFCE); + ctrl |= IGC_CTRL_RFCE; + break; + case igc_fc_tx_pause: + ctrl &= (~IGC_CTRL_RFCE); + ctrl |= IGC_CTRL_TFCE; + break; + case igc_fc_full: + ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + wr32(IGC_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * igc_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. 
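+ * This gives the driver a zero baseline after reset; the same registers are
+ * read again later by the statistics update path to accumulate counts.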
+ */ +void igc_clear_hw_cntrs_base(struct igc_hw *hw) +{ + rd32(IGC_CRCERRS); + rd32(IGC_MPC); + rd32(IGC_SCC); + rd32(IGC_ECOL); + rd32(IGC_MCC); + rd32(IGC_LATECOL); + rd32(IGC_COLC); + rd32(IGC_RERC); + rd32(IGC_DC); + rd32(IGC_RLEC); + rd32(IGC_XONRXC); + rd32(IGC_XONTXC); + rd32(IGC_XOFFRXC); + rd32(IGC_XOFFTXC); + rd32(IGC_FCRUC); + rd32(IGC_GPRC); + rd32(IGC_BPRC); + rd32(IGC_MPRC); + rd32(IGC_GPTC); + rd32(IGC_GORCL); + rd32(IGC_GORCH); + rd32(IGC_GOTCL); + rd32(IGC_GOTCH); + rd32(IGC_RNBC); + rd32(IGC_RUC); + rd32(IGC_RFC); + rd32(IGC_ROC); + rd32(IGC_RJC); + rd32(IGC_TORL); + rd32(IGC_TORH); + rd32(IGC_TOTL); + rd32(IGC_TOTH); + rd32(IGC_TPR); + rd32(IGC_TPT); + rd32(IGC_MPTC); + rd32(IGC_BPTC); + + rd32(IGC_PRC64); + rd32(IGC_PRC127); + rd32(IGC_PRC255); + rd32(IGC_PRC511); + rd32(IGC_PRC1023); + rd32(IGC_PRC1522); + rd32(IGC_PTC64); + rd32(IGC_PTC127); + rd32(IGC_PTC255); + rd32(IGC_PTC511); + rd32(IGC_PTC1023); + rd32(IGC_PTC1522); + + rd32(IGC_ALGNERRC); + rd32(IGC_RXERRC); + rd32(IGC_TNCRS); + rd32(IGC_HTDPMC); + rd32(IGC_TSCTC); + + rd32(IGC_MGTPRC); + rd32(IGC_MGTPDC); + rd32(IGC_MGTPTC); + + rd32(IGC_IAC); + + rd32(IGC_RPTHC); + rd32(IGC_TLPIC); + rd32(IGC_RLPIC); + rd32(IGC_HGPTC); + rd32(IGC_RXDMTC); + rd32(IGC_HGORCL); + rd32(IGC_HGORCH); + rd32(IGC_HGOTCL); + rd32(IGC_HGOTCH); + rd32(IGC_LENERRS); +} + +/** + * igc_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + */ +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= IGC_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + wr32(IGC_RAL(index), rar_low); + wrfl(); + wr32(IGC_RAH(index), rar_high); + wrfl(); +} + +/** + * igc_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + */ +s32 igc_check_for_copper_link(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + bool link = false; + s32 ret_val; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. 
+ */ + ret_val = igc_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igc_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igc_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igc_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + /* Now that we are aware of our link settings, we can set the LTR + * thresholds. + */ + ret_val = igc_set_ltr_i225(hw, link); + + return ret_val; +} + +/** + * igc_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + */ +void igc_config_collision_dist(struct igc_hw *hw) +{ + u32 tctl; + + tctl = rd32(IGC_TCTL); + + tctl &= ~IGC_TCTL_COLD; + tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT; + + wr32(IGC_TCTL, tctl); + wrfl(); +} + +/** + * igc_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + */ +s32 igc_config_fc_after_link_up(struct igc_hw *hw) +{ + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + struct igc_mac_info *mac = &hw->mac; + u16 speed, duplex; + s32 ret_val = 0; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) + ret_val = igc_force_mac_fc(hw); + + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if (mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. 
+ */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); + goto out; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | igc_fc_none + * 0 | 1 | 0 | DC | igc_fc_none + * 0 | 1 | 1 | 0 | igc_fc_none + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + * 1 | 0 | 0 | DC | igc_fc_none + * 1 | DC | 1 | DC | igc_fc_full + * 1 | 1 | 0 | 0 | igc_fc_none + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | IGC_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == igc_fc_full) { + hw->fc.current_mode = igc_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + } + + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == igc_fc_none) || + (hw->fc.requested_mode == igc_fc_tx_pause) || + (hw->fc.strict_ieee)) { + hw->fc.current_mode = igc_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = igc_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igc_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + +out: + return ret_val; +} + +/** + * igc_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + */ +s32 igc_get_auto_rd_done(struct igc_hw *hw) +{ + s32 ret_val = 0; + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -IGC_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. 
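+ *
+ * Callers normally reach this routine through the MAC ops table rather than
+ * directly. A minimal sketch of such a call, using only names that appear
+ * elsewhere in this file:
+ *
+ *	u16 speed, duplex;
+ *
+ *	if (!hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex))
+ *		hw_dbg("link: %u Mbps, %s duplex\n", speed,
+ *		       duplex == FULL_DUPLEX ? "full" : "half");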
+ */ +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(IGC_STATUS); + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both 1 Gbps + * and 2.5 Gbps link modes. An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. + */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + *speed = SPEED_2500; + hw_dbg("2500 Mbs, "); + } else { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } + } else if (status & IGC_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & IGC_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igc_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + */ +void igc_put_hw_semaphore(struct igc_hw *hw) +{ + u32 swsm; + + swsm = rd32(IGC_SWSM); + + swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI); + + wr32(IGC_SWSM, swsm); +} + +/** + * igc_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + */ +bool igc_enable_mng_pass_thru(struct igc_hw *hw) +{ + bool ret_val = false; + u32 fwsm, factps; + u32 manc; + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = rd32(IGC_MANC); + + if (!(manc & IGC_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = rd32(IGC_FWSM); + factps = rd32(IGC_FACTPS); + + if (!(factps & IGC_FACTPS_MNGCG) && + ((fwsm & IGC_FWSM_MODE_MASK) == + (igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else { + if ((manc & IGC_MANC_SMBUS_EN) && + !(manc & IGC_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + } + +out: + return ret_val; +} + +/** + * igc_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * igc_mta_set() + **/ +static u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. 
+ * + * For example, given the following Destination MAC Address and an + * MTA register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16)mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * igc_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void igc_update_mc_addr_list(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32)i < mc_addr_count; i++) { + hash_value = igc_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); + mc_addr_list += ETH_ALEN; + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + array_wr32(IGC_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); +} diff --git a/devices/igc/igc_mac-6.6-ethercat.h b/devices/igc/igc_mac-6.6-ethercat.h new file mode 100644 index 00000000..89b0dc9c --- /dev/null +++ b/devices/igc/igc_mac-6.6-ethercat.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_MAC_H_ +#define _IGC_MAC_H_ + +#include "igc_hw-6.6-ethercat.h" +#include "igc_phy-6.6-ethercat.h" +#include "igc_defines-6.6-ethercat.h" + +/* forward declaration */ +s32 igc_disable_pcie_master(struct igc_hw *hw); +s32 igc_check_for_copper_link(struct igc_hw *hw); +s32 igc_config_fc_after_link_up(struct igc_hw *hw); +s32 igc_force_mac_fc(struct igc_hw *hw); +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count); +s32 igc_setup_link(struct igc_hw *hw); +void igc_clear_hw_cntrs_base(struct igc_hw *hw); +s32 igc_get_auto_rd_done(struct igc_hw *hw); +void igc_put_hw_semaphore(struct igc_hw *hw); +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index); +void igc_config_collision_dist(struct igc_hw *hw); + +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex); + +bool igc_enable_mng_pass_thru(struct igc_hw *hw); +void igc_update_mc_addr_list(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); + +enum igc_mng_mode { + igc_mng_mode_none = 0, + igc_mng_mode_asf, + igc_mng_mode_pt, + igc_mng_mode_ipmi, + igc_mng_mode_host_if_only +}; + +#endif diff --git a/devices/igc/igc_mac-6.6-orig.c b/devices/igc/igc_mac-6.6-orig.c new file mode 100644 index 00000000..a5c4b19d --- /dev/null +++ b/devices/igc/igc_mac-6.6-orig.c @@ -0,0 +1,881 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include +#include + +#include "igc_mac.h" +#include "igc_hw.h" + +/** + * igc_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (0) if successful, else returns -10 + * (-IGC_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + */ +s32 igc_disable_pcie_master(struct igc_hw *hw) +{ + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_GIO_MASTER_DISABLE; + wr32(IGC_CTRL, ctrl); + + while (timeout) { + if (!(rd32(IGC_STATUS) & + IGC_STATUS_GIO_MASTER_ENABLE)) + break; + usleep_range(2000, 3000); + timeout--; + } + + if (!timeout) { + hw_dbg("Master requests are pending.\n"); + ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_init_rx_addrs - Initialize receive addresses + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + */ +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count) +{ + u8 mac_addr[ETH_ALEN] = {0}; + u32 i; + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igc_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + */ +static s32 igc_set_fc_watermarks(struct igc_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & igc_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= IGC_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(IGC_FCRTL, fcrtl); + wr32(IGC_FCRTH, fcrth); + + return 0; +} + +/** + * igc_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + */ +s32 igc_setup_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. 
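+	 * (igc_check_reset_block() reports a non-zero value when management
+	 *  firmware has blocked PHY resets, for example while it is using the
+	 *  port itself; in that case the existing link configuration is left
+	 *  untouched.)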
+ */ + if (igc_check_reset_block(hw)) + goto out; + + /* If requested flow control is set to default, set flow control + * to the both 'rx' and 'tx' pause frames. + */ + if (hw->fc.requested_mode == igc_fc_default) + hw->fc.requested_mode = igc_fc_full; + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(IGC_FCT, FLOW_CONTROL_TYPE); + wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(IGC_FCTTV, hw->fc.pause_time); + + ret_val = igc_set_fc_watermarks(hw); + +out: + return ret_val; +} + +/** + * igc_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + */ +s32 igc_force_mac_fc(struct igc_hw *hw) +{ + s32 ret_val = 0; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case igc_fc_none: + ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE)); + break; + case igc_fc_rx_pause: + ctrl &= (~IGC_CTRL_TFCE); + ctrl |= IGC_CTRL_RFCE; + break; + case igc_fc_tx_pause: + ctrl &= (~IGC_CTRL_RFCE); + ctrl |= IGC_CTRL_TFCE; + break; + case igc_fc_full: + ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + wr32(IGC_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * igc_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. 
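+ *
+ * The statistics registers are clear-on-read, so each rd32() below resets
+ * the corresponding counter to zero; the values read are simply discarded.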
+ */ +void igc_clear_hw_cntrs_base(struct igc_hw *hw) +{ + rd32(IGC_CRCERRS); + rd32(IGC_MPC); + rd32(IGC_SCC); + rd32(IGC_ECOL); + rd32(IGC_MCC); + rd32(IGC_LATECOL); + rd32(IGC_COLC); + rd32(IGC_RERC); + rd32(IGC_DC); + rd32(IGC_RLEC); + rd32(IGC_XONRXC); + rd32(IGC_XONTXC); + rd32(IGC_XOFFRXC); + rd32(IGC_XOFFTXC); + rd32(IGC_FCRUC); + rd32(IGC_GPRC); + rd32(IGC_BPRC); + rd32(IGC_MPRC); + rd32(IGC_GPTC); + rd32(IGC_GORCL); + rd32(IGC_GORCH); + rd32(IGC_GOTCL); + rd32(IGC_GOTCH); + rd32(IGC_RNBC); + rd32(IGC_RUC); + rd32(IGC_RFC); + rd32(IGC_ROC); + rd32(IGC_RJC); + rd32(IGC_TORL); + rd32(IGC_TORH); + rd32(IGC_TOTL); + rd32(IGC_TOTH); + rd32(IGC_TPR); + rd32(IGC_TPT); + rd32(IGC_MPTC); + rd32(IGC_BPTC); + + rd32(IGC_PRC64); + rd32(IGC_PRC127); + rd32(IGC_PRC255); + rd32(IGC_PRC511); + rd32(IGC_PRC1023); + rd32(IGC_PRC1522); + rd32(IGC_PTC64); + rd32(IGC_PTC127); + rd32(IGC_PTC255); + rd32(IGC_PTC511); + rd32(IGC_PTC1023); + rd32(IGC_PTC1522); + + rd32(IGC_ALGNERRC); + rd32(IGC_RXERRC); + rd32(IGC_TNCRS); + rd32(IGC_HTDPMC); + rd32(IGC_TSCTC); + + rd32(IGC_MGTPRC); + rd32(IGC_MGTPDC); + rd32(IGC_MGTPTC); + + rd32(IGC_IAC); + + rd32(IGC_RPTHC); + rd32(IGC_TLPIC); + rd32(IGC_RLPIC); + rd32(IGC_HGPTC); + rd32(IGC_RXDMTC); + rd32(IGC_HGORCL); + rd32(IGC_HGORCH); + rd32(IGC_HGOTCL); + rd32(IGC_HGOTCH); + rd32(IGC_LENERRS); +} + +/** + * igc_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + */ +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= IGC_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + wr32(IGC_RAL(index), rar_low); + wrfl(); + wr32(IGC_RAH(index), rar_high); + wrfl(); +} + +/** + * igc_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + */ +s32 igc_check_for_copper_link(struct igc_hw *hw) +{ + struct igc_mac_info *mac = &hw->mac; + bool link = false; + s32 ret_val; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. 
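+	 * (The "1, 0" arguments below ask igc_phy_has_link() for a single
+	 *  poll of PHY_STATUS with no delay between iterations; *link is set
+	 *  only if that read reports link up.)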
+ */ + ret_val = igc_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igc_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -IGC_ERR_CONFIG; + goto out; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igc_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = igc_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + /* Now that we are aware of our link settings, we can set the LTR + * thresholds. + */ + ret_val = igc_set_ltr_i225(hw, link); + + return ret_val; +} + +/** + * igc_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + */ +void igc_config_collision_dist(struct igc_hw *hw) +{ + u32 tctl; + + tctl = rd32(IGC_TCTL); + + tctl &= ~IGC_TCTL_COLD; + tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT; + + wr32(IGC_TCTL, tctl); + wrfl(); +} + +/** + * igc_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + */ +s32 igc_config_fc_after_link_up(struct igc_hw *hw) +{ + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + struct igc_mac_info *mac = &hw->mac; + u16 speed, duplex; + s32 ret_val = 0; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) + ret_val = igc_force_mac_fc(hw); + + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if (mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. 
+ */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); + goto out; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | igc_fc_none + * 0 | 1 | 0 | DC | igc_fc_none + * 0 | 1 | 1 | 0 | igc_fc_none + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + * 1 | 0 | 0 | DC | igc_fc_none + * 1 | DC | 1 | DC | igc_fc_full + * 1 | 1 | 0 | 0 | igc_fc_none + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | IGC_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == igc_fc_full) { + hw->fc.current_mode = igc_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + } + + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | igc_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | igc_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == igc_fc_none) || + (hw->fc.requested_mode == igc_fc_tx_pause) || + (hw->fc.strict_ieee)) { + hw->fc.current_mode = igc_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } else { + hw->fc.current_mode = igc_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = igc_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igc_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + +out: + return ret_val; +} + +/** + * igc_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + */ +s32 igc_get_auto_rd_done(struct igc_hw *hw) +{ + s32 ret_val = 0; + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -IGC_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. 
+ */ +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(IGC_STATUS); + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both 1 Gbps + * and 2.5 Gbps link modes. An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. + */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + *speed = SPEED_2500; + hw_dbg("2500 Mbs, "); + } else { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } + } else if (status & IGC_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & IGC_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igc_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + */ +void igc_put_hw_semaphore(struct igc_hw *hw) +{ + u32 swsm; + + swsm = rd32(IGC_SWSM); + + swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI); + + wr32(IGC_SWSM, swsm); +} + +/** + * igc_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + */ +bool igc_enable_mng_pass_thru(struct igc_hw *hw) +{ + bool ret_val = false; + u32 fwsm, factps; + u32 manc; + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = rd32(IGC_MANC); + + if (!(manc & IGC_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = rd32(IGC_FWSM); + factps = rd32(IGC_FACTPS); + + if (!(factps & IGC_FACTPS_MNGCG) && + ((fwsm & IGC_FWSM_MODE_MASK) == + (igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else { + if ((manc & IGC_MANC_SMBUS_EN) && + !(manc & IGC_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + } + +out: + return ret_val; +} + +/** + * igc_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * igc_mta_set() + **/ +static u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. 
+ * + * For example, given the following Destination MAC Address and an + * MTA register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16)mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * igc_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void igc_update_mc_addr_list(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32)i < mc_addr_count; i++) { + hash_value = igc_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); + mc_addr_list += ETH_ALEN; + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + array_wr32(IGC_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); +} diff --git a/devices/igc/igc_mac-6.6-orig.h b/devices/igc/igc_mac-6.6-orig.h new file mode 100644 index 00000000..b5963f86 --- /dev/null +++ b/devices/igc/igc_mac-6.6-orig.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_MAC_H_ +#define _IGC_MAC_H_ + +#include "igc_hw.h" +#include "igc_phy.h" +#include "igc_defines.h" + +/* forward declaration */ +s32 igc_disable_pcie_master(struct igc_hw *hw); +s32 igc_check_for_copper_link(struct igc_hw *hw); +s32 igc_config_fc_after_link_up(struct igc_hw *hw); +s32 igc_force_mac_fc(struct igc_hw *hw); +void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count); +s32 igc_setup_link(struct igc_hw *hw); +void igc_clear_hw_cntrs_base(struct igc_hw *hw); +s32 igc_get_auto_rd_done(struct igc_hw *hw); +void igc_put_hw_semaphore(struct igc_hw *hw); +void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index); +void igc_config_collision_dist(struct igc_hw *hw); + +s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed, + u16 *duplex); + +bool igc_enable_mng_pass_thru(struct igc_hw *hw); +void igc_update_mc_addr_list(struct igc_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); + +enum igc_mng_mode { + igc_mng_mode_none = 0, + igc_mng_mode_asf, + igc_mng_mode_pt, + igc_mng_mode_ipmi, + igc_mng_mode_host_if_only +}; + +#endif diff --git a/devices/igc/igc_main-6.4-ethercat.c b/devices/igc/igc_main-6.4-ethercat.c new file mode 100644 index 00000000..7c6b5dc8 --- /dev/null +++ b/devices/igc/igc_main-6.4-ethercat.c @@ -0,0 +1,7461 @@ +// SPDX-License-Identifier: GPL-2.0 
+/* Copyright (c) 2018 Intel Corporation */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "igc-6.4-ethercat.h" +#include "igc_hw-6.4-ethercat.h" +#include "igc_tsn-6.4-ethercat.h" +#include "igc_xdp-6.4-ethercat.h" + +#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver (EtherCAT enabled)" + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +#define IGC_XDP_PASS 0 +#define IGC_XDP_CONSUMED BIT(0) +#define IGC_XDP_TX BIT(1) +#define IGC_XDP_REDIRECT BIT(2) + +static int debug = -1; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_LICENSE("GPL v2"); +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +char igc_driver_name[] = "ec_igc"; +static const char igc_driver_string[] = DRV_SUMMARY; +static const char igc_copyright[] = + "Copyright(c) 2018 Intel Corporation."; + +static const struct igc_info *igc_info_tbl[] = { + [board_base] = &igc_base_info, +}; + +static const struct pci_device_id igc_pci_tbl[] = { + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base }, + /* required last entry */ + {0, } +}; + +// MODULE_DEVICE_TABLE(pci, igc_pci_tbl); + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +void igc_reset(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_fc_info *fc = &hw->fc; + u32 pba, hwm; + + /* Repartition PBA for greater than 9k MTU if required */ + pba = IGC_PBA_34K; + + /* flow control settings + * The high water mark must be low enough to fit one full frame + * after transmitting the pause frame. As such we must have enough + * space to allow for us to complete our current transmit and then + * receive the frame that is in progress from the link partner. 
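+	 * (Worked example: with pba = IGC_PBA_34K the packet buffer is
+	 *  34 * 1024 = 34816 bytes, so the high water mark works out to
+	 *  34816 minus one max-size Rx frame and one full jumbo Tx frame,
+	 *  rounded down to 16-byte granularity by the mask below.)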
+ * Set it to: + * - the full Rx FIFO size minus one full Tx plus one full Rx frame + */ + hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); + + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + fc->pause_time = 0xFFFF; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + hw->mac.ops.reset_hw(hw); + + if (hw->mac.ops.init_hw(hw)) + netdev_err(dev, "Error on hardware initialization\n"); + + /* Re-establish EEE setting */ + igc_set_eee_i225(hw, true, true, true); + + if (!adapter->ecdev && !netif_running(adapter->netdev)) + igc_power_down_phy_copper_base(&adapter->hw); + + /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */ + wr32(IGC_VET, ETH_P_8021Q); + + /* Re-enable PTP, where applicable. */ + igc_ptp_reset(adapter); + + /* Re-enable TSN offloading, where applicable. */ + igc_tsn_reset(adapter); + + igc_get_phy_info(hw); +} + +/** + * igc_power_up_link - Power up the phy link + * @adapter: address of board private structure + */ +static void igc_power_up_link(struct igc_adapter *adapter) +{ + igc_reset_phy(&adapter->hw); + + igc_power_up_phy_copper(&adapter->hw); + + igc_setup_link(&adapter->hw); +} + +/** + * igc_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + */ +static void igc_release_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + if (!pci_device_is_present(adapter->pdev)) + return; + + /* Let firmware take over control of h/w */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); +} + +/** + * igc_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. 
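+ *
+ * This is the counterpart of igc_release_hw_control() above: the driver
+ * asserts DRV_LOAD for as long as it owns the device (from probe/resume
+ * until remove/shutdown/suspend) so that firmware knows whether it should
+ * manage the link itself.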
+ */ +static void igc_get_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); +} + +static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf) +{ + dma_unmap_single(dev, dma_unmap_addr(buf, dma), + dma_unmap_len(buf, len), DMA_TO_DEVICE); + + dma_unmap_len_set(buf, len, 0); +} + +/** + * igc_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + */ +static void igc_clean_tx_ring(struct igc_ring *tx_ring) +{ + u16 i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + u32 xsk_frames = 0; + + while (i != tx_ring->next_to_use) { + union igc_adv_tx_desc *eop_desc, *tx_desc; + + switch (tx_buffer->type) { + case IGC_TX_BUFFER_TYPE_XSK: + xsk_frames++; + break; + case IGC_TX_BUFFER_TYPE_XDP: + xdp_return_frame(tx_buffer->xdpf); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + case IGC_TX_BUFFER_TYPE_SKB: + { + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + if (!adapter->ecdev) { + /* skb is reused in EtherCAT TX operation */ + dev_kfree_skb_any(tx_buffer->skb); + } + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + } + default: + netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); + break; + } + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IGC_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + } + + tx_buffer->next_to_watch = NULL; + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + if (tx_ring->xsk_pool && xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* Zero out the buffer ring */ + memset(tx_ring->tx_buffer_info, 0, + sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * igc_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + */ +void igc_free_tx_resources(struct igc_ring *tx_ring) +{ + igc_disable_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igc_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void igc_free_all_tx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * igc_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private 
structure + */ +static void igc_clean_all_tx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igc_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * igc_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + */ +int igc_setup_tx_resources(struct igc_ring *tx_ring) +{ + struct net_device *ndev = tx_ring->netdev; + struct device *dev = tx_ring->dev; + int size = 0; + + size = sizeof(struct igc_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_tx_resources(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igc_setup_tx_resources(adapter->tx_ring[i]); + if (err) { + netdev_err(dev, "Error on Tx queue %u setup\n", i); + for (i--; i >= 0; i--) + igc_free_tx_resources(adapter->tx_ring[i]); + break; + } + } + + return err; +} + +static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
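+		 * (Rx pages are mapped with DMA_ATTR_SKIP_CPU_SYNC as part of
+		 *  IGC_RX_DMA_ATTR, so the driver has to perform this sync
+		 *  explicitly before the CPU reuses or frees the page.)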
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + buffer_info->dma, + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, + buffer_info->dma, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(buffer_info->page, + buffer_info->pagecnt_bias); + + i++; + if (i == rx_ring->count) + i = 0; + } +} + +static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring) +{ + struct igc_rx_buffer *bi; + u16 i; + + for (i = 0; i < ring->count; i++) { + bi = &ring->rx_buffer_info[i]; + if (!bi->xdp) + continue; + + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + } +} + +/** + * igc_clean_rx_ring - Free Rx Buffers per Queue + * @ring: ring to free buffers from + */ +static void igc_clean_rx_ring(struct igc_ring *ring) +{ + if (ring->xsk_pool) + igc_clean_rx_ring_xsk_pool(ring); + else + igc_clean_rx_ring_page_shared(ring); + + clear_ring_uses_large_buffer(ring); + + ring->next_to_alloc = 0; + ring->next_to_clean = 0; + ring->next_to_use = 0; +} + +/** + * igc_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + */ +static void igc_clean_all_rx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igc_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * igc_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + */ +void igc_free_rx_resources(struct igc_ring *rx_ring) +{ + igc_clean_rx_ring(rx_ring); + + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * igc_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + */ +static void igc_free_all_rx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + igc_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * igc_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + */ +int igc_setup_rx_resources(struct igc_ring *rx_ring) +{ + struct net_device *ndev = rx_ring->netdev; + struct device *dev = rx_ring->dev; + u8 index = rx_ring->queue_index; + int size, desc_len, res; + + /* XDP RX-queue info */ + if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, + rx_ring->q_vector->napi.napi_id); + if (res < 0) { + netdev_err(ndev, "Failed to register xdp_rxq index %u\n", + index); + return res; + } + + size = sizeof(struct igc_rx_buffer) * rx_ring->count; + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + desc_len = sizeof(union igc_adv_rx_desc); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; + +err: + 
xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_rx_resources(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igc_setup_rx_resources(adapter->rx_ring[i]); + if (err) { + netdev_err(dev, "Error on Rx queue %u setup\n", i); + for (i--; i >= 0; i--) + igc_free_rx_resources(adapter->rx_ring[i]); + break; + } + } + + return err; +} + +static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + if (!igc_xdp_is_enabled(adapter) || + !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) + return NULL; + + return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); +} + +/** + * igc_configure_rx_ring - Configure a receive ring after Reset + * @adapter: board private structure + * @ring: receive ring to be configured + * + * Configure the Rx unit of the MAC after a reset. + */ +static void igc_configure_rx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + union igc_adv_rx_desc *rx_desc; + int reg_idx = ring->reg_idx; + u32 srrctl = 0, rxdctl = 0; + u64 rdba = ring->dma; + u32 buf_size; + + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + ring->xsk_pool = igc_get_xsk_pool(adapter, ring); + if (ring->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL)); + } + + if (igc_xdp_is_enabled(adapter)) + set_ring_uses_large_buffer(ring); + + /* disable the queue */ + wr32(IGC_RXDCTL(reg_idx), 0); + + /* Set DMA base address registers */ + wr32(IGC_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(IGC_RDBAH(reg_idx), rdba >> 32); + wr32(IGC_RDLEN(reg_idx), + ring->count * sizeof(union igc_adv_rx_desc)); + + /* initialize head and tail */ + ring->tail = adapter->io_addr + IGC_RDT(reg_idx); + wr32(IGC_RDH(reg_idx), 0); + writel(0, ring->tail); + + /* reset next-to- use/clean to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + if (ring->xsk_pool) + buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); + else if (ring_uses_large_buffer(ring)) + buf_size = IGC_RXBUFFER_3072; + else + buf_size = IGC_RXBUFFER_2048; + + srrctl = rd32(IGC_SRRCTL(reg_idx)); + srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK | + IGC_SRRCTL_DESCTYPE_MASK); + srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN); + srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size); + srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; + + wr32(IGC_SRRCTL(reg_idx), srrctl); + + rxdctl |= IGC_RX_PTHRESH; + rxdctl |= IGC_RX_HTHRESH << 8; + rxdctl |= IGC_RX_WTHRESH << 16; + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct igc_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IGC_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* enable receive descriptor fetching */ + rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; + + wr32(IGC_RXDCTL(reg_idx), rxdctl); +} + +/** + * igc_configure_rx - Configure receive Unit 
after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + */ +static void igc_configure_rx(struct igc_adapter *adapter) +{ + int i; + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + igc_configure_rx_ring(adapter, adapter->rx_ring[i]); +} + +/** + * igc_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. + */ +static void igc_configure_tx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + int reg_idx = ring->reg_idx; + u64 tdba = ring->dma; + u32 txdctl = 0; + + ring->xsk_pool = igc_get_xsk_pool(adapter, ring); + + /* disable the queue */ + wr32(IGC_TXDCTL(reg_idx), 0); + wrfl(); + + wr32(IGC_TDLEN(reg_idx), + ring->count * sizeof(union igc_adv_tx_desc)); + wr32(IGC_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(IGC_TDBAH(reg_idx), tdba >> 32); + + ring->tail = adapter->io_addr + IGC_TDT(reg_idx); + wr32(IGC_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGC_TX_PTHRESH; + txdctl |= IGC_TX_HTHRESH << 8; + txdctl |= IGC_TX_WTHRESH << 16; + + txdctl |= IGC_TXDCTL_QUEUE_ENABLE; + wr32(IGC_TXDCTL(reg_idx), txdctl); +} + +/** + * igc_configure_tx - Configure transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + */ +static void igc_configure_tx(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +/** + * igc_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + */ +static void igc_setup_mrqc(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 j, num_rx_queues; + u32 mrqc, rxcsum; + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (j = 0; j < 10; j++) + wr32(IGC_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGC_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGC_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igc_write_rss_indir_tbl(adapter); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. 
No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(IGC_RXCSUM); + rxcsum |= IGC_RXCSUM_PCSD; + + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= IGC_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(IGC_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP | + IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + mrqc |= IGC_MRQC_ENABLE_RSS_MQ; + + wr32(IGC_MRQC, mrqc); +} + +/** + * igc_setup_rctl - configure the receive control registers + * @adapter: Board private structure + */ +static void igc_setup_rctl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 rctl; + + rctl = rd32(IGC_RCTL); + + rctl &= ~(3 << IGC_RCTL_MO_SHIFT); + rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC); + + rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); + + /* enable stripping of CRC. Newer features require + * that the HW strips the CRC. + */ + rctl |= IGC_RCTL_SECRC; + + /* disable store bad packets and clear size bits. */ + rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256); + + /* enable LPE to allow for reception of jumbo frames */ + rctl |= IGC_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ + wr32(IGC_RXDCTL(0), 0); + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in set_rx_mode + */ + rctl |= (IGC_RCTL_SBP | /* Receive bad packets */ + IGC_RCTL_BAM | /* RX All Bcast Pkts */ + IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */ + IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */ + } + + wr32(IGC_RCTL, rctl); +} + +/** + * igc_setup_tctl - configure the transmit control registers + * @adapter: Board private structure + */ +static void igc_setup_tctl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tctl; + + /* disable queue 0 which icould be enabled by default */ + wr32(IGC_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_CT; + tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC | + (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT); + + /* Enable transmits */ + tctl |= IGC_TCTL_EN; + + wr32(IGC_TCTL, tctl); +} + +/** + * igc_set_mac_filter_hw() - Set MAC address filter in hardware + * @adapter: Pointer to adapter where the filter should be set + * @index: Filter index + * @type: MAC address filter type (source or destination) + * @addr: MAC address + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. 
+ */ +static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index, + enum igc_mac_filter_type type, + const u8 *addr, int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 ral, rah; + + if (WARN_ON(index >= hw->mac.rar_entry_count)) + return; + + ral = le32_to_cpup((__le32 *)(addr)); + rah = le16_to_cpup((__le16 *)(addr + 4)); + + if (type == IGC_MAC_FILTER_TYPE_SRC) { + rah &= ~IGC_RAH_ASEL_MASK; + rah |= IGC_RAH_ASEL_SRC_ADDR; + } + + if (queue >= 0) { + rah &= ~IGC_RAH_QSEL_MASK; + rah |= (queue << IGC_RAH_QSEL_SHIFT); + rah |= IGC_RAH_QSEL_ENABLE; + } + + rah |= IGC_RAH_AV; + + wr32(IGC_RAL(index), ral); + wr32(IGC_RAH(index), rah); + + netdev_dbg(dev, "MAC address filter set in HW: index %d", index); +} + +/** + * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware + * @adapter: Pointer to adapter where the filter should be cleared + * @index: Filter index + */ +static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + + if (WARN_ON(index >= hw->mac.rar_entry_count)) + return; + + wr32(IGC_RAL(index), 0); + wr32(IGC_RAH(index), 0); + + netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index); +} + +/* Set default MAC address for the PF in the first RAR entry */ +static void igc_set_default_mac_filter(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + u8 *addr = adapter->hw.mac.addr; + + netdev_dbg(dev, "Set default MAC address filter: address %pM", addr); + + igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); +} + +/** + * igc_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int igc_set_mac(struct net_device *netdev, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + /* set the correct pool for the new PF MAC address in entry 0 */ + igc_set_default_mac_filter(adapter); + + return 0; +} + +/** + * igc_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int igc_write_mc_addr_list(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + igc_update_mc_addr_list(hw, NULL, 0); + return 0; + } + + mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* The shared function expects a packed array of only addresses. 
*/ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + igc_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime, + bool *first_flag, bool *insert_empty) +{ + struct igc_adapter *adapter = netdev_priv(ring->netdev); + ktime_t cycle_time = adapter->cycle_time; + ktime_t base_time = adapter->base_time; + ktime_t now = ktime_get_clocktai(); + ktime_t baset_est, end_of_cycle; + s32 launchtime; + s64 n; + + n = div64_s64(ktime_sub_ns(now, base_time), cycle_time); + + baset_est = ktime_add_ns(base_time, cycle_time * (n)); + end_of_cycle = ktime_add_ns(baset_est, cycle_time); + + if (ktime_compare(txtime, end_of_cycle) >= 0) { + if (baset_est != ring->last_ff_cycle) { + *first_flag = true; + ring->last_ff_cycle = baset_est; + + if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0) + *insert_empty = true; + } + } + + /* Introducing a window at end of cycle on which packets + * potentially not honor launchtime. Window of 5us chosen + * considering software update the tail pointer and packets + * are dma'ed to packet buffer. + */ + if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC)) + netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", + txtime); + + ring->last_tx_cycle = end_of_cycle; + + launchtime = ktime_sub_ns(txtime, baset_est); + if (launchtime > 0) + div_s64_rem(launchtime, cycle_time, &launchtime); + else + launchtime = 0; + + return cpu_to_le32(launchtime); +} + +static int igc_init_empty_frame(struct igc_ring *ring, + struct igc_tx_buffer *buffer, + struct sk_buff *skb) +{ + unsigned int size; + dma_addr_t dma; + + size = skb_headlen(skb); + + dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); + return -ENOMEM; + } + + buffer->skb = skb; + buffer->protocol = 0; + buffer->bytecount = skb->len; + buffer->gso_segs = 1; + buffer->time_stamp = jiffies; + dma_unmap_len_set(buffer, len, skb->len); + dma_unmap_addr_set(buffer, dma, dma); + + return 0; +} + +static int igc_init_tx_empty_descriptor(struct igc_ring *ring, + struct sk_buff *skb, + struct igc_tx_buffer *first) +{ + union igc_adv_tx_desc *desc; + u32 cmd_type, olinfo_status; + int err; + + if (!igc_desc_unused(ring)) + return -EBUSY; + + err = igc_init_empty_frame(ring, first, skb); + if (err) + return err; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + first->bytecount; + olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + + desc = IGC_TX_DESC(ring, ring->next_to_use); + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); + + netdev_tx_sent_queue(txring_txq(ring), skb->len); + + first->next_to_watch = desc; + + ring->next_to_use++; + if (ring->next_to_use == ring->count) + ring->next_to_use = 0; + + return 0; +} + +#define IGC_EMPTY_FRAME_SIZE 60 + +static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, + __le32 launch_time, bool first_flag, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct igc_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = IGC_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT; + + /* For i225, context index must be unique per ring. */ + if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + mss_l4len_idx |= tx_ring->reg_idx << 4; + + if (first_flag) + mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + context_desc->launch_time = launch_time; +} + +static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && + !tx_ring->launchtime_enable) + return; + goto no_csum; + } + + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + fallthrough; + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (skb_csum_is_sctp(skb)) { + type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; + break; + } + fallthrough; + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= IGC_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: + vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, 0); +} + +static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) +{ + struct net_device *netdev = tx_ring->netdev; + struct igc_adapter *adapter = netdev_priv(netdev); + if (!adapter->ecdev) { + netif_stop_subqueue(netdev, tx_ring->queue_index); + } + + /* memory barriier comment */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (igc_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + if (!adapter->ecdev) { + netif_wake_subqueue(netdev, tx_ring->queue_index); + } + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) +{ + if (igc_desc_unused(tx_ring) >= size) + return 0; + return __igc_maybe_stop_tx(tx_ring, size); +} + +#define IGC_SET_FLAG(_input, _flag, _result) \ + (((_flag) <= (_result)) ? 
\ + ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ + ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) + +static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = IGC_ADVTXD_DTYP_DATA | + IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, + IGC_ADVTXD_DCMD_VLE); + + /* set segmentation bits for TSO */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, + (IGC_ADVTXD_DCMD_TSE)); + + /* set timestamp bit if present */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, + (IGC_ADVTXD_MAC_TSTAMP)); + + /* insert frame checksum */ + cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); + + return cmd_type; +} + +static void igc_tx_olinfo_status(struct igc_ring *tx_ring, + union igc_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; + + /* insert L4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) * + ((IGC_TXD_POPTS_TXSM << 8) / + IGC_TX_FLAGS_CSUM); + + /* insert IPv4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) * + (((IGC_TXD_POPTS_IXSM << 8)) / + IGC_TX_FLAGS_IPV4); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int igc_tx_map(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 tx_flags = first->tx_flags; + skb_frag_t *frag; + u16 i = tx_ring->next_to_use; + unsigned int data_len, size; + dma_addr_t dma; + u32 cmd_type; + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + + cmd_type = igc_tx_cmd_type(skb, tx_flags); + tx_desc = IGC_TX_DESC(tx_ring, i); + + igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > IGC_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IGC_MAX_DATA_PER_TXD; + size -= IGC_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, + size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IGC_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * 
are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. */ + igc_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); + } + + return 0; +dma_error: + netdev_err(tx_ring->netdev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; + + /* clear dma mappings for failed tx_buffer_info map */ + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + + if (i-- == 0) + i += tx_ring->count; + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + + if (!adapter->ecdev) { + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + } + + tx_ring->next_to_use = i; + + return -1; +} + +static int igc_tso(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag, + u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); + type_tucmd |= IGC_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM | + IGC_TX_FLAGS_IPV4; + } else { + ip.v6->payload_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) { + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + } else { + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* MSS L4LEN IDX */ + mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; + + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = l4.hdr - 
ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, mss_l4len_idx); + + return 1; +} + +static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, + struct igc_ring *tx_ring) +{ + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + bool first_flag = false, insert_empty = false; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + struct igc_tx_buffer *first; + __le32 launch_time = 0; + u32 tx_flags = 0; + unsigned short f; + ktime_t txtime; + u8 hdr_len = 0; + int tso = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); + + if (igc_maybe_stop_tx(tx_ring, count + 5)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + if (!tx_ring->launchtime_enable) + goto done; + + txtime = skb->tstamp; + skb->tstamp = ktime_set(0, 0); + launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty); + + if (insert_empty) { + struct igc_tx_buffer *empty_info; + struct sk_buff *empty; + void *data; + + empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); + if (!empty) + goto done; + + data = skb_put(empty, IGC_EMPTY_FRAME_SIZE); + memset(data, 0, IGC_EMPTY_FRAME_SIZE); + + igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); + + if (igc_init_tx_empty_descriptor(tx_ring, + empty, + empty_info) < 0) + dev_kfree_skb_any(empty); + } + +done: + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->type = IGC_TX_BUFFER_TYPE_SKB; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + if (adapter->qbv_transition || tx_ring->oper_gate_closed) + goto out_drop; + + if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) { + adapter->stats.txdrop++; + goto out_drop; + } + + if (unlikely(!adapter->ecdev && + test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && + (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) { + /* FIXME: add support for retrieving timestamps from + * the other timer registers before skipping the + * timestamping request. 
+ */ + unsigned long flags; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + if (!adapter->ptp_tx_skb) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGC_TX_FLAGS_TSTAMP; + + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + } else { + adapter->tx_hwtstamp_skipped++; + } + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= IGC_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT); + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + igc_tx_csum(tx_ring, first, launch_time, first_flag); + + igc_tx_map(tx_ring, first, hdr_len); + + return NETDEV_TX_OK; + +out_drop: + if (!adapter->ecdev) { + dev_kfree_skb_any(first->skb); + first->skb = NULL; + } + + return NETDEV_TX_OK; +} + +static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, + struct sk_buff *skb) +{ + unsigned int r_idx = skb->queue_mapping; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static netdev_tx_t igc_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb->len < 17) { + if (skb_padto(skb, 17)) + return NETDEV_TX_OK; + skb->len = 17; + } + + return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); +} + +static void igc_rx_checksum(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set */ + if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM)) + return; + + /* Rx checksum disabled via ethtool */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (igc_test_staterr(rx_desc, + IGC_RXDEXT_STATERR_L4E | + IGC_RXDEXT_STATERR_IPE)) { + /* work around errata with sctp packets where the TCPE aka + * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) + * packets (aka let the stack check the crc32c) + */ + if (!(skb->len == 60 && + test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.csum_err++; + u64_stats_update_end(&ring->rx_syncp); + } + /* let the stack verify checksum errors */ + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS | + IGC_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + netdev_dbg(ring->netdev, "cksum success: bits %08X\n", + le32_to_cpu(rx_desc->wb.upper.status_error)); +} + +/* Mapping HW RSS Type to enum pkt_hash_types */ +static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = { + [IGC_RSS_TYPE_NO_HASH] = PKT_HASH_TYPE_L2, + [IGC_RSS_TYPE_HASH_TCP_IPV4] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_IPV4] = PKT_HASH_TYPE_L3, + [IGC_RSS_TYPE_HASH_TCP_IPV6] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_IPV6_EX] = PKT_HASH_TYPE_L3, + [IGC_RSS_TYPE_HASH_IPV6] = PKT_HASH_TYPE_L3, + [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_UDP_IPV4] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_UDP_IPV6] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4, + [10] = 
PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */ + [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */ + [12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisons */ + [13] = PKT_HASH_TYPE_NONE, + [14] = PKT_HASH_TYPE_NONE, + [15] = PKT_HASH_TYPE_NONE, +}; + +static inline void igc_rx_hash(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (ring->netdev->features & NETIF_F_RXHASH) { + u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + u32 rss_type = igc_rss_type(rx_desc); + + skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]); + } +} + +static void igc_rx_vlan(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + u16 vid; + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) { + if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) && + test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) + vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); + else + vid = le16_to_cpu(rx_desc->wb.upper.vlan); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } +} + +/** + * igc_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in order + * to populate the hash, checksum, VLAN, protocol, and other fields within the + * skb. + */ +static void igc_process_skb_fields(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + igc_rx_hash(rx_ring, rx_desc, skb); + + igc_rx_checksum(rx_ring, rx_desc, skb); + + igc_rx_vlan(rx_ring, rx_desc, skb); + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features) +{ + bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + if (enable) { + /* enable VLAN tag insert/strip */ + ctrl |= IGC_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~IGC_CTRL_VME; + } + wr32(IGC_CTRL, ctrl); +} + +static void igc_restore_vlan(struct igc_adapter *adapter) +{ + igc_vlan_mode(adapter->netdev, adapter->netdev->features); +} + +static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, + const unsigned int size, + int *rx_buffer_pgcnt) +{ + struct igc_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + *rx_buffer_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buffer->page); +#else + 0; +#endif + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer, + unsigned int truesize) +{ +#if (PAGE_SIZE < 8192) + buffer->page_offset ^= truesize; +#else + buffer->page_offset += truesize; +#endif +} + +static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(ring) / 2; +#else + truesize = 
ring_uses_build_skb(ring) ? + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + return truesize; +} + +/** + * igc_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: size of buffer to be added + * + * This function will add the data contained in rx_buffer->page to the skb. + */ +static void igc_add_rx_frag(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(rx_ring) / 2; +#else + truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + + igc_rx_buffer_flip(rx_buffer, truesize); +} + +static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct xdp_buff *xdp) +{ + unsigned int size = xdp->data_end - xdp->data; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data_meta); + + /* build an skb around the page buffer */ + skb = napi_build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, size); + if (metasize) + skb_metadata_set(skb, metasize); + + igc_rx_buffer_flip(rx_buffer, truesize); + return skb; +} + +static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int size = xdp->data_end - xdp->data; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + void *va = xdp->data; + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data_meta); + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + IGC_RX_HDR_LEN + metasize); + if (unlikely(!skb)) + return NULL; + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IGC_RX_HDR_LEN) + headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, + ALIGN(headlen + metasize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (va + headlen) - page_address(rx_buffer->page), + size, truesize); + igc_rx_buffer_flip(rx_buffer, truesize); + } else { + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +/** + * igc_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + */ +static void igc_reuse_rx_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *old_buff) +{ + 
u16 nta = rx_ring->next_to_alloc; + struct igc_rx_buffer *new_buff; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls. + */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote and pfmemalloc pages */ + if (!dev_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) + return false; +#else +#define IGC_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) + + if (rx_buffer->page_offset > IGC_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * igc_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + */ +static bool igc_is_non_eop(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGC_RX_DESC(rx_ring, ntc)); + + if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))) + return false; + + return true; +} + +/** + * igc_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
+ */ +static bool igc_cleanup_headers(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + + if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { + struct net_device *netdev = rx_ring->netdev; + + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +static void igc_put_rx_buffer(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { + /* hand second half of page back to the ring */ + igc_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + +static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) +{ + struct igc_adapter *adapter = rx_ring->q_vector->adapter; + + if (ring_uses_build_skb(rx_ring)) + return IGC_SKB_PAD; + if (igc_xdp_is_enabled(adapter)) + return XDP_PACKET_HEADROOM; + + return 0; +} + +static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = igc_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +/** + * igc_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: rx descriptor ring + * @cleaned_count: number of buffers to clean + */ +static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) +{ + union igc_adv_rx_desc *rx_desc; + u16 i = rx_ring->next_to_use; + struct igc_rx_buffer *bi; + u16 bufsz; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = IGC_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + bufsz = igc_rx_bufsz(rx_ring); + + do { + if (!igc_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IGC_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) +{ + union igc_adv_rx_desc *desc; + u16 i = ring->next_to_use; + struct igc_rx_buffer *bi; + dma_addr_t dma; + bool ok = true; + + if (!count) + return ok; + + XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff); + + desc = IGC_RX_DESC(ring, i); + bi = &ring->rx_buffer_info[i]; + i -= ring->count; + + do { + bi->xdp = xsk_buff_alloc(ring->xsk_pool); + if (!bi->xdp) { + ok = false; + break; + } + + dma = xsk_buff_xdp_get_dma(bi->xdp); + desc->read.pkt_addr = cpu_to_le64(dma); + + desc++; + bi++; + i++; + if (unlikely(!i)) { + desc = IGC_RX_DESC(ring, 0); + bi = ring->rx_buffer_info; + i -= ring->count; + } + + /* Clear the length for the next_to_use descriptor. */ + desc->wb.upper.length = 0; + + count--; + } while (count); + + i += ring->count; + + if (ring->next_to_use != i) { + ring->next_to_use = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, ring->tail); + } + + return ok; +} + +/* This function requires __netif_tx_lock is held by the caller. */ +static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, + struct xdp_frame *xdpf) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); + u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? 
sinfo->nr_frags : 0; + u16 count, index = ring->next_to_use; + struct igc_tx_buffer *head = &ring->tx_buffer_info[index]; + struct igc_tx_buffer *buffer = head; + union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index); + u32 olinfo_status, len = xdpf->len, cmd_type; + void *data = xdpf->data; + u16 i; + + count = TXD_USE_COUNT(len); + for (i = 0; i < nr_frags; i++) + count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); + + if (igc_maybe_stop_tx(ring, count + 3)) { + /* this is a hard error */ + return -EBUSY; + } + + i = 0; + head->bytecount = xdp_get_frame_len(xdpf); + head->type = IGC_TX_BUFFER_TYPE_XDP; + head->gso_segs = 1; + head->xdpf = xdpf; + + olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + + for (;;) { + dma_addr_t dma; + + dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, + "Failed to map DMA for TX\n"); + goto unmap; + } + + dma_unmap_len_set(buffer, len, len); + dma_unmap_addr_set(buffer, dma, dma); + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | len; + + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.buffer_addr = cpu_to_le64(dma); + + buffer->protocol = 0; + + if (++index == ring->count) + index = 0; + + if (i == nr_frags) + break; + + buffer = &ring->tx_buffer_info[index]; + desc = IGC_TX_DESC(ring, index); + desc->read.olinfo_status = 0; + + data = skb_frag_address(&sinfo->frags[i]); + len = skb_frag_size(&sinfo->frags[i]); + i++; + } + desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD); + + netdev_tx_sent_queue(txring_txq(ring), head->bytecount); + /* set the timestamp */ + head->time_stamp = jiffies; + /* set next_to_watch value indicating a packet is present */ + head->next_to_watch = desc; + ring->next_to_use = index; + + return 0; + +unmap: + for (;;) { + buffer = &ring->tx_buffer_info[index]; + if (dma_unmap_len(buffer, len)) + dma_unmap_page(ring->dev, + dma_unmap_addr(buffer, dma), + dma_unmap_len(buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(buffer, len, 0); + if (buffer == head) + break; + + if (!index) + index += ring->count; + index--; + } + + return -ENOMEM; +} + +static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, + int cpu) +{ + int index = cpu; + + if (unlikely(index < 0)) + index = 0; + + while (index >= adapter->num_tx_queues) + index -= adapter->num_tx_queues; + + return adapter->tx_ring[index]; +} + +static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int res; + + if (unlikely(!xdpf)) + return -EFAULT; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + res = igc_xdp_init_tx_descriptor(ring, xdpf); + __netif_tx_unlock(nq); + return res; +} + +/* This function assumes rcu_read_lock() is held by the caller. 
*/ +static int __igc_xdp_run_prog(struct igc_adapter *adapter, + struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + u32 act = bpf_prog_run_xdp(prog, xdp); + + switch (act) { + case XDP_PASS: + return IGC_XDP_PASS; + case XDP_TX: + if (igc_xdp_xmit_back(adapter, xdp) < 0) + goto out_failure; + return IGC_XDP_TX; + case XDP_REDIRECT: + if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) + goto out_failure; + return IGC_XDP_REDIRECT; + break; + default: + bpf_warn_invalid_xdp_action(adapter->netdev, prog, act); + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(adapter->netdev, prog, act); + fallthrough; + case XDP_DROP: + return IGC_XDP_CONSUMED; + } +} + +static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, + struct xdp_buff *xdp) +{ + struct bpf_prog *prog; + int res; + + prog = READ_ONCE(adapter->xdp_prog); + if (!prog) { + res = IGC_XDP_PASS; + goto out; + } + + res = __igc_xdp_run_prog(adapter, prog, xdp); + +out: + return ERR_PTR(-res); +} + +/* This function assumes __netif_tx_lock is held by the caller. */ +static void igc_flush_tx_descriptors(struct igc_ring *ring) +{ + /* Once tail pointer is updated, hardware can fetch the descriptors + * any time so we issue a write membar here to ensure all memory + * writes are complete before the tail pointer is updated. + */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +static void igc_finalize_xdp(struct igc_adapter *adapter, int status) +{ + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + + if (status & IGC_XDP_TX) { + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + igc_flush_tx_descriptors(ring); + __netif_tx_unlock(nq); + } + + if (status & IGC_XDP_REDIRECT) + xdp_do_flush(); +} + +static void igc_update_rx_stats(struct igc_q_vector *q_vector, + unsigned int packets, unsigned int bytes) +{ + struct igc_ring *ring = q_vector->rx.ring; + + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.packets += packets; + ring->rx_stats.bytes += bytes; + u64_stats_update_end(&ring->rx_syncp); + + q_vector->rx.total_packets += packets; + q_vector->rx.total_bytes += bytes; +} + +static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) +{ + unsigned int total_bytes = 0, total_packets = 0; + struct igc_adapter *adapter = q_vector->adapter; + struct igc_ring *rx_ring = q_vector->rx.ring; + struct sk_buff *skb = rx_ring->skb; + u16 cleaned_count = igc_desc_unused(rx_ring); + int xdp_status = 0, rx_buffer_pgcnt; + + while (likely(total_packets < budget)) { + union igc_adv_rx_desc *rx_desc; + struct igc_rx_buffer *rx_buffer; + unsigned int size, truesize; + struct igc_xdp_buff ctx; + ktime_t timestamp = 0; + int pkt_offset = 0; + void *pktbuf; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGC_RX_BUFFER_WRITE) { + igc_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); + truesize = igc_get_rx_frame_truesize(rx_ring, size); + + pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; + + if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = 
igc_ptp_rx_pktstamp(q_vector->adapter, + pktbuf); + pkt_offset = IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + if (!skb) { + xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq); + xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring), + igc_rx_offset(rx_ring) + pkt_offset, + size, true); + xdp_buff_clear_frags_flag(&ctx.xdp); + ctx.rx_desc = rx_desc; + + skb = igc_xdp_run_prog(adapter, &ctx.xdp); + } + + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + switch (xdp_res) { + case IGC_XDP_CONSUMED: + rx_buffer->pagecnt_bias++; + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + igc_rx_buffer_flip(rx_buffer, truesize); + xdp_status |= xdp_res; + break; + } + + total_packets++; + total_bytes += size; + } + else if (adapter->ecdev) { + unsigned char *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); + ecdev_receive(adapter->ecdev, va, size); + adapter->ec_watchdog_jiffies = jiffies; + igc_reuse_rx_page(rx_ring, rx_buffer); + } + else { + if (skb) + igc_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp); + else + skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp, + timestamp); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); + } + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (igc_is_non_eop(rx_ring, rx_desc)) + continue; + + + if (adapter->ecdev) { + total_packets++; + continue; + } + + /* verify the packet layout is correct */ + if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, VLAN, and protocol */ + igc_process_skb_fields(rx_ring, rx_desc, skb); + + napi_gro_receive(&q_vector->napi, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } + + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + igc_update_rx_stats(q_vector, total_packets, total_bytes); + + if (cleaned_count) + igc_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_packets; +} + +static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, + struct xdp_buff *xdp) +{ + unsigned int totalsize = xdp->data_end - xdp->data_meta; + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + net_prefetch(xdp->data_meta); + + skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + memcpy(__skb_put(skb, totalsize), xdp->data_meta, + ALIGN(totalsize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + + return skb; +} + +static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, + union igc_adv_rx_desc *desc, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + struct igc_ring *ring = q_vector->rx.ring; + struct sk_buff *skb; + + skb = igc_construct_skb_zc(ring, xdp); + if (!skb) { + ring->rx_stats.alloc_failed++; + return; + } + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + if (igc_cleanup_headers(ring, desc, skb)) + return; + + igc_process_skb_fields(ring, desc, skb); + napi_gro_receive(&q_vector->napi, skb); +} + +static struct 
igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp) +{ + /* xdp_buff pointer used by ZC code path is alloc as xdp_buff_xsk. The + * igc_xdp_buff shares its layout with xdp_buff_xsk and private + * igc_xdp_buff fields fall into xdp_buff_xsk->cb + */ + return (struct igc_xdp_buff *)xdp; +} + +static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_ring *ring = q_vector->rx.ring; + u16 cleaned_count = igc_desc_unused(ring); + int total_bytes = 0, total_packets = 0; + u16 ntc = ring->next_to_clean; + struct bpf_prog *prog; + bool failure = false; + int xdp_status = 0; + + rcu_read_lock(); + + prog = READ_ONCE(adapter->xdp_prog); + + while (likely(total_packets < budget)) { + union igc_adv_rx_desc *desc; + struct igc_rx_buffer *bi; + struct igc_xdp_buff *ctx; + ktime_t timestamp = 0; + unsigned int size; + int res; + + desc = IGC_RX_DESC(ring, ntc); + size = le16_to_cpu(desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + bi = &ring->rx_buffer_info[ntc]; + + ctx = xsk_buff_to_igc_ctx(bi->xdp); + ctx->rx_desc = desc; + + if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, + bi->xdp->data); + + bi->xdp->data += IGC_TS_HDR_LEN; + + /* HW timestamp has been copied into local variable. Metadata + * length when XDP program is called should be 0. + */ + bi->xdp->data_meta += IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + bi->xdp->data_end = bi->xdp->data + size; + xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); + + res = __igc_xdp_run_prog(adapter, prog, bi->xdp); + switch (res) { + case IGC_XDP_PASS: + igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); + fallthrough; + case IGC_XDP_CONSUMED: + xsk_buff_free(bi->xdp); + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + xdp_status |= res; + break; + } + + bi->xdp = NULL; + total_bytes += size; + total_packets++; + cleaned_count++; + ntc++; + if (ntc == ring->count) + ntc = 0; + } + + ring->next_to_clean = ntc; + rcu_read_unlock(); + + if (cleaned_count >= IGC_RX_BUFFER_WRITE) + failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count); + + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + + igc_update_rx_stats(q_vector, total_packets, total_bytes); + + if (xsk_uses_need_wakeup(ring->xsk_pool)) { + if (failure || ring->next_to_clean == ring->next_to_use) + xsk_set_rx_need_wakeup(ring->xsk_pool); + else + xsk_clear_rx_need_wakeup(ring->xsk_pool); + return total_packets; + } + + return failure ? 
budget : total_packets; +} + +static void igc_update_tx_stats(struct igc_q_vector *q_vector, + unsigned int packets, unsigned int bytes) +{ + struct igc_ring *ring = q_vector->tx.ring; + + u64_stats_update_begin(&ring->tx_syncp); + ring->tx_stats.bytes += bytes; + ring->tx_stats.packets += packets; + u64_stats_update_end(&ring->tx_syncp); + + q_vector->tx.total_bytes += bytes; + q_vector->tx.total_packets += packets; +} + +static void igc_xdp_xmit_zc(struct igc_ring *ring) +{ + struct xsk_buff_pool *pool = ring->xsk_pool; + struct netdev_queue *nq = txring_txq(ring); + union igc_adv_tx_desc *tx_desc = NULL; + int cpu = smp_processor_id(); + u16 ntu = ring->next_to_use; + struct xdp_desc xdp_desc; + u16 budget; + + if (!netif_carrier_ok(ring->netdev)) + return; + + __netif_tx_lock(nq, cpu); + + budget = igc_desc_unused(ring); + + while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { + u32 cmd_type, olinfo_status; + struct igc_tx_buffer *bi; + dma_addr_t dma; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + xdp_desc.len; + olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; + + dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); + + tx_desc = IGC_TX_DESC(ring, ntu); + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + bi = &ring->tx_buffer_info[ntu]; + bi->type = IGC_TX_BUFFER_TYPE_XSK; + bi->protocol = 0; + bi->bytecount = xdp_desc.len; + bi->gso_segs = 1; + bi->time_stamp = jiffies; + bi->next_to_watch = tx_desc; + + netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len); + + ntu++; + if (ntu == ring->count) + ntu = 0; + } + + ring->next_to_use = ntu; + if (tx_desc) { + igc_flush_tx_descriptors(ring); + xsk_tx_release(pool); + } + + __netif_tx_unlock(nq); +} + +/** + * igc_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * @napi_budget: Used to determine if we are in netpoll + * + * returns true if ring is completely cleaned + */ +static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + struct igc_ring *tx_ring = q_vector->tx.ring; + unsigned int i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 xsk_frames = 0; + + if (test_bit(__IGC_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IGC_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + switch (tx_buffer->type) { + case IGC_TX_BUFFER_TYPE_XSK: + xsk_frames++; + break; + case IGC_TX_BUFFER_TYPE_XDP: + xdp_return_frame(tx_buffer->xdpf); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + case 
IGC_TX_BUFFER_TYPE_SKB: + if (!adapter->ecdev) { + napi_consume_skb(tx_buffer->skb, napi_budget); + } + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + default: + netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); + break; + } + + /* clear last DMA location and unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + if (!adapter->ecdev) { + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + } + + i += tx_ring->count; + tx_ring->next_to_clean = i; + + igc_update_tx_stats(q_vector, total_packets, total_bytes); + + if (tx_ring->xsk_pool) { + if (xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) + xsk_set_tx_need_wakeup(tx_ring->xsk_pool); + igc_xdp_xmit_zc(tx_ring); + } + + if (!adapter->ecdev && + test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { + struct igc_hw *hw = &adapter->hw; + + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) && + (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) && + !tx_ring->oper_gate_closed) { + /* detected Tx unit hang */ + netdev_err(tx_ring->netdev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%p>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + rd32(IGC_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_buffer->time_stamp, + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + /* we are about to reset, no point in enabling stuff */ + return true; + } + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(!adapter->ecdev && total_packets && + netif_carrier_ok(tx_ring->netdev) && + igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !(test_bit(__IGC_DOWN, &adapter->state))) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.restart_queue++; + u64_stats_update_end(&tx_ring->tx_syncp); + } + } + + return !!budget; +} + +static int igc_find_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) +{ + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 ral, rah; + int i; + + for (i = 0; i < max_entries; i++) { + ral = rd32(IGC_RAL(i)); + rah = rd32(IGC_RAH(i)); + + if (!(rah & IGC_RAH_AV)) + continue; + if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type) + continue; + if ((rah & IGC_RAH_RAH_MASK) != + le16_to_cpup((__le16 *)(addr + 4))) + continue; + if (ral != le32_to_cpup((__le32 *)(addr))) + continue; + + return i; + } + + return -1; +} + +static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 rah; + int i; + + for (i = 0; i < max_entries; i++) { + rah = rd32(IGC_RAH(i)); + + if (!(rah & IGC_RAH_AV)) + return i; + } + + return -1; +} + +/** + * igc_add_mac_filter() - Add MAC address filter + * @adapter: Pointer to adapter where the filter should be added + * @type: MAC address filter type (source or destination) + * @addr: MAC address + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_add_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr, + int queue) +{ + struct net_device *dev = adapter->netdev; + int index; + + index = igc_find_mac_filter(adapter, type, addr); + if (index >= 0) + goto update_filter; + + index = igc_get_avail_mac_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", + index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", + addr, queue); + +update_filter: + igc_set_mac_filter_hw(adapter, index, type, addr, queue); + return 0; +} + +/** + * igc_del_mac_filter() - Delete MAC address filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @type: MAC address filter type (source or destination) + * @addr: MAC address + */ +static void igc_del_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) +{ + struct net_device *dev = adapter->netdev; + int index; + + index = igc_find_mac_filter(adapter, type, addr); + if (index < 0) + return; + + if (index == 0) { + /* If this is the default filter, we don't actually delete it. + * We just reset to its default value i.e. disable queue + * assignment. + */ + netdev_dbg(dev, "Disable default MAC filter queue assignment"); + + igc_set_mac_filter_hw(adapter, 0, type, addr, -1); + } else { + netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", + index, + type == IGC_MAC_FILTER_TYPE_DST ? 
"dst" : "src", + addr); + + igc_clear_mac_filter_hw(adapter, index); + } +} + +/** + * igc_add_vlan_prio_filter() - Add VLAN priority filter + * @adapter: Pointer to adapter where the filter should be added + * @prio: VLAN priority value + * @queue: Queue number which matching frames are assigned to + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, + int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + if (vlanpqf & IGC_VLANPQF_VALID(prio)) { + netdev_dbg(dev, "VLAN priority filter already in use\n"); + return -EEXIST; + } + + vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); + vlanpqf |= IGC_VLANPQF_VALID(prio); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", + prio, queue); + return 0; +} + +/** + * igc_del_vlan_prio_filter() - Delete VLAN priority filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @prio: VLAN priority value + */ +static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) +{ + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + vlanpqf &= ~IGC_VLANPQF_VALID(prio); + vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", + prio); +} + +static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if (!(etqf & IGC_ETQF_FILTER_ENABLE)) + return i; + } + + return -1; +} + +/** + * igc_add_etype_filter() - Add ethertype filter + * @adapter: Pointer to adapter where the filter should be added + * @etype: Ethertype value + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, + int queue) +{ + struct igc_hw *hw = &adapter->hw; + int index; + u32 etqf; + + index = igc_get_avail_etype_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + etqf = rd32(IGC_ETQF(index)); + + etqf &= ~IGC_ETQF_ETYPE_MASK; + etqf |= etype; + + if (queue >= 0) { + etqf &= ~IGC_ETQF_QUEUE_MASK; + etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); + etqf |= IGC_ETQF_QUEUE_ENABLE; + } + + etqf |= IGC_ETQF_FILTER_ENABLE; + + wr32(IGC_ETQF(index), etqf); + + netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", + etype, queue); + return 0; +} + +static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) + return i; + } + + return -1; +} + +/** + * igc_del_etype_filter() - Delete ethertype filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @etype: Ethertype value + */ +static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int index; + + index = igc_find_etype_filter(adapter, etype); + if (index < 0) + return; + + wr32(IGC_ETQF(index), 0); + + netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", + etype); +} + +static int igc_flex_filter_select(struct igc_adapter *adapter, + struct igc_flex_filter *input, + u32 *fhft) +{ + struct igc_hw *hw = &adapter->hw; + u8 fhft_index; + u32 fhftsl; + + if (input->index >= MAX_FLEX_FILTER) { + dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); + return -EINVAL; + } + + /* Indirect table select register */ + fhftsl = rd32(IGC_FHFTSL); + fhftsl &= ~IGC_FHFTSL_FTSL_MASK; + switch (input->index) { + case 0 ... 7: + fhftsl |= 0x00; + break; + case 8 ... 15: + fhftsl |= 0x01; + break; + case 16 ... 23: + fhftsl |= 0x02; + break; + case 24 ... 31: + fhftsl |= 0x03; + break; + } + wr32(IGC_FHFTSL, fhftsl); + + /* Normalize index down to host table register */ + fhft_index = input->index % 8; + + *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : + IGC_FHFT_EXT(fhft_index - 4); + + return 0; +} + +static int igc_write_flex_filter_ll(struct igc_adapter *adapter, + struct igc_flex_filter *input) +{ + struct device *dev = &adapter->pdev->dev; + struct igc_hw *hw = &adapter->hw; + u8 *data = input->data; + u8 *mask = input->mask; + u32 queuing; + u32 fhft; + u32 wufc; + int ret; + int i; + + /* Length has to be aligned to 8. Otherwise the filter will fail. Bail + * out early to avoid surprises later. + */ + if (input->length % 8 != 0) { + dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n"); + return -EINVAL; + } + + /* Select corresponding flex filter register and get base for host table. */ + ret = igc_flex_filter_select(adapter, input, &fhft); + if (ret) + return ret; + + /* When adding a filter globally disable flex filter feature. That is + * recommended within the datasheet. 
+ */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); + + /* Configure filter */ + queuing = input->length & IGC_FHFT_LENGTH_MASK; + queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK; + queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK; + + if (input->immediate_irq) + queuing |= IGC_FHFT_IMM_INT; + + if (input->drop) + queuing |= IGC_FHFT_DROP; + + wr32(fhft + 0xFC, queuing); + + /* Write data (128 byte) and mask (128 bit) */ + for (i = 0; i < 16; ++i) { + const size_t data_idx = i * 8; + const size_t row_idx = i * 16; + u32 dw0 = + (data[data_idx + 0] << 0) | + (data[data_idx + 1] << 8) | + (data[data_idx + 2] << 16) | + (data[data_idx + 3] << 24); + u32 dw1 = + (data[data_idx + 4] << 0) | + (data[data_idx + 5] << 8) | + (data[data_idx + 6] << 16) | + (data[data_idx + 7] << 24); + u32 tmp; + + /* Write row: dw0, dw1 and mask */ + wr32(fhft + row_idx, dw0); + wr32(fhft + row_idx + 4, dw1); + + /* mask is only valid for MASK(7, 0) */ + tmp = rd32(fhft + row_idx + 8); + tmp &= ~GENMASK(7, 0); + tmp |= mask[i]; + wr32(fhft + row_idx + 8, tmp); + } + + /* Enable filter. */ + wufc |= IGC_WUFC_FLEX_HQ; + if (input->index > 8) { + /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */ + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); + + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc |= (IGC_WUFC_FLX0 << input->index); + } + wr32(IGC_WUFC, wufc); + + dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", + input->index); + + return 0; +} + +static void igc_flex_filter_add_field(struct igc_flex_filter *flex, + const void *src, unsigned int offset, + size_t len, const void *mask) +{ + int i; + + /* data */ + memcpy(&flex->data[offset], src, len); + + /* mask */ + for (i = 0; i < len; ++i) { + const unsigned int idx = i + offset; + const u8 *ptr = mask; + + if (mask) { + if (ptr[i] & 0xff) + flex->mask[idx / 8] |= BIT(idx % 8); + + continue; + } + + flex->mask[idx / 8] |= BIT(idx % 8); + } +} + +static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + int i; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + for (i = 0; i < MAX_FLEX_FILTER; i++) { + if (i < 8) { + if (!(wufc & (IGC_WUFC_FLX0 << i))) + return i; + } else { + if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) + return i; + } + } + + return -ENOSPC; +} + +static bool igc_flex_filter_in_use(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + if (wufc & IGC_WUFC_FILTER_MASK) + return true; + + if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) + return true; + + return false; +} + +static int igc_add_flex_filter(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct igc_flex_filter flex = { }; + struct igc_nfc_filter *filter = &rule->filter; + unsigned int eth_offset, user_offset; + int ret, index; + bool vlan; + + index = igc_find_avail_flex_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + /* Construct the flex filter: + * -> dest_mac [6] + * -> src_mac [6] + * -> tpid [2] + * -> vlan tci [2] + * -> ether type [2] + * -> user data [8] + * -> = 26 bytes => 32 length + */ + flex.index = index; + flex.length = 32; + flex.rx_queue = rule->action; + + vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; + eth_offset = vlan ? 16 : 12; + user_offset = vlan ? 
18 : 14; + + /* Add destination MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, + ETH_ALEN, NULL); + + /* Add source MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->src_addr, 6, + ETH_ALEN, NULL); + + /* Add VLAN etype */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) + igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, + sizeof(filter->vlan_etype), + NULL); + + /* Add VLAN TCI */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, + sizeof(filter->vlan_tci), NULL); + + /* Add Ether type */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + __be16 etype = cpu_to_be16(filter->etype); + + igc_flex_filter_add_field(&flex, &etype, eth_offset, + sizeof(etype), NULL); + } + + /* Add user data */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) + igc_flex_filter_add_field(&flex, &filter->user_data, + user_offset, + sizeof(filter->user_data), + filter->user_mask); + + /* Add it down to the hardware and enable it. */ + ret = igc_write_flex_filter_ll(adapter, &flex); + if (ret) + return ret; + + filter->flex_index = index; + + return 0; +} + +static void igc_del_flex_filter(struct igc_adapter *adapter, + u16 reg_index) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc; + + /* Just disable the filter. The filter table itself is kept + * intact. Another flex_filter_add() should override the "old" data + * then. + */ + if (reg_index > 8) { + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc = rd32(IGC_WUFC); + + wufc &= ~(IGC_WUFC_FLX0 << reg_index); + wr32(IGC_WUFC, wufc); + } + + if (igc_flex_filter_in_use(adapter)) + return; + + /* No filters are in use, we may disable flex filters */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); +} + +static int igc_enable_nfc_rule(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + int err; + + if (rule->flex) { + return igc_add_flex_filter(adapter, rule); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + err = igc_add_etype_filter(adapter, rule->filter.etype, + rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + err = igc_add_vlan_prio_filter(adapter, prio, rule->action); + if (err) + return err; + } + + return 0; +} + +static void igc_disable_nfc_rule(struct igc_adapter *adapter, + const struct igc_nfc_rule *rule) +{ + if (rule->flex) { + igc_del_flex_filter(adapter, rule->filter.flex_index); + return; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) + igc_del_etype_filter(adapter, rule->filter.etype); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + igc_del_vlan_prio_filter(adapter, prio); + } + + if (rule->filter.match_flags & 
IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr); +} + +/** + * igc_get_nfc_rule() - Get NFC rule + * @adapter: Pointer to adapter + * @location: Rule location + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: Pointer to NFC rule at @location. If not found, NULL. + */ +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location) +{ + struct igc_nfc_rule *rule; + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (rule->location == location) + return rule; + if (rule->location > location) + break; + } + + return NULL; +} + +/** + * igc_del_nfc_rule() - Delete NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be deleted + * + * Disable NFC rule in hardware and delete it from adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + */ +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + igc_disable_nfc_rule(adapter, rule); + + list_del(&rule->list); + adapter->nfc_rule_count--; + + kfree(rule); +} + +static void igc_flush_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule, *tmp; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) + igc_del_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +/** + * igc_add_nfc_rule() - Add NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be added + * + * Enable NFC rule in hardware and add it to adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 on success, negative errno on failure. + */ +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + struct igc_nfc_rule *pred, *cur; + int err; + + err = igc_enable_nfc_rule(adapter, rule); + if (err) + return err; + + pred = NULL; + list_for_each_entry(cur, &adapter->nfc_rule_list, list) { + if (cur->location >= rule->location) + break; + pred = cur; + } + + list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); + adapter->nfc_rule_count++; + return 0; +} + +static void igc_restore_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) + igc_enable_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); +} + +static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr); + return 0; +} + +/** + * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. 
+ */ +static void igc_set_rx_mode(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE; + int count; + + /* Check for Promiscuous and All Multicast modes */ + if (netdev->flags & IFF_PROMISC) { + rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE; + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= IGC_RCTL_MPE; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = igc_write_mc_addr_list(netdev); + if (count < 0) + rctl |= IGC_RCTL_MPE; + } + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync)) + rctl |= IGC_RCTL_UPE; + + /* update state of unicast and multicast */ + rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE); + wr32(IGC_RCTL, rctl); + +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) + rlpml = IGC_MAX_FRAME_BUILD_SKB; +#endif + wr32(IGC_RLPML, rlpml); +} + +/** + * igc_configure - configure the hardware for RX and TX + * @adapter: private board structure + */ +static void igc_configure(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i = 0; + + igc_get_hw_control(adapter); + igc_set_rx_mode(netdev); + + igc_restore_vlan(adapter); + + igc_setup_tctl(adapter); + igc_setup_mrqc(adapter); + igc_setup_rctl(adapter); + + igc_set_default_mac_filter(adapter); + igc_restore_nfc_rules(adapter); + + igc_configure_tx(adapter); + igc_configure_rx(adapter); + + igc_rx_fifo_flush_base(&adapter->hw); + + /* call igc_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + + if (ring->xsk_pool) + igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); + else + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); + } +} + +/** + * igc_write_ivar - configure ivar for given MSI-X vector + * @hw: pointer to the HW structure + * @msix_vector: vector number we are allocating to a given ring + * @index: row index of IVAR register to write within IVAR table + * @offset: column offset of in IVAR, should be multiple of 8 + * + * The IVAR table consists of 2 columns, + * each containing an cause allocation for an Rx and Tx ring, and a + * variable number of rows depending on the number of queues supported. 
+ */ +static void igc_write_ivar(struct igc_hw *hw, int msix_vector, + int index, int offset) +{ + u32 ivar = array_rd32(IGC_IVAR0, index); + + /* clear any bits that are currently set */ + ivar &= ~((u32)0xFF << offset); + + /* write vector and valid bit */ + ivar |= (msix_vector | IGC_IVAR_VALID) << offset; + + array_wr32(IGC_IVAR0, index, ivar); +} + +static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + int rx_queue = IGC_N0_QUEUE; + int tx_queue = IGC_N0_QUEUE; + + if (q_vector->rx.ring) + rx_queue = q_vector->rx.ring->reg_idx; + if (q_vector->tx.ring) + tx_queue = q_vector->tx.ring->reg_idx; + + switch (hw->mac.type) { + case igc_i225: + if (rx_queue > IGC_N0_QUEUE) + igc_write_ivar(hw, msix_vector, + rx_queue >> 1, + (rx_queue & 0x1) << 4); + if (tx_queue > IGC_N0_QUEUE) + igc_write_ivar(hw, msix_vector, + tx_queue >> 1, + ((tx_queue & 0x1) << 4) + 8); + q_vector->eims_value = BIT(msix_vector); + break; + default: + WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); + break; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + + /* configure q_vector to set itr on first interrupt */ + q_vector->set_itr = 1; +} + +/** + * igc_configure_msix - Configure MSI-X hardware + * @adapter: Pointer to adapter structure + * + * igc_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + */ +static void igc_configure_msix(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i, vector = 0; + u32 tmp; + + adapter->eims_enable_mask = 0; + + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case igc_i225: + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. 
+ */ + wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | + IGC_GPIE_PBA | IGC_GPIE_EIAME | + IGC_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = BIT(vector); + tmp = (vector++ | IGC_IVAR_VALID) << 8; + + wr32(IGC_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) + igc_assign_vector(adapter->q_vector[i], vector++); + + wrfl(); +} + +/** + * igc_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static void igc_irq_enable(struct igc_adapter *adapter) +{ + if (adapter->ecdev) { + /* skip enabling interrupts */ + return; + } + + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; + u32 regval = rd32(IGC_EIAC); + + wr32(IGC_EIAC, regval | adapter->eims_enable_mask); + regval = rd32(IGC_EIAM); + wr32(IGC_EIAM, regval | adapter->eims_enable_mask); + wr32(IGC_EIMS, adapter->eims_enable_mask); + wr32(IGC_IMS, ims); + } else { + wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + } +} + +/** + * igc_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static void igc_irq_disable(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 regval = rd32(IGC_EIAM); + + wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); + wr32(IGC_EIMC, adapter->eims_enable_mask); + regval = rd32(IGC_EIAC); + wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); + } + + wr32(IGC_IAM, 0); + wr32(IGC_IMC, ~0); + wrfl(); + + if (adapter->ecdev) { + /* skip synchonizing IRQs */ + return; + } + + if (adapter->msix_entries) { + int vector = 0, i; + + synchronize_irq(adapter->msix_entries[vector++].vector); + + for (i = 0; i < adapter->num_q_vectors; i++) + synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues) +{ + /* Determine if we need to pair queues. */ + /* If rss_queues > half of max_rss_queues, pair the queues in + * order to conserve interrupts due to limited supply. + */ + if (adapter->rss_queues > (max_rss_queues / 2)) + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + else + adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; +} + +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) +{ + return IGC_MAX_RX_QUEUES; +} + +static void igc_init_queue_configuration(struct igc_adapter *adapter) +{ + u32 max_rss_queues; + + max_rss_queues = igc_get_max_rss_queues(adapter); + adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + + igc_set_flag_queue_pairs(adapter, max_rss_queues); +} + +/** + * igc_reset_q_vector - Reset config for interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be reset + * + * If NAPI is enabled it will delete any references to the + * NAPI struct. This is preparation for igc_free_q_vector. 
+ */ +static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + /* if we're coming from igc_set_interrupt_capability, the vectors are + * not yet allocated + */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; + + if (!adapter->ecdev) { + netif_napi_del(&q_vector->napi); + } +} + +/** + * igc_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + */ +static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + adapter->q_vector[v_idx] = NULL; + + /* igc_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + if (q_vector) + kfree_rcu(q_vector, rcu); +} + +/** + * igc_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + */ +static void igc_free_q_vectors(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) { + igc_reset_q_vector(adapter, v_idx); + igc_free_q_vector(adapter, v_idx); + } +} + +/** + * igc_update_itr - update the dynamic ITR value based on statistics + * @q_vector: pointer to q_vector + * @ring_container: ring info to update the itr for + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * NOTE: These calculations are only valid when operating in a single- + * queue environment. 
+ */ +static void igc_update_itr(struct igc_q_vector *q_vector, + struct igc_ring_container *ring_container) +{ + unsigned int packets = ring_container->total_packets; + unsigned int bytes = ring_container->total_bytes; + u8 itrval = ring_container->itr; + + /* no packets, exit with status unchanged */ + if (packets == 0) + return; + + switch (itrval) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + itrval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 10) || ((bytes / packets) > 1200)) + itrval = bulk_latency; + else if ((packets > 35)) + itrval = lowest_latency; + } else if (bytes / packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + itrval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + itrval = low_latency; + } else if (bytes < 1500) { + itrval = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itrval; +} + +static void igc_set_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + u32 new_itr = q_vector->itr_val; + u8 current_itr = 0; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + current_itr = 0; + new_itr = IGC_4K_ITR; + goto set_itr_now; + default: + break; + } + + igc_update_itr(q_vector, &q_vector->tx); + igc_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (current_itr == lowest_latency && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + current_itr = low_latency; + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ + break; + case low_latency: + new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ + break; + case bulk_latency: + new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ + break; + default: + break; + } + +set_itr_now: + if (new_itr != q_vector->itr_val) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR + * value at the beginning of the next interrupt so the timing + * ends up being correct. 
+ */ + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; + } +} + +static void igc_reset_interrupt_capability(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & IGC_FLAG_HAS_MSI) { + pci_disable_msi(adapter->pdev); + } + + while (v_idx--) + igc_reset_q_vector(adapter, v_idx); +} + +/** + * igc_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: Pointer to adapter structure + * @msix: boolean value for MSI-X capability + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + */ +static void igc_set_interrupt_capability(struct igc_adapter *adapter, + bool msix) +{ + int numvecs, i; + int err; + + if (!msix) + goto msi_only; + adapter->flags |= IGC_FLAG_HAS_MSIX; + + /* Number of supported queues. */ + adapter->num_rx_queues = adapter->rss_queues; + + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every Rx queue */ + numvecs = adapter->num_rx_queues; + + /* if Tx handler is separate add 1 for every Tx queue */ + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + + adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), + GFP_KERNEL); + + if (!adapter->msix_entries) + return; + + /* populate entry values */ + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) + return; + + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + igc_reset_interrupt_capability(adapter); + +msi_only: + adapter->flags &= ~IGC_FLAG_HAS_MSIX; + + adapter->rss_queues = 1; + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGC_FLAG_HAS_MSI; +} + +/** + * igc_update_ring_itr - update the dynamic ITR value based on packet size + * @q_vector: pointer to q_vector + * + * Stores a new ITR value based on strictly on packet size. This + * algorithm is less sophisticated than that used in igc_update_itr, + * due to the difficulty of synchronizing statistics across multiple + * receive rings. The divisors and thresholds used by this function + * were determined based on theoretical maximum wire speed and testing + * data, in order to minimize response time while increasing bulk + * throughput. + * NOTE: This function is called only when operating in a multiqueue + * receive environment. + */ +static void igc_update_ring_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + int new_val = q_vector->itr_val; + int avg_wire_size = 0; + unsigned int packets; + + /* For non-gigabit speeds, just fix the interrupt rate at 4000 + * ints/sec - ITR timer value of 120 ticks. 
+ */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + new_val = IGC_4K_ITR; + goto set_itr_val; + default: + break; + } + + packets = q_vector->rx.total_packets; + if (packets) + avg_wire_size = q_vector->rx.total_bytes / packets; + + packets = q_vector->tx.total_packets; + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + q_vector->tx.total_bytes / packets); + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); + + /* Give a little boost to mid-size frames */ + if (avg_wire_size > 300 && avg_wire_size < 1200) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (new_val < IGC_20K_ITR && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + new_val = IGC_20K_ITR; + +set_itr_val: + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; + } +clear_counts: + q_vector->rx.total_bytes = 0; + q_vector->rx.total_packets = 0; + q_vector->tx.total_bytes = 0; + q_vector->tx.total_packets = 0; +} + +static void igc_ring_irq_enable(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { + if (adapter->num_q_vectors == 1) + igc_set_itr(q_vector); + else + igc_update_ring_itr(q_vector); + } + + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->msix_entries) + wr32(IGC_EIMS, q_vector->eims_value); + else + igc_irq_enable(adapter); + } +} + +static void igc_add_ring(struct igc_ring *ring, + struct igc_ring_container *head) +{ + head->ring = ring; + head->count++; +} + +/** + * igc_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + */ +static void igc_cache_ring_register(struct igc_adapter *adapter) +{ + int i = 0, j = 0; + + switch (adapter->hw.mac.type) { + case igc_i225: + default: + for (; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (; j < adapter->num_tx_queues; j++) + adapter->tx_ring[j]->reg_idx = j; + break; + } +} + +/** + * igc_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + */ +static int igc_poll(struct napi_struct *napi, int budget) +{ + struct igc_q_vector *q_vector = container_of(napi, + struct igc_q_vector, + napi); + struct igc_ring *rx_ring = q_vector->rx.ring; + bool clean_complete = true; + int work_done = 0; + + if (q_vector->tx.ring) + clean_complete = igc_clean_tx_irq(q_vector, budget); + + if (rx_ring) { + int cleaned = rx_ring->xsk_pool ? 
+ igc_clean_rx_irq_zc(q_vector, budget) : + igc_clean_rx_irq(q_vector, budget); + + work_done += cleaned; + if (cleaned >= budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + igc_ring_irq_enable(q_vector); + + return min(work_done, budget - 1); +} + +/** + * igc_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + */ +static int igc_alloc_q_vector(struct igc_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct igc_q_vector *q_vector; + struct igc_ring *ring; + int ring_count; + + /* igc only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; + if (!q_vector) + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), + GFP_KERNEL); + else + memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); + if (!q_vector) + return -ENOMEM; + + if (!adapter->ecdev) { + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); + } + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ + q_vector->itr_register = adapter->io_addr + IGC_EITR(0); + q_vector->itr_val = IGC_START_ITR; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* initialize ITR */ + if (rxr_count) { + /* rx or rx/tx vector */ + if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) + q_vector->itr_val = adapter->rx_itr_setting; + } else { + /* tx only vector */ + if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) + q_vector->itr_val = adapter->tx_itr_setting; + } + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + igc_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + igc_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; +} 
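+/* When adapter->ecdev is set, the device is claimed by the EtherCAT master:
+ * no NAPI context is registered above, and NAPI enabling as well as
+ * interrupt unmasking are skipped elsewhere in this file. The master is
+ * then expected to poll the rings from its own cyclic context instead of
+ * relying on NAPI and hardware interrupts.
+ */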
+ +/** + * igc_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + */ +static int igc_alloc_q_vectors(struct igc_adapter *adapter) +{ + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int q_vectors = adapter->num_q_vectors; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igc_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * @adapter: Pointer to adapter structure + * @msix: boolean for MSI-X capability + * + * This function initializes the interrupts and allocates all of the queues. + */ +static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) +{ + struct net_device *dev = adapter->netdev; + int err = 0; + + igc_set_interrupt_capability(adapter, msix); + + err = igc_alloc_q_vectors(adapter); + if (err) { + netdev_err(dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + igc_cache_ring_register(adapter); + + return 0; + +err_alloc_q_vectors: + igc_reset_interrupt_capability(adapter); + return err; +} + +/** + * igc_sw_init - Initialize general software structures (struct igc_adapter) + * @adapter: board private structure to initialize + * + * igc_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
+ */ +static int igc_sw_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGC_DEFAULT_TXD; + adapter->rx_ring_count = IGC_DEFAULT_RXD; + + /* set default ITR values */ + adapter->rx_itr_setting = IGC_DEFAULT_ITR; + adapter->tx_itr_setting = IGC_DEFAULT_ITR; + + /* set default work limits */ + adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; + + /* adjust max frame to be at least the size of a standard frame */ + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + mutex_init(&adapter->nfc_rule_lock); + INIT_LIST_HEAD(&adapter->nfc_rule_list); + adapter->nfc_rule_count = 0; + + spin_lock_init(&adapter->stats64_lock); + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ + adapter->flags |= IGC_FLAG_HAS_MSIX; + + igc_init_queue_configuration(adapter); + + /* This call may decrease the number of queues */ + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igc_irq_disable(adapter); + + set_bit(__IGC_DOWN, &adapter->state); + + return 0; +} + +/** + * igc_up - Open the interface and prepare it to handle traffic + * @adapter: board private structure + */ +void igc_up(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i = 0; + + /* hardware has been reset, we need to reload some things */ + igc_configure(adapter); + + clear_bit(__IGC_DOWN, &adapter->state); + if (!adapter->ecdev) { + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + } + if (adapter->msix_entries) + igc_configure_msix(adapter); + else + igc_assign_vector(adapter->q_vector[0], 0); + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + if (!adapter->ecdev) { + netif_tx_start_all_queues(adapter->netdev); + + /* start the watchdog. */ + hw->mac.get_link_status = true; + schedule_work(&adapter->watchdog_task); + } +} + +/** + * igc_update_stats - Update the board statistics counters + * @adapter: board private structure + */ +void igc_update_stats(struct igc_adapter *adapter) +{ + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + u64 _bytes, _packets; + u64 bytes, packets; + unsigned int start; + u32 mpc; + int i; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + packets = 0; + bytes = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(IGC_RQDPC(i)); + + if (hw->mac.type >= igc_i225) + wr32(IGC_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + packets = 0; + bytes = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + do { + start = u64_stats_fetch_begin(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(IGC_CRCERRS); + adapter->stats.gprc += rd32(IGC_GPRC); + adapter->stats.gorc += rd32(IGC_GORCL); + rd32(IGC_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(IGC_BPRC); + adapter->stats.mprc += rd32(IGC_MPRC); + adapter->stats.roc += rd32(IGC_ROC); + + adapter->stats.prc64 += rd32(IGC_PRC64); + adapter->stats.prc127 += rd32(IGC_PRC127); + adapter->stats.prc255 += rd32(IGC_PRC255); + adapter->stats.prc511 += rd32(IGC_PRC511); + adapter->stats.prc1023 += rd32(IGC_PRC1023); + adapter->stats.prc1522 += rd32(IGC_PRC1522); + adapter->stats.tlpic += rd32(IGC_TLPIC); + adapter->stats.rlpic += rd32(IGC_RLPIC); + adapter->stats.hgptc += rd32(IGC_HGPTC); + + mpc = rd32(IGC_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(IGC_SCC); + adapter->stats.ecol += rd32(IGC_ECOL); + adapter->stats.mcc += rd32(IGC_MCC); + adapter->stats.latecol += rd32(IGC_LATECOL); + adapter->stats.dc += rd32(IGC_DC); + adapter->stats.rlec += rd32(IGC_RLEC); + adapter->stats.xonrxc += rd32(IGC_XONRXC); + adapter->stats.xontxc += rd32(IGC_XONTXC); + adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); + adapter->stats.xofftxc += rd32(IGC_XOFFTXC); + adapter->stats.fcruc += rd32(IGC_FCRUC); + adapter->stats.gptc += rd32(IGC_GPTC); + adapter->stats.gotc += rd32(IGC_GOTCL); + rd32(IGC_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(IGC_RNBC); + adapter->stats.ruc += rd32(IGC_RUC); + adapter->stats.rfc += rd32(IGC_RFC); + adapter->stats.rjc += rd32(IGC_RJC); + adapter->stats.tor += rd32(IGC_TORH); + adapter->stats.tot += rd32(IGC_TOTH); + adapter->stats.tpr += rd32(IGC_TPR); + + adapter->stats.ptc64 += rd32(IGC_PTC64); + adapter->stats.ptc127 += rd32(IGC_PTC127); + adapter->stats.ptc255 += rd32(IGC_PTC255); + adapter->stats.ptc511 += rd32(IGC_PTC511); + adapter->stats.ptc1023 += rd32(IGC_PTC1023); + adapter->stats.ptc1522 += rd32(IGC_PTC1522); + + adapter->stats.mptc += rd32(IGC_MPTC); + adapter->stats.bptc += rd32(IGC_BPTC); + + adapter->stats.tpt += rd32(IGC_TPT); + adapter->stats.colc += rd32(IGC_COLC); + adapter->stats.colc += rd32(IGC_RERC); + + adapter->stats.algnerrc += rd32(IGC_ALGNERRC); + + adapter->stats.tsctc += rd32(IGC_TSCTC); + + adapter->stats.iac += rd32(IGC_IAC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = 
adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped */ + net_stats->tx_dropped = adapter->stats.txdrop; + + /* Management Stats */ + adapter->stats.mgptc += rd32(IGC_MGTPTC); + adapter->stats.mgprc += rd32(IGC_MGTPRC); + adapter->stats.mgpdc += rd32(IGC_MGTPDC); +} + +/** + * igc_down - Close the interface + * @adapter: board private structure + */ +void igc_down(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 tctl, rctl; + int i = 0; + + set_bit(__IGC_DOWN, &adapter->state); + + igc_ptp_suspend(adapter); + + if (pci_device_is_present(adapter->pdev)) { + /* disable receives in the hardware */ + rctl = rd32(IGC_RCTL); + wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); + /* flush and sleep below */ + } + /* set trans_start so we don't get spurious watchdogs during reset */ + netif_trans_update(netdev); + if (!adapter->ecdev) { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + + if (pci_device_is_present(adapter->pdev)) { + /* disable transmits in the hardware */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_EN; + wr32(IGC_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 20000); + + igc_irq_disable(adapter); + } + + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + + for (i = 0; i < adapter->num_q_vectors; i++) { + if (!adapter->ecdev && adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } + } + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + /* record the stats before reset*/ + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + igc_reset(adapter); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; + + igc_clean_all_tx_rings(adapter); + igc_clean_all_rx_rings(adapter); +} + +void igc_reinit_locked(struct igc_adapter *adapter) +{ + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + igc_down(adapter); + igc_up(adapter); + clear_bit(__IGC_RESETTING, &adapter->state); +} + +static void igc_reset_task(struct work_struct *work) +{ + struct igc_adapter *adapter; + + adapter = container_of(work, struct igc_adapter, reset_task); + + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGC_DOWN, &adapter->state) || + test_bit(__IGC_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + + igc_rings_dump(adapter); + igc_regs_dump(adapter); + 
netdev_err(adapter->netdev, "Reset adapter\n"); + igc_reinit_locked(adapter); + rtnl_unlock(); +} + +/** + * igc_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int igc_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct igc_adapter *adapter = netdev_priv(netdev); + + if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { + netdev_dbg(netdev, "Jumbo frames not supported with XDP"); + return -EINVAL; + } + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + /* igc_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + + if (netif_running(netdev)) + igc_down(adapter); + + netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igc_up(adapter); + else + igc_reset(adapter); + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +/** + * igc_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: queue number that timed out + **/ +static void igc_tx_timeout(struct net_device *netdev, + unsigned int __always_unused txqueue) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + wr32(IGC_EICS, + (adapter->eims_enable_mask & ~adapter->eims_other)); +} + +/** + * igc_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + * + * Returns the address of the device statistics structure. + * The statistics are updated here and also from the timer callback. + */ +static void igc_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + spin_lock(&adapter->stats64_lock); + if (!test_bit(__IGC_RESETTING, &adapter->state)) + igc_update_stats(adapter); + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); +} + +static netdev_features_t igc_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. 
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int igc_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct igc_adapter *adapter = netdev_priv(netdev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + igc_vlan_mode(netdev, features); + + /* Add VLAN support */ + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + if (!(features & NETIF_F_NTUPLE)) + igc_flush_nfc_rules(adapter); + + netdev->features = features; + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + + return 1; +} + +static netdev_features_t +igc_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPv4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + +static void igc_tsync_interrupt(struct igc_adapter *adapter) +{ + u32 ack, tsauxc, sec, nsec, tsicr; + struct igc_hw *hw = &adapter->hw; + struct ptp_clock_event event; + struct timespec64 ts; + + tsicr = rd32(IGC_TSICR); + ack = 0; + + if (tsicr & IGC_TSICR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_SYS_WRAP; + } + + if (tsicr & IGC_TSICR_TXTS) { + /* retrieve hardware timestamp */ + igc_ptp_tx_tstamp_event(adapter); + ack |= IGC_TSICR_TXTS; + } + + if (tsicr & IGC_TSICR_TT0) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[0].start, + adapter->perout[0].period); + wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT0; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[0].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT0; + } + + if (tsicr & IGC_TSICR_TT1) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[1].start, + adapter->perout[1].period); + wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT1; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[1].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT1; + } + + if (tsicr & IGC_TSICR_AUTT0) { + nsec = rd32(IGC_AUXSTMPL0); + sec = rd32(IGC_AUXSTMPH0); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = sec * NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT0; + } + + if (tsicr & IGC_TSICR_AUTT1) { + nsec = rd32(IGC_AUXSTMPL1); + sec = rd32(IGC_AUXSTMPH1); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = sec * 
NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT1; + } + + /* acknowledge the interrupts */ + wr32(IGC_TSICR, ack); +} + +/** + * igc_msix_other - msix other interrupt handler + * @irq: interrupt number + * @data: pointer to a q_vector + */ +static irqreturn_t igc_msix_other(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_hw *hw = &adapter->hw; + u32 icr = rd32(IGC_ICR); + + /* reading ICR causes bit 31 of EICR to be cleared */ + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & IGC_ICR_LSC) { + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + wr32(IGC_EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static void igc_write_itr(struct igc_q_vector *q_vector) +{ + u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; + + if (!q_vector->set_itr) + return; + + if (!itr_val) + itr_val = IGC_ITR_VAL_MASK; + + itr_val |= IGC_EITR_CNT_IGNR; + + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; +} + +static irqreturn_t igc_msix_ring(int irq, void *data) +{ + struct igc_q_vector *q_vector = data; + + /* Write the ITR value calculated from the previous interrupt. */ + igc_write_itr(q_vector); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_request_msix - Initialize MSI-X interrupts + * @adapter: Pointer to adapter structure + * + * igc_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. + */ +static int igc_request_msix(struct igc_adapter *adapter) +{ + unsigned int num_q_vectors = adapter->num_q_vectors; + int i = 0, err = 0, vector = 0, free_vector = 0; + struct net_device *netdev = adapter->netdev; + if (adapter->ecdev) { + /* avoid requesting MSI-X. 
*/ + return 0; + } + + err = request_irq(adapter->msix_entries[vector].vector, + &igc_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + vector++; + + q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else if (q_vector->tx.ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx.ring->queue_index); + else if (q_vector->rx.ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else + sprintf(q_vector->name, "%s-unused", netdev->name); + + err = request_irq(adapter->msix_entries[vector].vector, + igc_msix_ring, 0, q_vector->name, + q_vector); + if (err) + goto err_free; + } + + igc_configure_msix(adapter); + return 0; + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: + return err; +} + +/** + * igc_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: Pointer to adapter structure + * + * This function resets the device so that it has 0 rx queues, tx queues, and + * MSI-X interrupts allocated. + */ +static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) +{ + igc_free_q_vectors(adapter); + igc_reset_interrupt_capability(adapter); +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void igc_update_phy_info(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); + + igc_get_phy_info(&adapter->hw); +} + +/** + * igc_has_link - check shared code for link and determine up/down + * @adapter: pointer to driver private info + */ +bool igc_has_link(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay + * false until the igc_check_for_link establishes link + * for copper adapters ONLY + */ + if (!hw->mac.get_link_status) + return true; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + + if (hw->mac.type == igc_i225) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + + return link_active; +} + +/** + * igc_watchdog - Timer Call-back + * @t: timer for the watchdog + */ +static void igc_watchdog(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igc_watchdog_task(struct work_struct *work) +{ + struct igc_adapter *adapter = container_of(work, + struct igc_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_phy_info *phy = &hw->phy; + u16 phy_data, retry_count = 20; + u32 link; + int i; + + link = igc_has_link(adapter); + + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + + if (link) { + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = rd32(IGC_CTRL); + /* Link status message must follow this format */ + netdev_info(netdev, + "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full" : "Half", + (ctrl & IGC_CTRL_TFCE) && + (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : + (ctrl & IGC_CTRL_RFCE) ? "RX" : + (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); + + /* disable EEE if enabled */ + if ((adapter->flags & IGC_FLAG_EEE) && + adapter->link_duplex == HALF_DUPLEX) { + netdev_info(netdev, + "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n"); + adapter->hw.dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + } + + /* check if SmartSpeed worked */ + igc_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + case SPEED_1000: + case SPEED_2500: + adapter->tx_timeout_factor = 1; + break; + } + + /* Once the launch time has been set on the wire, there + * is a delay before the link speed can be determined + * based on link-up activity. Write into the register + * as soon as we know the correct link speed. 
+ */ + igc_tsn_adjust_txtime_offset(adapter); + + if (adapter->link_speed != SPEED_1000) + goto no_wait; + + /* wait for Remote receiver status OK */ +retry_read_status: + if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data)) { + if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && + retry_count) { + msleep(100); + retry_count--; + goto retry_read_status; + } else if (!retry_count) { + netdev_err(netdev, "exceed max 2 second\n"); + } + } else { + netdev_err(netdev, "read 1000Base-T Status Reg\n"); + } +no_wait: + netif_carrier_on(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* Links status message must follow this format */ + netdev_info(netdev, "NIC Link is Down\n"); + netif_carrier_off(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + pm_schedule_suspend(netdev->dev.parent, + MSEC_PER_SEC * 5); + } + } + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + } + + /* Force detection of hung controller every watchdog period */ + set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + u32 eics = 0; + + for (i = 0; i < adapter->num_q_vectors; i++) + eics |= adapter->q_vector[i]->eims_value; + wr32(IGC_EICS, eics); + } else { + wr32(IGC_ICS, IGC_ICS_RXDMT0); + } + + igc_ptp_tx_hang(adapter); + + /* Reset the timer */ + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + HZ)); + else + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); + } +} + +/** + * igc_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr_msi(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ + u32 icr = rd32(IGC_ICR); + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = true; + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_intr - Legacy Interrupt Handler + * @irq: 
interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No + * need for the IMC write + */ + u32 icr = rd32(IGC_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & IGC_ICR_INT_ASSERTED)) + return IRQ_NONE; + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void igc_free_irq(struct igc_adapter *adapter) +{ + if (adapter->ecdev) { + /* no IRQ to free in EtherCAT operation */ + return; + } + + if (adapter->msix_entries) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) + free_irq(adapter->msix_entries[vector++].vector, + adapter->q_vector[i]); + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * igc_request_irq - initialize interrupts + * @adapter: Pointer to adapter structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + */ +static int igc_request_irq(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + err = igc_request_msix(adapter); + if (!err) + goto request_done; + /* fall back to MSI */ + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + igc_clear_interrupt_scheme(adapter); + err = igc_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; + igc_setup_all_tx_resources(adapter); + igc_setup_all_rx_resources(adapter); + igc_configure(adapter); + } + + igc_assign_vector(adapter->q_vector[0], 0); + + if (!adapter->ecdev && adapter->flags & IGC_FLAG_HAS_MSI) { + err = request_irq(pdev->irq, &igc_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; + + /* fall back to legacy interrupts */ + igc_reset_interrupt_capability(adapter); + adapter->flags &= ~IGC_FLAG_HAS_MSI; + } + + if (!adapter->ecdev) { + err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + netdev_err(netdev, "Error %d getting interrupt\n", err); + } +request_done: + return err; +} + +/** + * __igc_open - Called when a network interface is made active + * @netdev: network interface device structure + * @resuming: boolean indicating if the device is resuming + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
+ */ +static int __igc_open(struct net_device *netdev, bool resuming) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + int err = 0; + int i = 0; + + /* disallow open during test */ + + if (test_bit(__IGC_TESTING, &adapter->state)) { + WARN_ON(resuming); + return -EBUSY; + } + + if (!resuming) + pm_runtime_get_sync(&pdev->dev); + + if (adapter->ecdev) { + ecdev_set_link(adapter->ecdev, 0); + } + else { + netif_carrier_off(netdev); + } + + /* allocate transmit descriptors */ + err = igc_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igc_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + igc_power_up_link(adapter); + + igc_configure(adapter); + + err = igc_request_irq(adapter); + if (err) + goto err_req_irq; + + if (!adapter->ecdev) { + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + } + + clear_bit(__IGC_DOWN, &adapter->state); + if (!adapter->ecdev) { + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + } + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + if (!resuming) + pm_runtime_put(&pdev->dev); + + if (!adapter->ecdev) { + netif_tx_start_all_queues(netdev); + } + + if (!adapter->ecdev) { + /* start the watchdog. */ + hw->mac.get_link_status = true; + schedule_work(&adapter->watchdog_task); + } + + return IGC_SUCCESS; + +err_set_queues: + igc_free_irq(adapter); +err_req_irq: + igc_release_hw_control(adapter); + igc_power_down_phy_copper_base(&adapter->hw); + igc_free_all_rx_resources(adapter); +err_setup_rx: + igc_free_all_tx_resources(adapter); +err_setup_tx: + igc_reset(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); + + return err; +} + +int igc_open(struct net_device *netdev) +{ + return __igc_open(netdev, false); +} + +/** + * __igc_close - Disables a network interface + * @netdev: network interface device structure + * @suspending: boolean indicating the device is suspending + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ */ +static int __igc_close(struct net_device *netdev, bool suspending) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); + + if (!suspending) + pm_runtime_get_sync(&pdev->dev); + + igc_down(adapter); + + igc_release_hw_control(adapter); + + igc_free_irq(adapter); + + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + if (!suspending) + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +int igc_close(struct net_device *netdev) +{ + if (netif_device_present(netdev) || netdev->dismantle) + return __igc_close(netdev, false); + return 0; +} + +/** + * igc_ioctl - Access the hwtstamp interface + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command + **/ +static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGHWTSTAMP: + return igc_ptp_get_ts_config(netdev, ifr); + case SIOCSHWTSTAMP: + return igc_ptp_set_ts_config(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, + bool enable) +{ + struct igc_ring *ring; + + if (queue < 0 || queue >= adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + ring->launchtime_enable = enable; + + return 0; +} + +static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) +{ + struct timespec64 b; + + b = ktime_to_timespec64(base_time); + + return timespec64_compare(now, &b) > 0; +} + +static bool validate_schedule(struct igc_adapter *adapter, + const struct tc_taprio_qopt_offload *qopt) +{ + int queue_uses[IGC_MAX_TX_QUEUES] = { }; + struct igc_hw *hw = &adapter->hw; + struct timespec64 now; + size_t n; + + if (qopt->cycle_time_extension) + return false; + + igc_ptp_read(adapter, &now); + + /* If we program the controller's BASET registers with a time + * in the future, it will hold all the packets until that + * time, causing a lot of TX Hangs, so to avoid that, we + * reject schedules that would start in the future. + * Note: Limitation above is no longer in i226. + */ + if (!is_base_time_past(qopt->base_time, &now) && + igc_is_device_id_i225(hw)) + return false; + + for (n = 0; n < qopt->num_entries; n++) { + const struct tc_taprio_sched_entry *e, *prev; + int i; + + prev = n ? &qopt->entries[n - 1] : NULL; + e = &qopt->entries[n]; + + /* i225 only supports "global" frame preemption + * settings. + */ + if (e->command != TC_TAPRIO_CMD_SET_GATES) + return false; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (e->gate_mask & BIT(i)) { + queue_uses[i]++; + + /* There are limitations: A single queue cannot + * be opened and closed multiple times per cycle + * unless the gate stays open. Check for it. 
+ */ + if (queue_uses[i] > 1 && + !(prev->gate_mask & BIT(i))) + return false; + } + } + + return true; +} + +static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, + struct tc_etf_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_tsn_clear_schedule(struct igc_adapter *adapter) +{ + int i; + + adapter->base_time = 0; + adapter->cycle_time = NSEC_PER_SEC; + adapter->taprio_offload_enable = false; + adapter->qbv_config_change_errors = 0; + adapter->qbv_transition = false; + adapter->qbv_count = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = 0; + ring->end_time = NSEC_PER_SEC; + ring->max_sdu = 0; + ring->oper_gate_closed = false; + ring->admin_gate_closed = false; + } + + return 0; +} + +static int igc_save_qbv_schedule(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + bool queue_configured[IGC_MAX_TX_QUEUES] = { }; + struct igc_hw *hw = &adapter->hw; + u32 start_time = 0, end_time = 0; + struct timespec64 now; + size_t n; + int i; + + if (qopt->cmd == TAPRIO_CMD_DESTROY) + return igc_tsn_clear_schedule(adapter); + + if (qopt->cmd != TAPRIO_CMD_REPLACE) + return -EOPNOTSUPP; + + if (qopt->base_time < 0) + return -ERANGE; + + if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable) + return -EALREADY; + + if (!validate_schedule(adapter, qopt)) + return -EINVAL; + + adapter->cycle_time = qopt->cycle_time; + adapter->base_time = qopt->base_time; + adapter->taprio_offload_enable = true; + + igc_ptp_read(adapter, &now); + + for (n = 0; n < qopt->num_entries; n++) { + struct tc_taprio_sched_entry *e = &qopt->entries[n]; + + end_time += e->interval; + + /* If any of the conditions below are true, we need to manually + * control the end time of the cycle. + * 1. Qbv users can specify a cycle time that is not equal + * to the total GCL intervals. Hence, recalculation is + * necessary here to exclude the time interval that + * exceeds the cycle time. + * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, + * once the end of the list is reached, it will switch + * to the END_OF_CYCLE state and leave the gates in the + * same state until the next cycle is started. + */ + if (end_time > adapter->cycle_time || + n + 1 == qopt->num_entries) + end_time = adapter->cycle_time; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (!(e->gate_mask & BIT(i))) + continue; + + /* Check whether a queue stays open for more than one + * entry. If so, keep the start and advance the end + * time. + */ + if (!queue_configured[i]) + ring->start_time = start_time; + ring->end_time = end_time; + + if (ring->start_time >= adapter->cycle_time) + queue_configured[i] = false; + else + queue_configured[i] = true; + } + + start_time += e->interval; + } + + /* Check whether a queue gets configured. + * If not, set the start and end time to be end time. 
+ */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (!is_base_time_past(qopt->base_time, &now)) { + ring->admin_gate_closed = false; + } else { + ring->oper_gate_closed = false; + ring->admin_gate_closed = false; + } + + if (!queue_configured[i]) { + if (!is_base_time_past(qopt->base_time, &now)) + ring->admin_gate_closed = true; + else + ring->oper_gate_closed = true; + + ring->start_time = end_time; + ring->end_time = end_time; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + struct net_device *dev = adapter->netdev; + + if (qopt->max_sdu[i]) + ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN; + else + ring->max_sdu = 0; + } + + return 0; +} + +static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + err = igc_save_qbv_schedule(adapter, qopt); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_save_cbs_params(struct igc_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + bool cbs_status[IGC_MAX_SR_QUEUES] = { false }; + struct net_device *netdev = adapter->netdev; + struct igc_ring *ring; + int i; + + /* i225 has two sets of credit-based shaper logic. + * Supporting it only on the top two priority queues + */ + if (queue < 0 || queue > 1) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + for (i = 0; i < IGC_MAX_SR_QUEUES; i++) + if (adapter->tx_ring[i]) + cbs_status[i] = adapter->tx_ring[i]->cbs_enable; + + /* CBS should be enabled on the highest priority queue first in order + * for the CBS algorithm to operate as intended. 
+ */ + if (enable) { + if (queue == 1 && !cbs_status[0]) { + netdev_err(netdev, + "Enabling CBS on queue1 before queue0\n"); + return -EINVAL; + } + } else { + if (queue == 0 && cbs_status[1]) { + netdev_err(netdev, + "Disabling CBS on queue0 before queue1\n"); + return -EINVAL; + } + } + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +static int igc_tsn_enable_cbs(struct igc_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_tc_query_caps(struct igc_adapter *adapter, + struct tc_query_caps_base *base) +{ + struct igc_hw *hw = &adapter->hw; + + switch (base->type) { + case TC_SETUP_QDISC_TAPRIO: { + struct tc_taprio_caps *caps = base->caps; + + caps->broken_mqprio = true; + + if (hw->mac.type == igc_i225) { + caps->supports_queue_max_sdu = true; + caps->gate_mask_per_txq = true; + } + + return 0; + } + default: + return -EOPNOTSUPP; + } +} + +static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + adapter->tc_setup_type = type; + + switch (type) { + case TC_QUERY_CAPS: + return igc_tc_query_caps(adapter, type_data); + case TC_SETUP_QDISC_TAPRIO: + return igc_tsn_enable_qbv_scheduling(adapter, type_data); + + case TC_SETUP_QDISC_ETF: + return igc_tsn_enable_launchtime(adapter, type_data); + + case TC_SETUP_QDISC_CBS: + return igc_tsn_enable_cbs(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); + case XDP_SETUP_XSK_POOL: + return igc_xdp_setup_pool(adapter, bpf->xsk.pool, + bpf->xsk.queue_id); + default: + return -EOPNOTSUPP; + } +} + +static int igc_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct igc_adapter *adapter = netdev_priv(dev); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int i, drops; + + if (unlikely(test_bit(__IGC_DOWN, &adapter->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + + drops = 0; + for (i = 0; i < num_frames; i++) { + int err; + struct xdp_frame *xdpf = frames[i]; + + err = igc_xdp_init_tx_descriptor(ring, xdpf); + if (err) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (flags & XDP_XMIT_FLUSH) + igc_flush_tx_descriptors(ring); + + __netif_tx_unlock(nq); + + return num_frames - drops; +} + +static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, + struct igc_q_vector *q_vector) +{ + struct igc_hw *hw = &adapter->hw; + u32 eics = 0; + + eics |= q_vector->eims_value; + wr32(IGC_EICS, eics); +} + +int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) +{ + struct igc_adapter *adapter = netdev_priv(dev); + struct igc_q_vector *q_vector; + struct 
igc_ring *ring;
+
+	if (test_bit(__IGC_DOWN, &adapter->state))
+		return -ENETDOWN;
+
+	if (!igc_xdp_is_enabled(adapter))
+		return -ENXIO;
+
+	if (queue_id >= adapter->num_rx_queues)
+		return -EINVAL;
+
+	ring = adapter->rx_ring[queue_id];
+
+	if (!ring->xsk_pool)
+		return -ENXIO;
+
+	q_vector = adapter->q_vector[queue_id];
+	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
+		igc_trigger_rxtxq_interrupt(adapter, q_vector);
+
+	return 0;
+}
+
+static const struct net_device_ops igc_netdev_ops = {
+	.ndo_open = igc_open,
+	.ndo_stop = igc_close,
+	.ndo_start_xmit = igc_xmit_frame,
+	.ndo_set_rx_mode = igc_set_rx_mode,
+	.ndo_set_mac_address = igc_set_mac,
+	.ndo_change_mtu = igc_change_mtu,
+	.ndo_tx_timeout = igc_tx_timeout,
+	.ndo_get_stats64 = igc_get_stats64,
+	.ndo_fix_features = igc_fix_features,
+	.ndo_set_features = igc_set_features,
+	.ndo_features_check = igc_features_check,
+	.ndo_eth_ioctl = igc_ioctl,
+	.ndo_setup_tc = igc_setup_tc,
+	.ndo_bpf = igc_bpf,
+	.ndo_xdp_xmit = igc_xdp_xmit,
+	.ndo_xsk_wakeup = igc_xsk_wakeup,
+};
+
+/**
+ * ec_poll - EtherCAT poll routine
+ * @netdev: net device structure
+ *
+ * This function can never fail.
+ */
+void ec_poll(struct net_device *netdev)
+{
+	struct igc_adapter *adapter = netdev_priv(netdev);
+	int i;
+	int budget = 64;
+
+	if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) {
+		struct igc_hw *hw = &adapter->hw;
+		bool link;
+
+		hw->mac.get_link_status = true;
+		link = igc_has_link(adapter);
+		ecdev_set_link(adapter->ecdev, link);
+		adapter->ec_watchdog_jiffies = jiffies;
+	}
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igc_q_vector *q_vector = adapter->q_vector[i];
+
+		if (q_vector->tx.ring) {
+			igc_clean_tx_irq(q_vector, budget);
+		}
+
+		if (q_vector->rx.ring) {
+			igc_clean_rx_irq(q_vector, budget);
+		}
+	}
+}
+
+/* PCIe configuration access */
+void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
+{
+	struct igc_adapter *adapter = hw->back;
+
+	pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
+{
+	struct igc_adapter *adapter = hw->back;
+
+	pci_write_config_word(adapter->pdev, reg, *value);
+}
+
+s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
+{
+	struct igc_adapter *adapter = hw->back;
+
+	if (!pci_is_pcie(adapter->pdev))
+		return -IGC_ERR_CONFIG;
+
+	pcie_capability_read_word(adapter->pdev, reg, value);
+
+	return IGC_SUCCESS;
+}
+
+s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
+{
+	struct igc_adapter *adapter = hw->back;
+
+	if (!pci_is_pcie(adapter->pdev))
+		return -IGC_ERR_CONFIG;
+
+	pcie_capability_write_word(adapter->pdev, reg, *value);
+
+	return IGC_SUCCESS;
+}
+
+u32 igc_rd32(struct igc_hw *hw, u32 reg)
+{
+	struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
+	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
+	u32 value = 0;
+
+	if (IGC_REMOVED(hw_addr))
+		return ~value;
+
+	value = readl(&hw_addr[reg]);
+
+	/* reads should not return all F's */
+	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
+		struct net_device *netdev = igc->netdev;
+
+		hw->hw_addr = NULL;
+		netif_device_detach(netdev);
+		netdev_err(netdev, "PCIe link lost, device now detached\n");
+		WARN(pci_device_is_present(igc->pdev),
+		     "igc: Failed to read reg 0x%x!\n", reg);
+	}
+
+	return value;
+}
+
+/* Mapping HW RSS Type to enum xdp_rss_hash_type */
+static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = {
+	[IGC_RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_L2,
+	[IGC_RSS_TYPE_HASH_TCP_IPV4] = 
XDP_RSS_TYPE_L4_IPV4_TCP, + [IGC_RSS_TYPE_HASH_IPV4] = XDP_RSS_TYPE_L3_IPV4, + [IGC_RSS_TYPE_HASH_TCP_IPV6] = XDP_RSS_TYPE_L4_IPV6_TCP, + [IGC_RSS_TYPE_HASH_IPV6_EX] = XDP_RSS_TYPE_L3_IPV6_EX, + [IGC_RSS_TYPE_HASH_IPV6] = XDP_RSS_TYPE_L3_IPV6, + [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX, + [IGC_RSS_TYPE_HASH_UDP_IPV4] = XDP_RSS_TYPE_L4_IPV4_UDP, + [IGC_RSS_TYPE_HASH_UDP_IPV6] = XDP_RSS_TYPE_L4_IPV6_UDP, + [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX, + [10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */ + [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */ + [12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisons */ + [13] = XDP_RSS_TYPE_NONE, + [14] = XDP_RSS_TYPE_NONE, + [15] = XDP_RSS_TYPE_NONE, +}; + +static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type) +{ + const struct igc_xdp_buff *ctx = (void *)_ctx; + + if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)) + return -ENODATA; + + *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss); + *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)]; + + return 0; +} + +static const struct xdp_metadata_ops igc_xdp_metadata_ops = { + .xmo_rx_hash = igc_xdp_rx_hash, +}; + +static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer) +{ + struct igc_adapter *adapter = container_of(timer, struct igc_adapter, + hrtimer); + unsigned int i; + + adapter->qbv_transition = true; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->admin_gate_closed) { + tx_ring->admin_gate_closed = false; + tx_ring->oper_gate_closed = true; + } else { + tx_ring->oper_gate_closed = false; + } + } + adapter->qbv_transition = false; + return HRTIMER_NORESTART; +} + +/** + * igc_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igc_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igc_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring the adapter private structure, + * and a hardware reset occur. 
+ */ +static int igc_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct igc_adapter *adapter; + struct net_device *netdev; + struct igc_hw *hw; + const struct igc_info *ei = igc_info_tbl[ent->driver_data]; + int err; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + + err = pci_request_mem_regions(pdev, igc_driver_name); + if (err) + goto err_pci_reg; + + err = pci_enable_ptm(pdev, NULL); + if (err < 0) + dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); + + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), + IGC_MAX_TX_QUEUES); + + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->port_num = hw->bus.func; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + err = pci_save_state(pdev); + if (err) + goto err_ioremap; + + err = -EIO; + adapter->io_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!adapter->io_addr) + goto err_ioremap; + + /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; + + netdev->netdev_ops = &igc_netdev_ops; + netdev->xdp_metadata_ops = &igc_xdp_metadata_ops; + igc_ethtool_set_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Copy the default MAC and PHY function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + + /* Initialize skew-specific constants */ + err = ei->get_invariants(hw); + if (err) + goto err_sw_init; + + /* Add supported features to the features list*/ + netdev->features |= NETIF_F_SG; + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + netdev->features |= NETIF_F_TSO_ECN; + netdev->features |= NETIF_F_RXHASH; + netdev->features |= NETIF_F_RXCSUM; + netdev->features |= NETIF_F_HW_CSUM; + netdev->features |= NETIF_F_SCTP_CRC; + netdev->features |= NETIF_F_HW_TC; + +#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; + + /* setup the private structure */ + err = igc_sw_init(adapter); + if (err) + goto err_sw_init; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + netdev->hw_features |= netdev->features; + + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; + + 
netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY; + + /* MTU range: 68 - 9216 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ + hw->mac.ops.reset_hw(hw); + + if (igc_get_flash_presence_i225(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + } + + eth_hw_addr_set(netdev, hw->mac.addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + /* configure RXPBSIZE and TXPBSIZE */ + wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + + timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); + timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); + + INIT_WORK(&adapter->reset_task, igc_reset_task); + INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); + + hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + adapter->hrtimer.function = &igc_qbv_scheduling_timer; + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = 0xaf; + + hw->fc.requested_mode = igc_fc_default; + hw->fc.current_mode = igc_fc_default; + + /* By default, support wake on port A */ + adapter->flags |= IGC_FLAG_WOL_SUPPORTED; + + /* initialize the wol settings based on the eeprom settings */ + if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) + adapter->wol |= IGC_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, + adapter->flags & IGC_FLAG_WOL_SUPPORTED); + + igc_ptp_init(adapter); + + igc_tsn_clear_schedule(adapter); + + /* reset the hardware with the new settings */ + igc_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igc_get_hw_control(adapter); + + adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); + if (adapter->ecdev) { + err = ecdev_open(adapter->ecdev); + if (err) { + ecdev_withdraw(adapter->ecdev); + goto err_register; + } + adapter->ec_watchdog_jiffies = jiffies; + } else { + strncpy(netdev->name, "eth%d", IFNAMSIZ); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + } + /* Check if Media Autosense is enabled */ + adapter->ei = *ei; + + /* print pcie link status and MAC address */ + pcie_print_link_status(pdev); + netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); + + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + /* Disable EEE for internal PHY devices */ + hw->dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + igc_set_eee_i225(hw, false, false, false); + + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +err_register: + igc_release_hw_control(adapter); +err_eeprom: + if (!igc_check_reset_block(hw)) + igc_reset_phy(hw); +err_sw_init: + igc_clear_interrupt_scheme(adapter); + iounmap(adapter->io_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * igc_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igc_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void igc_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + if (adapter->ecdev) { + ecdev_close(adapter->ecdev); + ecdev_withdraw(adapter->ecdev); + } + + pm_runtime_get_noresume(&pdev->dev); + + igc_flush_nfc_rules(adapter); + + igc_ptp_stop(adapter); + + pci_disable_ptm(pdev); + pci_clear_master(pdev); + + set_bit(__IGC_DOWN, &adapter->state); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + hrtimer_cancel(&adapter->hrtimer); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igc_release_hw_control(adapter); + if (!adapter->ecdev) { + unregister_netdev(netdev); + } + + igc_clear_interrupt_scheme(adapter); + pci_iounmap(pdev, adapter->io_addr); + pci_release_mem_regions(pdev); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + u32 wufc = runtime ? 
IGC_WUFC_LNKC : adapter->wol;
+	struct igc_hw *hw = &adapter->hw;
+	u32 ctrl, rctl, status;
+	bool wake;
+
+	if (adapter->ecdev) {
+		ecdev_close(adapter->ecdev);
+	} else {
+		rtnl_lock();
+		netif_device_detach(netdev);
+
+		if (netif_running(netdev))
+			__igc_close(netdev, true);
+	}
+
+	igc_ptp_suspend(adapter);
+
+	igc_clear_interrupt_scheme(adapter);
+
+	/* rtnl_lock() is only taken in the non-EtherCAT branch above */
+	if (!adapter->ecdev)
+		rtnl_unlock();
+
+	status = rd32(IGC_STATUS);
+	if (status & IGC_STATUS_LU)
+		wufc &= ~IGC_WUFC_LNKC;
+
+	if (wufc) {
+		igc_setup_rctl(adapter);
+		igc_set_rx_mode(netdev);
+
+		/* turn on all-multi mode if wake on multicast is enabled */
+		if (wufc & IGC_WUFC_MC) {
+			rctl = rd32(IGC_RCTL);
+			rctl |= IGC_RCTL_MPE;
+			wr32(IGC_RCTL, rctl);
+		}
+
+		ctrl = rd32(IGC_CTRL);
+		ctrl |= IGC_CTRL_ADVD3WUC;
+		wr32(IGC_CTRL, ctrl);
+
+		/* Allow time for pending master requests to run */
+		igc_disable_pcie_master(hw);
+
+		wr32(IGC_WUC, IGC_WUC_PME_EN);
+		wr32(IGC_WUFC, wufc);
+	} else {
+		wr32(IGC_WUC, 0);
+		wr32(IGC_WUFC, 0);
+	}
+
+	wake = wufc || adapter->en_mng_pt;
+	if (!wake)
+		igc_power_down_phy_copper_base(&adapter->hw);
+	else
+		igc_power_up_link(adapter);
+
+	if (enable_wake)
+		*enable_wake = wake;
+
+	/* Release control of h/w to f/w. If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+	 */
+	igc_release_hw_control(adapter);
+
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int __maybe_unused igc_runtime_suspend(struct device *dev)
+{
+	return __igc_shutdown(to_pci_dev(dev), NULL, 1);
+}
+
+static void igc_deliver_wake_packet(struct net_device *netdev)
+{
+	struct igc_adapter *adapter = netdev_priv(netdev);
+	struct igc_hw *hw = &adapter->hw;
+	struct sk_buff *skb;
+	u32 wupl;
+
+	wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;
+
+	/* WUPM stores only the first 128 bytes of the wake packet.
+	 * Read the packet only if we have the whole thing.
+	 */
+	if (wupl == 0 || wupl > IGC_WUPM_BYTES)
+		return;
+
+	skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
+	if (!skb)
+		return;
+
+	skb_put(skb, wupl);
+
+	/* Ensure reads are 32-bit aligned */
+	wupl = roundup(wupl, 4);
+
+	memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);
+
+	skb->protocol = eth_type_trans(skb, netdev);
+	netif_rx(skb);
+}
+
+static int __maybe_unused igc_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct igc_adapter *adapter = netdev_priv(netdev);
+	struct igc_hw *hw = &adapter->hw;
+	u32 err, val;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	pci_save_state(pdev);
+
+	if (!pci_device_is_present(pdev))
+		return -ENODEV;
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	if (igc_init_interrupt_scheme(adapter, true)) {
+		netdev_err(netdev, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	igc_reset(adapter);
+
+	/* let the f/w know that the h/w is now under the control of the
+	 * driver. 
+ */ + igc_get_hw_control(adapter); + + val = rd32(IGC_WUS); + if (val & WAKE_PKT_WUS) + igc_deliver_wake_packet(netdev); + + wr32(IGC_WUS, ~0); + + if (adapter->ecdev) { + ecdev_open(adapter->ecdev); + } else { + rtnl_lock(); + if (!err && netif_running(netdev)) + err = __igc_open(netdev, true); + + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + } + return err; +} + +static int __maybe_unused igc_runtime_resume(struct device *dev) +{ + return igc_resume(dev); +} + +static int __maybe_unused igc_suspend(struct device *dev) +{ + return __igc_shutdown(to_pci_dev(dev), NULL, 0); +} + +static int __maybe_unused igc_runtime_idle(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igc_adapter *adapter = netdev_priv(netdev); + + if (!igc_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC * 5); + + return -EBUSY; +} +#endif /* CONFIG_PM */ + +static void igc_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __igc_shutdown(pdev, &wake, 0); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * igc_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current PCI connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + **/ +static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igc_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igc_io_slot_reset - called after the PCI bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the igc_resume routine. + **/ +static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + pci_ers_result_t result; + + if (pci_enable_device_mem(pdev)) { + netdev_err(netdev, "Could not re-enable PCI device after reset\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + /* In case of PCI error, adapter loses its HW address + * so we should re-assign it here. + */ + hw->hw_addr = adapter->io_addr; + + igc_reset(adapter); + wr32(IGC_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + return result; +} + +/** + * igc_io_resume - called when traffic can start to flow again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the igc_resume routine. 
+ */ +static void igc_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + rtnl_lock(); + if (netif_running(netdev)) { + if (igc_open(netdev)) { + netdev_err(netdev, "igc_open failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igc_get_hw_control(adapter); + rtnl_unlock(); +} + +static const struct pci_error_handlers igc_err_handler = { + .error_detected = igc_io_error_detected, + .slot_reset = igc_io_slot_reset, + .resume = igc_io_resume, +}; + +#ifdef CONFIG_PM +static const struct dev_pm_ops igc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume) + SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume, + igc_runtime_idle) +}; +#endif + +static struct pci_driver igc_driver = { + .name = igc_driver_name, + .id_table = igc_pci_tbl, + .probe = igc_probe, + .remove = igc_remove, +#ifdef CONFIG_PM + .driver.pm = &igc_pm_ops, +#endif + .shutdown = igc_shutdown, + .err_handler = &igc_err_handler, +}; + +/** + * igc_reinit_queues - return error + * @adapter: pointer to adapter structure + */ +int igc_reinit_queues(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + + if (netif_running(netdev)) + igc_close(netdev); + + igc_reset_interrupt_capability(adapter); + + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + err = igc_open(netdev); + + return err; +} + +/** + * igc_get_hw_dev - return device + * @hw: pointer to hardware structure + * + * used by hardware layer to print debugging information + */ +struct net_device *igc_get_hw_dev(struct igc_hw *hw) +{ + struct igc_adapter *adapter = hw->back; + + return adapter->netdev; +} + +static void igc_disable_rx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 rxdctl; + + rxdctl = rd32(IGC_RXDCTL(idx)); + rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE; + rxdctl |= IGC_RXDCTL_SWFLUSH; + wr32(IGC_RXDCTL(idx), rxdctl); +} + +void igc_disable_rx_ring(struct igc_ring *ring) +{ + igc_disable_rx_ring_hw(ring); + igc_clean_rx_ring(ring); +} + +void igc_enable_rx_ring(struct igc_ring *ring) +{ + struct igc_adapter *adapter = ring->q_vector->adapter; + + igc_configure_rx_ring(adapter, ring); + + if (ring->xsk_pool) + igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); + else + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); +} + +static void igc_disable_tx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 txdctl; + + txdctl = rd32(IGC_TXDCTL(idx)); + txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; + txdctl |= IGC_TXDCTL_SWFLUSH; + wr32(IGC_TXDCTL(idx), txdctl); +} + +void igc_disable_tx_ring(struct igc_ring *ring) +{ + igc_disable_tx_ring_hw(ring); + igc_clean_tx_ring(ring); +} + +void igc_enable_tx_ring(struct igc_ring *ring) +{ + struct igc_adapter *adapter = ring->q_vector->adapter; + + igc_configure_tx_ring(adapter, ring); +} + +/** + * igc_init_module - Driver Registration Routine + * + * igc_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. 
+ */ +static int __init igc_init_module(void) +{ + int ret; + + pr_info("%s\n", igc_driver_string); + pr_info("%s\n", igc_copyright); + + ret = pci_register_driver(&igc_driver); + return ret; +} + +module_init(igc_init_module); + +/** + * igc_exit_module - Driver Exit Cleanup Routine + * + * igc_exit_module is called just before the driver is removed + * from memory. + */ +static void __exit igc_exit_module(void) +{ + pci_unregister_driver(&igc_driver); +} + +module_exit(igc_exit_module); +/* igc_main.c */ diff --git a/devices/igc/igc_main-6.4-orig.c b/devices/igc/igc_main-6.4-orig.c new file mode 100644 index 00000000..44aa4342 --- /dev/null +++ b/devices/igc/igc_main-6.4-orig.c @@ -0,0 +1,7325 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "igc.h" +#include "igc_hw.h" +#include "igc_tsn.h" +#include "igc_xdp.h" + +#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver" + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +#define IGC_XDP_PASS 0 +#define IGC_XDP_CONSUMED BIT(0) +#define IGC_XDP_TX BIT(1) +#define IGC_XDP_REDIRECT BIT(2) + +static int debug = -1; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_LICENSE("GPL v2"); +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +char igc_driver_name[] = "igc"; +static const char igc_driver_string[] = DRV_SUMMARY; +static const char igc_copyright[] = + "Copyright(c) 2018 Intel Corporation."; + +static const struct igc_info *igc_info_tbl[] = { + [board_base] = &igc_base_info, +}; + +static const struct pci_device_id igc_pci_tbl[] = { + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base }, + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, igc_pci_tbl); + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +void igc_reset(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_fc_info *fc = &hw->fc; + u32 pba, hwm; + + /* Repartition PBA for greater than 9k MTU if required */ + pba = IGC_PBA_34K; + + /* flow control settings + * The high water mark must be low enough to fit one full frame + * after transmitting the pause frame. As such we must have enough + * space to allow for us to complete our current transmit and then + * receive the frame that is in progress from the link partner. 
+ * Set it to: + * - the full Rx FIFO size minus one full Tx plus one full Rx frame + */ + hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); + + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + fc->pause_time = 0xFFFF; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + hw->mac.ops.reset_hw(hw); + + if (hw->mac.ops.init_hw(hw)) + netdev_err(dev, "Error on hardware initialization\n"); + + /* Re-establish EEE setting */ + igc_set_eee_i225(hw, true, true, true); + + if (!netif_running(adapter->netdev)) + igc_power_down_phy_copper_base(&adapter->hw); + + /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */ + wr32(IGC_VET, ETH_P_8021Q); + + /* Re-enable PTP, where applicable. */ + igc_ptp_reset(adapter); + + /* Re-enable TSN offloading, where applicable. */ + igc_tsn_reset(adapter); + + igc_get_phy_info(hw); +} + +/** + * igc_power_up_link - Power up the phy link + * @adapter: address of board private structure + */ +static void igc_power_up_link(struct igc_adapter *adapter) +{ + igc_reset_phy(&adapter->hw); + + igc_power_up_phy_copper(&adapter->hw); + + igc_setup_link(&adapter->hw); +} + +/** + * igc_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + */ +static void igc_release_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + if (!pci_device_is_present(adapter->pdev)) + return; + + /* Let firmware take over control of h/w */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); +} + +/** + * igc_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. 
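+ * It is the counterpart of igc_release_hw_control() above; the driver
+ * asserts DRV_LOAD whenever it (re)takes ownership of the hardware, for
+ * example after a resume or a slot reset.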
+ */ +static void igc_get_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); +} + +static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf) +{ + dma_unmap_single(dev, dma_unmap_addr(buf, dma), + dma_unmap_len(buf, len), DMA_TO_DEVICE); + + dma_unmap_len_set(buf, len, 0); +} + +/** + * igc_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + */ +static void igc_clean_tx_ring(struct igc_ring *tx_ring) +{ + u16 i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + u32 xsk_frames = 0; + + while (i != tx_ring->next_to_use) { + union igc_adv_tx_desc *eop_desc, *tx_desc; + + switch (tx_buffer->type) { + case IGC_TX_BUFFER_TYPE_XSK: + xsk_frames++; + break; + case IGC_TX_BUFFER_TYPE_XDP: + xdp_return_frame(tx_buffer->xdpf); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + case IGC_TX_BUFFER_TYPE_SKB: + dev_kfree_skb_any(tx_buffer->skb); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + default: + netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); + break; + } + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IGC_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + } + + tx_buffer->next_to_watch = NULL; + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + if (tx_ring->xsk_pool && xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* Zero out the buffer ring */ + memset(tx_ring->tx_buffer_info, 0, + sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * igc_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + */ +void igc_free_tx_resources(struct igc_ring *tx_ring) +{ + igc_disable_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igc_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void igc_free_all_tx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * igc_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + */ +static void igc_clean_all_tx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if 
(adapter->tx_ring[i]) + igc_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * igc_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + */ +int igc_setup_tx_resources(struct igc_ring *tx_ring) +{ + struct net_device *ndev = tx_ring->netdev; + struct device *dev = tx_ring->dev; + int size = 0; + + size = sizeof(struct igc_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_tx_resources(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igc_setup_tx_resources(adapter->tx_ring[i]); + if (err) { + netdev_err(dev, "Error on Tx queue %u setup\n", i); + for (i--; i >= 0; i--) + igc_free_tx_resources(adapter->tx_ring[i]); + break; + } + } + + return err; +} + +static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
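+ * Only the buffer range the hardware may have written is synced here;
+ * the full page is unmapped right afterwards.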
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + buffer_info->dma, + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, + buffer_info->dma, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(buffer_info->page, + buffer_info->pagecnt_bias); + + i++; + if (i == rx_ring->count) + i = 0; + } +} + +static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring) +{ + struct igc_rx_buffer *bi; + u16 i; + + for (i = 0; i < ring->count; i++) { + bi = &ring->rx_buffer_info[i]; + if (!bi->xdp) + continue; + + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + } +} + +/** + * igc_clean_rx_ring - Free Rx Buffers per Queue + * @ring: ring to free buffers from + */ +static void igc_clean_rx_ring(struct igc_ring *ring) +{ + if (ring->xsk_pool) + igc_clean_rx_ring_xsk_pool(ring); + else + igc_clean_rx_ring_page_shared(ring); + + clear_ring_uses_large_buffer(ring); + + ring->next_to_alloc = 0; + ring->next_to_clean = 0; + ring->next_to_use = 0; +} + +/** + * igc_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + */ +static void igc_clean_all_rx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igc_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * igc_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + */ +void igc_free_rx_resources(struct igc_ring *rx_ring) +{ + igc_clean_rx_ring(rx_ring); + + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * igc_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + */ +static void igc_free_all_rx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + igc_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * igc_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + */ +int igc_setup_rx_resources(struct igc_ring *rx_ring) +{ + struct net_device *ndev = rx_ring->netdev; + struct device *dev = rx_ring->dev; + u8 index = rx_ring->queue_index; + int size, desc_len, res; + + /* XDP RX-queue info */ + if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, + rx_ring->q_vector->napi.napi_id); + if (res < 0) { + netdev_err(ndev, "Failed to register xdp_rxq index %u\n", + index); + return res; + } + + size = sizeof(struct igc_rx_buffer) * rx_ring->count; + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + desc_len = sizeof(union igc_adv_rx_desc); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; + +err: + 
xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_rx_resources(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igc_setup_rx_resources(adapter->rx_ring[i]); + if (err) { + netdev_err(dev, "Error on Rx queue %u setup\n", i); + for (i--; i >= 0; i--) + igc_free_rx_resources(adapter->rx_ring[i]); + break; + } + } + + return err; +} + +static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + if (!igc_xdp_is_enabled(adapter) || + !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) + return NULL; + + return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); +} + +/** + * igc_configure_rx_ring - Configure a receive ring after Reset + * @adapter: board private structure + * @ring: receive ring to be configured + * + * Configure the Rx unit of the MAC after a reset. + */ +static void igc_configure_rx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + union igc_adv_rx_desc *rx_desc; + int reg_idx = ring->reg_idx; + u32 srrctl = 0, rxdctl = 0; + u64 rdba = ring->dma; + u32 buf_size; + + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + ring->xsk_pool = igc_get_xsk_pool(adapter, ring); + if (ring->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL)); + } + + if (igc_xdp_is_enabled(adapter)) + set_ring_uses_large_buffer(ring); + + /* disable the queue */ + wr32(IGC_RXDCTL(reg_idx), 0); + + /* Set DMA base address registers */ + wr32(IGC_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(IGC_RDBAH(reg_idx), rdba >> 32); + wr32(IGC_RDLEN(reg_idx), + ring->count * sizeof(union igc_adv_rx_desc)); + + /* initialize head and tail */ + ring->tail = adapter->io_addr + IGC_RDT(reg_idx); + wr32(IGC_RDH(reg_idx), 0); + writel(0, ring->tail); + + /* reset next-to- use/clean to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + if (ring->xsk_pool) + buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); + else if (ring_uses_large_buffer(ring)) + buf_size = IGC_RXBUFFER_3072; + else + buf_size = IGC_RXBUFFER_2048; + + srrctl = rd32(IGC_SRRCTL(reg_idx)); + srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK | + IGC_SRRCTL_DESCTYPE_MASK); + srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN); + srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size); + srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; + + wr32(IGC_SRRCTL(reg_idx), srrctl); + + rxdctl |= IGC_RX_PTHRESH; + rxdctl |= IGC_RX_HTHRESH << 8; + rxdctl |= IGC_RX_WTHRESH << 16; + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct igc_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IGC_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* enable receive descriptor fetching */ + rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; + + wr32(IGC_RXDCTL(reg_idx), rxdctl); +} + +/** + * igc_configure_rx - Configure receive Unit 
after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + */ +static void igc_configure_rx(struct igc_adapter *adapter) +{ + int i; + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + igc_configure_rx_ring(adapter, adapter->rx_ring[i]); +} + +/** + * igc_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. + */ +static void igc_configure_tx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + int reg_idx = ring->reg_idx; + u64 tdba = ring->dma; + u32 txdctl = 0; + + ring->xsk_pool = igc_get_xsk_pool(adapter, ring); + + /* disable the queue */ + wr32(IGC_TXDCTL(reg_idx), 0); + wrfl(); + + wr32(IGC_TDLEN(reg_idx), + ring->count * sizeof(union igc_adv_tx_desc)); + wr32(IGC_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(IGC_TDBAH(reg_idx), tdba >> 32); + + ring->tail = adapter->io_addr + IGC_TDT(reg_idx); + wr32(IGC_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGC_TX_PTHRESH; + txdctl |= IGC_TX_HTHRESH << 8; + txdctl |= IGC_TX_WTHRESH << 16; + + txdctl |= IGC_TXDCTL_QUEUE_ENABLE; + wr32(IGC_TXDCTL(reg_idx), txdctl); +} + +/** + * igc_configure_tx - Configure transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + */ +static void igc_configure_tx(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +/** + * igc_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + */ +static void igc_setup_mrqc(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 j, num_rx_queues; + u32 mrqc, rxcsum; + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (j = 0; j < 10; j++) + wr32(IGC_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGC_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGC_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igc_write_rss_indir_tbl(adapter); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. 
No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(IGC_RXCSUM); + rxcsum |= IGC_RXCSUM_PCSD; + + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= IGC_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(IGC_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP | + IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + mrqc |= IGC_MRQC_ENABLE_RSS_MQ; + + wr32(IGC_MRQC, mrqc); +} + +/** + * igc_setup_rctl - configure the receive control registers + * @adapter: Board private structure + */ +static void igc_setup_rctl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 rctl; + + rctl = rd32(IGC_RCTL); + + rctl &= ~(3 << IGC_RCTL_MO_SHIFT); + rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC); + + rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); + + /* enable stripping of CRC. Newer features require + * that the HW strips the CRC. + */ + rctl |= IGC_RCTL_SECRC; + + /* disable store bad packets and clear size bits. */ + rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256); + + /* enable LPE to allow for reception of jumbo frames */ + rctl |= IGC_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ + wr32(IGC_RXDCTL(0), 0); + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in set_rx_mode + */ + rctl |= (IGC_RCTL_SBP | /* Receive bad packets */ + IGC_RCTL_BAM | /* RX All Bcast Pkts */ + IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */ + IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */ + } + + wr32(IGC_RCTL, rctl); +} + +/** + * igc_setup_tctl - configure the transmit control registers + * @adapter: Board private structure + */ +static void igc_setup_tctl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tctl; + + /* disable queue 0 which icould be enabled by default */ + wr32(IGC_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_CT; + tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC | + (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT); + + /* Enable transmits */ + tctl |= IGC_TCTL_EN; + + wr32(IGC_TCTL, tctl); +} + +/** + * igc_set_mac_filter_hw() - Set MAC address filter in hardware + * @adapter: Pointer to adapter where the filter should be set + * @index: Filter index + * @type: MAC address filter type (source or destination) + * @addr: MAC address + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. 
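+ *
+ * The first four bytes of @addr are written to RAL(@index) and the last
+ * two to the low word of RAH(@index), along with the filter type, the
+ * optional queue selection and the address-valid (AV) bit.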
+ */ +static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index, + enum igc_mac_filter_type type, + const u8 *addr, int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 ral, rah; + + if (WARN_ON(index >= hw->mac.rar_entry_count)) + return; + + ral = le32_to_cpup((__le32 *)(addr)); + rah = le16_to_cpup((__le16 *)(addr + 4)); + + if (type == IGC_MAC_FILTER_TYPE_SRC) { + rah &= ~IGC_RAH_ASEL_MASK; + rah |= IGC_RAH_ASEL_SRC_ADDR; + } + + if (queue >= 0) { + rah &= ~IGC_RAH_QSEL_MASK; + rah |= (queue << IGC_RAH_QSEL_SHIFT); + rah |= IGC_RAH_QSEL_ENABLE; + } + + rah |= IGC_RAH_AV; + + wr32(IGC_RAL(index), ral); + wr32(IGC_RAH(index), rah); + + netdev_dbg(dev, "MAC address filter set in HW: index %d", index); +} + +/** + * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware + * @adapter: Pointer to adapter where the filter should be cleared + * @index: Filter index + */ +static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + + if (WARN_ON(index >= hw->mac.rar_entry_count)) + return; + + wr32(IGC_RAL(index), 0); + wr32(IGC_RAH(index), 0); + + netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index); +} + +/* Set default MAC address for the PF in the first RAR entry */ +static void igc_set_default_mac_filter(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + u8 *addr = adapter->hw.mac.addr; + + netdev_dbg(dev, "Set default MAC address filter: address %pM", addr); + + igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); +} + +/** + * igc_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int igc_set_mac(struct net_device *netdev, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + /* set the correct pool for the new PF MAC address in entry 0 */ + igc_set_default_mac_filter(adapter); + + return 0; +} + +/** + * igc_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int igc_write_mc_addr_list(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + igc_update_mc_addr_list(hw, NULL, 0); + return 0; + } + + mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* The shared function expects a packed array of only addresses. 
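+ * Each entry occupies ETH_ALEN (6) bytes, so address i starts at
+ * mta_list + i * ETH_ALEN with no gaps between entries.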
*/ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + igc_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime, + bool *first_flag, bool *insert_empty) +{ + struct igc_adapter *adapter = netdev_priv(ring->netdev); + ktime_t cycle_time = adapter->cycle_time; + ktime_t base_time = adapter->base_time; + ktime_t now = ktime_get_clocktai(); + ktime_t baset_est, end_of_cycle; + s32 launchtime; + s64 n; + + n = div64_s64(ktime_sub_ns(now, base_time), cycle_time); + + baset_est = ktime_add_ns(base_time, cycle_time * (n)); + end_of_cycle = ktime_add_ns(baset_est, cycle_time); + + if (ktime_compare(txtime, end_of_cycle) >= 0) { + if (baset_est != ring->last_ff_cycle) { + *first_flag = true; + ring->last_ff_cycle = baset_est; + + if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0) + *insert_empty = true; + } + } + + /* Introducing a window at end of cycle on which packets + * potentially not honor launchtime. Window of 5us chosen + * considering software update the tail pointer and packets + * are dma'ed to packet buffer. + */ + if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC)) + netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", + txtime); + + ring->last_tx_cycle = end_of_cycle; + + launchtime = ktime_sub_ns(txtime, baset_est); + if (launchtime > 0) + div_s64_rem(launchtime, cycle_time, &launchtime); + else + launchtime = 0; + + return cpu_to_le32(launchtime); +} + +static int igc_init_empty_frame(struct igc_ring *ring, + struct igc_tx_buffer *buffer, + struct sk_buff *skb) +{ + unsigned int size; + dma_addr_t dma; + + size = skb_headlen(skb); + + dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); + return -ENOMEM; + } + + buffer->skb = skb; + buffer->protocol = 0; + buffer->bytecount = skb->len; + buffer->gso_segs = 1; + buffer->time_stamp = jiffies; + dma_unmap_len_set(buffer, len, skb->len); + dma_unmap_addr_set(buffer, dma, dma); + + return 0; +} + +static int igc_init_tx_empty_descriptor(struct igc_ring *ring, + struct sk_buff *skb, + struct igc_tx_buffer *first) +{ + union igc_adv_tx_desc *desc; + u32 cmd_type, olinfo_status; + int err; + + if (!igc_desc_unused(ring)) + return -EBUSY; + + err = igc_init_empty_frame(ring, first, skb); + if (err) + return err; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + first->bytecount; + olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + + desc = IGC_TX_DESC(ring, ring->next_to_use); + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); + + netdev_tx_sent_queue(txring_txq(ring), skb->len); + + first->next_to_watch = desc; + + ring->next_to_use++; + if (ring->next_to_use == ring->count) + ring->next_to_use = 0; + + return 0; +} + +#define IGC_EMPTY_FRAME_SIZE 60 + +static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, + __le32 launch_time, bool first_flag, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct igc_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = IGC_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT; + + /* For i225, context index must be unique per ring. */ + if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + mss_l4len_idx |= tx_ring->reg_idx << 4; + + if (first_flag) + mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + context_desc->launch_time = launch_time; +} + +static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && + !tx_ring->launchtime_enable) + return; + goto no_csum; + } + + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + fallthrough; + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (skb_csum_is_sctp(skb)) { + type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; + break; + } + fallthrough; + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= IGC_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: + vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, 0); +} + +static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) +{ + struct net_device *netdev = tx_ring->netdev; + + netif_stop_subqueue(netdev, tx_ring->queue_index); + + /* memory barriier comment */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (igc_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + netif_wake_subqueue(netdev, tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) +{ + if (igc_desc_unused(tx_ring) >= size) + return 0; + return __igc_maybe_stop_tx(tx_ring, size); +} + +#define IGC_SET_FLAG(_input, _flag, _result) \ + (((_flag) <= (_result)) ? 
\ + ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ + ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) + +static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = IGC_ADVTXD_DTYP_DATA | + IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, + IGC_ADVTXD_DCMD_VLE); + + /* set segmentation bits for TSO */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, + (IGC_ADVTXD_DCMD_TSE)); + + /* set timestamp bit if present */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, + (IGC_ADVTXD_MAC_TSTAMP)); + + /* insert frame checksum */ + cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); + + return cmd_type; +} + +static void igc_tx_olinfo_status(struct igc_ring *tx_ring, + union igc_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; + + /* insert L4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) * + ((IGC_TXD_POPTS_TXSM << 8) / + IGC_TX_FLAGS_CSUM); + + /* insert IPv4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) * + (((IGC_TXD_POPTS_IXSM << 8)) / + IGC_TX_FLAGS_IPV4); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int igc_tx_map(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 tx_flags = first->tx_flags; + skb_frag_t *frag; + u16 i = tx_ring->next_to_use; + unsigned int data_len, size; + dma_addr_t dma; + u32 cmd_type; + + cmd_type = igc_tx_cmd_type(skb, tx_flags); + tx_desc = IGC_TX_DESC(tx_ring, i); + + igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > IGC_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IGC_MAX_DATA_PER_TXD; + size -= IGC_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, + size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IGC_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. 
(Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. */ + igc_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); + } + + return 0; +dma_error: + netdev_err(tx_ring->netdev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; + + /* clear dma mappings for failed tx_buffer_info map */ + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + + if (i-- == 0) + i += tx_ring->count; + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + + tx_ring->next_to_use = i; + + return -1; +} + +static int igc_tso(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag, + u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); + type_tucmd |= IGC_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM | + IGC_TX_FLAGS_IPV4; + } else { + ip.v6->payload_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) { + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + } else { + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* MSS L4LEN IDX */ + mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; + + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << 
IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, mss_l4len_idx); + + return 1; +} + +static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, + struct igc_ring *tx_ring) +{ + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + bool first_flag = false, insert_empty = false; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + struct igc_tx_buffer *first; + __le32 launch_time = 0; + u32 tx_flags = 0; + unsigned short f; + ktime_t txtime; + u8 hdr_len = 0; + int tso = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); + + if (igc_maybe_stop_tx(tx_ring, count + 5)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + if (!tx_ring->launchtime_enable) + goto done; + + txtime = skb->tstamp; + skb->tstamp = ktime_set(0, 0); + launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty); + + if (insert_empty) { + struct igc_tx_buffer *empty_info; + struct sk_buff *empty; + void *data; + + empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); + if (!empty) + goto done; + + data = skb_put(empty, IGC_EMPTY_FRAME_SIZE); + memset(data, 0, IGC_EMPTY_FRAME_SIZE); + + igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); + + if (igc_init_tx_empty_descriptor(tx_ring, + empty, + empty_info) < 0) + dev_kfree_skb_any(empty); + } + +done: + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->type = IGC_TX_BUFFER_TYPE_SKB; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + if (adapter->qbv_transition || tx_ring->oper_gate_closed) + goto out_drop; + + if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) { + adapter->stats.txdrop++; + goto out_drop; + } + + if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + /* FIXME: add support for retrieving timestamps from + * the other timer registers before skipping the + * timestamping request. 
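+ * Only one Tx timestamp request can be in flight at a time (tracked by
+ * adapter->ptp_tx_skb); further requests are skipped and counted in
+ * tx_hwtstamp_skipped.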
+ */ + unsigned long flags; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + if (!adapter->ptp_tx_skb) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGC_TX_FLAGS_TSTAMP; + + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + } else { + adapter->tx_hwtstamp_skipped++; + } + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= IGC_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT); + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + igc_tx_csum(tx_ring, first, launch_time, first_flag); + + igc_tx_map(tx_ring, first, hdr_len); + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + return NETDEV_TX_OK; +} + +static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, + struct sk_buff *skb) +{ + unsigned int r_idx = skb->queue_mapping; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static netdev_tx_t igc_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb->len < 17) { + if (skb_padto(skb, 17)) + return NETDEV_TX_OK; + skb->len = 17; + } + + return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); +} + +static void igc_rx_checksum(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set */ + if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM)) + return; + + /* Rx checksum disabled via ethtool */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (igc_test_staterr(rx_desc, + IGC_RXDEXT_STATERR_L4E | + IGC_RXDEXT_STATERR_IPE)) { + /* work around errata with sctp packets where the TCPE aka + * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) + * packets (aka let the stack check the crc32c) + */ + if (!(skb->len == 60 && + test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.csum_err++; + u64_stats_update_end(&ring->rx_syncp); + } + /* let the stack verify checksum errors */ + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS | + IGC_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + netdev_dbg(ring->netdev, "cksum success: bits %08X\n", + le32_to_cpu(rx_desc->wb.upper.status_error)); +} + +/* Mapping HW RSS Type to enum pkt_hash_types */ +static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = { + [IGC_RSS_TYPE_NO_HASH] = PKT_HASH_TYPE_L2, + [IGC_RSS_TYPE_HASH_TCP_IPV4] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_IPV4] = PKT_HASH_TYPE_L3, + [IGC_RSS_TYPE_HASH_TCP_IPV6] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_IPV6_EX] = PKT_HASH_TYPE_L3, + [IGC_RSS_TYPE_HASH_IPV6] = PKT_HASH_TYPE_L3, + [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_UDP_IPV4] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_UDP_IPV6] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4, + [10] = PKT_HASH_TYPE_NONE, /* RSS Type 
above 9 "Reserved" by HW */ + [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */ + [12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisons */ + [13] = PKT_HASH_TYPE_NONE, + [14] = PKT_HASH_TYPE_NONE, + [15] = PKT_HASH_TYPE_NONE, +}; + +static inline void igc_rx_hash(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (ring->netdev->features & NETIF_F_RXHASH) { + u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + u32 rss_type = igc_rss_type(rx_desc); + + skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]); + } +} + +static void igc_rx_vlan(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + u16 vid; + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) { + if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) && + test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) + vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); + else + vid = le16_to_cpu(rx_desc->wb.upper.vlan); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } +} + +/** + * igc_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in order + * to populate the hash, checksum, VLAN, protocol, and other fields within the + * skb. + */ +static void igc_process_skb_fields(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + igc_rx_hash(rx_ring, rx_desc, skb); + + igc_rx_checksum(rx_ring, rx_desc, skb); + + igc_rx_vlan(rx_ring, rx_desc, skb); + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features) +{ + bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + if (enable) { + /* enable VLAN tag insert/strip */ + ctrl |= IGC_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~IGC_CTRL_VME; + } + wr32(IGC_CTRL, ctrl); +} + +static void igc_restore_vlan(struct igc_adapter *adapter) +{ + igc_vlan_mode(adapter->netdev, adapter->netdev->features); +} + +static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, + const unsigned int size, + int *rx_buffer_pgcnt) +{ + struct igc_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + *rx_buffer_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buffer->page); +#else + 0; +#endif + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer, + unsigned int truesize) +{ +#if (PAGE_SIZE < 8192) + buffer->page_offset ^= truesize; +#else + buffer->page_offset += truesize; +#endif +} + +static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(ring) / 2; +#else + truesize = ring_uses_build_skb(ring) ? 
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + return truesize; +} + +/** + * igc_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: size of buffer to be added + * + * This function will add the data contained in rx_buffer->page to the skb. + */ +static void igc_add_rx_frag(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(rx_ring) / 2; +#else + truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + + igc_rx_buffer_flip(rx_buffer, truesize); +} + +static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct xdp_buff *xdp) +{ + unsigned int size = xdp->data_end - xdp->data; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data_meta); + + /* build an skb around the page buffer */ + skb = napi_build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, size); + if (metasize) + skb_metadata_set(skb, metasize); + + igc_rx_buffer_flip(rx_buffer, truesize); + return skb; +} + +static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int size = xdp->data_end - xdp->data; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + void *va = xdp->data; + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data_meta); + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + IGC_RX_HDR_LEN + metasize); + if (unlikely(!skb)) + return NULL; + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IGC_RX_HDR_LEN) + headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, + ALIGN(headlen + metasize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (va + headlen) - page_address(rx_buffer->page), + size, truesize); + igc_rx_buffer_flip(rx_buffer, truesize); + } else { + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +/** + * igc_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + */ +static void igc_reuse_rx_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *old_buff) +{ + u16 nta = 
rx_ring->next_to_alloc; + struct igc_rx_buffer *new_buff; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls. + */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote and pfmemalloc pages */ + if (!dev_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) + return false; +#else +#define IGC_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) + + if (rx_buffer->page_offset > IGC_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * igc_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + */ +static bool igc_is_non_eop(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGC_RX_DESC(rx_ring, ntc)); + + if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))) + return false; + + return true; +} + +/** + * igc_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
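+ *
+ * A skb passed in as an ERR_PTR() marks a frame that was already handled
+ * by the XDP program; it makes this function return true immediately.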
+ */ +static bool igc_cleanup_headers(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + + if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { + struct net_device *netdev = rx_ring->netdev; + + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +static void igc_put_rx_buffer(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { + /* hand second half of page back to the ring */ + igc_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + +static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) +{ + struct igc_adapter *adapter = rx_ring->q_vector->adapter; + + if (ring_uses_build_skb(rx_ring)) + return IGC_SKB_PAD; + if (igc_xdp_is_enabled(adapter)) + return XDP_PACKET_HEADROOM; + + return 0; +} + +static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = igc_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +/** + * igc_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: rx descriptor ring + * @cleaned_count: number of buffers to clean + */ +static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) +{ + union igc_adv_rx_desc *rx_desc; + u16 i = rx_ring->next_to_use; + struct igc_rx_buffer *bi; + u16 bufsz; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = IGC_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + bufsz = igc_rx_bufsz(rx_ring); + + do { + if (!igc_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
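+ * The length field of the following descriptor is cleared below so that
+ * a stale write-back is never mistaken for a newly completed packet.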
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IGC_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) +{ + union igc_adv_rx_desc *desc; + u16 i = ring->next_to_use; + struct igc_rx_buffer *bi; + dma_addr_t dma; + bool ok = true; + + if (!count) + return ok; + + XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff); + + desc = IGC_RX_DESC(ring, i); + bi = &ring->rx_buffer_info[i]; + i -= ring->count; + + do { + bi->xdp = xsk_buff_alloc(ring->xsk_pool); + if (!bi->xdp) { + ok = false; + break; + } + + dma = xsk_buff_xdp_get_dma(bi->xdp); + desc->read.pkt_addr = cpu_to_le64(dma); + + desc++; + bi++; + i++; + if (unlikely(!i)) { + desc = IGC_RX_DESC(ring, 0); + bi = ring->rx_buffer_info; + i -= ring->count; + } + + /* Clear the length for the next_to_use descriptor. */ + desc->wb.upper.length = 0; + + count--; + } while (count); + + i += ring->count; + + if (ring->next_to_use != i) { + ring->next_to_use = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, ring->tail); + } + + return ok; +} + +/* This function requires __netif_tx_lock is held by the caller. */ +static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, + struct xdp_frame *xdpf) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); + u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? 
sinfo->nr_frags : 0; + u16 count, index = ring->next_to_use; + struct igc_tx_buffer *head = &ring->tx_buffer_info[index]; + struct igc_tx_buffer *buffer = head; + union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index); + u32 olinfo_status, len = xdpf->len, cmd_type; + void *data = xdpf->data; + u16 i; + + count = TXD_USE_COUNT(len); + for (i = 0; i < nr_frags; i++) + count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); + + if (igc_maybe_stop_tx(ring, count + 3)) { + /* this is a hard error */ + return -EBUSY; + } + + i = 0; + head->bytecount = xdp_get_frame_len(xdpf); + head->type = IGC_TX_BUFFER_TYPE_XDP; + head->gso_segs = 1; + head->xdpf = xdpf; + + olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + + for (;;) { + dma_addr_t dma; + + dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, + "Failed to map DMA for TX\n"); + goto unmap; + } + + dma_unmap_len_set(buffer, len, len); + dma_unmap_addr_set(buffer, dma, dma); + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | len; + + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.buffer_addr = cpu_to_le64(dma); + + buffer->protocol = 0; + + if (++index == ring->count) + index = 0; + + if (i == nr_frags) + break; + + buffer = &ring->tx_buffer_info[index]; + desc = IGC_TX_DESC(ring, index); + desc->read.olinfo_status = 0; + + data = skb_frag_address(&sinfo->frags[i]); + len = skb_frag_size(&sinfo->frags[i]); + i++; + } + desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD); + + netdev_tx_sent_queue(txring_txq(ring), head->bytecount); + /* set the timestamp */ + head->time_stamp = jiffies; + /* set next_to_watch value indicating a packet is present */ + head->next_to_watch = desc; + ring->next_to_use = index; + + return 0; + +unmap: + for (;;) { + buffer = &ring->tx_buffer_info[index]; + if (dma_unmap_len(buffer, len)) + dma_unmap_page(ring->dev, + dma_unmap_addr(buffer, dma), + dma_unmap_len(buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(buffer, len, 0); + if (buffer == head) + break; + + if (!index) + index += ring->count; + index--; + } + + return -ENOMEM; +} + +static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, + int cpu) +{ + int index = cpu; + + if (unlikely(index < 0)) + index = 0; + + while (index >= adapter->num_tx_queues) + index -= adapter->num_tx_queues; + + return adapter->tx_ring[index]; +} + +static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int res; + + if (unlikely(!xdpf)) + return -EFAULT; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + res = igc_xdp_init_tx_descriptor(ring, xdpf); + __netif_tx_unlock(nq); + return res; +} + +/* This function assumes rcu_read_lock() is held by the caller. 
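+ * It maps the XDP program's verdict to one of the IGC_XDP_* result codes
+ * that the Rx cleanup paths act upon.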
*/ +static int __igc_xdp_run_prog(struct igc_adapter *adapter, + struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + u32 act = bpf_prog_run_xdp(prog, xdp); + + switch (act) { + case XDP_PASS: + return IGC_XDP_PASS; + case XDP_TX: + if (igc_xdp_xmit_back(adapter, xdp) < 0) + goto out_failure; + return IGC_XDP_TX; + case XDP_REDIRECT: + if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) + goto out_failure; + return IGC_XDP_REDIRECT; + break; + default: + bpf_warn_invalid_xdp_action(adapter->netdev, prog, act); + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(adapter->netdev, prog, act); + fallthrough; + case XDP_DROP: + return IGC_XDP_CONSUMED; + } +} + +static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, + struct xdp_buff *xdp) +{ + struct bpf_prog *prog; + int res; + + prog = READ_ONCE(adapter->xdp_prog); + if (!prog) { + res = IGC_XDP_PASS; + goto out; + } + + res = __igc_xdp_run_prog(adapter, prog, xdp); + +out: + return ERR_PTR(-res); +} + +/* This function assumes __netif_tx_lock is held by the caller. */ +static void igc_flush_tx_descriptors(struct igc_ring *ring) +{ + /* Once tail pointer is updated, hardware can fetch the descriptors + * any time so we issue a write membar here to ensure all memory + * writes are complete before the tail pointer is updated. + */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +static void igc_finalize_xdp(struct igc_adapter *adapter, int status) +{ + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + + if (status & IGC_XDP_TX) { + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + igc_flush_tx_descriptors(ring); + __netif_tx_unlock(nq); + } + + if (status & IGC_XDP_REDIRECT) + xdp_do_flush(); +} + +static void igc_update_rx_stats(struct igc_q_vector *q_vector, + unsigned int packets, unsigned int bytes) +{ + struct igc_ring *ring = q_vector->rx.ring; + + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.packets += packets; + ring->rx_stats.bytes += bytes; + u64_stats_update_end(&ring->rx_syncp); + + q_vector->rx.total_packets += packets; + q_vector->rx.total_bytes += bytes; +} + +static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) +{ + unsigned int total_bytes = 0, total_packets = 0; + struct igc_adapter *adapter = q_vector->adapter; + struct igc_ring *rx_ring = q_vector->rx.ring; + struct sk_buff *skb = rx_ring->skb; + u16 cleaned_count = igc_desc_unused(rx_ring); + int xdp_status = 0, rx_buffer_pgcnt; + + while (likely(total_packets < budget)) { + union igc_adv_rx_desc *rx_desc; + struct igc_rx_buffer *rx_buffer; + unsigned int size, truesize; + struct igc_xdp_buff ctx; + ktime_t timestamp = 0; + int pkt_offset = 0; + void *pktbuf; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGC_RX_BUFFER_WRITE) { + igc_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); + truesize = igc_get_rx_frame_truesize(rx_ring, size); + + pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; + + if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = 
igc_ptp_rx_pktstamp(q_vector->adapter, + pktbuf); + pkt_offset = IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + if (!skb) { + xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq); + xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring), + igc_rx_offset(rx_ring) + pkt_offset, + size, true); + xdp_buff_clear_frags_flag(&ctx.xdp); + ctx.rx_desc = rx_desc; + + skb = igc_xdp_run_prog(adapter, &ctx.xdp); + } + + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + switch (xdp_res) { + case IGC_XDP_CONSUMED: + rx_buffer->pagecnt_bias++; + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + igc_rx_buffer_flip(rx_buffer, truesize); + xdp_status |= xdp_res; + break; + } + + total_packets++; + total_bytes += size; + } else if (skb) + igc_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp); + else + skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp, + timestamp); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (igc_is_non_eop(rx_ring, rx_desc)) + continue; + + /* verify the packet layout is correct */ + if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, VLAN, and protocol */ + igc_process_skb_fields(rx_ring, rx_desc, skb); + + napi_gro_receive(&q_vector->napi, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } + + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + igc_update_rx_stats(q_vector, total_packets, total_bytes); + + if (cleaned_count) + igc_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_packets; +} + +static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, + struct xdp_buff *xdp) +{ + unsigned int totalsize = xdp->data_end - xdp->data_meta; + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + net_prefetch(xdp->data_meta); + + skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + memcpy(__skb_put(skb, totalsize), xdp->data_meta, + ALIGN(totalsize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + + return skb; +} + +static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, + union igc_adv_rx_desc *desc, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + struct igc_ring *ring = q_vector->rx.ring; + struct sk_buff *skb; + + skb = igc_construct_skb_zc(ring, xdp); + if (!skb) { + ring->rx_stats.alloc_failed++; + return; + } + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + if (igc_cleanup_headers(ring, desc, skb)) + return; + + igc_process_skb_fields(ring, desc, skb); + napi_gro_receive(&q_vector->napi, skb); +} + +static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp) +{ + /* xdp_buff pointer used by ZC code path is alloc as xdp_buff_xsk. 
The + * igc_xdp_buff shares its layout with xdp_buff_xsk and private + * igc_xdp_buff fields fall into xdp_buff_xsk->cb + */ + return (struct igc_xdp_buff *)xdp; +} + +static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_ring *ring = q_vector->rx.ring; + u16 cleaned_count = igc_desc_unused(ring); + int total_bytes = 0, total_packets = 0; + u16 ntc = ring->next_to_clean; + struct bpf_prog *prog; + bool failure = false; + int xdp_status = 0; + + rcu_read_lock(); + + prog = READ_ONCE(adapter->xdp_prog); + + while (likely(total_packets < budget)) { + union igc_adv_rx_desc *desc; + struct igc_rx_buffer *bi; + struct igc_xdp_buff *ctx; + ktime_t timestamp = 0; + unsigned int size; + int res; + + desc = IGC_RX_DESC(ring, ntc); + size = le16_to_cpu(desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + bi = &ring->rx_buffer_info[ntc]; + + ctx = xsk_buff_to_igc_ctx(bi->xdp); + ctx->rx_desc = desc; + + if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, + bi->xdp->data); + + bi->xdp->data += IGC_TS_HDR_LEN; + + /* HW timestamp has been copied into local variable. Metadata + * length when XDP program is called should be 0. + */ + bi->xdp->data_meta += IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + bi->xdp->data_end = bi->xdp->data + size; + xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); + + res = __igc_xdp_run_prog(adapter, prog, bi->xdp); + switch (res) { + case IGC_XDP_PASS: + igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); + fallthrough; + case IGC_XDP_CONSUMED: + xsk_buff_free(bi->xdp); + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + xdp_status |= res; + break; + } + + bi->xdp = NULL; + total_bytes += size; + total_packets++; + cleaned_count++; + ntc++; + if (ntc == ring->count) + ntc = 0; + } + + ring->next_to_clean = ntc; + rcu_read_unlock(); + + if (cleaned_count >= IGC_RX_BUFFER_WRITE) + failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count); + + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + + igc_update_rx_stats(q_vector, total_packets, total_bytes); + + if (xsk_uses_need_wakeup(ring->xsk_pool)) { + if (failure || ring->next_to_clean == ring->next_to_use) + xsk_set_rx_need_wakeup(ring->xsk_pool); + else + xsk_clear_rx_need_wakeup(ring->xsk_pool); + return total_packets; + } + + return failure ? 
budget : total_packets; +} + +static void igc_update_tx_stats(struct igc_q_vector *q_vector, + unsigned int packets, unsigned int bytes) +{ + struct igc_ring *ring = q_vector->tx.ring; + + u64_stats_update_begin(&ring->tx_syncp); + ring->tx_stats.bytes += bytes; + ring->tx_stats.packets += packets; + u64_stats_update_end(&ring->tx_syncp); + + q_vector->tx.total_bytes += bytes; + q_vector->tx.total_packets += packets; +} + +static void igc_xdp_xmit_zc(struct igc_ring *ring) +{ + struct xsk_buff_pool *pool = ring->xsk_pool; + struct netdev_queue *nq = txring_txq(ring); + union igc_adv_tx_desc *tx_desc = NULL; + int cpu = smp_processor_id(); + u16 ntu = ring->next_to_use; + struct xdp_desc xdp_desc; + u16 budget; + + if (!netif_carrier_ok(ring->netdev)) + return; + + __netif_tx_lock(nq, cpu); + + budget = igc_desc_unused(ring); + + while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { + u32 cmd_type, olinfo_status; + struct igc_tx_buffer *bi; + dma_addr_t dma; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + xdp_desc.len; + olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; + + dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); + + tx_desc = IGC_TX_DESC(ring, ntu); + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + bi = &ring->tx_buffer_info[ntu]; + bi->type = IGC_TX_BUFFER_TYPE_XSK; + bi->protocol = 0; + bi->bytecount = xdp_desc.len; + bi->gso_segs = 1; + bi->time_stamp = jiffies; + bi->next_to_watch = tx_desc; + + netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len); + + ntu++; + if (ntu == ring->count) + ntu = 0; + } + + ring->next_to_use = ntu; + if (tx_desc) { + igc_flush_tx_descriptors(ring); + xsk_tx_release(pool); + } + + __netif_tx_unlock(nq); +} + +/** + * igc_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * @napi_budget: Used to determine if we are in netpoll + * + * returns true if ring is completely cleaned + */ +static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + struct igc_ring *tx_ring = q_vector->tx.ring; + unsigned int i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 xsk_frames = 0; + + if (test_bit(__IGC_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IGC_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + switch (tx_buffer->type) { + case IGC_TX_BUFFER_TYPE_XSK: + xsk_frames++; + break; + case IGC_TX_BUFFER_TYPE_XDP: + xdp_return_frame(tx_buffer->xdpf); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + case 
IGC_TX_BUFFER_TYPE_SKB: + napi_consume_skb(tx_buffer->skb, napi_budget); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + default: + netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); + break; + } + + /* clear last DMA location and unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + + igc_update_tx_stats(q_vector, total_packets, total_bytes); + + if (tx_ring->xsk_pool) { + if (xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) + xsk_set_tx_need_wakeup(tx_ring->xsk_pool); + igc_xdp_xmit_zc(tx_ring); + } + + if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { + struct igc_hw *hw = &adapter->hw; + + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) && + (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) && + !tx_ring->oper_gate_closed) { + /* detected Tx unit hang */ + netdev_err(tx_ring->netdev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%p>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + rd32(IGC_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_buffer->time_stamp, + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + /* we are about to reset, no point in enabling stuff */ + return true; + } + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && + netif_carrier_ok(tx_ring->netdev) && + igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
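+ * (the smp_mb() below orders the next_to_clean update against the
+ * __netif_subqueue_stopped() check that follows)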
+ */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !(test_bit(__IGC_DOWN, &adapter->state))) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.restart_queue++; + u64_stats_update_end(&tx_ring->tx_syncp); + } + } + + return !!budget; +} + +static int igc_find_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) +{ + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 ral, rah; + int i; + + for (i = 0; i < max_entries; i++) { + ral = rd32(IGC_RAL(i)); + rah = rd32(IGC_RAH(i)); + + if (!(rah & IGC_RAH_AV)) + continue; + if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type) + continue; + if ((rah & IGC_RAH_RAH_MASK) != + le16_to_cpup((__le16 *)(addr + 4))) + continue; + if (ral != le32_to_cpup((__le32 *)(addr))) + continue; + + return i; + } + + return -1; +} + +static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 rah; + int i; + + for (i = 0; i < max_entries; i++) { + rah = rd32(IGC_RAH(i)); + + if (!(rah & IGC_RAH_AV)) + return i; + } + + return -1; +} + +/** + * igc_add_mac_filter() - Add MAC address filter + * @adapter: Pointer to adapter where the filter should be added + * @type: MAC address filter type (source or destination) + * @addr: MAC address + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_add_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr, + int queue) +{ + struct net_device *dev = adapter->netdev; + int index; + + index = igc_find_mac_filter(adapter, type, addr); + if (index >= 0) + goto update_filter; + + index = igc_get_avail_mac_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", + index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", + addr, queue); + +update_filter: + igc_set_mac_filter_hw(adapter, index, type, addr, queue); + return 0; +} + +/** + * igc_del_mac_filter() - Delete MAC address filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @type: MAC address filter type (source or destination) + * @addr: MAC address + */ +static void igc_del_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) +{ + struct net_device *dev = adapter->netdev; + int index; + + index = igc_find_mac_filter(adapter, type, addr); + if (index < 0) + return; + + if (index == 0) { + /* If this is the default filter, we don't actually delete it. + * We just reset to its default value i.e. disable queue + * assignment. + */ + netdev_dbg(dev, "Disable default MAC filter queue assignment"); + + igc_set_mac_filter_hw(adapter, 0, type, addr, -1); + } else { + netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", + index, + type == IGC_MAC_FILTER_TYPE_DST ? 
"dst" : "src", + addr); + + igc_clear_mac_filter_hw(adapter, index); + } +} + +/** + * igc_add_vlan_prio_filter() - Add VLAN priority filter + * @adapter: Pointer to adapter where the filter should be added + * @prio: VLAN priority value + * @queue: Queue number which matching frames are assigned to + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, + int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + if (vlanpqf & IGC_VLANPQF_VALID(prio)) { + netdev_dbg(dev, "VLAN priority filter already in use\n"); + return -EEXIST; + } + + vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); + vlanpqf |= IGC_VLANPQF_VALID(prio); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", + prio, queue); + return 0; +} + +/** + * igc_del_vlan_prio_filter() - Delete VLAN priority filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @prio: VLAN priority value + */ +static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) +{ + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + vlanpqf &= ~IGC_VLANPQF_VALID(prio); + vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", + prio); +} + +static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if (!(etqf & IGC_ETQF_FILTER_ENABLE)) + return i; + } + + return -1; +} + +/** + * igc_add_etype_filter() - Add ethertype filter + * @adapter: Pointer to adapter where the filter should be added + * @etype: Ethertype value + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, + int queue) +{ + struct igc_hw *hw = &adapter->hw; + int index; + u32 etqf; + + index = igc_get_avail_etype_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + etqf = rd32(IGC_ETQF(index)); + + etqf &= ~IGC_ETQF_ETYPE_MASK; + etqf |= etype; + + if (queue >= 0) { + etqf &= ~IGC_ETQF_QUEUE_MASK; + etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); + etqf |= IGC_ETQF_QUEUE_ENABLE; + } + + etqf |= IGC_ETQF_FILTER_ENABLE; + + wr32(IGC_ETQF(index), etqf); + + netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", + etype, queue); + return 0; +} + +static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) + return i; + } + + return -1; +} + +/** + * igc_del_etype_filter() - Delete ethertype filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @etype: Ethertype value + */ +static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int index; + + index = igc_find_etype_filter(adapter, etype); + if (index < 0) + return; + + wr32(IGC_ETQF(index), 0); + + netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", + etype); +} + +static int igc_flex_filter_select(struct igc_adapter *adapter, + struct igc_flex_filter *input, + u32 *fhft) +{ + struct igc_hw *hw = &adapter->hw; + u8 fhft_index; + u32 fhftsl; + + if (input->index >= MAX_FLEX_FILTER) { + dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); + return -EINVAL; + } + + /* Indirect table select register */ + fhftsl = rd32(IGC_FHFTSL); + fhftsl &= ~IGC_FHFTSL_FTSL_MASK; + switch (input->index) { + case 0 ... 7: + fhftsl |= 0x00; + break; + case 8 ... 15: + fhftsl |= 0x01; + break; + case 16 ... 23: + fhftsl |= 0x02; + break; + case 24 ... 31: + fhftsl |= 0x03; + break; + } + wr32(IGC_FHFTSL, fhftsl); + + /* Normalize index down to host table register */ + fhft_index = input->index % 8; + + *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : + IGC_FHFT_EXT(fhft_index - 4); + + return 0; +} + +static int igc_write_flex_filter_ll(struct igc_adapter *adapter, + struct igc_flex_filter *input) +{ + struct device *dev = &adapter->pdev->dev; + struct igc_hw *hw = &adapter->hw; + u8 *data = input->data; + u8 *mask = input->mask; + u32 queuing; + u32 fhft; + u32 wufc; + int ret; + int i; + + /* Length has to be aligned to 8. Otherwise the filter will fail. Bail + * out early to avoid surprises later. + */ + if (input->length % 8 != 0) { + dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n"); + return -EINVAL; + } + + /* Select corresponding flex filter register and get base for host table. */ + ret = igc_flex_filter_select(adapter, input, &fhft); + if (ret) + return ret; + + /* When adding a filter globally disable flex filter feature. That is + * recommended within the datasheet. 
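+ * The feature is re-enabled further down once the filter table has been
+ * written.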
+ */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); + + /* Configure filter */ + queuing = input->length & IGC_FHFT_LENGTH_MASK; + queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK; + queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK; + + if (input->immediate_irq) + queuing |= IGC_FHFT_IMM_INT; + + if (input->drop) + queuing |= IGC_FHFT_DROP; + + wr32(fhft + 0xFC, queuing); + + /* Write data (128 byte) and mask (128 bit) */ + for (i = 0; i < 16; ++i) { + const size_t data_idx = i * 8; + const size_t row_idx = i * 16; + u32 dw0 = + (data[data_idx + 0] << 0) | + (data[data_idx + 1] << 8) | + (data[data_idx + 2] << 16) | + (data[data_idx + 3] << 24); + u32 dw1 = + (data[data_idx + 4] << 0) | + (data[data_idx + 5] << 8) | + (data[data_idx + 6] << 16) | + (data[data_idx + 7] << 24); + u32 tmp; + + /* Write row: dw0, dw1 and mask */ + wr32(fhft + row_idx, dw0); + wr32(fhft + row_idx + 4, dw1); + + /* mask is only valid for MASK(7, 0) */ + tmp = rd32(fhft + row_idx + 8); + tmp &= ~GENMASK(7, 0); + tmp |= mask[i]; + wr32(fhft + row_idx + 8, tmp); + } + + /* Enable filter. */ + wufc |= IGC_WUFC_FLEX_HQ; + if (input->index > 8) { + /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */ + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); + + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc |= (IGC_WUFC_FLX0 << input->index); + } + wr32(IGC_WUFC, wufc); + + dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", + input->index); + + return 0; +} + +static void igc_flex_filter_add_field(struct igc_flex_filter *flex, + const void *src, unsigned int offset, + size_t len, const void *mask) +{ + int i; + + /* data */ + memcpy(&flex->data[offset], src, len); + + /* mask */ + for (i = 0; i < len; ++i) { + const unsigned int idx = i + offset; + const u8 *ptr = mask; + + if (mask) { + if (ptr[i] & 0xff) + flex->mask[idx / 8] |= BIT(idx % 8); + + continue; + } + + flex->mask[idx / 8] |= BIT(idx % 8); + } +} + +static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + int i; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + for (i = 0; i < MAX_FLEX_FILTER; i++) { + if (i < 8) { + if (!(wufc & (IGC_WUFC_FLX0 << i))) + return i; + } else { + if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) + return i; + } + } + + return -ENOSPC; +} + +static bool igc_flex_filter_in_use(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + if (wufc & IGC_WUFC_FILTER_MASK) + return true; + + if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) + return true; + + return false; +} + +static int igc_add_flex_filter(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct igc_flex_filter flex = { }; + struct igc_nfc_filter *filter = &rule->filter; + unsigned int eth_offset, user_offset; + int ret, index; + bool vlan; + + index = igc_find_avail_flex_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + /* Construct the flex filter: + * -> dest_mac [6] + * -> src_mac [6] + * -> tpid [2] + * -> vlan tci [2] + * -> ether type [2] + * -> user data [8] + * -> = 26 bytes => 32 length + */ + flex.index = index; + flex.length = 32; + flex.rx_queue = rule->action; + + vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; + eth_offset = vlan ? 16 : 12; + user_offset = vlan ? 
18 : 14; + + /* Add destination MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, + ETH_ALEN, NULL); + + /* Add source MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->src_addr, 6, + ETH_ALEN, NULL); + + /* Add VLAN etype */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) + igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, + sizeof(filter->vlan_etype), + NULL); + + /* Add VLAN TCI */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, + sizeof(filter->vlan_tci), NULL); + + /* Add Ether type */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + __be16 etype = cpu_to_be16(filter->etype); + + igc_flex_filter_add_field(&flex, &etype, eth_offset, + sizeof(etype), NULL); + } + + /* Add user data */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) + igc_flex_filter_add_field(&flex, &filter->user_data, + user_offset, + sizeof(filter->user_data), + filter->user_mask); + + /* Add it down to the hardware and enable it. */ + ret = igc_write_flex_filter_ll(adapter, &flex); + if (ret) + return ret; + + filter->flex_index = index; + + return 0; +} + +static void igc_del_flex_filter(struct igc_adapter *adapter, + u16 reg_index) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc; + + /* Just disable the filter. The filter table itself is kept + * intact. Another flex_filter_add() should override the "old" data + * then. + */ + if (reg_index > 8) { + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc = rd32(IGC_WUFC); + + wufc &= ~(IGC_WUFC_FLX0 << reg_index); + wr32(IGC_WUFC, wufc); + } + + if (igc_flex_filter_in_use(adapter)) + return; + + /* No filters are in use, we may disable flex filters */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); +} + +static int igc_enable_nfc_rule(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + int err; + + if (rule->flex) { + return igc_add_flex_filter(adapter, rule); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + err = igc_add_etype_filter(adapter, rule->filter.etype, + rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + err = igc_add_vlan_prio_filter(adapter, prio, rule->action); + if (err) + return err; + } + + return 0; +} + +static void igc_disable_nfc_rule(struct igc_adapter *adapter, + const struct igc_nfc_rule *rule) +{ + if (rule->flex) { + igc_del_flex_filter(adapter, rule->filter.flex_index); + return; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) + igc_del_etype_filter(adapter, rule->filter.etype); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + igc_del_vlan_prio_filter(adapter, prio); + } + + if (rule->filter.match_flags & 
IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr); +} + +/** + * igc_get_nfc_rule() - Get NFC rule + * @adapter: Pointer to adapter + * @location: Rule location + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: Pointer to NFC rule at @location. If not found, NULL. + */ +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location) +{ + struct igc_nfc_rule *rule; + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (rule->location == location) + return rule; + if (rule->location > location) + break; + } + + return NULL; +} + +/** + * igc_del_nfc_rule() - Delete NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be deleted + * + * Disable NFC rule in hardware and delete it from adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + */ +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + igc_disable_nfc_rule(adapter, rule); + + list_del(&rule->list); + adapter->nfc_rule_count--; + + kfree(rule); +} + +static void igc_flush_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule, *tmp; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) + igc_del_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +/** + * igc_add_nfc_rule() - Add NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be added + * + * Enable NFC rule in hardware and add it to adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 on success, negative errno on failure. + */ +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + struct igc_nfc_rule *pred, *cur; + int err; + + err = igc_enable_nfc_rule(adapter, rule); + if (err) + return err; + + pred = NULL; + list_for_each_entry(cur, &adapter->nfc_rule_list, list) { + if (cur->location >= rule->location) + break; + pred = cur; + } + + list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); + adapter->nfc_rule_count++; + return 0; +} + +static void igc_restore_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) + igc_enable_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); +} + +static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr); + return 0; +} + +/** + * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. 
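+ * If programming the multicast table or the unicast address registers
+ * fails, the corresponding promiscuous mode is enabled as a fallback.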
+ */ +static void igc_set_rx_mode(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE; + int count; + + /* Check for Promiscuous and All Multicast modes */ + if (netdev->flags & IFF_PROMISC) { + rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE; + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= IGC_RCTL_MPE; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = igc_write_mc_addr_list(netdev); + if (count < 0) + rctl |= IGC_RCTL_MPE; + } + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync)) + rctl |= IGC_RCTL_UPE; + + /* update state of unicast and multicast */ + rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE); + wr32(IGC_RCTL, rctl); + +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) + rlpml = IGC_MAX_FRAME_BUILD_SKB; +#endif + wr32(IGC_RLPML, rlpml); +} + +/** + * igc_configure - configure the hardware for RX and TX + * @adapter: private board structure + */ +static void igc_configure(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i = 0; + + igc_get_hw_control(adapter); + igc_set_rx_mode(netdev); + + igc_restore_vlan(adapter); + + igc_setup_tctl(adapter); + igc_setup_mrqc(adapter); + igc_setup_rctl(adapter); + + igc_set_default_mac_filter(adapter); + igc_restore_nfc_rules(adapter); + + igc_configure_tx(adapter); + igc_configure_rx(adapter); + + igc_rx_fifo_flush_base(&adapter->hw); + + /* call igc_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + + if (ring->xsk_pool) + igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); + else + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); + } +} + +/** + * igc_write_ivar - configure ivar for given MSI-X vector + * @hw: pointer to the HW structure + * @msix_vector: vector number we are allocating to a given ring + * @index: row index of IVAR register to write within IVAR table + * @offset: column offset of in IVAR, should be multiple of 8 + * + * The IVAR table consists of 2 columns, + * each containing an cause allocation for an Rx and Tx ring, and a + * variable number of rows depending on the number of queues supported. 
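+ * Each 32-bit entry holds one byte per interrupt cause; @offset selects
+ * which byte of the entry is overwritten.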
+ */ +static void igc_write_ivar(struct igc_hw *hw, int msix_vector, + int index, int offset) +{ + u32 ivar = array_rd32(IGC_IVAR0, index); + + /* clear any bits that are currently set */ + ivar &= ~((u32)0xFF << offset); + + /* write vector and valid bit */ + ivar |= (msix_vector | IGC_IVAR_VALID) << offset; + + array_wr32(IGC_IVAR0, index, ivar); +} + +static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + int rx_queue = IGC_N0_QUEUE; + int tx_queue = IGC_N0_QUEUE; + + if (q_vector->rx.ring) + rx_queue = q_vector->rx.ring->reg_idx; + if (q_vector->tx.ring) + tx_queue = q_vector->tx.ring->reg_idx; + + switch (hw->mac.type) { + case igc_i225: + if (rx_queue > IGC_N0_QUEUE) + igc_write_ivar(hw, msix_vector, + rx_queue >> 1, + (rx_queue & 0x1) << 4); + if (tx_queue > IGC_N0_QUEUE) + igc_write_ivar(hw, msix_vector, + tx_queue >> 1, + ((tx_queue & 0x1) << 4) + 8); + q_vector->eims_value = BIT(msix_vector); + break; + default: + WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); + break; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + + /* configure q_vector to set itr on first interrupt */ + q_vector->set_itr = 1; +} + +/** + * igc_configure_msix - Configure MSI-X hardware + * @adapter: Pointer to adapter structure + * + * igc_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + */ +static void igc_configure_msix(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i, vector = 0; + u32 tmp; + + adapter->eims_enable_mask = 0; + + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case igc_i225: + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. 
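+ * (GPIE is programmed with MSIX_MODE before any IVAR or EIMS setup below)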
+ */ + wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | + IGC_GPIE_PBA | IGC_GPIE_EIAME | + IGC_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = BIT(vector); + tmp = (vector++ | IGC_IVAR_VALID) << 8; + + wr32(IGC_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) + igc_assign_vector(adapter->q_vector[i], vector++); + + wrfl(); +} + +/** + * igc_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static void igc_irq_enable(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; + u32 regval = rd32(IGC_EIAC); + + wr32(IGC_EIAC, regval | adapter->eims_enable_mask); + regval = rd32(IGC_EIAM); + wr32(IGC_EIAM, regval | adapter->eims_enable_mask); + wr32(IGC_EIMS, adapter->eims_enable_mask); + wr32(IGC_IMS, ims); + } else { + wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + } +} + +/** + * igc_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static void igc_irq_disable(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 regval = rd32(IGC_EIAM); + + wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); + wr32(IGC_EIMC, adapter->eims_enable_mask); + regval = rd32(IGC_EIAC); + wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); + } + + wr32(IGC_IAM, 0); + wr32(IGC_IMC, ~0); + wrfl(); + + if (adapter->msix_entries) { + int vector = 0, i; + + synchronize_irq(adapter->msix_entries[vector++].vector); + + for (i = 0; i < adapter->num_q_vectors; i++) + synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues) +{ + /* Determine if we need to pair queues. */ + /* If rss_queues > half of max_rss_queues, pair the queues in + * order to conserve interrupts due to limited supply. + */ + if (adapter->rss_queues > (max_rss_queues / 2)) + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + else + adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; +} + +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) +{ + return IGC_MAX_RX_QUEUES; +} + +static void igc_init_queue_configuration(struct igc_adapter *adapter) +{ + u32 max_rss_queues; + + max_rss_queues = igc_get_max_rss_queues(adapter); + adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + + igc_set_flag_queue_pairs(adapter, max_rss_queues); +} + +/** + * igc_reset_q_vector - Reset config for interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be reset + * + * If NAPI is enabled it will delete any references to the + * NAPI struct. This is preparation for igc_free_q_vector. 
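+ * The adapter's tx_ring/rx_ring pointers for this vector are cleared as
+ * well.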
+ */ +static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + /* if we're coming from igc_set_interrupt_capability, the vectors are + * not yet allocated + */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; + + netif_napi_del(&q_vector->napi); +} + +/** + * igc_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + */ +static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + adapter->q_vector[v_idx] = NULL; + + /* igc_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + if (q_vector) + kfree_rcu(q_vector, rcu); +} + +/** + * igc_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + */ +static void igc_free_q_vectors(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) { + igc_reset_q_vector(adapter, v_idx); + igc_free_q_vector(adapter, v_idx); + } +} + +/** + * igc_update_itr - update the dynamic ITR value based on statistics + * @q_vector: pointer to q_vector + * @ring_container: ring info to update the itr for + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * NOTE: These calculations are only valid when operating in a single- + * queue environment. 
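+ * Multiqueue configurations use igc_update_ring_itr() instead.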
+ */ +static void igc_update_itr(struct igc_q_vector *q_vector, + struct igc_ring_container *ring_container) +{ + unsigned int packets = ring_container->total_packets; + unsigned int bytes = ring_container->total_bytes; + u8 itrval = ring_container->itr; + + /* no packets, exit with status unchanged */ + if (packets == 0) + return; + + switch (itrval) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + itrval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 10) || ((bytes / packets) > 1200)) + itrval = bulk_latency; + else if ((packets > 35)) + itrval = lowest_latency; + } else if (bytes / packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + itrval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + itrval = low_latency; + } else if (bytes < 1500) { + itrval = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itrval; +} + +static void igc_set_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + u32 new_itr = q_vector->itr_val; + u8 current_itr = 0; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + current_itr = 0; + new_itr = IGC_4K_ITR; + goto set_itr_now; + default: + break; + } + + igc_update_itr(q_vector, &q_vector->tx); + igc_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (current_itr == lowest_latency && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + current_itr = low_latency; + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ + break; + case low_latency: + new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ + break; + case bulk_latency: + new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ + break; + default: + break; + } + +set_itr_now: + if (new_itr != q_vector->itr_val) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR + * value at the beginning of the next interrupt so the timing + * ends up being correct. 
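+ * (setting q_vector->set_itr below defers the register write to the
+ * interrupt path)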
+ */ + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; + } +} + +static void igc_reset_interrupt_capability(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & IGC_FLAG_HAS_MSI) { + pci_disable_msi(adapter->pdev); + } + + while (v_idx--) + igc_reset_q_vector(adapter, v_idx); +} + +/** + * igc_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: Pointer to adapter structure + * @msix: boolean value for MSI-X capability + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + */ +static void igc_set_interrupt_capability(struct igc_adapter *adapter, + bool msix) +{ + int numvecs, i; + int err; + + if (!msix) + goto msi_only; + adapter->flags |= IGC_FLAG_HAS_MSIX; + + /* Number of supported queues. */ + adapter->num_rx_queues = adapter->rss_queues; + + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every Rx queue */ + numvecs = adapter->num_rx_queues; + + /* if Tx handler is separate add 1 for every Tx queue */ + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + + adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), + GFP_KERNEL); + + if (!adapter->msix_entries) + return; + + /* populate entry values */ + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) + return; + + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + igc_reset_interrupt_capability(adapter); + +msi_only: + adapter->flags &= ~IGC_FLAG_HAS_MSIX; + + adapter->rss_queues = 1; + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGC_FLAG_HAS_MSI; +} + +/** + * igc_update_ring_itr - update the dynamic ITR value based on packet size + * @q_vector: pointer to q_vector + * + * Stores a new ITR value based on strictly on packet size. This + * algorithm is less sophisticated than that used in igc_update_itr, + * due to the difficulty of synchronizing statistics across multiple + * receive rings. The divisors and thresholds used by this function + * were determined based on theoretical maximum wire speed and testing + * data, in order to minimize response time while increasing bulk + * throughput. + * NOTE: This function is called only when operating in a multiqueue + * receive environment. + */ +static void igc_update_ring_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + int new_val = q_vector->itr_val; + int avg_wire_size = 0; + unsigned int packets; + + /* For non-gigabit speeds, just fix the interrupt rate at 4000 + * ints/sec - ITR timer value of 120 ticks. 
+ */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + new_val = IGC_4K_ITR; + goto set_itr_val; + default: + break; + } + + packets = q_vector->rx.total_packets; + if (packets) + avg_wire_size = q_vector->rx.total_bytes / packets; + + packets = q_vector->tx.total_packets; + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + q_vector->tx.total_bytes / packets); + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); + + /* Give a little boost to mid-size frames */ + if (avg_wire_size > 300 && avg_wire_size < 1200) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (new_val < IGC_20K_ITR && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + new_val = IGC_20K_ITR; + +set_itr_val: + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; + } +clear_counts: + q_vector->rx.total_bytes = 0; + q_vector->rx.total_packets = 0; + q_vector->tx.total_bytes = 0; + q_vector->tx.total_packets = 0; +} + +static void igc_ring_irq_enable(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { + if (adapter->num_q_vectors == 1) + igc_set_itr(q_vector); + else + igc_update_ring_itr(q_vector); + } + + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->msix_entries) + wr32(IGC_EIMS, q_vector->eims_value); + else + igc_irq_enable(adapter); + } +} + +static void igc_add_ring(struct igc_ring *ring, + struct igc_ring_container *head) +{ + head->ring = ring; + head->count++; +} + +/** + * igc_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + */ +static void igc_cache_ring_register(struct igc_adapter *adapter) +{ + int i = 0, j = 0; + + switch (adapter->hw.mac.type) { + case igc_i225: + default: + for (; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (; j < adapter->num_tx_queues; j++) + adapter->tx_ring[j]->reg_idx = j; + break; + } +} + +/** + * igc_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + */ +static int igc_poll(struct napi_struct *napi, int budget) +{ + struct igc_q_vector *q_vector = container_of(napi, + struct igc_q_vector, + napi); + struct igc_ring *rx_ring = q_vector->rx.ring; + bool clean_complete = true; + int work_done = 0; + + if (q_vector->tx.ring) + clean_complete = igc_clean_tx_irq(q_vector, budget); + + if (rx_ring) { + int cleaned = rx_ring->xsk_pool ? 
+ igc_clean_rx_irq_zc(q_vector, budget) : + igc_clean_rx_irq(q_vector, budget); + + work_done += cleaned; + if (cleaned >= budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + igc_ring_irq_enable(q_vector); + + return min(work_done, budget - 1); +} + +/** + * igc_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + */ +static int igc_alloc_q_vector(struct igc_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct igc_q_vector *q_vector; + struct igc_ring *ring; + int ring_count; + + /* igc only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; + if (!q_vector) + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), + GFP_KERNEL); + else + memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ + q_vector->itr_register = adapter->io_addr + IGC_EITR(0); + q_vector->itr_val = IGC_START_ITR; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* initialize ITR */ + if (rxr_count) { + /* rx or rx/tx vector */ + if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) + q_vector->itr_val = adapter->rx_itr_setting; + } else { + /* tx only vector */ + if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) + q_vector->itr_val = adapter->tx_itr_setting; + } + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + igc_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + igc_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; +} + +/** + * 
igc_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + */ +static int igc_alloc_q_vectors(struct igc_adapter *adapter) +{ + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int q_vectors = adapter->num_q_vectors; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igc_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * @adapter: Pointer to adapter structure + * @msix: boolean for MSI-X capability + * + * This function initializes the interrupts and allocates all of the queues. + */ +static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) +{ + struct net_device *dev = adapter->netdev; + int err = 0; + + igc_set_interrupt_capability(adapter, msix); + + err = igc_alloc_q_vectors(adapter); + if (err) { + netdev_err(dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + igc_cache_ring_register(adapter); + + return 0; + +err_alloc_q_vectors: + igc_reset_interrupt_capability(adapter); + return err; +} + +/** + * igc_sw_init - Initialize general software structures (struct igc_adapter) + * @adapter: board private structure to initialize + * + * igc_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
+ */ +static int igc_sw_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGC_DEFAULT_TXD; + adapter->rx_ring_count = IGC_DEFAULT_RXD; + + /* set default ITR values */ + adapter->rx_itr_setting = IGC_DEFAULT_ITR; + adapter->tx_itr_setting = IGC_DEFAULT_ITR; + + /* set default work limits */ + adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; + + /* adjust max frame to be at least the size of a standard frame */ + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + mutex_init(&adapter->nfc_rule_lock); + INIT_LIST_HEAD(&adapter->nfc_rule_list); + adapter->nfc_rule_count = 0; + + spin_lock_init(&adapter->stats64_lock); + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ + adapter->flags |= IGC_FLAG_HAS_MSIX; + + igc_init_queue_configuration(adapter); + + /* This call may decrease the number of queues */ + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igc_irq_disable(adapter); + + set_bit(__IGC_DOWN, &adapter->state); + + return 0; +} + +/** + * igc_up - Open the interface and prepare it to handle traffic + * @adapter: board private structure + */ +void igc_up(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i = 0; + + /* hardware has been reset, we need to reload some things */ + igc_configure(adapter); + + clear_bit(__IGC_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + + if (adapter->msix_entries) + igc_configure_msix(adapter); + else + igc_assign_vector(adapter->q_vector[0], 0); + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + netif_tx_start_all_queues(adapter->netdev); + + /* start the watchdog. */ + hw->mac.get_link_status = true; + schedule_work(&adapter->watchdog_task); +} + +/** + * igc_update_stats - Update the board statistics counters + * @adapter: board private structure + */ +void igc_update_stats(struct igc_adapter *adapter) +{ + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + u64 _bytes, _packets; + u64 bytes, packets; + unsigned int start; + u32 mpc; + int i; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + packets = 0; + bytes = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(IGC_RQDPC(i)); + + if (hw->mac.type >= igc_i225) + wr32(IGC_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + packets = 0; + bytes = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + do { + start = u64_stats_fetch_begin(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(IGC_CRCERRS); + adapter->stats.gprc += rd32(IGC_GPRC); + adapter->stats.gorc += rd32(IGC_GORCL); + rd32(IGC_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(IGC_BPRC); + adapter->stats.mprc += rd32(IGC_MPRC); + adapter->stats.roc += rd32(IGC_ROC); + + adapter->stats.prc64 += rd32(IGC_PRC64); + adapter->stats.prc127 += rd32(IGC_PRC127); + adapter->stats.prc255 += rd32(IGC_PRC255); + adapter->stats.prc511 += rd32(IGC_PRC511); + adapter->stats.prc1023 += rd32(IGC_PRC1023); + adapter->stats.prc1522 += rd32(IGC_PRC1522); + adapter->stats.tlpic += rd32(IGC_TLPIC); + adapter->stats.rlpic += rd32(IGC_RLPIC); + adapter->stats.hgptc += rd32(IGC_HGPTC); + + mpc = rd32(IGC_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(IGC_SCC); + adapter->stats.ecol += rd32(IGC_ECOL); + adapter->stats.mcc += rd32(IGC_MCC); + adapter->stats.latecol += rd32(IGC_LATECOL); + adapter->stats.dc += rd32(IGC_DC); + adapter->stats.rlec += rd32(IGC_RLEC); + adapter->stats.xonrxc += rd32(IGC_XONRXC); + adapter->stats.xontxc += rd32(IGC_XONTXC); + adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); + adapter->stats.xofftxc += rd32(IGC_XOFFTXC); + adapter->stats.fcruc += rd32(IGC_FCRUC); + adapter->stats.gptc += rd32(IGC_GPTC); + adapter->stats.gotc += rd32(IGC_GOTCL); + rd32(IGC_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(IGC_RNBC); + adapter->stats.ruc += rd32(IGC_RUC); + adapter->stats.rfc += rd32(IGC_RFC); + adapter->stats.rjc += rd32(IGC_RJC); + adapter->stats.tor += rd32(IGC_TORH); + adapter->stats.tot += rd32(IGC_TOTH); + adapter->stats.tpr += rd32(IGC_TPR); + + adapter->stats.ptc64 += rd32(IGC_PTC64); + adapter->stats.ptc127 += rd32(IGC_PTC127); + adapter->stats.ptc255 += rd32(IGC_PTC255); + adapter->stats.ptc511 += rd32(IGC_PTC511); + adapter->stats.ptc1023 += rd32(IGC_PTC1023); + adapter->stats.ptc1522 += rd32(IGC_PTC1522); + + adapter->stats.mptc += rd32(IGC_MPTC); + adapter->stats.bptc += rd32(IGC_BPTC); + + adapter->stats.tpt += rd32(IGC_TPT); + adapter->stats.colc += rd32(IGC_COLC); + adapter->stats.colc += rd32(IGC_RERC); + + adapter->stats.algnerrc += rd32(IGC_ALGNERRC); + + adapter->stats.tsctc += rd32(IGC_TSCTC); + + adapter->stats.iac += rd32(IGC_IAC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = 
adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped */ + net_stats->tx_dropped = adapter->stats.txdrop; + + /* Management Stats */ + adapter->stats.mgptc += rd32(IGC_MGTPTC); + adapter->stats.mgprc += rd32(IGC_MGTPRC); + adapter->stats.mgpdc += rd32(IGC_MGTPDC); +} + +/** + * igc_down - Close the interface + * @adapter: board private structure + */ +void igc_down(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 tctl, rctl; + int i = 0; + + set_bit(__IGC_DOWN, &adapter->state); + + igc_ptp_suspend(adapter); + + if (pci_device_is_present(adapter->pdev)) { + /* disable receives in the hardware */ + rctl = rd32(IGC_RCTL); + wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); + /* flush and sleep below */ + } + /* set trans_start so we don't get spurious watchdogs during reset */ + netif_trans_update(netdev); + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + if (pci_device_is_present(adapter->pdev)) { + /* disable transmits in the hardware */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_EN; + wr32(IGC_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 20000); + + igc_irq_disable(adapter); + } + + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + + for (i = 0; i < adapter->num_q_vectors; i++) { + if (adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } + } + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + /* record the stats before reset*/ + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + igc_reset(adapter); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; + + igc_clean_all_tx_rings(adapter); + igc_clean_all_rx_rings(adapter); +} + +void igc_reinit_locked(struct igc_adapter *adapter) +{ + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + igc_down(adapter); + igc_up(adapter); + clear_bit(__IGC_RESETTING, &adapter->state); +} + +static void igc_reset_task(struct work_struct *work) +{ + struct igc_adapter *adapter; + + adapter = container_of(work, struct igc_adapter, reset_task); + + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGC_DOWN, &adapter->state) || + test_bit(__IGC_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + + igc_rings_dump(adapter); + igc_regs_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); + 
igc_reinit_locked(adapter); + rtnl_unlock(); +} + +/** + * igc_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int igc_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct igc_adapter *adapter = netdev_priv(netdev); + + if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { + netdev_dbg(netdev, "Jumbo frames not supported with XDP"); + return -EINVAL; + } + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + /* igc_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + + if (netif_running(netdev)) + igc_down(adapter); + + netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igc_up(adapter); + else + igc_reset(adapter); + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +/** + * igc_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: queue number that timed out + **/ +static void igc_tx_timeout(struct net_device *netdev, + unsigned int __always_unused txqueue) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + wr32(IGC_EICS, + (adapter->eims_enable_mask & ~adapter->eims_other)); +} + +/** + * igc_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + * + * Returns the address of the device statistics structure. + * The statistics are updated here and also from the timer callback. + */ +static void igc_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + spin_lock(&adapter->stats64_lock); + if (!test_bit(__IGC_RESETTING, &adapter->state)) + igc_update_stats(adapter); + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); +} + +static netdev_features_t igc_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. 
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int igc_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct igc_adapter *adapter = netdev_priv(netdev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + igc_vlan_mode(netdev, features); + + /* Add VLAN support */ + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + if (!(features & NETIF_F_NTUPLE)) + igc_flush_nfc_rules(adapter); + + netdev->features = features; + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + + return 1; +} + +static netdev_features_t +igc_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPv4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + +static void igc_tsync_interrupt(struct igc_adapter *adapter) +{ + u32 ack, tsauxc, sec, nsec, tsicr; + struct igc_hw *hw = &adapter->hw; + struct ptp_clock_event event; + struct timespec64 ts; + + tsicr = rd32(IGC_TSICR); + ack = 0; + + if (tsicr & IGC_TSICR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_SYS_WRAP; + } + + if (tsicr & IGC_TSICR_TXTS) { + /* retrieve hardware timestamp */ + igc_ptp_tx_tstamp_event(adapter); + ack |= IGC_TSICR_TXTS; + } + + if (tsicr & IGC_TSICR_TT0) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[0].start, + adapter->perout[0].period); + wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT0; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[0].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT0; + } + + if (tsicr & IGC_TSICR_TT1) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[1].start, + adapter->perout[1].period); + wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT1; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[1].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT1; + } + + if (tsicr & IGC_TSICR_AUTT0) { + nsec = rd32(IGC_AUXSTMPL0); + sec = rd32(IGC_AUXSTMPH0); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = sec * NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT0; + } + + if (tsicr & IGC_TSICR_AUTT1) { + nsec = rd32(IGC_AUXSTMPL1); + sec = rd32(IGC_AUXSTMPH1); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = sec * 
NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT1; + } + + /* acknowledge the interrupts */ + wr32(IGC_TSICR, ack); +} + +/** + * igc_msix_other - msix other interrupt handler + * @irq: interrupt number + * @data: pointer to a q_vector + */ +static irqreturn_t igc_msix_other(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_hw *hw = &adapter->hw; + u32 icr = rd32(IGC_ICR); + + /* reading ICR causes bit 31 of EICR to be cleared */ + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & IGC_ICR_LSC) { + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + wr32(IGC_EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static void igc_write_itr(struct igc_q_vector *q_vector) +{ + u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; + + if (!q_vector->set_itr) + return; + + if (!itr_val) + itr_val = IGC_ITR_VAL_MASK; + + itr_val |= IGC_EITR_CNT_IGNR; + + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; +} + +static irqreturn_t igc_msix_ring(int irq, void *data) +{ + struct igc_q_vector *q_vector = data; + + /* Write the ITR value calculated from the previous interrupt. */ + igc_write_itr(q_vector); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_request_msix - Initialize MSI-X interrupts + * @adapter: Pointer to adapter structure + * + * igc_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. 
+ */ +static int igc_request_msix(struct igc_adapter *adapter) +{ + unsigned int num_q_vectors = adapter->num_q_vectors; + int i = 0, err = 0, vector = 0, free_vector = 0; + struct net_device *netdev = adapter->netdev; + + err = request_irq(adapter->msix_entries[vector].vector, + &igc_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + vector++; + + q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else if (q_vector->tx.ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx.ring->queue_index); + else if (q_vector->rx.ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else + sprintf(q_vector->name, "%s-unused", netdev->name); + + err = request_irq(adapter->msix_entries[vector].vector, + igc_msix_ring, 0, q_vector->name, + q_vector); + if (err) + goto err_free; + } + + igc_configure_msix(adapter); + return 0; + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: + return err; +} + +/** + * igc_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: Pointer to adapter structure + * + * This function resets the device so that it has 0 rx queues, tx queues, and + * MSI-X interrupts allocated. + */ +static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) +{ + igc_free_q_vectors(adapter); + igc_reset_interrupt_capability(adapter); +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void igc_update_phy_info(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); + + igc_get_phy_info(&adapter->hw); +} + +/** + * igc_has_link - check shared code for link and determine up/down + * @adapter: pointer to driver private info + */ +bool igc_has_link(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay + * false until the igc_check_for_link establishes link + * for copper adapters ONLY + */ + if (!hw->mac.get_link_status) + return true; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + + if (hw->mac.type == igc_i225) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + + return link_active; +} + +/** + * igc_watchdog - Timer Call-back + * @t: timer for the watchdog + */ +static void igc_watchdog(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igc_watchdog_task(struct work_struct *work) +{ + struct igc_adapter *adapter = container_of(work, + struct igc_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_phy_info *phy = &hw->phy; + u16 phy_data, retry_count = 20; + u32 link; + int i; + + link = igc_has_link(adapter); + + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + + if (link) { + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = rd32(IGC_CTRL); + /* Link status message must follow this format */ + netdev_info(netdev, + "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full" : "Half", + (ctrl & IGC_CTRL_TFCE) && + (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : + (ctrl & IGC_CTRL_RFCE) ? "RX" : + (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); + + /* disable EEE if enabled */ + if ((adapter->flags & IGC_FLAG_EEE) && + adapter->link_duplex == HALF_DUPLEX) { + netdev_info(netdev, + "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n"); + adapter->hw.dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + } + + /* check if SmartSpeed worked */ + igc_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + case SPEED_1000: + case SPEED_2500: + adapter->tx_timeout_factor = 1; + break; + } + + /* Once the launch time has been set on the wire, there + * is a delay before the link speed can be determined + * based on link-up activity. Write into the register + * as soon as we know the correct link speed. 
+ */ + igc_tsn_adjust_txtime_offset(adapter); + + if (adapter->link_speed != SPEED_1000) + goto no_wait; + + /* wait for Remote receiver status OK */ +retry_read_status: + if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data)) { + if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && + retry_count) { + msleep(100); + retry_count--; + goto retry_read_status; + } else if (!retry_count) { + netdev_err(netdev, "exceed max 2 second\n"); + } + } else { + netdev_err(netdev, "read 1000Base-T Status Reg\n"); + } +no_wait: + netif_carrier_on(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* Links status message must follow this format */ + netdev_info(netdev, "NIC Link is Down\n"); + netif_carrier_off(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + pm_schedule_suspend(netdev->dev.parent, + MSEC_PER_SEC * 5); + } + } + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + } + + /* Force detection of hung controller every watchdog period */ + set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + u32 eics = 0; + + for (i = 0; i < adapter->num_q_vectors; i++) + eics |= adapter->q_vector[i]->eims_value; + wr32(IGC_EICS, eics); + } else { + wr32(IGC_ICS, IGC_ICS_RXDMT0); + } + + igc_ptp_tx_hang(adapter); + + /* Reset the timer */ + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + HZ)); + else + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); + } +} + +/** + * igc_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr_msi(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ + u32 icr = rd32(IGC_ICR); + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = true; + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_intr - Legacy Interrupt Handler + * @irq: 
interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No + * need for the IMC write + */ + u32 icr = rd32(IGC_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & IGC_ICR_INT_ASSERTED)) + return IRQ_NONE; + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void igc_free_irq(struct igc_adapter *adapter) +{ + if (adapter->msix_entries) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) + free_irq(adapter->msix_entries[vector++].vector, + adapter->q_vector[i]); + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * igc_request_irq - initialize interrupts + * @adapter: Pointer to adapter structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + */ +static int igc_request_irq(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + err = igc_request_msix(adapter); + if (!err) + goto request_done; + /* fall back to MSI */ + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + igc_clear_interrupt_scheme(adapter); + err = igc_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; + igc_setup_all_tx_resources(adapter); + igc_setup_all_rx_resources(adapter); + igc_configure(adapter); + } + + igc_assign_vector(adapter->q_vector[0], 0); + + if (adapter->flags & IGC_FLAG_HAS_MSI) { + err = request_irq(pdev->irq, &igc_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; + + /* fall back to legacy interrupts */ + igc_reset_interrupt_capability(adapter); + adapter->flags &= ~IGC_FLAG_HAS_MSI; + } + + err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + netdev_err(netdev, "Error %d getting interrupt\n", err); + +request_done: + return err; +} + +/** + * __igc_open - Called when a network interface is made active + * @netdev: network interface device structure + * @resuming: boolean indicating if the device is resuming + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
+ */ +static int __igc_open(struct net_device *netdev, bool resuming) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + int err = 0; + int i = 0; + + /* disallow open during test */ + + if (test_bit(__IGC_TESTING, &adapter->state)) { + WARN_ON(resuming); + return -EBUSY; + } + + if (!resuming) + pm_runtime_get_sync(&pdev->dev); + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = igc_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igc_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + igc_power_up_link(adapter); + + igc_configure(adapter); + + err = igc_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + + clear_bit(__IGC_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + if (!resuming) + pm_runtime_put(&pdev->dev); + + netif_tx_start_all_queues(netdev); + + /* start the watchdog. */ + hw->mac.get_link_status = true; + schedule_work(&adapter->watchdog_task); + + return IGC_SUCCESS; + +err_set_queues: + igc_free_irq(adapter); +err_req_irq: + igc_release_hw_control(adapter); + igc_power_down_phy_copper_base(&adapter->hw); + igc_free_all_rx_resources(adapter); +err_setup_rx: + igc_free_all_tx_resources(adapter); +err_setup_tx: + igc_reset(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); + + return err; +} + +int igc_open(struct net_device *netdev) +{ + return __igc_open(netdev, false); +} + +/** + * __igc_close - Disables a network interface + * @netdev: network interface device structure + * @suspending: boolean indicating the device is suspending + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ */ +static int __igc_close(struct net_device *netdev, bool suspending) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); + + if (!suspending) + pm_runtime_get_sync(&pdev->dev); + + igc_down(adapter); + + igc_release_hw_control(adapter); + + igc_free_irq(adapter); + + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + if (!suspending) + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +int igc_close(struct net_device *netdev) +{ + if (netif_device_present(netdev) || netdev->dismantle) + return __igc_close(netdev, false); + return 0; +} + +/** + * igc_ioctl - Access the hwtstamp interface + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command + **/ +static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGHWTSTAMP: + return igc_ptp_get_ts_config(netdev, ifr); + case SIOCSHWTSTAMP: + return igc_ptp_set_ts_config(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, + bool enable) +{ + struct igc_ring *ring; + + if (queue < 0 || queue >= adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + ring->launchtime_enable = enable; + + return 0; +} + +static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) +{ + struct timespec64 b; + + b = ktime_to_timespec64(base_time); + + return timespec64_compare(now, &b) > 0; +} + +static bool validate_schedule(struct igc_adapter *adapter, + const struct tc_taprio_qopt_offload *qopt) +{ + int queue_uses[IGC_MAX_TX_QUEUES] = { }; + struct igc_hw *hw = &adapter->hw; + struct timespec64 now; + size_t n; + + if (qopt->cycle_time_extension) + return false; + + igc_ptp_read(adapter, &now); + + /* If we program the controller's BASET registers with a time + * in the future, it will hold all the packets until that + * time, causing a lot of TX Hangs, so to avoid that, we + * reject schedules that would start in the future. + * Note: Limitation above is no longer in i226. + */ + if (!is_base_time_past(qopt->base_time, &now) && + igc_is_device_id_i225(hw)) + return false; + + for (n = 0; n < qopt->num_entries; n++) { + const struct tc_taprio_sched_entry *e, *prev; + int i; + + prev = n ? &qopt->entries[n - 1] : NULL; + e = &qopt->entries[n]; + + /* i225 only supports "global" frame preemption + * settings. + */ + if (e->command != TC_TAPRIO_CMD_SET_GATES) + return false; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (e->gate_mask & BIT(i)) { + queue_uses[i]++; + + /* There are limitations: A single queue cannot + * be opened and closed multiple times per cycle + * unless the gate stays open. Check for it. 
+ */ + if (queue_uses[i] > 1 && + !(prev->gate_mask & BIT(i))) + return false; + } + } + + return true; +} + +static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, + struct tc_etf_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_tsn_clear_schedule(struct igc_adapter *adapter) +{ + int i; + + adapter->base_time = 0; + adapter->cycle_time = NSEC_PER_SEC; + adapter->taprio_offload_enable = false; + adapter->qbv_config_change_errors = 0; + adapter->qbv_transition = false; + adapter->qbv_count = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = 0; + ring->end_time = NSEC_PER_SEC; + ring->max_sdu = 0; + ring->oper_gate_closed = false; + ring->admin_gate_closed = false; + } + + return 0; +} + +static int igc_save_qbv_schedule(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + bool queue_configured[IGC_MAX_TX_QUEUES] = { }; + struct igc_hw *hw = &adapter->hw; + u32 start_time = 0, end_time = 0; + struct timespec64 now; + size_t n; + int i; + + if (qopt->cmd == TAPRIO_CMD_DESTROY) + return igc_tsn_clear_schedule(adapter); + + if (qopt->cmd != TAPRIO_CMD_REPLACE) + return -EOPNOTSUPP; + + if (qopt->base_time < 0) + return -ERANGE; + + if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable) + return -EALREADY; + + if (!validate_schedule(adapter, qopt)) + return -EINVAL; + + adapter->cycle_time = qopt->cycle_time; + adapter->base_time = qopt->base_time; + adapter->taprio_offload_enable = true; + + igc_ptp_read(adapter, &now); + + for (n = 0; n < qopt->num_entries; n++) { + struct tc_taprio_sched_entry *e = &qopt->entries[n]; + + end_time += e->interval; + + /* If any of the conditions below are true, we need to manually + * control the end time of the cycle. + * 1. Qbv users can specify a cycle time that is not equal + * to the total GCL intervals. Hence, recalculation is + * necessary here to exclude the time interval that + * exceeds the cycle time. + * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, + * once the end of the list is reached, it will switch + * to the END_OF_CYCLE state and leave the gates in the + * same state until the next cycle is started. + */ + if (end_time > adapter->cycle_time || + n + 1 == qopt->num_entries) + end_time = adapter->cycle_time; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (!(e->gate_mask & BIT(i))) + continue; + + /* Check whether a queue stays open for more than one + * entry. If so, keep the start and advance the end + * time. + */ + if (!queue_configured[i]) + ring->start_time = start_time; + ring->end_time = end_time; + + if (ring->start_time >= adapter->cycle_time) + queue_configured[i] = false; + else + queue_configured[i] = true; + } + + start_time += e->interval; + } + + /* Check whether a queue gets configured. + * If not, set the start and end time to be end time. 
+ */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (!is_base_time_past(qopt->base_time, &now)) { + ring->admin_gate_closed = false; + } else { + ring->oper_gate_closed = false; + ring->admin_gate_closed = false; + } + + if (!queue_configured[i]) { + if (!is_base_time_past(qopt->base_time, &now)) + ring->admin_gate_closed = true; + else + ring->oper_gate_closed = true; + + ring->start_time = end_time; + ring->end_time = end_time; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + struct net_device *dev = adapter->netdev; + + if (qopt->max_sdu[i]) + ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN; + else + ring->max_sdu = 0; + } + + return 0; +} + +static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + err = igc_save_qbv_schedule(adapter, qopt); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_save_cbs_params(struct igc_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + bool cbs_status[IGC_MAX_SR_QUEUES] = { false }; + struct net_device *netdev = adapter->netdev; + struct igc_ring *ring; + int i; + + /* i225 has two sets of credit-based shaper logic. + * Supporting it only on the top two priority queues + */ + if (queue < 0 || queue > 1) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + for (i = 0; i < IGC_MAX_SR_QUEUES; i++) + if (adapter->tx_ring[i]) + cbs_status[i] = adapter->tx_ring[i]->cbs_enable; + + /* CBS should be enabled on the highest priority queue first in order + * for the CBS algorithm to operate as intended. 
+ */ + if (enable) { + if (queue == 1 && !cbs_status[0]) { + netdev_err(netdev, + "Enabling CBS on queue1 before queue0\n"); + return -EINVAL; + } + } else { + if (queue == 0 && cbs_status[1]) { + netdev_err(netdev, + "Disabling CBS on queue0 before queue1\n"); + return -EINVAL; + } + } + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +static int igc_tsn_enable_cbs(struct igc_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_tc_query_caps(struct igc_adapter *adapter, + struct tc_query_caps_base *base) +{ + struct igc_hw *hw = &adapter->hw; + + switch (base->type) { + case TC_SETUP_QDISC_TAPRIO: { + struct tc_taprio_caps *caps = base->caps; + + caps->broken_mqprio = true; + + if (hw->mac.type == igc_i225) { + caps->supports_queue_max_sdu = true; + caps->gate_mask_per_txq = true; + } + + return 0; + } + default: + return -EOPNOTSUPP; + } +} + +static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + adapter->tc_setup_type = type; + + switch (type) { + case TC_QUERY_CAPS: + return igc_tc_query_caps(adapter, type_data); + case TC_SETUP_QDISC_TAPRIO: + return igc_tsn_enable_qbv_scheduling(adapter, type_data); + + case TC_SETUP_QDISC_ETF: + return igc_tsn_enable_launchtime(adapter, type_data); + + case TC_SETUP_QDISC_CBS: + return igc_tsn_enable_cbs(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); + case XDP_SETUP_XSK_POOL: + return igc_xdp_setup_pool(adapter, bpf->xsk.pool, + bpf->xsk.queue_id); + default: + return -EOPNOTSUPP; + } +} + +static int igc_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct igc_adapter *adapter = netdev_priv(dev); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int i, drops; + + if (unlikely(test_bit(__IGC_DOWN, &adapter->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + + drops = 0; + for (i = 0; i < num_frames; i++) { + int err; + struct xdp_frame *xdpf = frames[i]; + + err = igc_xdp_init_tx_descriptor(ring, xdpf); + if (err) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (flags & XDP_XMIT_FLUSH) + igc_flush_tx_descriptors(ring); + + __netif_tx_unlock(nq); + + return num_frames - drops; +} + +static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, + struct igc_q_vector *q_vector) +{ + struct igc_hw *hw = &adapter->hw; + u32 eics = 0; + + eics |= q_vector->eims_value; + wr32(IGC_EICS, eics); +} + +int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) +{ + struct igc_adapter *adapter = netdev_priv(dev); + struct igc_q_vector *q_vector; + struct 
igc_ring *ring; + + if (test_bit(__IGC_DOWN, &adapter->state)) + return -ENETDOWN; + + if (!igc_xdp_is_enabled(adapter)) + return -ENXIO; + + if (queue_id >= adapter->num_rx_queues) + return -EINVAL; + + ring = adapter->rx_ring[queue_id]; + + if (!ring->xsk_pool) + return -ENXIO; + + q_vector = adapter->q_vector[queue_id]; + if (!napi_if_scheduled_mark_missed(&q_vector->napi)) + igc_trigger_rxtxq_interrupt(adapter, q_vector); + + return 0; +} + +static const struct net_device_ops igc_netdev_ops = { + .ndo_open = igc_open, + .ndo_stop = igc_close, + .ndo_start_xmit = igc_xmit_frame, + .ndo_set_rx_mode = igc_set_rx_mode, + .ndo_set_mac_address = igc_set_mac, + .ndo_change_mtu = igc_change_mtu, + .ndo_tx_timeout = igc_tx_timeout, + .ndo_get_stats64 = igc_get_stats64, + .ndo_fix_features = igc_fix_features, + .ndo_set_features = igc_set_features, + .ndo_features_check = igc_features_check, + .ndo_eth_ioctl = igc_ioctl, + .ndo_setup_tc = igc_setup_tc, + .ndo_bpf = igc_bpf, + .ndo_xdp_xmit = igc_xdp_xmit, + .ndo_xsk_wakeup = igc_xsk_wakeup, +}; + +/* PCIe configuration access */ +void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + +s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + if (!pci_is_pcie(adapter->pdev)) + return -IGC_ERR_CONFIG; + + pcie_capability_read_word(adapter->pdev, reg, value); + + return IGC_SUCCESS; +} + +s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + if (!pci_is_pcie(adapter->pdev)) + return -IGC_ERR_CONFIG; + + pcie_capability_write_word(adapter->pdev, reg, *value); + + return IGC_SUCCESS; +} + +u32 igc_rd32(struct igc_hw *hw, u32 reg) +{ + struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); + u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); + u32 value = 0; + + if (IGC_REMOVED(hw_addr)) + return ~value; + + value = readl(&hw_addr[reg]); + + /* reads should not return all F's */ + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct net_device *netdev = igc->netdev; + + hw->hw_addr = NULL; + netif_device_detach(netdev); + netdev_err(netdev, "PCIe link lost, device now detached\n"); + WARN(pci_device_is_present(igc->pdev), + "igc: Failed to read reg 0x%x!\n", reg); + } + + return value; +} + +/* Mapping HW RSS Type to enum xdp_rss_hash_type */ +static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = { + [IGC_RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_L2, + [IGC_RSS_TYPE_HASH_TCP_IPV4] = XDP_RSS_TYPE_L4_IPV4_TCP, + [IGC_RSS_TYPE_HASH_IPV4] = XDP_RSS_TYPE_L3_IPV4, + [IGC_RSS_TYPE_HASH_TCP_IPV6] = XDP_RSS_TYPE_L4_IPV6_TCP, + [IGC_RSS_TYPE_HASH_IPV6_EX] = XDP_RSS_TYPE_L3_IPV6_EX, + [IGC_RSS_TYPE_HASH_IPV6] = XDP_RSS_TYPE_L3_IPV6, + [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX, + [IGC_RSS_TYPE_HASH_UDP_IPV4] = XDP_RSS_TYPE_L4_IPV4_UDP, + [IGC_RSS_TYPE_HASH_UDP_IPV6] = XDP_RSS_TYPE_L4_IPV6_UDP, + [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX, + [10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */ + [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */ + [12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisons */ + [13] = XDP_RSS_TYPE_NONE, + [14] = XDP_RSS_TYPE_NONE, + [15] = 
XDP_RSS_TYPE_NONE, +}; + +static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type) +{ + const struct igc_xdp_buff *ctx = (void *)_ctx; + + if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)) + return -ENODATA; + + *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss); + *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)]; + + return 0; +} + +static const struct xdp_metadata_ops igc_xdp_metadata_ops = { + .xmo_rx_hash = igc_xdp_rx_hash, +}; + +static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer) +{ + struct igc_adapter *adapter = container_of(timer, struct igc_adapter, + hrtimer); + unsigned int i; + + adapter->qbv_transition = true; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->admin_gate_closed) { + tx_ring->admin_gate_closed = false; + tx_ring->oper_gate_closed = true; + } else { + tx_ring->oper_gate_closed = false; + } + } + adapter->qbv_transition = false; + return HRTIMER_NORESTART; +} + +/** + * igc_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igc_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igc_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring the adapter private structure, + * and a hardware reset occur. + */ +static int igc_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct igc_adapter *adapter; + struct net_device *netdev; + struct igc_hw *hw; + const struct igc_info *ei = igc_info_tbl[ent->driver_data]; + int err; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + + err = pci_request_mem_regions(pdev, igc_driver_name); + if (err) + goto err_pci_reg; + + err = pci_enable_ptm(pdev, NULL); + if (err < 0) + dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); + + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), + IGC_MAX_TX_QUEUES); + + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->port_num = hw->bus.func; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + err = pci_save_state(pdev); + if (err) + goto err_ioremap; + + err = -EIO; + adapter->io_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!adapter->io_addr) + goto err_ioremap; + + /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; + + netdev->netdev_ops = &igc_netdev_ops; + netdev->xdp_metadata_ops = &igc_xdp_metadata_ops; + igc_ethtool_set_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Copy the default MAC and PHY function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + 
+ /* Initialize skew-specific constants */ + err = ei->get_invariants(hw); + if (err) + goto err_sw_init; + + /* Add supported features to the features list*/ + netdev->features |= NETIF_F_SG; + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + netdev->features |= NETIF_F_TSO_ECN; + netdev->features |= NETIF_F_RXHASH; + netdev->features |= NETIF_F_RXCSUM; + netdev->features |= NETIF_F_HW_CSUM; + netdev->features |= NETIF_F_SCTP_CRC; + netdev->features |= NETIF_F_HW_TC; + +#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; + + /* setup the private structure */ + err = igc_sw_init(adapter); + if (err) + goto err_sw_init; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + netdev->hw_features |= netdev->features; + + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; + + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY; + + /* MTU range: 68 - 9216 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ + hw->mac.ops.reset_hw(hw); + + if (igc_get_flash_presence_i225(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + } + + eth_hw_addr_set(netdev, hw->mac.addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + /* configure RXPBSIZE and TXPBSIZE */ + wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + + timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); + timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); + + INIT_WORK(&adapter->reset_task, igc_reset_task); + INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); + + hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + adapter->hrtimer.function = &igc_qbv_scheduling_timer; + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = 0xaf; + + hw->fc.requested_mode = igc_fc_default; + hw->fc.current_mode = igc_fc_default; + + /* By default, support wake on port A */ + adapter->flags |= IGC_FLAG_WOL_SUPPORTED; + + /* initialize the wol settings based on the eeprom settings */ + if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) + adapter->wol |= IGC_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, + adapter->flags & IGC_FLAG_WOL_SUPPORTED); + + igc_ptp_init(adapter); + + igc_tsn_clear_schedule(adapter); + + /* reset the hardware with the new settings */ + igc_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * 
driver. + */ + igc_get_hw_control(adapter); + + strncpy(netdev->name, "eth%d", IFNAMSIZ); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + /* Check if Media Autosense is enabled */ + adapter->ei = *ei; + + /* print pcie link status and MAC address */ + pcie_print_link_status(pdev); + netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); + + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + /* Disable EEE for internal PHY devices */ + hw->dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + igc_set_eee_i225(hw, false, false, false); + + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +err_register: + igc_release_hw_control(adapter); +err_eeprom: + if (!igc_check_reset_block(hw)) + igc_reset_phy(hw); +err_sw_init: + igc_clear_interrupt_scheme(adapter); + iounmap(adapter->io_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * igc_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igc_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void igc_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_get_noresume(&pdev->dev); + + igc_flush_nfc_rules(adapter); + + igc_ptp_stop(adapter); + + pci_disable_ptm(pdev); + pci_clear_master(pdev); + + set_bit(__IGC_DOWN, &adapter->state); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + hrtimer_cancel(&adapter->hrtimer); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igc_release_hw_control(adapter); + unregister_netdev(netdev); + + igc_clear_interrupt_scheme(adapter); + pci_iounmap(pdev, adapter->io_addr); + pci_release_mem_regions(pdev); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + u32 wufc = runtime ? 
IGC_WUFC_LNKC : adapter->wol; + struct igc_hw *hw = &adapter->hw; + u32 ctrl, rctl, status; + bool wake; + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) + __igc_close(netdev, true); + + igc_ptp_suspend(adapter); + + igc_clear_interrupt_scheme(adapter); + rtnl_unlock(); + + status = rd32(IGC_STATUS); + if (status & IGC_STATUS_LU) + wufc &= ~IGC_WUFC_LNKC; + + if (wufc) { + igc_setup_rctl(adapter); + igc_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & IGC_WUFC_MC) { + rctl = rd32(IGC_RCTL); + rctl |= IGC_RCTL_MPE; + wr32(IGC_RCTL, rctl); + } + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_ADVD3WUC; + wr32(IGC_CTRL, ctrl); + + /* Allow time for pending master requests to run */ + igc_disable_pcie_master(hw); + + wr32(IGC_WUC, IGC_WUC_PME_EN); + wr32(IGC_WUFC, wufc); + } else { + wr32(IGC_WUC, 0); + wr32(IGC_WUFC, 0); + } + + wake = wufc || adapter->en_mng_pt; + if (!wake) + igc_power_down_phy_copper_base(&adapter->hw); + else + igc_power_up_link(adapter); + + if (enable_wake) + *enable_wake = wake; + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igc_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int __maybe_unused igc_runtime_suspend(struct device *dev) +{ + return __igc_shutdown(to_pci_dev(dev), NULL, 1); +} + +static void igc_deliver_wake_packet(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct sk_buff *skb; + u32 wupl; + + wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK; + + /* WUPM stores only the first 128 bytes of the wake packet. + * Read the packet only if we have the whole thing. + */ + if (wupl == 0 || wupl > IGC_WUPM_BYTES) + return; + + skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES); + if (!skb) + return; + + skb_put(skb, wupl); + + /* Ensure reads are 32-bit aligned */ + wupl = roundup(wupl, 4); + + memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); + + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); +} + +static int __maybe_unused igc_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 err, val; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!pci_device_is_present(pdev)) + return -ENODEV; + err = pci_enable_device_mem(pdev); + if (err) { + netdev_err(netdev, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igc_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igc_get_hw_control(adapter); + + val = rd32(IGC_WUS); + if (val & WAKE_PKT_WUS) + igc_deliver_wake_packet(netdev); + + wr32(IGC_WUS, ~0); + + rtnl_lock(); + if (!err && netif_running(netdev)) + err = __igc_open(netdev, true); + + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + + return err; +} + +static int __maybe_unused igc_runtime_resume(struct device *dev) +{ + return igc_resume(dev); +} + +static int __maybe_unused igc_suspend(struct device *dev) +{ + return __igc_shutdown(to_pci_dev(dev), NULL, 0); +} + +static int __maybe_unused igc_runtime_idle(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igc_adapter *adapter = netdev_priv(netdev); + + if (!igc_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC * 5); + + return -EBUSY; +} +#endif /* CONFIG_PM */ + +static void igc_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __igc_shutdown(pdev, &wake, 0); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * igc_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current PCI connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + **/ +static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igc_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igc_io_slot_reset - called after the PCI bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the igc_resume routine. + **/ +static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + pci_ers_result_t result; + + if (pci_enable_device_mem(pdev)) { + netdev_err(netdev, "Could not re-enable PCI device after reset\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + /* In case of PCI error, adapter loses its HW address + * so we should re-assign it here. + */ + hw->hw_addr = adapter->io_addr; + + igc_reset(adapter); + wr32(IGC_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + return result; +} + +/** + * igc_io_resume - called when traffic can start to flow again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the igc_resume routine. + */ +static void igc_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + rtnl_lock(); + if (netif_running(netdev)) { + if (igc_open(netdev)) { + netdev_err(netdev, "igc_open failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igc_get_hw_control(adapter); + rtnl_unlock(); +} + +static const struct pci_error_handlers igc_err_handler = { + .error_detected = igc_io_error_detected, + .slot_reset = igc_io_slot_reset, + .resume = igc_io_resume, +}; + +#ifdef CONFIG_PM +static const struct dev_pm_ops igc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume) + SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume, + igc_runtime_idle) +}; +#endif + +static struct pci_driver igc_driver = { + .name = igc_driver_name, + .id_table = igc_pci_tbl, + .probe = igc_probe, + .remove = igc_remove, +#ifdef CONFIG_PM + .driver.pm = &igc_pm_ops, +#endif + .shutdown = igc_shutdown, + .err_handler = &igc_err_handler, +}; + +/** + * igc_reinit_queues - return error + * @adapter: pointer to adapter structure + */ +int igc_reinit_queues(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + + if (netif_running(netdev)) + igc_close(netdev); + + igc_reset_interrupt_capability(adapter); + + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + err = igc_open(netdev); + + return err; +} + +/** + * igc_get_hw_dev - return device + * @hw: pointer to hardware structure + * + * used by hardware layer to print debugging information + */ +struct net_device *igc_get_hw_dev(struct igc_hw *hw) +{ + struct igc_adapter *adapter = hw->back; + + return adapter->netdev; +} + +static void igc_disable_rx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 rxdctl; + + rxdctl = rd32(IGC_RXDCTL(idx)); + rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE; + rxdctl |= IGC_RXDCTL_SWFLUSH; + wr32(IGC_RXDCTL(idx), rxdctl); +} + +void igc_disable_rx_ring(struct igc_ring *ring) +{ + igc_disable_rx_ring_hw(ring); + igc_clean_rx_ring(ring); +} + +void igc_enable_rx_ring(struct igc_ring *ring) +{ + struct igc_adapter *adapter = ring->q_vector->adapter; + + igc_configure_rx_ring(adapter, ring); + + if (ring->xsk_pool) + igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); + else + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); +} + +static void igc_disable_tx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 txdctl; + + txdctl = rd32(IGC_TXDCTL(idx)); + txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; + txdctl |= IGC_TXDCTL_SWFLUSH; + wr32(IGC_TXDCTL(idx), txdctl); +} + +void igc_disable_tx_ring(struct igc_ring *ring) +{ + igc_disable_tx_ring_hw(ring); + igc_clean_tx_ring(ring); +} + +void igc_enable_tx_ring(struct igc_ring *ring) +{ + struct igc_adapter *adapter = ring->q_vector->adapter; + + igc_configure_tx_ring(adapter, ring); +} + +/** + * igc_init_module - Driver Registration Routine + * + * igc_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init igc_init_module(void) +{ + int ret; + + pr_info("%s\n", igc_driver_string); + pr_info("%s\n", igc_copyright); + + ret = pci_register_driver(&igc_driver); + return ret; +} + +module_init(igc_init_module); + +/** + * igc_exit_module - Driver Exit Cleanup Routine + * + * igc_exit_module is called just before the driver is removed + * from memory. 
+ */ +static void __exit igc_exit_module(void) +{ + pci_unregister_driver(&igc_driver); +} + +module_exit(igc_exit_module); +/* igc_main.c */ diff --git a/devices/igc/igc_main-6.6-ethercat.c b/devices/igc/igc_main-6.6-ethercat.c new file mode 100644 index 00000000..7faa0df1 --- /dev/null +++ b/devices/igc/igc_main-6.6-ethercat.c @@ -0,0 +1,7589 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "igc-6.6-ethercat.h" +#include "igc_hw-6.6-ethercat.h" +#include "igc_tsn-6.6-ethercat.h" +#include "igc_xdp-6.6-ethercat.h" + +#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver (EtherCAT enabled)" + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +#define IGC_XDP_PASS 0 +#define IGC_XDP_CONSUMED BIT(0) +#define IGC_XDP_TX BIT(1) +#define IGC_XDP_REDIRECT BIT(2) + +static int debug = -1; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_LICENSE("GPL v2"); +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +char igc_driver_name[] = "ec_igc"; +static const char igc_driver_string[] = DRV_SUMMARY; +static const char igc_copyright[] = + "Copyright(c) 2018 Intel Corporation."; + +static const struct igc_info *igc_info_tbl[] = { + [board_base] = &igc_base_info, +}; + +static const struct pci_device_id igc_pci_tbl[] = { + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base }, + /* required last entry */ + {0, } +}; + +// MODULE_DEVICE_TABLE(pci, igc_pci_tbl); + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +void igc_reset(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_fc_info *fc = &hw->fc; + u32 pba, hwm; + + /* Repartition PBA for greater than 9k MTU if required */ + pba = IGC_PBA_34K; + + /* flow control settings + * The high water mark must be low enough to fit one full frame + * after transmitting the pause frame. As such we must have enough + * space to allow for us to complete our current transmit and then + * receive the frame that is in progress from the link partner. 
+ * Set it to: + * - the full Rx FIFO size minus one full Tx plus one full Rx frame + */ + hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); + + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + fc->pause_time = 0xFFFF; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + hw->mac.ops.reset_hw(hw); + + if (hw->mac.ops.init_hw(hw)) + netdev_err(dev, "Error on hardware initialization\n"); + + /* Re-establish EEE setting */ + igc_set_eee_i225(hw, true, true, true); + + if (!adapter->ecdev && !netif_running(adapter->netdev)) + igc_power_down_phy_copper_base(&adapter->hw); + + /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */ + wr32(IGC_VET, ETH_P_8021Q); + + /* Re-enable PTP, where applicable. */ + igc_ptp_reset(adapter); + + /* Re-enable TSN offloading, where applicable. */ + igc_tsn_reset(adapter); + + igc_get_phy_info(hw); +} + +/** + * igc_power_up_link - Power up the phy link + * @adapter: address of board private structure + */ +static void igc_power_up_link(struct igc_adapter *adapter) +{ + igc_reset_phy(&adapter->hw); + + igc_power_up_phy_copper(&adapter->hw); + + igc_setup_link(&adapter->hw); +} + +/** + * igc_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + */ +static void igc_release_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + if (!pci_device_is_present(adapter->pdev)) + return; + + /* Let firmware take over control of h/w */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); +} + +/** + * igc_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. 
+ */ +static void igc_get_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); +} + +static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf) +{ + dma_unmap_single(dev, dma_unmap_addr(buf, dma), + dma_unmap_len(buf, len), DMA_TO_DEVICE); + + dma_unmap_len_set(buf, len, 0); +} + +/** + * igc_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + */ +static void igc_clean_tx_ring(struct igc_ring *tx_ring) +{ + u16 i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + u32 xsk_frames = 0; + + while (i != tx_ring->next_to_use) { + union igc_adv_tx_desc *eop_desc, *tx_desc; + + switch (tx_buffer->type) { + case IGC_TX_BUFFER_TYPE_XSK: + xsk_frames++; + break; + case IGC_TX_BUFFER_TYPE_XDP: + xdp_return_frame(tx_buffer->xdpf); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + case IGC_TX_BUFFER_TYPE_SKB: + { + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + if (!adapter->ecdev) { + /* skb is reused in EtherCAT TX operation */ + dev_kfree_skb_any(tx_buffer->skb); + } + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + } + default: + netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); + break; + } + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IGC_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + } + + tx_buffer->next_to_watch = NULL; + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + if (tx_ring->xsk_pool && xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* Zero out the buffer ring */ + memset(tx_ring->tx_buffer_info, 0, + sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * igc_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + */ +void igc_free_tx_resources(struct igc_ring *tx_ring) +{ + igc_disable_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igc_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void igc_free_all_tx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * igc_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private 
structure + */ +static void igc_clean_all_tx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igc_clean_tx_ring(adapter->tx_ring[i]); +} + +static void igc_disable_tx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 txdctl; + + txdctl = rd32(IGC_TXDCTL(idx)); + txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; + txdctl |= IGC_TXDCTL_SWFLUSH; + wr32(IGC_TXDCTL(idx), txdctl); +} + +/** + * igc_disable_all_tx_rings_hw - Disable all transmit queue operation + * @adapter: board private structure + */ +static void igc_disable_all_tx_rings_hw(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + igc_disable_tx_ring_hw(tx_ring); + } +} + +/** + * igc_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + */ +int igc_setup_tx_resources(struct igc_ring *tx_ring) +{ + struct net_device *ndev = tx_ring->netdev; + struct device *dev = tx_ring->dev; + int size = 0; + + size = sizeof(struct igc_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_tx_resources(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igc_setup_tx_resources(adapter->tx_ring[i]); + if (err) { + netdev_err(dev, "Error on Tx queue %u setup\n", i); + for (i--; i >= 0; i--) + igc_free_tx_resources(adapter->tx_ring[i]); + break; + } + } + + return err; +} + +static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + buffer_info->dma, + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, + buffer_info->dma, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(buffer_info->page, + buffer_info->pagecnt_bias); + + i++; + if (i == rx_ring->count) + i = 0; + } +} + +static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring) +{ + struct igc_rx_buffer *bi; + u16 i; + + for (i = 0; i < ring->count; i++) { + bi = &ring->rx_buffer_info[i]; + if (!bi->xdp) + continue; + + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + } +} + +/** + * igc_clean_rx_ring - Free Rx Buffers per Queue + * @ring: ring to free buffers from + */ +static void igc_clean_rx_ring(struct igc_ring *ring) +{ + if (ring->xsk_pool) + igc_clean_rx_ring_xsk_pool(ring); + else + igc_clean_rx_ring_page_shared(ring); + + clear_ring_uses_large_buffer(ring); + + ring->next_to_alloc = 0; + ring->next_to_clean = 0; + ring->next_to_use = 0; +} + +/** + * igc_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + */ +static void igc_clean_all_rx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igc_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * igc_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + */ +void igc_free_rx_resources(struct igc_ring *rx_ring) +{ + igc_clean_rx_ring(rx_ring); + + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * igc_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + */ +static void igc_free_all_rx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + igc_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * igc_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + */ +int igc_setup_rx_resources(struct igc_ring *rx_ring) +{ + struct net_device *ndev = rx_ring->netdev; + struct device *dev = rx_ring->dev; + u8 index = rx_ring->queue_index; + int size, desc_len, res; + + /* XDP RX-queue info */ + if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, + rx_ring->q_vector->napi.napi_id); + if (res < 0) { + netdev_err(ndev, "Failed to register xdp_rxq index %u\n", + index); + return res; + } + + size = sizeof(struct igc_rx_buffer) * rx_ring->count; + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + desc_len = sizeof(union igc_adv_rx_desc); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; + +err: + 
xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_rx_resources(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igc_setup_rx_resources(adapter->rx_ring[i]); + if (err) { + netdev_err(dev, "Error on Rx queue %u setup\n", i); + for (i--; i >= 0; i--) + igc_free_rx_resources(adapter->rx_ring[i]); + break; + } + } + + return err; +} + +static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + if (!igc_xdp_is_enabled(adapter) || + !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) + return NULL; + + return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); +} + +/** + * igc_configure_rx_ring - Configure a receive ring after Reset + * @adapter: board private structure + * @ring: receive ring to be configured + * + * Configure the Rx unit of the MAC after a reset. + */ +static void igc_configure_rx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + union igc_adv_rx_desc *rx_desc; + int reg_idx = ring->reg_idx; + u32 srrctl = 0, rxdctl = 0; + u64 rdba = ring->dma; + u32 buf_size; + + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + ring->xsk_pool = igc_get_xsk_pool(adapter, ring); + if (ring->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL)); + } + + if (igc_xdp_is_enabled(adapter)) + set_ring_uses_large_buffer(ring); + + /* disable the queue */ + wr32(IGC_RXDCTL(reg_idx), 0); + + /* Set DMA base address registers */ + wr32(IGC_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(IGC_RDBAH(reg_idx), rdba >> 32); + wr32(IGC_RDLEN(reg_idx), + ring->count * sizeof(union igc_adv_rx_desc)); + + /* initialize head and tail */ + ring->tail = adapter->io_addr + IGC_RDT(reg_idx); + wr32(IGC_RDH(reg_idx), 0); + writel(0, ring->tail); + + /* reset next-to- use/clean to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + if (ring->xsk_pool) + buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); + else if (ring_uses_large_buffer(ring)) + buf_size = IGC_RXBUFFER_3072; + else + buf_size = IGC_RXBUFFER_2048; + + srrctl = rd32(IGC_SRRCTL(reg_idx)); + srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK | + IGC_SRRCTL_DESCTYPE_MASK); + srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN); + srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size); + srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; + + wr32(IGC_SRRCTL(reg_idx), srrctl); + + rxdctl |= IGC_RX_PTHRESH; + rxdctl |= IGC_RX_HTHRESH << 8; + rxdctl |= IGC_RX_WTHRESH << 16; + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct igc_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IGC_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* enable receive descriptor fetching */ + rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; + + wr32(IGC_RXDCTL(reg_idx), rxdctl); +} + +/** + * igc_configure_rx - Configure receive Unit 
after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + */ +static void igc_configure_rx(struct igc_adapter *adapter) +{ + int i; + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + igc_configure_rx_ring(adapter, adapter->rx_ring[i]); +} + +/** + * igc_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. + */ +static void igc_configure_tx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + int reg_idx = ring->reg_idx; + u64 tdba = ring->dma; + u32 txdctl = 0; + + ring->xsk_pool = igc_get_xsk_pool(adapter, ring); + + /* disable the queue */ + wr32(IGC_TXDCTL(reg_idx), 0); + wrfl(); + + wr32(IGC_TDLEN(reg_idx), + ring->count * sizeof(union igc_adv_tx_desc)); + wr32(IGC_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(IGC_TDBAH(reg_idx), tdba >> 32); + + ring->tail = adapter->io_addr + IGC_TDT(reg_idx); + wr32(IGC_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGC_TX_PTHRESH; + txdctl |= IGC_TX_HTHRESH << 8; + txdctl |= IGC_TX_WTHRESH << 16; + + txdctl |= IGC_TXDCTL_QUEUE_ENABLE; + wr32(IGC_TXDCTL(reg_idx), txdctl); +} + +/** + * igc_configure_tx - Configure transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + */ +static void igc_configure_tx(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +/** + * igc_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + */ +static void igc_setup_mrqc(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 j, num_rx_queues; + u32 mrqc, rxcsum; + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (j = 0; j < 10; j++) + wr32(IGC_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGC_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGC_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igc_write_rss_indir_tbl(adapter); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. 
No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(IGC_RXCSUM); + rxcsum |= IGC_RXCSUM_PCSD; + + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= IGC_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(IGC_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP | + IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + mrqc |= IGC_MRQC_ENABLE_RSS_MQ; + + wr32(IGC_MRQC, mrqc); +} + +/** + * igc_setup_rctl - configure the receive control registers + * @adapter: Board private structure + */ +static void igc_setup_rctl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 rctl; + + rctl = rd32(IGC_RCTL); + + rctl &= ~(3 << IGC_RCTL_MO_SHIFT); + rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC); + + rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); + + /* enable stripping of CRC. Newer features require + * that the HW strips the CRC. + */ + rctl |= IGC_RCTL_SECRC; + + /* disable store bad packets and clear size bits. */ + rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256); + + /* enable LPE to allow for reception of jumbo frames */ + rctl |= IGC_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ + wr32(IGC_RXDCTL(0), 0); + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in set_rx_mode + */ + rctl |= (IGC_RCTL_SBP | /* Receive bad packets */ + IGC_RCTL_BAM | /* RX All Bcast Pkts */ + IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */ + IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */ + } + + wr32(IGC_RCTL, rctl); +} + +/** + * igc_setup_tctl - configure the transmit control registers + * @adapter: Board private structure + */ +static void igc_setup_tctl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tctl; + + /* disable queue 0 which icould be enabled by default */ + wr32(IGC_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_CT; + tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC | + (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT); + + /* Enable transmits */ + tctl |= IGC_TCTL_EN; + + wr32(IGC_TCTL, tctl); +} + +/** + * igc_set_mac_filter_hw() - Set MAC address filter in hardware + * @adapter: Pointer to adapter where the filter should be set + * @index: Filter index + * @type: MAC address filter type (source or destination) + * @addr: MAC address + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. 
+ */ +static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index, + enum igc_mac_filter_type type, + const u8 *addr, int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 ral, rah; + + if (WARN_ON(index >= hw->mac.rar_entry_count)) + return; + + ral = le32_to_cpup((__le32 *)(addr)); + rah = le16_to_cpup((__le16 *)(addr + 4)); + + if (type == IGC_MAC_FILTER_TYPE_SRC) { + rah &= ~IGC_RAH_ASEL_MASK; + rah |= IGC_RAH_ASEL_SRC_ADDR; + } + + if (queue >= 0) { + rah &= ~IGC_RAH_QSEL_MASK; + rah |= (queue << IGC_RAH_QSEL_SHIFT); + rah |= IGC_RAH_QSEL_ENABLE; + } + + rah |= IGC_RAH_AV; + + wr32(IGC_RAL(index), ral); + wr32(IGC_RAH(index), rah); + + netdev_dbg(dev, "MAC address filter set in HW: index %d", index); +} + +/** + * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware + * @adapter: Pointer to adapter where the filter should be cleared + * @index: Filter index + */ +static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + + if (WARN_ON(index >= hw->mac.rar_entry_count)) + return; + + wr32(IGC_RAL(index), 0); + wr32(IGC_RAH(index), 0); + + netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index); +} + +/* Set default MAC address for the PF in the first RAR entry */ +static void igc_set_default_mac_filter(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + u8 *addr = adapter->hw.mac.addr; + + netdev_dbg(dev, "Set default MAC address filter: address %pM", addr); + + igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); +} + +/** + * igc_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int igc_set_mac(struct net_device *netdev, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + /* set the correct pool for the new PF MAC address in entry 0 */ + igc_set_default_mac_filter(adapter); + + return 0; +} + +/** + * igc_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int igc_write_mc_addr_list(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + igc_update_mc_addr_list(hw, NULL, 0); + return 0; + } + + mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* The shared function expects a packed array of only addresses. 
*/ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + igc_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime, + bool *first_flag, bool *insert_empty) +{ + struct igc_adapter *adapter = netdev_priv(ring->netdev); + ktime_t cycle_time = adapter->cycle_time; + ktime_t base_time = adapter->base_time; + ktime_t now = ktime_get_clocktai(); + ktime_t baset_est, end_of_cycle; + s32 launchtime; + s64 n; + + n = div64_s64(ktime_sub_ns(now, base_time), cycle_time); + + baset_est = ktime_add_ns(base_time, cycle_time * (n)); + end_of_cycle = ktime_add_ns(baset_est, cycle_time); + + if (ktime_compare(txtime, end_of_cycle) >= 0) { + if (baset_est != ring->last_ff_cycle) { + *first_flag = true; + ring->last_ff_cycle = baset_est; + + if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0) + *insert_empty = true; + } + } + + /* Introducing a window at end of cycle on which packets + * potentially not honor launchtime. Window of 5us chosen + * considering software update the tail pointer and packets + * are dma'ed to packet buffer. + */ + if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC)) + netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", + txtime); + + ring->last_tx_cycle = end_of_cycle; + + launchtime = ktime_sub_ns(txtime, baset_est); + if (launchtime > 0) + div_s64_rem(launchtime, cycle_time, &launchtime); + else + launchtime = 0; + + return cpu_to_le32(launchtime); +} + +static int igc_init_empty_frame(struct igc_ring *ring, + struct igc_tx_buffer *buffer, + struct sk_buff *skb) +{ + unsigned int size; + dma_addr_t dma; + + size = skb_headlen(skb); + + dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); + return -ENOMEM; + } + + buffer->skb = skb; + buffer->protocol = 0; + buffer->bytecount = skb->len; + buffer->gso_segs = 1; + buffer->time_stamp = jiffies; + dma_unmap_len_set(buffer, len, skb->len); + dma_unmap_addr_set(buffer, dma, dma); + + return 0; +} + +static int igc_init_tx_empty_descriptor(struct igc_ring *ring, + struct sk_buff *skb, + struct igc_tx_buffer *first) +{ + union igc_adv_tx_desc *desc; + u32 cmd_type, olinfo_status; + int err; + + if (!igc_desc_unused(ring)) + return -EBUSY; + + err = igc_init_empty_frame(ring, first, skb); + if (err) + return err; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + first->bytecount; + olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + + desc = IGC_TX_DESC(ring, ring->next_to_use); + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); + + netdev_tx_sent_queue(txring_txq(ring), skb->len); + + first->next_to_watch = desc; + + ring->next_to_use++; + if (ring->next_to_use == ring->count) + ring->next_to_use = 0; + + return 0; +} + +#define IGC_EMPTY_FRAME_SIZE 60 + +static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, + __le32 launch_time, bool first_flag, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct igc_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = IGC_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT; + + /* For i225, context index must be unique per ring. */ + if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + mss_l4len_idx |= tx_ring->reg_idx << 4; + + if (first_flag) + mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + context_desc->launch_time = launch_time; +} + +static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && + !tx_ring->launchtime_enable) + return; + goto no_csum; + } + + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + fallthrough; + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (skb_csum_is_sctp(skb)) { + type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; + break; + } + fallthrough; + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= IGC_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: + vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, 0); +} + +static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) +{ + struct net_device *netdev = tx_ring->netdev; + struct igc_adapter *adapter = netdev_priv(netdev); + if (!adapter->ecdev) { + netif_stop_subqueue(netdev, tx_ring->queue_index); + } + + /* memory barriier comment */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (igc_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + if (!adapter->ecdev) { + netif_wake_subqueue(netdev, tx_ring->queue_index); + } + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) +{ + if (igc_desc_unused(tx_ring) >= size) + return 0; + return __igc_maybe_stop_tx(tx_ring, size); +} + +#define IGC_SET_FLAG(_input, _flag, _result) \ + (((_flag) <= (_result)) ? \ + ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ + ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) + +static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = IGC_ADVTXD_DTYP_DATA | + IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, + IGC_ADVTXD_DCMD_VLE); + + /* set segmentation bits for TSO */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, + (IGC_ADVTXD_DCMD_TSE)); + + /* set timestamp bit if present, will select the register set + * based on the _TSTAMP(_X) bit. 
+ */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, + (IGC_ADVTXD_MAC_TSTAMP)); + + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_1, + (IGC_ADVTXD_TSTAMP_REG_1)); + + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_2, + (IGC_ADVTXD_TSTAMP_REG_2)); + + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_3, + (IGC_ADVTXD_TSTAMP_REG_3)); + + /* insert frame checksum */ + cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); + + return cmd_type; +} + +static void igc_tx_olinfo_status(struct igc_ring *tx_ring, + union igc_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; + + /* insert L4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) * + ((IGC_TXD_POPTS_TXSM << 8) / + IGC_TX_FLAGS_CSUM); + + /* insert IPv4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) * + (((IGC_TXD_POPTS_IXSM << 8)) / + IGC_TX_FLAGS_IPV4); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int igc_tx_map(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 tx_flags = first->tx_flags; + skb_frag_t *frag; + u16 i = tx_ring->next_to_use; + unsigned int data_len, size; + dma_addr_t dma; + u32 cmd_type; + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + + cmd_type = igc_tx_cmd_type(skb, tx_flags); + tx_desc = IGC_TX_DESC(tx_ring, i); + + igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > IGC_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IGC_MAX_DATA_PER_TXD; + size -= IGC_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, + size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IGC_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
+ */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. */ + igc_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); + } + + return 0; +dma_error: + netdev_err(tx_ring->netdev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; + + /* clear dma mappings for failed tx_buffer_info map */ + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + + if (i-- == 0) + i += tx_ring->count; + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + + if (!adapter->ecdev) { + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + } + + tx_ring->next_to_use = i; + + return -1; +} + +static int igc_tso(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag, + u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); + type_tucmd |= IGC_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM | + IGC_TX_FLAGS_IPV4; + } else { + ip.v6->payload_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) { + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + } else { + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* MSS L4LEN IDX */ + mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; + + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, 
mss_l4len_idx); + + return 1; +} + +static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *skb, u32 *flags) +{ + int i; + + for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { + struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; + + if (tstamp->skb) + continue; + + tstamp->skb = skb_get(skb); + tstamp->start = jiffies; + *flags = tstamp->flags; + + return true; + } + + return false; +} + +static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, + struct igc_ring *tx_ring) +{ + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + bool first_flag = false, insert_empty = false; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + struct igc_tx_buffer *first; + __le32 launch_time = 0; + u32 tx_flags = 0; + unsigned short f; + ktime_t txtime; + u8 hdr_len = 0; + int tso = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); + + if (igc_maybe_stop_tx(tx_ring, count + 5)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + if (!tx_ring->launchtime_enable) + goto done; + + txtime = skb->tstamp; + skb->tstamp = ktime_set(0, 0); + launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty); + + if (insert_empty) { + struct igc_tx_buffer *empty_info; + struct sk_buff *empty; + void *data; + + empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); + if (!empty) + goto done; + + data = skb_put(empty, IGC_EMPTY_FRAME_SIZE); + memset(data, 0, IGC_EMPTY_FRAME_SIZE); + + igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); + + if (igc_init_tx_empty_descriptor(tx_ring, + empty, + empty_info) < 0) + dev_kfree_skb_any(empty); + } + +done: + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->type = IGC_TX_BUFFER_TYPE_SKB; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + if (adapter->qbv_transition || tx_ring->oper_gate_closed) + goto out_drop; + + if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) { + adapter->stats.txdrop++; + goto out_drop; + } + + if (unlikely(!adapter->ecdev && + test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && + (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) { + /* FIXME: add support for retrieving timestamps from + * the other timer registers before skipping the + * timestamping request. 
+ */ + unsigned long flags; + u32 tstamp_flags; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags; + } else { + adapter->tx_hwtstamp_skipped++; + } + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= IGC_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT); + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + igc_tx_csum(tx_ring, first, launch_time, first_flag); + + igc_tx_map(tx_ring, first, hdr_len); + + return NETDEV_TX_OK; + +out_drop: + if (!adapter->ecdev) { + dev_kfree_skb_any(first->skb); + first->skb = NULL; + } + + return NETDEV_TX_OK; +} + +static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, + struct sk_buff *skb) +{ + unsigned int r_idx = skb->queue_mapping; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static netdev_tx_t igc_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb->len < 17) { + if (skb_padto(skb, 17)) + return NETDEV_TX_OK; + skb->len = 17; + } + + return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); +} + +static void igc_rx_checksum(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set */ + if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM)) + return; + + /* Rx checksum disabled via ethtool */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (igc_test_staterr(rx_desc, + IGC_RXDEXT_STATERR_L4E | + IGC_RXDEXT_STATERR_IPE)) { + /* work around errata with sctp packets where the TCPE aka + * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) + * packets (aka let the stack check the crc32c) + */ + if (!(skb->len == 60 && + test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.csum_err++; + u64_stats_update_end(&ring->rx_syncp); + } + /* let the stack verify checksum errors */ + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS | + IGC_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + netdev_dbg(ring->netdev, "cksum success: bits %08X\n", + le32_to_cpu(rx_desc->wb.upper.status_error)); +} + +/* Mapping HW RSS Type to enum pkt_hash_types */ +static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = { + [IGC_RSS_TYPE_NO_HASH] = PKT_HASH_TYPE_L2, + [IGC_RSS_TYPE_HASH_TCP_IPV4] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_IPV4] = PKT_HASH_TYPE_L3, + [IGC_RSS_TYPE_HASH_TCP_IPV6] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_IPV6_EX] = PKT_HASH_TYPE_L3, + [IGC_RSS_TYPE_HASH_IPV6] = PKT_HASH_TYPE_L3, + [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_UDP_IPV4] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_UDP_IPV6] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4, + [10] = 
PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */ + [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */ + [12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisons */ + [13] = PKT_HASH_TYPE_NONE, + [14] = PKT_HASH_TYPE_NONE, + [15] = PKT_HASH_TYPE_NONE, +}; + +static inline void igc_rx_hash(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (ring->netdev->features & NETIF_F_RXHASH) { + u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + u32 rss_type = igc_rss_type(rx_desc); + + skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]); + } +} + +static void igc_rx_vlan(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + u16 vid; + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) { + if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) && + test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) + vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); + else + vid = le16_to_cpu(rx_desc->wb.upper.vlan); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } +} + +/** + * igc_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in order + * to populate the hash, checksum, VLAN, protocol, and other fields within the + * skb. + */ +static void igc_process_skb_fields(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + igc_rx_hash(rx_ring, rx_desc, skb); + + igc_rx_checksum(rx_ring, rx_desc, skb); + + igc_rx_vlan(rx_ring, rx_desc, skb); + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features) +{ + bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + if (enable) { + /* enable VLAN tag insert/strip */ + ctrl |= IGC_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~IGC_CTRL_VME; + } + wr32(IGC_CTRL, ctrl); +} + +static void igc_restore_vlan(struct igc_adapter *adapter) +{ + igc_vlan_mode(adapter->netdev, adapter->netdev->features); +} + +static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, + const unsigned int size, + int *rx_buffer_pgcnt) +{ + struct igc_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + *rx_buffer_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buffer->page); +#else + 0; +#endif + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer, + unsigned int truesize) +{ +#if (PAGE_SIZE < 8192) + buffer->page_offset ^= truesize; +#else + buffer->page_offset += truesize; +#endif +} + +static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(ring) / 2; +#else + truesize = 
ring_uses_build_skb(ring) ? + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + return truesize; +} + +/** + * igc_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: size of buffer to be added + * + * This function will add the data contained in rx_buffer->page to the skb. + */ +static void igc_add_rx_frag(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(rx_ring) / 2; +#else + truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + + igc_rx_buffer_flip(rx_buffer, truesize); +} + +static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct xdp_buff *xdp) +{ + unsigned int size = xdp->data_end - xdp->data; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data_meta); + + /* build an skb around the page buffer */ + skb = napi_build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, size); + if (metasize) + skb_metadata_set(skb, metasize); + + igc_rx_buffer_flip(rx_buffer, truesize); + return skb; +} + +static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int size = xdp->data_end - xdp->data; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + void *va = xdp->data; + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data_meta); + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + IGC_RX_HDR_LEN + metasize); + if (unlikely(!skb)) + return NULL; + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IGC_RX_HDR_LEN) + headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, + ALIGN(headlen + metasize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (va + headlen) - page_address(rx_buffer->page), + size, truesize); + igc_rx_buffer_flip(rx_buffer, truesize); + } else { + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +/** + * igc_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + */ +static void igc_reuse_rx_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *old_buff) +{ + 
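+	/* Besides the normal reuse path via igc_put_rx_buffer(), the EtherCAT
+	 * RX branch in igc_clean_rx_irq() calls this directly after handing
+	 * the frame to ecdev_receive(), so the page is recycled without going
+	 * through the network stack.
+	 */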
u16 nta = rx_ring->next_to_alloc; + struct igc_rx_buffer *new_buff; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls. + */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote and pfmemalloc pages */ + if (!dev_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) + return false; +#else +#define IGC_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) + + if (rx_buffer->page_offset > IGC_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * igc_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + */ +static bool igc_is_non_eop(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGC_RX_DESC(rx_ring, ntc)); + + if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))) + return false; + + return true; +} + +/** + * igc_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
+ */ +static bool igc_cleanup_headers(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + + if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { + struct net_device *netdev = rx_ring->netdev; + + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +static void igc_put_rx_buffer(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { + /* hand second half of page back to the ring */ + igc_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + +static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) +{ + struct igc_adapter *adapter = rx_ring->q_vector->adapter; + + if (ring_uses_build_skb(rx_ring)) + return IGC_SKB_PAD; + if (igc_xdp_is_enabled(adapter)) + return XDP_PACKET_HEADROOM; + + return 0; +} + +static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = igc_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +/** + * igc_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: rx descriptor ring + * @cleaned_count: number of buffers to clean + */ +static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) +{ + union igc_adv_rx_desc *rx_desc; + u16 i = rx_ring->next_to_use; + struct igc_rx_buffer *bi; + u16 bufsz; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = IGC_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + bufsz = igc_rx_bufsz(rx_ring); + + do { + if (!igc_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IGC_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) +{ + union igc_adv_rx_desc *desc; + u16 i = ring->next_to_use; + struct igc_rx_buffer *bi; + dma_addr_t dma; + bool ok = true; + + if (!count) + return ok; + + XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff); + + desc = IGC_RX_DESC(ring, i); + bi = &ring->rx_buffer_info[i]; + i -= ring->count; + + do { + bi->xdp = xsk_buff_alloc(ring->xsk_pool); + if (!bi->xdp) { + ok = false; + break; + } + + dma = xsk_buff_xdp_get_dma(bi->xdp); + desc->read.pkt_addr = cpu_to_le64(dma); + + desc++; + bi++; + i++; + if (unlikely(!i)) { + desc = IGC_RX_DESC(ring, 0); + bi = ring->rx_buffer_info; + i -= ring->count; + } + + /* Clear the length for the next_to_use descriptor. */ + desc->wb.upper.length = 0; + + count--; + } while (count); + + i += ring->count; + + if (ring->next_to_use != i) { + ring->next_to_use = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, ring->tail); + } + + return ok; +} + +/* This function requires __netif_tx_lock is held by the caller. */ +static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, + struct xdp_frame *xdpf) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); + u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? 
sinfo->nr_frags : 0; + u16 count, index = ring->next_to_use; + struct igc_tx_buffer *head = &ring->tx_buffer_info[index]; + struct igc_tx_buffer *buffer = head; + union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index); + u32 olinfo_status, len = xdpf->len, cmd_type; + void *data = xdpf->data; + u16 i; + + count = TXD_USE_COUNT(len); + for (i = 0; i < nr_frags; i++) + count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); + + if (igc_maybe_stop_tx(ring, count + 3)) { + /* this is a hard error */ + return -EBUSY; + } + + i = 0; + head->bytecount = xdp_get_frame_len(xdpf); + head->type = IGC_TX_BUFFER_TYPE_XDP; + head->gso_segs = 1; + head->xdpf = xdpf; + + olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + + for (;;) { + dma_addr_t dma; + + dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, + "Failed to map DMA for TX\n"); + goto unmap; + } + + dma_unmap_len_set(buffer, len, len); + dma_unmap_addr_set(buffer, dma, dma); + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | len; + + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.buffer_addr = cpu_to_le64(dma); + + buffer->protocol = 0; + + if (++index == ring->count) + index = 0; + + if (i == nr_frags) + break; + + buffer = &ring->tx_buffer_info[index]; + desc = IGC_TX_DESC(ring, index); + desc->read.olinfo_status = 0; + + data = skb_frag_address(&sinfo->frags[i]); + len = skb_frag_size(&sinfo->frags[i]); + i++; + } + desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD); + + netdev_tx_sent_queue(txring_txq(ring), head->bytecount); + /* set the timestamp */ + head->time_stamp = jiffies; + /* set next_to_watch value indicating a packet is present */ + head->next_to_watch = desc; + ring->next_to_use = index; + + return 0; + +unmap: + for (;;) { + buffer = &ring->tx_buffer_info[index]; + if (dma_unmap_len(buffer, len)) + dma_unmap_page(ring->dev, + dma_unmap_addr(buffer, dma), + dma_unmap_len(buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(buffer, len, 0); + if (buffer == head) + break; + + if (!index) + index += ring->count; + index--; + } + + return -ENOMEM; +} + +static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, + int cpu) +{ + int index = cpu; + + if (unlikely(index < 0)) + index = 0; + + while (index >= adapter->num_tx_queues) + index -= adapter->num_tx_queues; + + return adapter->tx_ring[index]; +} + +static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int res; + + if (unlikely(!xdpf)) + return -EFAULT; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + res = igc_xdp_init_tx_descriptor(ring, xdpf); + __netif_tx_unlock(nq); + return res; +} + +/* This function assumes rcu_read_lock() is held by the caller. 
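+ * igc_clean_rx_irq_zc() takes rcu_read_lock() explicitly around its RX loop;
+ * igc_xdp_run_prog() relies on the RCU protection provided by NAPI (softirq)
+ * context.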
*/ +static int __igc_xdp_run_prog(struct igc_adapter *adapter, + struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + u32 act = bpf_prog_run_xdp(prog, xdp); + + switch (act) { + case XDP_PASS: + return IGC_XDP_PASS; + case XDP_TX: + if (igc_xdp_xmit_back(adapter, xdp) < 0) + goto out_failure; + return IGC_XDP_TX; + case XDP_REDIRECT: + if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) + goto out_failure; + return IGC_XDP_REDIRECT; + break; + default: + bpf_warn_invalid_xdp_action(adapter->netdev, prog, act); + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(adapter->netdev, prog, act); + fallthrough; + case XDP_DROP: + return IGC_XDP_CONSUMED; + } +} + +static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, + struct xdp_buff *xdp) +{ + struct bpf_prog *prog; + int res; + + prog = READ_ONCE(adapter->xdp_prog); + if (!prog) { + res = IGC_XDP_PASS; + goto out; + } + + res = __igc_xdp_run_prog(adapter, prog, xdp); + +out: + return ERR_PTR(-res); +} + +/* This function assumes __netif_tx_lock is held by the caller. */ +static void igc_flush_tx_descriptors(struct igc_ring *ring) +{ + /* Once tail pointer is updated, hardware can fetch the descriptors + * any time so we issue a write membar here to ensure all memory + * writes are complete before the tail pointer is updated. + */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +static void igc_finalize_xdp(struct igc_adapter *adapter, int status) +{ + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + + if (status & IGC_XDP_TX) { + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + igc_flush_tx_descriptors(ring); + __netif_tx_unlock(nq); + } + + if (status & IGC_XDP_REDIRECT) + xdp_do_flush(); +} + +static void igc_update_rx_stats(struct igc_q_vector *q_vector, + unsigned int packets, unsigned int bytes) +{ + struct igc_ring *ring = q_vector->rx.ring; + + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.packets += packets; + ring->rx_stats.bytes += bytes; + u64_stats_update_end(&ring->rx_syncp); + + q_vector->rx.total_packets += packets; + q_vector->rx.total_bytes += bytes; +} + +static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) +{ + unsigned int total_bytes = 0, total_packets = 0; + struct igc_adapter *adapter = q_vector->adapter; + struct igc_ring *rx_ring = q_vector->rx.ring; + struct sk_buff *skb = rx_ring->skb; + u16 cleaned_count = igc_desc_unused(rx_ring); + int xdp_status = 0, rx_buffer_pgcnt; + + while (likely(total_packets < budget)) { + union igc_adv_rx_desc *rx_desc; + struct igc_rx_buffer *rx_buffer; + unsigned int size, truesize; + struct igc_xdp_buff ctx; + ktime_t timestamp = 0; + int pkt_offset = 0; + void *pktbuf; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGC_RX_BUFFER_WRITE) { + igc_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); + truesize = igc_get_rx_frame_truesize(rx_ring, size); + + pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; + + if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = 
igc_ptp_rx_pktstamp(q_vector->adapter, + pktbuf); + ctx.rx_ts = timestamp; + pkt_offset = IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + if (!skb) { + xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq); + xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring), + igc_rx_offset(rx_ring) + pkt_offset, + size, true); + xdp_buff_clear_frags_flag(&ctx.xdp); + ctx.rx_desc = rx_desc; + + skb = igc_xdp_run_prog(adapter, &ctx.xdp); + } + + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + switch (xdp_res) { + case IGC_XDP_CONSUMED: + rx_buffer->pagecnt_bias++; + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + igc_rx_buffer_flip(rx_buffer, truesize); + xdp_status |= xdp_res; + break; + } + + total_packets++; + total_bytes += size; + } + else if (adapter->ecdev) { + unsigned char *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); + ecdev_receive(adapter->ecdev, va, size); + adapter->ec_watchdog_jiffies = jiffies; + igc_reuse_rx_page(rx_ring, rx_buffer); + } + else { + if (skb) + igc_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp); + else + skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp, + timestamp); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); + } + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (igc_is_non_eop(rx_ring, rx_desc)) + continue; + + + if (adapter->ecdev) { + total_packets++; + continue; + } + + /* verify the packet layout is correct */ + if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, VLAN, and protocol */ + igc_process_skb_fields(rx_ring, rx_desc, skb); + + napi_gro_receive(&q_vector->napi, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } + + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + igc_update_rx_stats(q_vector, total_packets, total_bytes); + + if (cleaned_count) + igc_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_packets; +} + +static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, + struct xdp_buff *xdp) +{ + unsigned int totalsize = xdp->data_end - xdp->data_meta; + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + net_prefetch(xdp->data_meta); + + skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + memcpy(__skb_put(skb, totalsize), xdp->data_meta, + ALIGN(totalsize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + + return skb; +} + +static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, + union igc_adv_rx_desc *desc, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + struct igc_ring *ring = q_vector->rx.ring; + struct sk_buff *skb; + + skb = igc_construct_skb_zc(ring, xdp); + if (!skb) { + ring->rx_stats.alloc_failed++; + return; + } + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + if (igc_cleanup_headers(ring, desc, skb)) + return; + + igc_process_skb_fields(ring, desc, skb); + napi_gro_receive(&q_vector->napi, 
skb); +} + +static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp) +{ + /* xdp_buff pointer used by ZC code path is alloc as xdp_buff_xsk. The + * igc_xdp_buff shares its layout with xdp_buff_xsk and private + * igc_xdp_buff fields fall into xdp_buff_xsk->cb + */ + return (struct igc_xdp_buff *)xdp; +} + +static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_ring *ring = q_vector->rx.ring; + u16 cleaned_count = igc_desc_unused(ring); + int total_bytes = 0, total_packets = 0; + u16 ntc = ring->next_to_clean; + struct bpf_prog *prog; + bool failure = false; + int xdp_status = 0; + + rcu_read_lock(); + + prog = READ_ONCE(adapter->xdp_prog); + + while (likely(total_packets < budget)) { + union igc_adv_rx_desc *desc; + struct igc_rx_buffer *bi; + struct igc_xdp_buff *ctx; + ktime_t timestamp = 0; + unsigned int size; + int res; + + desc = IGC_RX_DESC(ring, ntc); + size = le16_to_cpu(desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + bi = &ring->rx_buffer_info[ntc]; + + ctx = xsk_buff_to_igc_ctx(bi->xdp); + ctx->rx_desc = desc; + + if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, + bi->xdp->data); + ctx->rx_ts = timestamp; + + bi->xdp->data += IGC_TS_HDR_LEN; + + /* HW timestamp has been copied into local variable. Metadata + * length when XDP program is called should be 0. + */ + bi->xdp->data_meta += IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + bi->xdp->data_end = bi->xdp->data + size; + xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); + + res = __igc_xdp_run_prog(adapter, prog, bi->xdp); + switch (res) { + case IGC_XDP_PASS: + igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); + fallthrough; + case IGC_XDP_CONSUMED: + xsk_buff_free(bi->xdp); + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + xdp_status |= res; + break; + } + + bi->xdp = NULL; + total_bytes += size; + total_packets++; + cleaned_count++; + ntc++; + if (ntc == ring->count) + ntc = 0; + } + + ring->next_to_clean = ntc; + rcu_read_unlock(); + + if (cleaned_count >= IGC_RX_BUFFER_WRITE) + failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count); + + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + + igc_update_rx_stats(q_vector, total_packets, total_bytes); + + if (xsk_uses_need_wakeup(ring->xsk_pool)) { + if (failure || ring->next_to_clean == ring->next_to_use) + xsk_set_rx_need_wakeup(ring->xsk_pool); + else + xsk_clear_rx_need_wakeup(ring->xsk_pool); + return total_packets; + } + + return failure ? 
budget : total_packets; +} + +static void igc_update_tx_stats(struct igc_q_vector *q_vector, + unsigned int packets, unsigned int bytes) +{ + struct igc_ring *ring = q_vector->tx.ring; + + u64_stats_update_begin(&ring->tx_syncp); + ring->tx_stats.bytes += bytes; + ring->tx_stats.packets += packets; + u64_stats_update_end(&ring->tx_syncp); + + q_vector->tx.total_bytes += bytes; + q_vector->tx.total_packets += packets; +} + +static void igc_xdp_xmit_zc(struct igc_ring *ring) +{ + struct xsk_buff_pool *pool = ring->xsk_pool; + struct netdev_queue *nq = txring_txq(ring); + union igc_adv_tx_desc *tx_desc = NULL; + int cpu = smp_processor_id(); + struct xdp_desc xdp_desc; + u16 budget, ntu; + + if (!netif_carrier_ok(ring->netdev)) + return; + + __netif_tx_lock(nq, cpu); + + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + + ntu = ring->next_to_use; + budget = igc_desc_unused(ring); + + while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { + u32 cmd_type, olinfo_status; + struct igc_tx_buffer *bi; + dma_addr_t dma; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + xdp_desc.len; + olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; + + dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); + + tx_desc = IGC_TX_DESC(ring, ntu); + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + bi = &ring->tx_buffer_info[ntu]; + bi->type = IGC_TX_BUFFER_TYPE_XSK; + bi->protocol = 0; + bi->bytecount = xdp_desc.len; + bi->gso_segs = 1; + bi->time_stamp = jiffies; + bi->next_to_watch = tx_desc; + + netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len); + + ntu++; + if (ntu == ring->count) + ntu = 0; + } + + ring->next_to_use = ntu; + if (tx_desc) { + igc_flush_tx_descriptors(ring); + xsk_tx_release(pool); + } + + __netif_tx_unlock(nq); +} + +/** + * igc_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * @napi_budget: Used to determine if we are in netpoll + * + * returns true if ring is completely cleaned + */ +static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + struct igc_ring *tx_ring = q_vector->tx.ring; + unsigned int i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 xsk_frames = 0; + + if (test_bit(__IGC_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IGC_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + switch (tx_buffer->type) { + case IGC_TX_BUFFER_TYPE_XSK: + xsk_frames++; + break; + case IGC_TX_BUFFER_TYPE_XDP: + 
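+			/* Return the frame to its XDP memory allocator and undo
+			 * the DMA mapping created in igc_xdp_init_tx_descriptor().
+			 */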
xdp_return_frame(tx_buffer->xdpf); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + case IGC_TX_BUFFER_TYPE_SKB: + if (!adapter->ecdev) { + napi_consume_skb(tx_buffer->skb, napi_budget); + } + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + default: + netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); + break; + } + + /* clear last DMA location and unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + if (!adapter->ecdev) { + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + } + + i += tx_ring->count; + tx_ring->next_to_clean = i; + + igc_update_tx_stats(q_vector, total_packets, total_bytes); + + if (tx_ring->xsk_pool) { + if (xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) + xsk_set_tx_need_wakeup(tx_ring->xsk_pool); + igc_xdp_xmit_zc(tx_ring); + } + + if (!adapter->ecdev && + test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { + struct igc_hw *hw = &adapter->hw; + + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) && + (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) && + !tx_ring->oper_gate_closed) { + /* detected Tx unit hang */ + netdev_err(tx_ring->netdev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%p>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + rd32(IGC_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_buffer->time_stamp, + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + /* we are about to reset, no point in enabling stuff */ + return true; + } + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(!adapter->ecdev && total_packets && + netif_carrier_ok(tx_ring->netdev) && + igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !(test_bit(__IGC_DOWN, &adapter->state))) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.restart_queue++; + u64_stats_update_end(&tx_ring->tx_syncp); + } + } + + return !!budget; +} + +static int igc_find_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) +{ + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 ral, rah; + int i; + + for (i = 0; i < max_entries; i++) { + ral = rd32(IGC_RAL(i)); + rah = rd32(IGC_RAH(i)); + + if (!(rah & IGC_RAH_AV)) + continue; + if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type) + continue; + if ((rah & IGC_RAH_RAH_MASK) != + le16_to_cpup((__le16 *)(addr + 4))) + continue; + if (ral != le32_to_cpup((__le32 *)(addr))) + continue; + + return i; + } + + return -1; +} + +static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 rah; + int i; + + for (i = 0; i < max_entries; i++) { + rah = rd32(IGC_RAH(i)); + + if (!(rah & IGC_RAH_AV)) + return i; + } + + return -1; +} + +/** + * igc_add_mac_filter() - Add MAC address filter + * @adapter: Pointer to adapter where the filter should be added + * @type: MAC address filter type (source or destination) + * @addr: MAC address + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_add_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr, + int queue) +{ + struct net_device *dev = adapter->netdev; + int index; + + index = igc_find_mac_filter(adapter, type, addr); + if (index >= 0) + goto update_filter; + + index = igc_get_avail_mac_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", + index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", + addr, queue); + +update_filter: + igc_set_mac_filter_hw(adapter, index, type, addr, queue); + return 0; +} + +/** + * igc_del_mac_filter() - Delete MAC address filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @type: MAC address filter type (source or destination) + * @addr: MAC address + */ +static void igc_del_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) +{ + struct net_device *dev = adapter->netdev; + int index; + + index = igc_find_mac_filter(adapter, type, addr); + if (index < 0) + return; + + if (index == 0) { + /* If this is the default filter, we don't actually delete it. + * We just reset to its default value i.e. disable queue + * assignment. + */ + netdev_dbg(dev, "Disable default MAC filter queue assignment"); + + igc_set_mac_filter_hw(adapter, 0, type, addr, -1); + } else { + netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", + index, + type == IGC_MAC_FILTER_TYPE_DST ? 
"dst" : "src", + addr); + + igc_clear_mac_filter_hw(adapter, index); + } +} + +/** + * igc_add_vlan_prio_filter() - Add VLAN priority filter + * @adapter: Pointer to adapter where the filter should be added + * @prio: VLAN priority value + * @queue: Queue number which matching frames are assigned to + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, + int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + if (vlanpqf & IGC_VLANPQF_VALID(prio)) { + netdev_dbg(dev, "VLAN priority filter already in use\n"); + return -EEXIST; + } + + vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); + vlanpqf |= IGC_VLANPQF_VALID(prio); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", + prio, queue); + return 0; +} + +/** + * igc_del_vlan_prio_filter() - Delete VLAN priority filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @prio: VLAN priority value + */ +static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) +{ + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + vlanpqf &= ~IGC_VLANPQF_VALID(prio); + vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", + prio); +} + +static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if (!(etqf & IGC_ETQF_FILTER_ENABLE)) + return i; + } + + return -1; +} + +/** + * igc_add_etype_filter() - Add ethertype filter + * @adapter: Pointer to adapter where the filter should be added + * @etype: Ethertype value + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, + int queue) +{ + struct igc_hw *hw = &adapter->hw; + int index; + u32 etqf; + + index = igc_get_avail_etype_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + etqf = rd32(IGC_ETQF(index)); + + etqf &= ~IGC_ETQF_ETYPE_MASK; + etqf |= etype; + + if (queue >= 0) { + etqf &= ~IGC_ETQF_QUEUE_MASK; + etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); + etqf |= IGC_ETQF_QUEUE_ENABLE; + } + + etqf |= IGC_ETQF_FILTER_ENABLE; + + wr32(IGC_ETQF(index), etqf); + + netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", + etype, queue); + return 0; +} + +static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) + return i; + } + + return -1; +} + +/** + * igc_del_etype_filter() - Delete ethertype filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @etype: Ethertype value + */ +static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int index; + + index = igc_find_etype_filter(adapter, etype); + if (index < 0) + return; + + wr32(IGC_ETQF(index), 0); + + netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", + etype); +} + +static int igc_flex_filter_select(struct igc_adapter *adapter, + struct igc_flex_filter *input, + u32 *fhft) +{ + struct igc_hw *hw = &adapter->hw; + u8 fhft_index; + u32 fhftsl; + + if (input->index >= MAX_FLEX_FILTER) { + dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); + return -EINVAL; + } + + /* Indirect table select register */ + fhftsl = rd32(IGC_FHFTSL); + fhftsl &= ~IGC_FHFTSL_FTSL_MASK; + switch (input->index) { + case 0 ... 7: + fhftsl |= 0x00; + break; + case 8 ... 15: + fhftsl |= 0x01; + break; + case 16 ... 23: + fhftsl |= 0x02; + break; + case 24 ... 31: + fhftsl |= 0x03; + break; + } + wr32(IGC_FHFTSL, fhftsl); + + /* Normalize index down to host table register */ + fhft_index = input->index % 8; + + *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : + IGC_FHFT_EXT(fhft_index - 4); + + return 0; +} + +static int igc_write_flex_filter_ll(struct igc_adapter *adapter, + struct igc_flex_filter *input) +{ + struct device *dev = &adapter->pdev->dev; + struct igc_hw *hw = &adapter->hw; + u8 *data = input->data; + u8 *mask = input->mask; + u32 queuing; + u32 fhft; + u32 wufc; + int ret; + int i; + + /* Length has to be aligned to 8. Otherwise the filter will fail. Bail + * out early to avoid surprises later. + */ + if (input->length % 8 != 0) { + dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n"); + return -EINVAL; + } + + /* Select corresponding flex filter register and get base for host table. */ + ret = igc_flex_filter_select(adapter, input, &fhft); + if (ret) + return ret; + + /* When adding a filter globally disable flex filter feature. That is + * recommended within the datasheet. 
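+	 * IGC_WUFC_FLEX_HQ is turned back on further down, once the filter's
+	 * data and mask have been written to the host filter table.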
+ */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); + + /* Configure filter */ + queuing = input->length & IGC_FHFT_LENGTH_MASK; + queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK; + queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK; + + if (input->immediate_irq) + queuing |= IGC_FHFT_IMM_INT; + + if (input->drop) + queuing |= IGC_FHFT_DROP; + + wr32(fhft + 0xFC, queuing); + + /* Write data (128 byte) and mask (128 bit) */ + for (i = 0; i < 16; ++i) { + const size_t data_idx = i * 8; + const size_t row_idx = i * 16; + u32 dw0 = + (data[data_idx + 0] << 0) | + (data[data_idx + 1] << 8) | + (data[data_idx + 2] << 16) | + (data[data_idx + 3] << 24); + u32 dw1 = + (data[data_idx + 4] << 0) | + (data[data_idx + 5] << 8) | + (data[data_idx + 6] << 16) | + (data[data_idx + 7] << 24); + u32 tmp; + + /* Write row: dw0, dw1 and mask */ + wr32(fhft + row_idx, dw0); + wr32(fhft + row_idx + 4, dw1); + + /* mask is only valid for MASK(7, 0) */ + tmp = rd32(fhft + row_idx + 8); + tmp &= ~GENMASK(7, 0); + tmp |= mask[i]; + wr32(fhft + row_idx + 8, tmp); + } + + /* Enable filter. */ + wufc |= IGC_WUFC_FLEX_HQ; + if (input->index > 8) { + /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */ + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); + + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc |= (IGC_WUFC_FLX0 << input->index); + } + wr32(IGC_WUFC, wufc); + + dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", + input->index); + + return 0; +} + +static void igc_flex_filter_add_field(struct igc_flex_filter *flex, + const void *src, unsigned int offset, + size_t len, const void *mask) +{ + int i; + + /* data */ + memcpy(&flex->data[offset], src, len); + + /* mask */ + for (i = 0; i < len; ++i) { + const unsigned int idx = i + offset; + const u8 *ptr = mask; + + if (mask) { + if (ptr[i] & 0xff) + flex->mask[idx / 8] |= BIT(idx % 8); + + continue; + } + + flex->mask[idx / 8] |= BIT(idx % 8); + } +} + +static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + int i; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + for (i = 0; i < MAX_FLEX_FILTER; i++) { + if (i < 8) { + if (!(wufc & (IGC_WUFC_FLX0 << i))) + return i; + } else { + if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) + return i; + } + } + + return -ENOSPC; +} + +static bool igc_flex_filter_in_use(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + if (wufc & IGC_WUFC_FILTER_MASK) + return true; + + if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) + return true; + + return false; +} + +static int igc_add_flex_filter(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct igc_flex_filter flex = { }; + struct igc_nfc_filter *filter = &rule->filter; + unsigned int eth_offset, user_offset; + int ret, index; + bool vlan; + + index = igc_find_avail_flex_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + /* Construct the flex filter: + * -> dest_mac [6] + * -> src_mac [6] + * -> tpid [2] + * -> vlan tci [2] + * -> ether type [2] + * -> user data [8] + * -> = 26 bytes => 32 length + */ + flex.index = index; + flex.length = 32; + flex.rx_queue = rule->action; + + vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; + eth_offset = vlan ? 16 : 12; + user_offset = vlan ? 
18 : 14; + + /* Add destination MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, + ETH_ALEN, NULL); + + /* Add source MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->src_addr, 6, + ETH_ALEN, NULL); + + /* Add VLAN etype */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) + igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, + sizeof(filter->vlan_etype), + NULL); + + /* Add VLAN TCI */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, + sizeof(filter->vlan_tci), NULL); + + /* Add Ether type */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + __be16 etype = cpu_to_be16(filter->etype); + + igc_flex_filter_add_field(&flex, &etype, eth_offset, + sizeof(etype), NULL); + } + + /* Add user data */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) + igc_flex_filter_add_field(&flex, &filter->user_data, + user_offset, + sizeof(filter->user_data), + filter->user_mask); + + /* Add it down to the hardware and enable it. */ + ret = igc_write_flex_filter_ll(adapter, &flex); + if (ret) + return ret; + + filter->flex_index = index; + + return 0; +} + +static void igc_del_flex_filter(struct igc_adapter *adapter, + u16 reg_index) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc; + + /* Just disable the filter. The filter table itself is kept + * intact. Another flex_filter_add() should override the "old" data + * then. + */ + if (reg_index > 8) { + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc = rd32(IGC_WUFC); + + wufc &= ~(IGC_WUFC_FLX0 << reg_index); + wr32(IGC_WUFC, wufc); + } + + if (igc_flex_filter_in_use(adapter)) + return; + + /* No filters are in use, we may disable flex filters */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); +} + +static int igc_enable_nfc_rule(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + int err; + + if (rule->flex) { + return igc_add_flex_filter(adapter, rule); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + err = igc_add_etype_filter(adapter, rule->filter.etype, + rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + err = igc_add_vlan_prio_filter(adapter, prio, rule->action); + if (err) + return err; + } + + return 0; +} + +static void igc_disable_nfc_rule(struct igc_adapter *adapter, + const struct igc_nfc_rule *rule) +{ + if (rule->flex) { + igc_del_flex_filter(adapter, rule->filter.flex_index); + return; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) + igc_del_etype_filter(adapter, rule->filter.etype); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + igc_del_vlan_prio_filter(adapter, prio); + } + + if (rule->filter.match_flags & 
IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr); +} + +/** + * igc_get_nfc_rule() - Get NFC rule + * @adapter: Pointer to adapter + * @location: Rule location + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: Pointer to NFC rule at @location. If not found, NULL. + */ +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location) +{ + struct igc_nfc_rule *rule; + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (rule->location == location) + return rule; + if (rule->location > location) + break; + } + + return NULL; +} + +/** + * igc_del_nfc_rule() - Delete NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be deleted + * + * Disable NFC rule in hardware and delete it from adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + */ +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + igc_disable_nfc_rule(adapter, rule); + + list_del(&rule->list); + adapter->nfc_rule_count--; + + kfree(rule); +} + +static void igc_flush_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule, *tmp; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) + igc_del_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +/** + * igc_add_nfc_rule() - Add NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be added + * + * Enable NFC rule in hardware and add it to adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 on success, negative errno on failure. + */ +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + struct igc_nfc_rule *pred, *cur; + int err; + + err = igc_enable_nfc_rule(adapter, rule); + if (err) + return err; + + pred = NULL; + list_for_each_entry(cur, &adapter->nfc_rule_list, list) { + if (cur->location >= rule->location) + break; + pred = cur; + } + + list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); + adapter->nfc_rule_count++; + return 0; +} + +static void igc_restore_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) + igc_enable_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); +} + +static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr); + return 0; +} + +/** + * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. 
+ */ +static void igc_set_rx_mode(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE; + int count; + + /* Check for Promiscuous and All Multicast modes */ + if (netdev->flags & IFF_PROMISC) { + rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE; + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= IGC_RCTL_MPE; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = igc_write_mc_addr_list(netdev); + if (count < 0) + rctl |= IGC_RCTL_MPE; + } + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync)) + rctl |= IGC_RCTL_UPE; + + /* update state of unicast and multicast */ + rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE); + wr32(IGC_RCTL, rctl); + +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) + rlpml = IGC_MAX_FRAME_BUILD_SKB; +#endif + wr32(IGC_RLPML, rlpml); +} + +/** + * igc_configure - configure the hardware for RX and TX + * @adapter: private board structure + */ +static void igc_configure(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i = 0; + + igc_get_hw_control(adapter); + igc_set_rx_mode(netdev); + + igc_restore_vlan(adapter); + + igc_setup_tctl(adapter); + igc_setup_mrqc(adapter); + igc_setup_rctl(adapter); + + igc_set_default_mac_filter(adapter); + igc_restore_nfc_rules(adapter); + + igc_configure_tx(adapter); + igc_configure_rx(adapter); + + igc_rx_fifo_flush_base(&adapter->hw); + + /* call igc_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + + if (ring->xsk_pool) + igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); + else + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); + } +} + +/** + * igc_write_ivar - configure ivar for given MSI-X vector + * @hw: pointer to the HW structure + * @msix_vector: vector number we are allocating to a given ring + * @index: row index of IVAR register to write within IVAR table + * @offset: column offset of in IVAR, should be multiple of 8 + * + * The IVAR table consists of 2 columns, + * each containing an cause allocation for an Rx and Tx ring, and a + * variable number of rows depending on the number of queues supported. 
+ */ +static void igc_write_ivar(struct igc_hw *hw, int msix_vector, + int index, int offset) +{ + u32 ivar = array_rd32(IGC_IVAR0, index); + + /* clear any bits that are currently set */ + ivar &= ~((u32)0xFF << offset); + + /* write vector and valid bit */ + ivar |= (msix_vector | IGC_IVAR_VALID) << offset; + + array_wr32(IGC_IVAR0, index, ivar); +} + +static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + int rx_queue = IGC_N0_QUEUE; + int tx_queue = IGC_N0_QUEUE; + + if (q_vector->rx.ring) + rx_queue = q_vector->rx.ring->reg_idx; + if (q_vector->tx.ring) + tx_queue = q_vector->tx.ring->reg_idx; + + switch (hw->mac.type) { + case igc_i225: + if (rx_queue > IGC_N0_QUEUE) + igc_write_ivar(hw, msix_vector, + rx_queue >> 1, + (rx_queue & 0x1) << 4); + if (tx_queue > IGC_N0_QUEUE) + igc_write_ivar(hw, msix_vector, + tx_queue >> 1, + ((tx_queue & 0x1) << 4) + 8); + q_vector->eims_value = BIT(msix_vector); + break; + default: + WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); + break; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + + /* configure q_vector to set itr on first interrupt */ + q_vector->set_itr = 1; +} + +/** + * igc_configure_msix - Configure MSI-X hardware + * @adapter: Pointer to adapter structure + * + * igc_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + */ +static void igc_configure_msix(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i, vector = 0; + u32 tmp; + + adapter->eims_enable_mask = 0; + + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case igc_i225: + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. 
+ */ + wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | + IGC_GPIE_PBA | IGC_GPIE_EIAME | + IGC_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = BIT(vector); + tmp = (vector++ | IGC_IVAR_VALID) << 8; + + wr32(IGC_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) + igc_assign_vector(adapter->q_vector[i], vector++); + + wrfl(); +} + +/** + * igc_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static void igc_irq_enable(struct igc_adapter *adapter) +{ + if (adapter->ecdev) { + /* skip enabling interrupts */ + return; + } + + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; + u32 regval = rd32(IGC_EIAC); + + wr32(IGC_EIAC, regval | adapter->eims_enable_mask); + regval = rd32(IGC_EIAM); + wr32(IGC_EIAM, regval | adapter->eims_enable_mask); + wr32(IGC_EIMS, adapter->eims_enable_mask); + wr32(IGC_IMS, ims); + } else { + wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + } +} + +/** + * igc_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static void igc_irq_disable(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 regval = rd32(IGC_EIAM); + + wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); + wr32(IGC_EIMC, adapter->eims_enable_mask); + regval = rd32(IGC_EIAC); + wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); + } + + wr32(IGC_IAM, 0); + wr32(IGC_IMC, ~0); + wrfl(); + + if (adapter->ecdev) { + /* skip synchonizing IRQs */ + return; + } + + if (adapter->msix_entries) { + int vector = 0, i; + + synchronize_irq(adapter->msix_entries[vector++].vector); + + for (i = 0; i < adapter->num_q_vectors; i++) + synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues) +{ + /* Determine if we need to pair queues. */ + /* If rss_queues > half of max_rss_queues, pair the queues in + * order to conserve interrupts due to limited supply. + */ + if (adapter->rss_queues > (max_rss_queues / 2)) + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + else + adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; +} + +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) +{ + return IGC_MAX_RX_QUEUES; +} + +static void igc_init_queue_configuration(struct igc_adapter *adapter) +{ + u32 max_rss_queues; + + max_rss_queues = igc_get_max_rss_queues(adapter); + adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + + igc_set_flag_queue_pairs(adapter, max_rss_queues); +} + +/** + * igc_reset_q_vector - Reset config for interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be reset + * + * If NAPI is enabled it will delete any references to the + * NAPI struct. This is preparation for igc_free_q_vector. 
+ */ +static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + /* if we're coming from igc_set_interrupt_capability, the vectors are + * not yet allocated + */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; + + if (!adapter->ecdev) { + netif_napi_del(&q_vector->napi); + } +} + +/** + * igc_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + */ +static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + adapter->q_vector[v_idx] = NULL; + + /* igc_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + if (q_vector) + kfree_rcu(q_vector, rcu); +} + +/** + * igc_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + */ +static void igc_free_q_vectors(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) { + igc_reset_q_vector(adapter, v_idx); + igc_free_q_vector(adapter, v_idx); + } +} + +/** + * igc_update_itr - update the dynamic ITR value based on statistics + * @q_vector: pointer to q_vector + * @ring_container: ring info to update the itr for + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * NOTE: These calculations are only valid when operating in a single- + * queue environment. 
+ */ +static void igc_update_itr(struct igc_q_vector *q_vector, + struct igc_ring_container *ring_container) +{ + unsigned int packets = ring_container->total_packets; + unsigned int bytes = ring_container->total_bytes; + u8 itrval = ring_container->itr; + + /* no packets, exit with status unchanged */ + if (packets == 0) + return; + + switch (itrval) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + itrval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 10) || ((bytes / packets) > 1200)) + itrval = bulk_latency; + else if ((packets > 35)) + itrval = lowest_latency; + } else if (bytes / packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + itrval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + itrval = low_latency; + } else if (bytes < 1500) { + itrval = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itrval; +} + +static void igc_set_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + u32 new_itr = q_vector->itr_val; + u8 current_itr = 0; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + current_itr = 0; + new_itr = IGC_4K_ITR; + goto set_itr_now; + default: + break; + } + + igc_update_itr(q_vector, &q_vector->tx); + igc_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (current_itr == lowest_latency && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + current_itr = low_latency; + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ + break; + case low_latency: + new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ + break; + case bulk_latency: + new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ + break; + default: + break; + } + +set_itr_now: + if (new_itr != q_vector->itr_val) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR + * value at the beginning of the next interrupt so the timing + * ends up being correct. 
+ */ + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; + } +} + +static void igc_reset_interrupt_capability(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & IGC_FLAG_HAS_MSI) { + pci_disable_msi(adapter->pdev); + } + + while (v_idx--) + igc_reset_q_vector(adapter, v_idx); +} + +/** + * igc_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: Pointer to adapter structure + * @msix: boolean value for MSI-X capability + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + */ +static void igc_set_interrupt_capability(struct igc_adapter *adapter, + bool msix) +{ + int numvecs, i; + int err; + + if (!msix) + goto msi_only; + adapter->flags |= IGC_FLAG_HAS_MSIX; + + /* Number of supported queues. */ + adapter->num_rx_queues = adapter->rss_queues; + + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every Rx queue */ + numvecs = adapter->num_rx_queues; + + /* if Tx handler is separate add 1 for every Tx queue */ + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + + adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), + GFP_KERNEL); + + if (!adapter->msix_entries) + return; + + /* populate entry values */ + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) + return; + + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + igc_reset_interrupt_capability(adapter); + +msi_only: + adapter->flags &= ~IGC_FLAG_HAS_MSIX; + + adapter->rss_queues = 1; + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGC_FLAG_HAS_MSI; +} + +/** + * igc_update_ring_itr - update the dynamic ITR value based on packet size + * @q_vector: pointer to q_vector + * + * Stores a new ITR value based on strictly on packet size. This + * algorithm is less sophisticated than that used in igc_update_itr, + * due to the difficulty of synchronizing statistics across multiple + * receive rings. The divisors and thresholds used by this function + * were determined based on theoretical maximum wire speed and testing + * data, in order to minimize response time while increasing bulk + * throughput. + * NOTE: This function is called only when operating in a multiqueue + * receive environment. + */ +static void igc_update_ring_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + int new_val = q_vector->itr_val; + int avg_wire_size = 0; + unsigned int packets; + + /* For non-gigabit speeds, just fix the interrupt rate at 4000 + * ints/sec - ITR timer value of 120 ticks. 
+ */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + new_val = IGC_4K_ITR; + goto set_itr_val; + default: + break; + } + + packets = q_vector->rx.total_packets; + if (packets) + avg_wire_size = q_vector->rx.total_bytes / packets; + + packets = q_vector->tx.total_packets; + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + q_vector->tx.total_bytes / packets); + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); + + /* Give a little boost to mid-size frames */ + if (avg_wire_size > 300 && avg_wire_size < 1200) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (new_val < IGC_20K_ITR && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + new_val = IGC_20K_ITR; + +set_itr_val: + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; + } +clear_counts: + q_vector->rx.total_bytes = 0; + q_vector->rx.total_packets = 0; + q_vector->tx.total_bytes = 0; + q_vector->tx.total_packets = 0; +} + +static void igc_ring_irq_enable(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { + if (adapter->num_q_vectors == 1) + igc_set_itr(q_vector); + else + igc_update_ring_itr(q_vector); + } + + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->msix_entries) + wr32(IGC_EIMS, q_vector->eims_value); + else + igc_irq_enable(adapter); + } +} + +static void igc_add_ring(struct igc_ring *ring, + struct igc_ring_container *head) +{ + head->ring = ring; + head->count++; +} + +/** + * igc_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + */ +static void igc_cache_ring_register(struct igc_adapter *adapter) +{ + int i = 0, j = 0; + + switch (adapter->hw.mac.type) { + case igc_i225: + default: + for (; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (; j < adapter->num_tx_queues; j++) + adapter->tx_ring[j]->reg_idx = j; + break; + } +} + +/** + * igc_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + */ +static int igc_poll(struct napi_struct *napi, int budget) +{ + struct igc_q_vector *q_vector = container_of(napi, + struct igc_q_vector, + napi); + struct igc_ring *rx_ring = q_vector->rx.ring; + bool clean_complete = true; + int work_done = 0; + + if (q_vector->tx.ring) + clean_complete = igc_clean_tx_irq(q_vector, budget); + + if (rx_ring) { + int cleaned = rx_ring->xsk_pool ? 
+ igc_clean_rx_irq_zc(q_vector, budget) : + igc_clean_rx_irq(q_vector, budget); + + work_done += cleaned; + if (cleaned >= budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + igc_ring_irq_enable(q_vector); + + return min(work_done, budget - 1); +} + +/** + * igc_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + */ +static int igc_alloc_q_vector(struct igc_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct igc_q_vector *q_vector; + struct igc_ring *ring; + int ring_count; + + /* igc only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; + if (!q_vector) + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), + GFP_KERNEL); + else + memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); + if (!q_vector) + return -ENOMEM; + + if (!adapter->ecdev) { + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); + } + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ + q_vector->itr_register = adapter->io_addr + IGC_EITR(0); + q_vector->itr_val = IGC_START_ITR; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* initialize ITR */ + if (rxr_count) { + /* rx or rx/tx vector */ + if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) + q_vector->itr_val = adapter->rx_itr_setting; + } else { + /* tx only vector */ + if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) + q_vector->itr_val = adapter->tx_itr_setting; + } + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + igc_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + igc_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; +} 
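+
+/* EtherCAT note (editorial sketch, not upstream code): in EtherCAT operation
+ * (adapter->ecdev set), the q_vector is allocated without a NAPI context
+ * (netif_napi_add() is skipped above), and igc_irq_enable() as well as
+ * igc_request_msix() return early, so igc_poll() is never scheduled by the
+ * network stack.  The rings are then presumably serviced by the EtherCAT
+ * master's cyclic polling of the device rather than by interrupt-driven
+ * NAPI processing.
+ */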
+ +/** + * igc_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + */ +static int igc_alloc_q_vectors(struct igc_adapter *adapter) +{ + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int q_vectors = adapter->num_q_vectors; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igc_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * @adapter: Pointer to adapter structure + * @msix: boolean for MSI-X capability + * + * This function initializes the interrupts and allocates all of the queues. + */ +static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) +{ + struct net_device *dev = adapter->netdev; + int err = 0; + + igc_set_interrupt_capability(adapter, msix); + + err = igc_alloc_q_vectors(adapter); + if (err) { + netdev_err(dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + igc_cache_ring_register(adapter); + + return 0; + +err_alloc_q_vectors: + igc_reset_interrupt_capability(adapter); + return err; +} + +/** + * igc_sw_init - Initialize general software structures (struct igc_adapter) + * @adapter: board private structure to initialize + * + * igc_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
+ */ +static int igc_sw_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGC_DEFAULT_TXD; + adapter->rx_ring_count = IGC_DEFAULT_RXD; + + /* set default ITR values */ + adapter->rx_itr_setting = IGC_DEFAULT_ITR; + adapter->tx_itr_setting = IGC_DEFAULT_ITR; + + /* set default work limits */ + adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; + + /* adjust max frame to be at least the size of a standard frame */ + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + mutex_init(&adapter->nfc_rule_lock); + INIT_LIST_HEAD(&adapter->nfc_rule_list); + adapter->nfc_rule_count = 0; + + spin_lock_init(&adapter->stats64_lock); + spin_lock_init(&adapter->qbv_tx_lock); + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ + adapter->flags |= IGC_FLAG_HAS_MSIX; + + igc_init_queue_configuration(adapter); + + /* This call may decrease the number of queues */ + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igc_irq_disable(adapter); + + set_bit(__IGC_DOWN, &adapter->state); + + return 0; +} + +/** + * igc_up - Open the interface and prepare it to handle traffic + * @adapter: board private structure + */ +void igc_up(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i = 0; + + /* hardware has been reset, we need to reload some things */ + igc_configure(adapter); + + clear_bit(__IGC_DOWN, &adapter->state); + if (!adapter->ecdev) { + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + } + if (adapter->msix_entries) + igc_configure_msix(adapter); + else + igc_assign_vector(adapter->q_vector[0], 0); + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + if (!adapter->ecdev) { + netif_tx_start_all_queues(adapter->netdev); + + /* start the watchdog. */ + hw->mac.get_link_status = true; + schedule_work(&adapter->watchdog_task); + } +} + +/** + * igc_update_stats - Update the board statistics counters + * @adapter: board private structure + */ +void igc_update_stats(struct igc_adapter *adapter) +{ + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + u64 _bytes, _packets; + u64 bytes, packets; + unsigned int start; + u32 mpc; + int i; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + packets = 0; + bytes = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(IGC_RQDPC(i)); + + if (hw->mac.type >= igc_i225) + wr32(IGC_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + packets = 0; + bytes = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + do { + start = u64_stats_fetch_begin(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(IGC_CRCERRS); + adapter->stats.gprc += rd32(IGC_GPRC); + adapter->stats.gorc += rd32(IGC_GORCL); + rd32(IGC_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(IGC_BPRC); + adapter->stats.mprc += rd32(IGC_MPRC); + adapter->stats.roc += rd32(IGC_ROC); + + adapter->stats.prc64 += rd32(IGC_PRC64); + adapter->stats.prc127 += rd32(IGC_PRC127); + adapter->stats.prc255 += rd32(IGC_PRC255); + adapter->stats.prc511 += rd32(IGC_PRC511); + adapter->stats.prc1023 += rd32(IGC_PRC1023); + adapter->stats.prc1522 += rd32(IGC_PRC1522); + adapter->stats.tlpic += rd32(IGC_TLPIC); + adapter->stats.rlpic += rd32(IGC_RLPIC); + adapter->stats.hgptc += rd32(IGC_HGPTC); + + mpc = rd32(IGC_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(IGC_SCC); + adapter->stats.ecol += rd32(IGC_ECOL); + adapter->stats.mcc += rd32(IGC_MCC); + adapter->stats.latecol += rd32(IGC_LATECOL); + adapter->stats.dc += rd32(IGC_DC); + adapter->stats.rlec += rd32(IGC_RLEC); + adapter->stats.xonrxc += rd32(IGC_XONRXC); + adapter->stats.xontxc += rd32(IGC_XONTXC); + adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); + adapter->stats.xofftxc += rd32(IGC_XOFFTXC); + adapter->stats.fcruc += rd32(IGC_FCRUC); + adapter->stats.gptc += rd32(IGC_GPTC); + adapter->stats.gotc += rd32(IGC_GOTCL); + rd32(IGC_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(IGC_RNBC); + adapter->stats.ruc += rd32(IGC_RUC); + adapter->stats.rfc += rd32(IGC_RFC); + adapter->stats.rjc += rd32(IGC_RJC); + adapter->stats.tor += rd32(IGC_TORH); + adapter->stats.tot += rd32(IGC_TOTH); + adapter->stats.tpr += rd32(IGC_TPR); + + adapter->stats.ptc64 += rd32(IGC_PTC64); + adapter->stats.ptc127 += rd32(IGC_PTC127); + adapter->stats.ptc255 += rd32(IGC_PTC255); + adapter->stats.ptc511 += rd32(IGC_PTC511); + adapter->stats.ptc1023 += rd32(IGC_PTC1023); + adapter->stats.ptc1522 += rd32(IGC_PTC1522); + + adapter->stats.mptc += rd32(IGC_MPTC); + adapter->stats.bptc += rd32(IGC_BPTC); + + adapter->stats.tpt += rd32(IGC_TPT); + adapter->stats.colc += rd32(IGC_COLC); + adapter->stats.colc += rd32(IGC_RERC); + + adapter->stats.algnerrc += rd32(IGC_ALGNERRC); + + adapter->stats.tsctc += rd32(IGC_TSCTC); + + adapter->stats.iac += rd32(IGC_IAC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = 
adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped */ + net_stats->tx_dropped = adapter->stats.txdrop; + + /* Management Stats */ + adapter->stats.mgptc += rd32(IGC_MGTPTC); + adapter->stats.mgprc += rd32(IGC_MGTPRC); + adapter->stats.mgpdc += rd32(IGC_MGTPDC); +} + +/** + * igc_down - Close the interface + * @adapter: board private structure + */ +void igc_down(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 tctl, rctl; + int i = 0; + + set_bit(__IGC_DOWN, &adapter->state); + + igc_ptp_suspend(adapter); + + if (pci_device_is_present(adapter->pdev)) { + /* disable receives in the hardware */ + rctl = rd32(IGC_RCTL); + wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); + /* flush and sleep below */ + } + /* set trans_start so we don't get spurious watchdogs during reset */ + netif_trans_update(netdev); + if (!adapter->ecdev) { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + + if (pci_device_is_present(adapter->pdev)) { + /* disable transmits in the hardware */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_EN; + wr32(IGC_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 20000); + + igc_irq_disable(adapter); + } + + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + + for (i = 0; i < adapter->num_q_vectors; i++) { + if (!adapter->ecdev && adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } + } + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + /* record the stats before reset*/ + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + igc_reset(adapter); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; + + igc_disable_all_tx_rings_hw(adapter); + igc_clean_all_tx_rings(adapter); + igc_clean_all_rx_rings(adapter); +} + +void igc_reinit_locked(struct igc_adapter *adapter) +{ + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + igc_down(adapter); + igc_up(adapter); + clear_bit(__IGC_RESETTING, &adapter->state); +} + +static void igc_reset_task(struct work_struct *work) +{ + struct igc_adapter *adapter; + + adapter = container_of(work, struct igc_adapter, reset_task); + + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGC_DOWN, &adapter->state) || + test_bit(__IGC_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + + igc_rings_dump(adapter); + 
igc_regs_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); + igc_reinit_locked(adapter); + rtnl_unlock(); +} + +/** + * igc_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int igc_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct igc_adapter *adapter = netdev_priv(netdev); + + if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { + netdev_dbg(netdev, "Jumbo frames not supported with XDP"); + return -EINVAL; + } + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + /* igc_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + + if (netif_running(netdev)) + igc_down(adapter); + + netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igc_up(adapter); + else + igc_reset(adapter); + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +/** + * igc_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: queue number that timed out + **/ +static void igc_tx_timeout(struct net_device *netdev, + unsigned int __always_unused txqueue) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + wr32(IGC_EICS, + (adapter->eims_enable_mask & ~adapter->eims_other)); +} + +/** + * igc_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + * + * Returns the address of the device statistics structure. + * The statistics are updated here and also from the timer callback. + */ +static void igc_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + spin_lock(&adapter->stats64_lock); + if (!test_bit(__IGC_RESETTING, &adapter->state)) + igc_update_stats(adapter); + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); +} + +static netdev_features_t igc_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. 
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int igc_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct igc_adapter *adapter = netdev_priv(netdev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + igc_vlan_mode(netdev, features); + + /* Add VLAN support */ + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + if (!(features & NETIF_F_NTUPLE)) + igc_flush_nfc_rules(adapter); + + netdev->features = features; + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + + return 1; +} + +static netdev_features_t +igc_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPv4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + +static void igc_tsync_interrupt(struct igc_adapter *adapter) +{ + u32 ack, tsauxc, sec, nsec, tsicr; + struct igc_hw *hw = &adapter->hw; + struct ptp_clock_event event; + struct timespec64 ts; + + tsicr = rd32(IGC_TSICR); + ack = 0; + + if (tsicr & IGC_TSICR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_SYS_WRAP; + } + + if (tsicr & IGC_TSICR_TXTS) { + /* retrieve hardware timestamp */ + igc_ptp_tx_tstamp_event(adapter); + ack |= IGC_TSICR_TXTS; + } + + if (tsicr & IGC_TSICR_TT0) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[0].start, + adapter->perout[0].period); + wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT0; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[0].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT0; + } + + if (tsicr & IGC_TSICR_TT1) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[1].start, + adapter->perout[1].period); + wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT1; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[1].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT1; + } + + if (tsicr & IGC_TSICR_AUTT0) { + nsec = rd32(IGC_AUXSTMPL0); + sec = rd32(IGC_AUXSTMPH0); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = sec * NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT0; + } + + if (tsicr & IGC_TSICR_AUTT1) { + nsec = rd32(IGC_AUXSTMPL1); + sec = rd32(IGC_AUXSTMPH1); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = sec * 
NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT1; + } + + /* acknowledge the interrupts */ + wr32(IGC_TSICR, ack); +} + +/** + * igc_msix_other - msix other interrupt handler + * @irq: interrupt number + * @data: pointer to a q_vector + */ +static irqreturn_t igc_msix_other(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_hw *hw = &adapter->hw; + u32 icr = rd32(IGC_ICR); + + /* reading ICR causes bit 31 of EICR to be cleared */ + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & IGC_ICR_LSC) { + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + wr32(IGC_EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static void igc_write_itr(struct igc_q_vector *q_vector) +{ + u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; + + if (!q_vector->set_itr) + return; + + if (!itr_val) + itr_val = IGC_ITR_VAL_MASK; + + itr_val |= IGC_EITR_CNT_IGNR; + + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; +} + +static irqreturn_t igc_msix_ring(int irq, void *data) +{ + struct igc_q_vector *q_vector = data; + + /* Write the ITR value calculated from the previous interrupt. */ + igc_write_itr(q_vector); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_request_msix - Initialize MSI-X interrupts + * @adapter: Pointer to adapter structure + * + * igc_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. + */ +static int igc_request_msix(struct igc_adapter *adapter) +{ + unsigned int num_q_vectors = adapter->num_q_vectors; + int i = 0, err = 0, vector = 0, free_vector = 0; + struct net_device *netdev = adapter->netdev; + if (adapter->ecdev) { + /* avoid requesting MSI-X. 
*/ + return 0; + } + + err = request_irq(adapter->msix_entries[vector].vector, + &igc_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + vector++; + + q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else if (q_vector->tx.ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx.ring->queue_index); + else if (q_vector->rx.ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else + sprintf(q_vector->name, "%s-unused", netdev->name); + + err = request_irq(adapter->msix_entries[vector].vector, + igc_msix_ring, 0, q_vector->name, + q_vector); + if (err) + goto err_free; + } + + igc_configure_msix(adapter); + return 0; + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: + return err; +} + +/** + * igc_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: Pointer to adapter structure + * + * This function resets the device so that it has 0 rx queues, tx queues, and + * MSI-X interrupts allocated. + */ +static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) +{ + igc_free_q_vectors(adapter); + igc_reset_interrupt_capability(adapter); +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void igc_update_phy_info(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); + + igc_get_phy_info(&adapter->hw); +} + +/** + * igc_has_link - check shared code for link and determine up/down + * @adapter: pointer to driver private info + */ +bool igc_has_link(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay + * false until the igc_check_for_link establishes link + * for copper adapters ONLY + */ + if (!hw->mac.get_link_status) + return true; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + + if (hw->mac.type == igc_i225) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + + return link_active; +} + +/** + * igc_watchdog - Timer Call-back + * @t: timer for the watchdog + */ +static void igc_watchdog(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igc_watchdog_task(struct work_struct *work) +{ + struct igc_adapter *adapter = container_of(work, + struct igc_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_phy_info *phy = &hw->phy; + u16 phy_data, retry_count = 20; + u32 link; + int i; + + link = igc_has_link(adapter); + + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + + if (link) { + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = rd32(IGC_CTRL); + /* Link status message must follow this format */ + netdev_info(netdev, + "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full" : "Half", + (ctrl & IGC_CTRL_TFCE) && + (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : + (ctrl & IGC_CTRL_RFCE) ? "RX" : + (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); + + /* disable EEE if enabled */ + if ((adapter->flags & IGC_FLAG_EEE) && + adapter->link_duplex == HALF_DUPLEX) { + netdev_info(netdev, + "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n"); + adapter->hw.dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + } + + /* check if SmartSpeed worked */ + igc_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + case SPEED_1000: + case SPEED_2500: + adapter->tx_timeout_factor = 1; + break; + } + + /* Once the launch time has been set on the wire, there + * is a delay before the link speed can be determined + * based on link-up activity. Write into the register + * as soon as we know the correct link speed. 
+ */ + igc_tsn_adjust_txtime_offset(adapter); + + if (adapter->link_speed != SPEED_1000) + goto no_wait; + + /* wait for Remote receiver status OK */ +retry_read_status: + if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data)) { + if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && + retry_count) { + msleep(100); + retry_count--; + goto retry_read_status; + } else if (!retry_count) { + netdev_err(netdev, "exceed max 2 second\n"); + } + } else { + netdev_err(netdev, "read 1000Base-T Status Reg\n"); + } +no_wait: + netif_carrier_on(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* Links status message must follow this format */ + netdev_info(netdev, "NIC Link is Down\n"); + netif_carrier_off(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + pm_schedule_suspend(netdev->dev.parent, + MSEC_PER_SEC * 5); + } + } + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + } + + /* Force detection of hung controller every watchdog period */ + set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + u32 eics = 0; + + for (i = 0; i < adapter->num_q_vectors; i++) + eics |= adapter->q_vector[i]->eims_value; + wr32(IGC_EICS, eics); + } else { + wr32(IGC_ICS, IGC_ICS_RXDMT0); + } + + igc_ptp_tx_hang(adapter); + + /* Reset the timer */ + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + HZ)); + else + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); + } +} + +/** + * igc_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr_msi(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ + u32 icr = rd32(IGC_ICR); + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = true; + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_intr - Legacy Interrupt Handler + * @irq: 
interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No + * need for the IMC write + */ + u32 icr = rd32(IGC_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & IGC_ICR_INT_ASSERTED)) + return IRQ_NONE; + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void igc_free_irq(struct igc_adapter *adapter) +{ + if (adapter->ecdev) { + /* no IRQ to free in EtherCAT operation */ + return; + } + + if (adapter->msix_entries) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) + free_irq(adapter->msix_entries[vector++].vector, + adapter->q_vector[i]); + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * igc_request_irq - initialize interrupts + * @adapter: Pointer to adapter structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + */ +static int igc_request_irq(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + err = igc_request_msix(adapter); + if (!err) + goto request_done; + /* fall back to MSI */ + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + igc_clear_interrupt_scheme(adapter); + err = igc_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; + igc_setup_all_tx_resources(adapter); + igc_setup_all_rx_resources(adapter); + igc_configure(adapter); + } + + igc_assign_vector(adapter->q_vector[0], 0); + + if (!adapter->ecdev && adapter->flags & IGC_FLAG_HAS_MSI) { + err = request_irq(pdev->irq, &igc_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; + + /* fall back to legacy interrupts */ + igc_reset_interrupt_capability(adapter); + adapter->flags &= ~IGC_FLAG_HAS_MSI; + } + + if (!adapter->ecdev) { + err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + netdev_err(netdev, "Error %d getting interrupt\n", err); + } +request_done: + return err; +} + +/** + * __igc_open - Called when a network interface is made active + * @netdev: network interface device structure + * @resuming: boolean indicating if the device is resuming + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
+ */ +static int __igc_open(struct net_device *netdev, bool resuming) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + int err = 0; + int i = 0; + + /* disallow open during test */ + + if (test_bit(__IGC_TESTING, &adapter->state)) { + WARN_ON(resuming); + return -EBUSY; + } + + if (!resuming) + pm_runtime_get_sync(&pdev->dev); + + if (adapter->ecdev) { + ecdev_set_link(adapter->ecdev, 0); + } + else { + netif_carrier_off(netdev); + } + + /* allocate transmit descriptors */ + err = igc_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igc_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + igc_power_up_link(adapter); + + igc_configure(adapter); + + err = igc_request_irq(adapter); + if (err) + goto err_req_irq; + + if (!adapter->ecdev) { + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + } + + clear_bit(__IGC_DOWN, &adapter->state); + if (!adapter->ecdev) { + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + } + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + if (!resuming) + pm_runtime_put(&pdev->dev); + + if (!adapter->ecdev) { + netif_tx_start_all_queues(netdev); + } + + if (!adapter->ecdev) { + /* start the watchdog. */ + hw->mac.get_link_status = true; + schedule_work(&adapter->watchdog_task); + } + + return IGC_SUCCESS; + +err_set_queues: + igc_free_irq(adapter); +err_req_irq: + igc_release_hw_control(adapter); + igc_power_down_phy_copper_base(&adapter->hw); + igc_free_all_rx_resources(adapter); +err_setup_rx: + igc_free_all_tx_resources(adapter); +err_setup_tx: + igc_reset(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); + + return err; +} + +int igc_open(struct net_device *netdev) +{ + return __igc_open(netdev, false); +} + +/** + * __igc_close - Disables a network interface + * @netdev: network interface device structure + * @suspending: boolean indicating the device is suspending + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
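+ *
+ * In EtherCAT operation, igc_free_irq() returns immediately, since no
+ * interrupt line was requested for a device claimed by the master.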
+ */ +static int __igc_close(struct net_device *netdev, bool suspending) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); + + if (!suspending) + pm_runtime_get_sync(&pdev->dev); + + igc_down(adapter); + + igc_release_hw_control(adapter); + + igc_free_irq(adapter); + + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + if (!suspending) + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +int igc_close(struct net_device *netdev) +{ + if (netif_device_present(netdev) || netdev->dismantle) + return __igc_close(netdev, false); + return 0; +} + +/** + * igc_ioctl - Access the hwtstamp interface + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command + **/ +static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGHWTSTAMP: + return igc_ptp_get_ts_config(netdev, ifr); + case SIOCSHWTSTAMP: + return igc_ptp_set_ts_config(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, + bool enable) +{ + struct igc_ring *ring; + + if (queue < 0 || queue >= adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + ring->launchtime_enable = enable; + + return 0; +} + +static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) +{ + struct timespec64 b; + + b = ktime_to_timespec64(base_time); + + return timespec64_compare(now, &b) > 0; +} + +static bool validate_schedule(struct igc_adapter *adapter, + const struct tc_taprio_qopt_offload *qopt) +{ + int queue_uses[IGC_MAX_TX_QUEUES] = { }; + struct igc_hw *hw = &adapter->hw; + struct timespec64 now; + size_t n; + + if (qopt->cycle_time_extension) + return false; + + igc_ptp_read(adapter, &now); + + /* If we program the controller's BASET registers with a time + * in the future, it will hold all the packets until that + * time, causing a lot of TX Hangs, so to avoid that, we + * reject schedules that would start in the future. + * Note: Limitation above is no longer in i226. + */ + if (!is_base_time_past(qopt->base_time, &now) && + igc_is_device_id_i225(hw)) + return false; + + for (n = 0; n < qopt->num_entries; n++) { + const struct tc_taprio_sched_entry *e, *prev; + int i; + + prev = n ? &qopt->entries[n - 1] : NULL; + e = &qopt->entries[n]; + + /* i225 only supports "global" frame preemption + * settings. + */ + if (e->command != TC_TAPRIO_CMD_SET_GATES) + return false; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (e->gate_mask & BIT(i)) { + queue_uses[i]++; + + /* There are limitations: A single queue cannot + * be opened and closed multiple times per cycle + * unless the gate stays open. Check for it. 
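+ *
+ * For example, a gate list { Q0 }, { Q1 }, { Q0 } is rejected, because
+ * queue 0 closes and re-opens within one cycle, whereas { Q0 },
+ * { Q0, Q1 }, { Q1 } is accepted, because queue 0 stays open across
+ * consecutive entries.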
+ */ + if (queue_uses[i] > 1 && + !(prev->gate_mask & BIT(i))) + return false; + } + } + + return true; +} + +static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, + struct tc_etf_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_qbv_clear_schedule(struct igc_adapter *adapter) +{ + unsigned long flags; + int i; + + adapter->base_time = 0; + adapter->cycle_time = NSEC_PER_SEC; + adapter->taprio_offload_enable = false; + adapter->qbv_config_change_errors = 0; + adapter->qbv_count = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = 0; + ring->end_time = NSEC_PER_SEC; + ring->max_sdu = 0; + } + + spin_lock_irqsave(&adapter->qbv_tx_lock, flags); + + adapter->qbv_transition = false; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->oper_gate_closed = false; + ring->admin_gate_closed = false; + } + + spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); + + return 0; +} + +static int igc_tsn_clear_schedule(struct igc_adapter *adapter) +{ + igc_qbv_clear_schedule(adapter); + + return 0; +} + +static void igc_taprio_stats(struct net_device *dev, + struct tc_taprio_qopt_stats *stats) +{ + /* When Strict_End is enabled, the tx_overruns counter + * will always be zero. + */ + stats->tx_overruns = 0; +} + +static void igc_taprio_queue_stats(struct net_device *dev, + struct tc_taprio_qopt_queue_stats *queue_stats) +{ + struct tc_taprio_qopt_stats *stats = &queue_stats->stats; + + /* When Strict_End is enabled, the tx_overruns counter + * will always be zero. + */ + stats->tx_overruns = 0; +} + +static int igc_save_qbv_schedule(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + bool queue_configured[IGC_MAX_TX_QUEUES] = { }; + struct igc_hw *hw = &adapter->hw; + u32 start_time = 0, end_time = 0; + struct timespec64 now; + unsigned long flags; + size_t n; + int i; + + switch (qopt->cmd) { + case TAPRIO_CMD_REPLACE: + break; + case TAPRIO_CMD_DESTROY: + return igc_tsn_clear_schedule(adapter); + case TAPRIO_CMD_STATS: + igc_taprio_stats(adapter->netdev, &qopt->stats); + return 0; + case TAPRIO_CMD_QUEUE_STATS: + igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats); + return 0; + default: + return -EOPNOTSUPP; + } + + if (qopt->base_time < 0) + return -ERANGE; + + if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable) + return -EALREADY; + + if (!validate_schedule(adapter, qopt)) + return -EINVAL; + + adapter->cycle_time = qopt->cycle_time; + adapter->base_time = qopt->base_time; + adapter->taprio_offload_enable = true; + + igc_ptp_read(adapter, &now); + + for (n = 0; n < qopt->num_entries; n++) { + struct tc_taprio_sched_entry *e = &qopt->entries[n]; + + end_time += e->interval; + + /* If any of the conditions below are true, we need to manually + * control the end time of the cycle. + * 1. Qbv users can specify a cycle time that is not equal + * to the total GCL intervals. Hence, recalculation is + * necessary here to exclude the time interval that + * exceeds the cycle time. + * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, + * once the end of the list is reached, it will switch + * to the END_OF_CYCLE state and leave the gates in the + * same state until the next cycle is started. 
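+ *
+ * For example, with a cycle time of 1 ms and two 600 us entries, the
+ * second entry is truncated so that it ends exactly at the 1 ms cycle
+ * boundary.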
+ */ + if (end_time > adapter->cycle_time || + n + 1 == qopt->num_entries) + end_time = adapter->cycle_time; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (!(e->gate_mask & BIT(i))) + continue; + + /* Check whether a queue stays open for more than one + * entry. If so, keep the start and advance the end + * time. + */ + if (!queue_configured[i]) + ring->start_time = start_time; + ring->end_time = end_time; + + if (ring->start_time >= adapter->cycle_time) + queue_configured[i] = false; + else + queue_configured[i] = true; + } + + start_time += e->interval; + } + + spin_lock_irqsave(&adapter->qbv_tx_lock, flags); + + /* Check whether a queue gets configured. + * If not, set the start and end time to be end time. + */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (!is_base_time_past(qopt->base_time, &now)) { + ring->admin_gate_closed = false; + } else { + ring->oper_gate_closed = false; + ring->admin_gate_closed = false; + } + + if (!queue_configured[i]) { + if (!is_base_time_past(qopt->base_time, &now)) + ring->admin_gate_closed = true; + else + ring->oper_gate_closed = true; + + ring->start_time = end_time; + ring->end_time = end_time; + } + } + + spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + struct net_device *dev = adapter->netdev; + + if (qopt->max_sdu[i]) + ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN; + else + ring->max_sdu = 0; + } + + return 0; +} + +static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + err = igc_save_qbv_schedule(adapter, qopt); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_save_cbs_params(struct igc_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + bool cbs_status[IGC_MAX_SR_QUEUES] = { false }; + struct net_device *netdev = adapter->netdev; + struct igc_ring *ring; + int i; + + /* i225 has two sets of credit-based shaper logic. + * Supporting it only on the top two priority queues + */ + if (queue < 0 || queue > 1) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + for (i = 0; i < IGC_MAX_SR_QUEUES; i++) + if (adapter->tx_ring[i]) + cbs_status[i] = adapter->tx_ring[i]->cbs_enable; + + /* CBS should be enabled on the highest priority queue first in order + * for the CBS algorithm to operate as intended. 
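+ *
+ * A minimal user-space sketch of the required order (values illustrative
+ * only, assuming the standard tc-cbs offload interface and an mqprio
+ * parent whose class :1 maps to queue 0 and :2 to queue 1):
+ *
+ *   tc qdisc replace dev eth0 parent 100:1 cbs offload 1 \
+ *       idleslope 98688 sendslope -901312 hicredit 153 locredit -1389
+ *   tc qdisc replace dev eth0 parent 100:2 cbs offload 1 \
+ *       idleslope 20000 sendslope -980000 hicredit 30 locredit -1100
+ *
+ * Disabling works in the reverse order: queue 1 before queue 0.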
+ */ + if (enable) { + if (queue == 1 && !cbs_status[0]) { + netdev_err(netdev, + "Enabling CBS on queue1 before queue0\n"); + return -EINVAL; + } + } else { + if (queue == 0 && cbs_status[1]) { + netdev_err(netdev, + "Disabling CBS on queue0 before queue1\n"); + return -EINVAL; + } + } + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +static int igc_tsn_enable_cbs(struct igc_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_tc_query_caps(struct igc_adapter *adapter, + struct tc_query_caps_base *base) +{ + struct igc_hw *hw = &adapter->hw; + + switch (base->type) { + case TC_SETUP_QDISC_TAPRIO: { + struct tc_taprio_caps *caps = base->caps; + + caps->broken_mqprio = true; + + if (hw->mac.type == igc_i225) { + caps->supports_queue_max_sdu = true; + caps->gate_mask_per_txq = true; + } + + return 0; + } + default: + return -EOPNOTSUPP; + } +} + +static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + adapter->tc_setup_type = type; + + switch (type) { + case TC_QUERY_CAPS: + return igc_tc_query_caps(adapter, type_data); + case TC_SETUP_QDISC_TAPRIO: + return igc_tsn_enable_qbv_scheduling(adapter, type_data); + + case TC_SETUP_QDISC_ETF: + return igc_tsn_enable_launchtime(adapter, type_data); + + case TC_SETUP_QDISC_CBS: + return igc_tsn_enable_cbs(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); + case XDP_SETUP_XSK_POOL: + return igc_xdp_setup_pool(adapter, bpf->xsk.pool, + bpf->xsk.queue_id); + default: + return -EOPNOTSUPP; + } +} + +static int igc_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct igc_adapter *adapter = netdev_priv(dev); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int i, drops; + + if (unlikely(!netif_carrier_ok(dev))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + + drops = 0; + for (i = 0; i < num_frames; i++) { + int err; + struct xdp_frame *xdpf = frames[i]; + + err = igc_xdp_init_tx_descriptor(ring, xdpf); + if (err) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (flags & XDP_XMIT_FLUSH) + igc_flush_tx_descriptors(ring); + + __netif_tx_unlock(nq); + + return num_frames - drops; +} + +static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, + struct igc_q_vector *q_vector) +{ + struct igc_hw *hw = &adapter->hw; + u32 eics = 0; + + eics |= q_vector->eims_value; + wr32(IGC_EICS, eics); +} + +int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) +{ + struct 
igc_adapter *adapter = netdev_priv(dev); + struct igc_q_vector *q_vector; + struct igc_ring *ring; + + if (test_bit(__IGC_DOWN, &adapter->state)) + return -ENETDOWN; + + if (!igc_xdp_is_enabled(adapter)) + return -ENXIO; + + if (queue_id >= adapter->num_rx_queues) + return -EINVAL; + + ring = adapter->rx_ring[queue_id]; + + if (!ring->xsk_pool) + return -ENXIO; + + q_vector = adapter->q_vector[queue_id]; + if (!napi_if_scheduled_mark_missed(&q_vector->napi)) + igc_trigger_rxtxq_interrupt(adapter, q_vector); + + return 0; +} + +static const struct net_device_ops igc_netdev_ops = { + .ndo_open = igc_open, + .ndo_stop = igc_close, + .ndo_start_xmit = igc_xmit_frame, + .ndo_set_rx_mode = igc_set_rx_mode, + .ndo_set_mac_address = igc_set_mac, + .ndo_change_mtu = igc_change_mtu, + .ndo_tx_timeout = igc_tx_timeout, + .ndo_get_stats64 = igc_get_stats64, + .ndo_fix_features = igc_fix_features, + .ndo_set_features = igc_set_features, + .ndo_features_check = igc_features_check, + .ndo_eth_ioctl = igc_ioctl, + .ndo_setup_tc = igc_setup_tc, + .ndo_bpf = igc_bpf, + .ndo_xdp_xmit = igc_xdp_xmit, + .ndo_xsk_wakeup = igc_xsk_wakeup, +}; + +/** +* ec_poll - EtherCAT poll routine +* @netdev: net device structure +* +* This function can never fail. +* +**/ +void ec_poll(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + int budget = 64; + + if (jiffies - adapter->ec_watchdog_jiffies >= 2 * HZ) { + struct igc_hw *hw = &adapter->hw; + + bool link; + hw->mac.get_link_status = true; + link = igc_has_link(adapter); + ecdev_set_link(adapter->ecdev, link); + adapter->ec_watchdog_jiffies = jiffies; + } + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + if (q_vector->tx.ring) { + igc_clean_tx_irq(q_vector, budget); + } + + if (q_vector->rx.ring) { + igc_clean_rx_irq(q_vector, budget); + } + } +} + +/** + +/* PCIe configuration access */ +void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + +s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + if (!pci_is_pcie(adapter->pdev)) + return -IGC_ERR_CONFIG; + + pcie_capability_read_word(adapter->pdev, reg, value); + + return IGC_SUCCESS; +} + +s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + if (!pci_is_pcie(adapter->pdev)) + return -IGC_ERR_CONFIG; + + pcie_capability_write_word(adapter->pdev, reg, *value); + + return IGC_SUCCESS; +} + +u32 igc_rd32(struct igc_hw *hw, u32 reg) +{ + struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); + u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); + u32 value = 0; + + if (IGC_REMOVED(hw_addr)) + return ~value; + + value = readl(&hw_addr[reg]); + + /* reads should not return all F's */ + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct net_device *netdev = igc->netdev; + + hw->hw_addr = NULL; + netif_device_detach(netdev); + netdev_err(netdev, "PCIe link lost, device now detached\n"); + WARN(pci_device_is_present(igc->pdev), + "igc: Failed to read reg 0x%x!\n", reg); + } + + return value; +} + +/* Mapping HW RSS Type to enum xdp_rss_hash_type */ +static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] 
= { + [IGC_RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_L2, + [IGC_RSS_TYPE_HASH_TCP_IPV4] = XDP_RSS_TYPE_L4_IPV4_TCP, + [IGC_RSS_TYPE_HASH_IPV4] = XDP_RSS_TYPE_L3_IPV4, + [IGC_RSS_TYPE_HASH_TCP_IPV6] = XDP_RSS_TYPE_L4_IPV6_TCP, + [IGC_RSS_TYPE_HASH_IPV6_EX] = XDP_RSS_TYPE_L3_IPV6_EX, + [IGC_RSS_TYPE_HASH_IPV6] = XDP_RSS_TYPE_L3_IPV6, + [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX, + [IGC_RSS_TYPE_HASH_UDP_IPV4] = XDP_RSS_TYPE_L4_IPV4_UDP, + [IGC_RSS_TYPE_HASH_UDP_IPV6] = XDP_RSS_TYPE_L4_IPV6_UDP, + [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX, + [10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */ + [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */ + [12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisons */ + [13] = XDP_RSS_TYPE_NONE, + [14] = XDP_RSS_TYPE_NONE, + [15] = XDP_RSS_TYPE_NONE, +}; + +static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type) +{ + const struct igc_xdp_buff *ctx = (void *)_ctx; + + if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)) + return -ENODATA; + + *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss); + *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)]; + + return 0; +} + +static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) +{ + const struct igc_xdp_buff *ctx = (void *)_ctx; + + if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) { + *timestamp = ctx->rx_ts; + + return 0; + } + + return -ENODATA; +} + +static const struct xdp_metadata_ops igc_xdp_metadata_ops = { + .xmo_rx_hash = igc_xdp_rx_hash, + .xmo_rx_timestamp = igc_xdp_rx_timestamp, +}; + +static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer) +{ + struct igc_adapter *adapter = container_of(timer, struct igc_adapter, + hrtimer); + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&adapter->qbv_tx_lock, flags); + + adapter->qbv_transition = true; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->admin_gate_closed) { + tx_ring->admin_gate_closed = false; + tx_ring->oper_gate_closed = true; + } else { + tx_ring->oper_gate_closed = false; + } + } + adapter->qbv_transition = false; + + spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); + + return HRTIMER_NORESTART; +} + +/** + * igc_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igc_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igc_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring the adapter private structure, + * and a hardware reset occur. 
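+ *
+ * The probed device is also offered to the EtherCAT master via
+ * ecdev_offer(); if the master claims it, the device is opened as an
+ * EtherCAT device and register_netdev() is skipped.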
+ */ +static int igc_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct igc_adapter *adapter; + struct net_device *netdev; + struct igc_hw *hw; + const struct igc_info *ei = igc_info_tbl[ent->driver_data]; + int err; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + + err = pci_request_mem_regions(pdev, igc_driver_name); + if (err) + goto err_pci_reg; + + err = pci_enable_ptm(pdev, NULL); + if (err < 0) + dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); + + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), + IGC_MAX_TX_QUEUES); + + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->port_num = hw->bus.func; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + err = pci_save_state(pdev); + if (err) + goto err_ioremap; + + err = -EIO; + adapter->io_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!adapter->io_addr) + goto err_ioremap; + + /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; + + netdev->netdev_ops = &igc_netdev_ops; + netdev->xdp_metadata_ops = &igc_xdp_metadata_ops; + igc_ethtool_set_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Copy the default MAC and PHY function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + + /* Initialize skew-specific constants */ + err = ei->get_invariants(hw); + if (err) + goto err_sw_init; + + /* Add supported features to the features list*/ + netdev->features |= NETIF_F_SG; + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + netdev->features |= NETIF_F_TSO_ECN; + netdev->features |= NETIF_F_RXHASH; + netdev->features |= NETIF_F_RXCSUM; + netdev->features |= NETIF_F_HW_CSUM; + netdev->features |= NETIF_F_SCTP_CRC; + netdev->features |= NETIF_F_HW_TC; + +#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; + + /* setup the private structure */ + err = igc_sw_init(adapter); + if (err) + goto err_sw_init; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + netdev->hw_features |= netdev->features; + + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; + + 
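+	/* Advertise basic XDP, XDP_REDIRECT and AF_XDP zero-copy support */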
netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY; + + /* MTU range: 68 - 9216 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ + hw->mac.ops.reset_hw(hw); + + if (igc_get_flash_presence_i225(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + } + + eth_hw_addr_set(netdev, hw->mac.addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + /* configure RXPBSIZE and TXPBSIZE */ + wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + + timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); + timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); + + INIT_WORK(&adapter->reset_task, igc_reset_task); + INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); + + hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + adapter->hrtimer.function = &igc_qbv_scheduling_timer; + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = 0xaf; + + hw->fc.requested_mode = igc_fc_default; + hw->fc.current_mode = igc_fc_default; + + /* By default, support wake on port A */ + adapter->flags |= IGC_FLAG_WOL_SUPPORTED; + + /* initialize the wol settings based on the eeprom settings */ + if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) + adapter->wol |= IGC_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, + adapter->flags & IGC_FLAG_WOL_SUPPORTED); + + igc_ptp_init(adapter); + + igc_tsn_clear_schedule(adapter); + + /* reset the hardware with the new settings */ + igc_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igc_get_hw_control(adapter); + + adapter->ecdev = ecdev_offer(netdev, ec_poll, THIS_MODULE); + if (adapter->ecdev) { + err = ecdev_open(adapter->ecdev); + if (err) { + ecdev_withdraw(adapter->ecdev); + goto err_register; + } + adapter->ec_watchdog_jiffies = jiffies; + } else { + strncpy(netdev->name, "eth%d", IFNAMSIZ); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + } + /* Check if Media Autosense is enabled */ + adapter->ei = *ei; + + /* print pcie link status and MAC address */ + pcie_print_link_status(pdev); + netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); + + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + /* Disable EEE for internal PHY devices */ + hw->dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + igc_set_eee_i225(hw, false, false, false); + + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +err_register: + igc_release_hw_control(adapter); +err_eeprom: + if (!igc_check_reset_block(hw)) + igc_reset_phy(hw); +err_sw_init: + igc_clear_interrupt_scheme(adapter); + iounmap(adapter->io_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * igc_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igc_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void igc_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + if (adapter->ecdev) { + ecdev_close(adapter->ecdev); + ecdev_withdraw(adapter->ecdev); + } + + pm_runtime_get_noresume(&pdev->dev); + + igc_flush_nfc_rules(adapter); + + igc_ptp_stop(adapter); + + pci_disable_ptm(pdev); + pci_clear_master(pdev); + + set_bit(__IGC_DOWN, &adapter->state); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + hrtimer_cancel(&adapter->hrtimer); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igc_release_hw_control(adapter); + if (!adapter->ecdev) { + unregister_netdev(netdev); + } + + igc_clear_interrupt_scheme(adapter); + pci_iounmap(pdev, adapter->io_addr); + pci_release_mem_regions(pdev); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + u32 wufc = runtime ? 
IGC_WUFC_LNKC : adapter->wol; + struct igc_hw *hw = &adapter->hw; + u32 ctrl, rctl, status; + bool wake; + + if (adapter->ecdev) { + ecdev_close(adapter->ecdev); + } else { + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) + __igc_close(netdev, true); + } + + igc_ptp_suspend(adapter); + + igc_clear_interrupt_scheme(adapter); + rtnl_unlock(); + + status = rd32(IGC_STATUS); + if (status & IGC_STATUS_LU) + wufc &= ~IGC_WUFC_LNKC; + + if (wufc) { + igc_setup_rctl(adapter); + igc_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & IGC_WUFC_MC) { + rctl = rd32(IGC_RCTL); + rctl |= IGC_RCTL_MPE; + wr32(IGC_RCTL, rctl); + } + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_ADVD3WUC; + wr32(IGC_CTRL, ctrl); + + /* Allow time for pending master requests to run */ + igc_disable_pcie_master(hw); + + wr32(IGC_WUC, IGC_WUC_PME_EN); + wr32(IGC_WUFC, wufc); + } else { + wr32(IGC_WUC, 0); + wr32(IGC_WUFC, 0); + } + + wake = wufc || adapter->en_mng_pt; + if (!wake) + igc_power_down_phy_copper_base(&adapter->hw); + else + igc_power_up_link(adapter); + + if (enable_wake) + *enable_wake = wake; + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igc_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int __maybe_unused igc_runtime_suspend(struct device *dev) +{ + return __igc_shutdown(to_pci_dev(dev), NULL, 1); +} + +static void igc_deliver_wake_packet(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct sk_buff *skb; + u32 wupl; + + wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK; + + /* WUPM stores only the first 128 bytes of the wake packet. + * Read the packet only if we have the whole thing. + */ + if (wupl == 0 || wupl > IGC_WUPM_BYTES) + return; + + skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES); + if (!skb) + return; + + skb_put(skb, wupl); + + /* Ensure reads are 32-bit aligned */ + wupl = roundup(wupl, 4); + + memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); + + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); +} + +static int __maybe_unused igc_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 err, val; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!pci_device_is_present(pdev)) + return -ENODEV; + err = pci_enable_device_mem(pdev); + if (err) { + netdev_err(netdev, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igc_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igc_get_hw_control(adapter); + + val = rd32(IGC_WUS); + if (val & WAKE_PKT_WUS) + igc_deliver_wake_packet(netdev); + + wr32(IGC_WUS, ~0); + + if (adapter->ecdev) { + ecdev_open(adapter->ecdev); + } else { + rtnl_lock(); + if (!err && netif_running(netdev)) + err = __igc_open(netdev, true); + + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + } + return err; +} + +static int __maybe_unused igc_runtime_resume(struct device *dev) +{ + return igc_resume(dev); +} + +static int __maybe_unused igc_suspend(struct device *dev) +{ + return __igc_shutdown(to_pci_dev(dev), NULL, 0); +} + +static int __maybe_unused igc_runtime_idle(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igc_adapter *adapter = netdev_priv(netdev); + + if (!igc_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC * 5); + + return -EBUSY; +} +#endif /* CONFIG_PM */ + +static void igc_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __igc_shutdown(pdev, &wake, 0); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * igc_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current PCI connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + **/ +static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igc_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igc_io_slot_reset - called after the PCI bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the igc_resume routine. + **/ +static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + pci_ers_result_t result; + + if (pci_enable_device_mem(pdev)) { + netdev_err(netdev, "Could not re-enable PCI device after reset\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + /* In case of PCI error, adapter loses its HW address + * so we should re-assign it here. + */ + hw->hw_addr = adapter->io_addr; + + igc_reset(adapter); + wr32(IGC_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + return result; +} + +/** + * igc_io_resume - called when traffic can start to flow again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the igc_resume routine. 
+ */ +static void igc_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + rtnl_lock(); + if (netif_running(netdev)) { + if (igc_open(netdev)) { + netdev_err(netdev, "igc_open failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igc_get_hw_control(adapter); + rtnl_unlock(); +} + +static const struct pci_error_handlers igc_err_handler = { + .error_detected = igc_io_error_detected, + .slot_reset = igc_io_slot_reset, + .resume = igc_io_resume, +}; + +#ifdef CONFIG_PM +static const struct dev_pm_ops igc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume) + SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume, + igc_runtime_idle) +}; +#endif + +static struct pci_driver igc_driver = { + .name = igc_driver_name, + .id_table = igc_pci_tbl, + .probe = igc_probe, + .remove = igc_remove, +#ifdef CONFIG_PM + .driver.pm = &igc_pm_ops, +#endif + .shutdown = igc_shutdown, + .err_handler = &igc_err_handler, +}; + +/** + * igc_reinit_queues - return error + * @adapter: pointer to adapter structure + */ +int igc_reinit_queues(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + + if (netif_running(netdev)) + igc_close(netdev); + + igc_reset_interrupt_capability(adapter); + + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + err = igc_open(netdev); + + return err; +} + +/** + * igc_get_hw_dev - return device + * @hw: pointer to hardware structure + * + * used by hardware layer to print debugging information + */ +struct net_device *igc_get_hw_dev(struct igc_hw *hw) +{ + struct igc_adapter *adapter = hw->back; + + return adapter->netdev; +} + +static void igc_disable_rx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 rxdctl; + + rxdctl = rd32(IGC_RXDCTL(idx)); + rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE; + rxdctl |= IGC_RXDCTL_SWFLUSH; + wr32(IGC_RXDCTL(idx), rxdctl); +} + +void igc_disable_rx_ring(struct igc_ring *ring) +{ + igc_disable_rx_ring_hw(ring); + igc_clean_rx_ring(ring); +} + +void igc_enable_rx_ring(struct igc_ring *ring) +{ + struct igc_adapter *adapter = ring->q_vector->adapter; + + igc_configure_rx_ring(adapter, ring); + + if (ring->xsk_pool) + igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); + else + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); +} + +void igc_disable_tx_ring(struct igc_ring *ring) +{ + igc_disable_tx_ring_hw(ring); + igc_clean_tx_ring(ring); +} + +void igc_enable_tx_ring(struct igc_ring *ring) +{ + struct igc_adapter *adapter = ring->q_vector->adapter; + + igc_configure_tx_ring(adapter, ring); +} + +/** + * igc_init_module - Driver Registration Routine + * + * igc_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init igc_init_module(void) +{ + int ret; + + pr_info("%s\n", igc_driver_string); + pr_info("%s\n", igc_copyright); + + ret = pci_register_driver(&igc_driver); + return ret; +} + +module_init(igc_init_module); + +/** + * igc_exit_module - Driver Exit Cleanup Routine + * + * igc_exit_module is called just before the driver is removed + * from memory. 
+ */ +static void __exit igc_exit_module(void) +{ + pci_unregister_driver(&igc_driver); +} + +module_exit(igc_exit_module); +/* igc_main.c */ diff --git a/devices/igc/igc_main-6.6-orig.c b/devices/igc/igc_main-6.6-orig.c new file mode 100644 index 00000000..98de34d0 --- /dev/null +++ b/devices/igc/igc_main-6.6-orig.c @@ -0,0 +1,7453 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "igc.h" +#include "igc_hw.h" +#include "igc_tsn.h" +#include "igc_xdp.h" + +#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver" + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +#define IGC_XDP_PASS 0 +#define IGC_XDP_CONSUMED BIT(0) +#define IGC_XDP_TX BIT(1) +#define IGC_XDP_REDIRECT BIT(2) + +static int debug = -1; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_LICENSE("GPL v2"); +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +char igc_driver_name[] = "igc"; +static const char igc_driver_string[] = DRV_SUMMARY; +static const char igc_copyright[] = + "Copyright(c) 2018 Intel Corporation."; + +static const struct igc_info *igc_info_tbl[] = { + [board_base] = &igc_base_info, +}; + +static const struct pci_device_id igc_pci_tbl[] = { + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base }, + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, igc_pci_tbl); + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +void igc_reset(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_fc_info *fc = &hw->fc; + u32 pba, hwm; + + /* Repartition PBA for greater than 9k MTU if required */ + pba = IGC_PBA_34K; + + /* flow control settings + * The high water mark must be low enough to fit one full frame + * after transmitting the pause frame. As such we must have enough + * space to allow for us to complete our current transmit and then + * receive the frame that is in progress from the link partner. 
+ * Set it to: + * - the full Rx FIFO size minus one full Tx plus one full Rx frame + */ + hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); + + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + fc->pause_time = 0xFFFF; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + hw->mac.ops.reset_hw(hw); + + if (hw->mac.ops.init_hw(hw)) + netdev_err(dev, "Error on hardware initialization\n"); + + /* Re-establish EEE setting */ + igc_set_eee_i225(hw, true, true, true); + + if (!netif_running(adapter->netdev)) + igc_power_down_phy_copper_base(&adapter->hw); + + /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */ + wr32(IGC_VET, ETH_P_8021Q); + + /* Re-enable PTP, where applicable. */ + igc_ptp_reset(adapter); + + /* Re-enable TSN offloading, where applicable. */ + igc_tsn_reset(adapter); + + igc_get_phy_info(hw); +} + +/** + * igc_power_up_link - Power up the phy link + * @adapter: address of board private structure + */ +static void igc_power_up_link(struct igc_adapter *adapter) +{ + igc_reset_phy(&adapter->hw); + + igc_power_up_phy_copper(&adapter->hw); + + igc_setup_link(&adapter->hw); +} + +/** + * igc_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + */ +static void igc_release_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + if (!pci_device_is_present(adapter->pdev)) + return; + + /* Let firmware take over control of h/w */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); +} + +/** + * igc_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. 
+ */ +static void igc_get_hw_control(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = rd32(IGC_CTRL_EXT); + wr32(IGC_CTRL_EXT, + ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); +} + +static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf) +{ + dma_unmap_single(dev, dma_unmap_addr(buf, dma), + dma_unmap_len(buf, len), DMA_TO_DEVICE); + + dma_unmap_len_set(buf, len, 0); +} + +/** + * igc_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + */ +static void igc_clean_tx_ring(struct igc_ring *tx_ring) +{ + u16 i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + u32 xsk_frames = 0; + + while (i != tx_ring->next_to_use) { + union igc_adv_tx_desc *eop_desc, *tx_desc; + + switch (tx_buffer->type) { + case IGC_TX_BUFFER_TYPE_XSK: + xsk_frames++; + break; + case IGC_TX_BUFFER_TYPE_XDP: + xdp_return_frame(tx_buffer->xdpf); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + case IGC_TX_BUFFER_TYPE_SKB: + dev_kfree_skb_any(tx_buffer->skb); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + default: + netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); + break; + } + + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IGC_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + } + + tx_buffer->next_to_watch = NULL; + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + if (tx_ring->xsk_pool && xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* Zero out the buffer ring */ + memset(tx_ring->tx_buffer_info, 0, + sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + /* reset next_to_use and next_to_clean */ + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * igc_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + */ +void igc_free_tx_resources(struct igc_ring *tx_ring) +{ + igc_disable_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igc_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void igc_free_all_tx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * igc_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + */ +static void igc_clean_all_tx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if 
(adapter->tx_ring[i]) + igc_clean_tx_ring(adapter->tx_ring[i]); +} + +static void igc_disable_tx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 txdctl; + + txdctl = rd32(IGC_TXDCTL(idx)); + txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; + txdctl |= IGC_TXDCTL_SWFLUSH; + wr32(IGC_TXDCTL(idx), txdctl); +} + +/** + * igc_disable_all_tx_rings_hw - Disable all transmit queue operation + * @adapter: board private structure + */ +static void igc_disable_all_tx_rings_hw(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + igc_disable_tx_ring_hw(tx_ring); + } +} + +/** + * igc_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + */ +int igc_setup_tx_resources(struct igc_ring *tx_ring) +{ + struct net_device *ndev = tx_ring->netdev; + struct device *dev = tx_ring->dev; + int size = 0; + + size = sizeof(struct igc_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_tx_resources(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igc_setup_tx_resources(adapter->tx_ring[i]); + if (err) { + netdev_err(dev, "Error on Tx queue %u setup\n", i); + for (i--; i >= 0; i--) + igc_free_tx_resources(adapter->tx_ring[i]); + break; + } + } + + return err; +} + +static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, + buffer_info->dma, + buffer_info->page_offset, + igc_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, + buffer_info->dma, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(buffer_info->page, + buffer_info->pagecnt_bias); + + i++; + if (i == rx_ring->count) + i = 0; + } +} + +static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring) +{ + struct igc_rx_buffer *bi; + u16 i; + + for (i = 0; i < ring->count; i++) { + bi = &ring->rx_buffer_info[i]; + if (!bi->xdp) + continue; + + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + } +} + +/** + * igc_clean_rx_ring - Free Rx Buffers per Queue + * @ring: ring to free buffers from + */ +static void igc_clean_rx_ring(struct igc_ring *ring) +{ + if (ring->xsk_pool) + igc_clean_rx_ring_xsk_pool(ring); + else + igc_clean_rx_ring_page_shared(ring); + + clear_ring_uses_large_buffer(ring); + + ring->next_to_alloc = 0; + ring->next_to_clean = 0; + ring->next_to_use = 0; +} + +/** + * igc_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + */ +static void igc_clean_all_rx_rings(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igc_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * igc_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + */ +void igc_free_rx_resources(struct igc_ring *rx_ring) +{ + igc_clean_rx_ring(rx_ring); + + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * igc_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + */ +static void igc_free_all_rx_resources(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + igc_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * igc_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + */ +int igc_setup_rx_resources(struct igc_ring *rx_ring) +{ + struct net_device *ndev = rx_ring->netdev; + struct device *dev = rx_ring->dev; + u8 index = rx_ring->queue_index; + int size, desc_len, res; + + /* XDP RX-queue info */ + if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, + rx_ring->q_vector->napi.napi_id); + if (res < 0) { + netdev_err(ndev, "Failed to register xdp_rxq index %u\n", + index); + return res; + } + + size = sizeof(struct igc_rx_buffer) * rx_ring->count; + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + desc_len = sizeof(union igc_adv_rx_desc); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; + +err: + 
xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igc_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int igc_setup_all_rx_resources(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igc_setup_rx_resources(adapter->rx_ring[i]); + if (err) { + netdev_err(dev, "Error on Rx queue %u setup\n", i); + for (i--; i >= 0; i--) + igc_free_rx_resources(adapter->rx_ring[i]); + break; + } + } + + return err; +} + +static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + if (!igc_xdp_is_enabled(adapter) || + !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) + return NULL; + + return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); +} + +/** + * igc_configure_rx_ring - Configure a receive ring after Reset + * @adapter: board private structure + * @ring: receive ring to be configured + * + * Configure the Rx unit of the MAC after a reset. + */ +static void igc_configure_rx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + union igc_adv_rx_desc *rx_desc; + int reg_idx = ring->reg_idx; + u32 srrctl = 0, rxdctl = 0; + u64 rdba = ring->dma; + u32 buf_size; + + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + ring->xsk_pool = igc_get_xsk_pool(adapter, ring); + if (ring->xsk_pool) { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL)); + } + + if (igc_xdp_is_enabled(adapter)) + set_ring_uses_large_buffer(ring); + + /* disable the queue */ + wr32(IGC_RXDCTL(reg_idx), 0); + + /* Set DMA base address registers */ + wr32(IGC_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(IGC_RDBAH(reg_idx), rdba >> 32); + wr32(IGC_RDLEN(reg_idx), + ring->count * sizeof(union igc_adv_rx_desc)); + + /* initialize head and tail */ + ring->tail = adapter->io_addr + IGC_RDT(reg_idx); + wr32(IGC_RDH(reg_idx), 0); + writel(0, ring->tail); + + /* reset next-to- use/clean to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + if (ring->xsk_pool) + buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); + else if (ring_uses_large_buffer(ring)) + buf_size = IGC_RXBUFFER_3072; + else + buf_size = IGC_RXBUFFER_2048; + + srrctl = rd32(IGC_SRRCTL(reg_idx)); + srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK | + IGC_SRRCTL_DESCTYPE_MASK); + srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN); + srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size); + srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; + + wr32(IGC_SRRCTL(reg_idx), srrctl); + + rxdctl |= IGC_RX_PTHRESH; + rxdctl |= IGC_RX_HTHRESH << 8; + rxdctl |= IGC_RX_WTHRESH << 16; + + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct igc_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IGC_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + /* enable receive descriptor fetching */ + rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; + + wr32(IGC_RXDCTL(reg_idx), rxdctl); +} + +/** + * igc_configure_rx - Configure receive Unit 
after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + */ +static void igc_configure_rx(struct igc_adapter *adapter) +{ + int i; + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + igc_configure_rx_ring(adapter, adapter->rx_ring[i]); +} + +/** + * igc_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. + */ +static void igc_configure_tx_ring(struct igc_adapter *adapter, + struct igc_ring *ring) +{ + struct igc_hw *hw = &adapter->hw; + int reg_idx = ring->reg_idx; + u64 tdba = ring->dma; + u32 txdctl = 0; + + ring->xsk_pool = igc_get_xsk_pool(adapter, ring); + + /* disable the queue */ + wr32(IGC_TXDCTL(reg_idx), 0); + wrfl(); + + wr32(IGC_TDLEN(reg_idx), + ring->count * sizeof(union igc_adv_tx_desc)); + wr32(IGC_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(IGC_TDBAH(reg_idx), tdba >> 32); + + ring->tail = adapter->io_addr + IGC_TDT(reg_idx); + wr32(IGC_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGC_TX_PTHRESH; + txdctl |= IGC_TX_HTHRESH << 8; + txdctl |= IGC_TX_WTHRESH << 16; + + txdctl |= IGC_TXDCTL_QUEUE_ENABLE; + wr32(IGC_TXDCTL(reg_idx), txdctl); +} + +/** + * igc_configure_tx - Configure transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + */ +static void igc_configure_tx(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igc_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +/** + * igc_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + */ +static void igc_setup_mrqc(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 j, num_rx_queues; + u32 mrqc, rxcsum; + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (j = 0; j < 10; j++) + wr32(IGC_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGC_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGC_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igc_write_rss_indir_tbl(adapter); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. 
No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(IGC_RXCSUM); + rxcsum |= IGC_RXCSUM_PCSD; + + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= IGC_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(IGC_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP | + IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + mrqc |= IGC_MRQC_ENABLE_RSS_MQ; + + wr32(IGC_MRQC, mrqc); +} + +/** + * igc_setup_rctl - configure the receive control registers + * @adapter: Board private structure + */ +static void igc_setup_rctl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 rctl; + + rctl = rd32(IGC_RCTL); + + rctl &= ~(3 << IGC_RCTL_MO_SHIFT); + rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC); + + rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); + + /* enable stripping of CRC. Newer features require + * that the HW strips the CRC. + */ + rctl |= IGC_RCTL_SECRC; + + /* disable store bad packets and clear size bits. */ + rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256); + + /* enable LPE to allow for reception of jumbo frames */ + rctl |= IGC_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ + wr32(IGC_RXDCTL(0), 0); + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in set_rx_mode + */ + rctl |= (IGC_RCTL_SBP | /* Receive bad packets */ + IGC_RCTL_BAM | /* RX All Bcast Pkts */ + IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */ + IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */ + } + + wr32(IGC_RCTL, rctl); +} + +/** + * igc_setup_tctl - configure the transmit control registers + * @adapter: Board private structure + */ +static void igc_setup_tctl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tctl; + + /* disable queue 0 which icould be enabled by default */ + wr32(IGC_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_CT; + tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC | + (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT); + + /* Enable transmits */ + tctl |= IGC_TCTL_EN; + + wr32(IGC_TCTL, tctl); +} + +/** + * igc_set_mac_filter_hw() - Set MAC address filter in hardware + * @adapter: Pointer to adapter where the filter should be set + * @index: Filter index + * @type: MAC address filter type (source or destination) + * @addr: MAC address + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. 
+ */ +static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index, + enum igc_mac_filter_type type, + const u8 *addr, int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 ral, rah; + + if (WARN_ON(index >= hw->mac.rar_entry_count)) + return; + + ral = le32_to_cpup((__le32 *)(addr)); + rah = le16_to_cpup((__le16 *)(addr + 4)); + + if (type == IGC_MAC_FILTER_TYPE_SRC) { + rah &= ~IGC_RAH_ASEL_MASK; + rah |= IGC_RAH_ASEL_SRC_ADDR; + } + + if (queue >= 0) { + rah &= ~IGC_RAH_QSEL_MASK; + rah |= (queue << IGC_RAH_QSEL_SHIFT); + rah |= IGC_RAH_QSEL_ENABLE; + } + + rah |= IGC_RAH_AV; + + wr32(IGC_RAL(index), ral); + wr32(IGC_RAH(index), rah); + + netdev_dbg(dev, "MAC address filter set in HW: index %d", index); +} + +/** + * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware + * @adapter: Pointer to adapter where the filter should be cleared + * @index: Filter index + */ +static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + + if (WARN_ON(index >= hw->mac.rar_entry_count)) + return; + + wr32(IGC_RAL(index), 0); + wr32(IGC_RAH(index), 0); + + netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index); +} + +/* Set default MAC address for the PF in the first RAR entry */ +static void igc_set_default_mac_filter(struct igc_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + u8 *addr = adapter->hw.mac.addr; + + netdev_dbg(dev, "Set default MAC address filter: address %pM", addr); + + igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); +} + +/** + * igc_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int igc_set_mac(struct net_device *netdev, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + /* set the correct pool for the new PF MAC address in entry 0 */ + igc_set_default_mac_filter(adapter); + + return 0; +} + +/** + * igc_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int igc_write_mc_addr_list(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + igc_update_mc_addr_list(hw, NULL, 0); + return 0; + } + + mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* The shared function expects a packed array of only addresses. 
*/ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + igc_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime, + bool *first_flag, bool *insert_empty) +{ + struct igc_adapter *adapter = netdev_priv(ring->netdev); + ktime_t cycle_time = adapter->cycle_time; + ktime_t base_time = adapter->base_time; + ktime_t now = ktime_get_clocktai(); + ktime_t baset_est, end_of_cycle; + s32 launchtime; + s64 n; + + n = div64_s64(ktime_sub_ns(now, base_time), cycle_time); + + baset_est = ktime_add_ns(base_time, cycle_time * (n)); + end_of_cycle = ktime_add_ns(baset_est, cycle_time); + + if (ktime_compare(txtime, end_of_cycle) >= 0) { + if (baset_est != ring->last_ff_cycle) { + *first_flag = true; + ring->last_ff_cycle = baset_est; + + if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0) + *insert_empty = true; + } + } + + /* Introducing a window at end of cycle on which packets + * potentially not honor launchtime. Window of 5us chosen + * considering software update the tail pointer and packets + * are dma'ed to packet buffer. + */ + if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC)) + netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", + txtime); + + ring->last_tx_cycle = end_of_cycle; + + launchtime = ktime_sub_ns(txtime, baset_est); + if (launchtime > 0) + div_s64_rem(launchtime, cycle_time, &launchtime); + else + launchtime = 0; + + return cpu_to_le32(launchtime); +} + +static int igc_init_empty_frame(struct igc_ring *ring, + struct igc_tx_buffer *buffer, + struct sk_buff *skb) +{ + unsigned int size; + dma_addr_t dma; + + size = skb_headlen(skb); + + dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); + return -ENOMEM; + } + + buffer->skb = skb; + buffer->protocol = 0; + buffer->bytecount = skb->len; + buffer->gso_segs = 1; + buffer->time_stamp = jiffies; + dma_unmap_len_set(buffer, len, skb->len); + dma_unmap_addr_set(buffer, dma, dma); + + return 0; +} + +static int igc_init_tx_empty_descriptor(struct igc_ring *ring, + struct sk_buff *skb, + struct igc_tx_buffer *first) +{ + union igc_adv_tx_desc *desc; + u32 cmd_type, olinfo_status; + int err; + + if (!igc_desc_unused(ring)) + return -EBUSY; + + err = igc_init_empty_frame(ring, first, skb); + if (err) + return err; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + first->bytecount; + olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + + desc = IGC_TX_DESC(ring, ring->next_to_use); + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); + + netdev_tx_sent_queue(txring_txq(ring), skb->len); + + first->next_to_watch = desc; + + ring->next_to_use++; + if (ring->next_to_use == ring->count) + ring->next_to_use = 0; + + return 0; +} + +#define IGC_EMPTY_FRAME_SIZE 60 + +static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, + __le32 launch_time, bool first_flag, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct igc_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = IGC_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT; + + /* For i225, context index must be unique per ring. */ + if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + mss_l4len_idx |= tx_ring->reg_idx << 4; + + if (first_flag) + mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + context_desc->launch_time = launch_time; +} + +static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && + !tx_ring->launchtime_enable) + return; + goto no_csum; + } + + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + fallthrough; + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (skb_csum_is_sctp(skb)) { + type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; + break; + } + fallthrough; + default: + skb_checksum_help(skb); + goto csum_failed; + } + + /* update TX checksum flag */ + first->tx_flags |= IGC_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: + vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, 0); +} + +static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) +{ + struct net_device *netdev = tx_ring->netdev; + + netif_stop_subqueue(netdev, tx_ring->queue_index); + + /* memory barriier comment */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (igc_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + netif_wake_subqueue(netdev, tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) +{ + if (igc_desc_unused(tx_ring) >= size) + return 0; + return __igc_maybe_stop_tx(tx_ring, size); +} + +#define IGC_SET_FLAG(_input, _flag, _result) \ + (((_flag) <= (_result)) ? \ + ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ + ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) + +static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = IGC_ADVTXD_DTYP_DATA | + IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, + IGC_ADVTXD_DCMD_VLE); + + /* set segmentation bits for TSO */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, + (IGC_ADVTXD_DCMD_TSE)); + + /* set timestamp bit if present, will select the register set + * based on the _TSTAMP(_X) bit. 
+ */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, + (IGC_ADVTXD_MAC_TSTAMP)); + + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_1, + (IGC_ADVTXD_TSTAMP_REG_1)); + + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_2, + (IGC_ADVTXD_TSTAMP_REG_2)); + + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_3, + (IGC_ADVTXD_TSTAMP_REG_3)); + + /* insert frame checksum */ + cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); + + return cmd_type; +} + +static void igc_tx_olinfo_status(struct igc_ring *tx_ring, + union igc_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; + + /* insert L4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) * + ((IGC_TXD_POPTS_TXSM << 8) / + IGC_TX_FLAGS_CSUM); + + /* insert IPv4 checksum */ + olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) * + (((IGC_TXD_POPTS_IXSM << 8)) / + IGC_TX_FLAGS_IPV4); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int igc_tx_map(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 tx_flags = first->tx_flags; + skb_frag_t *frag; + u16 i = tx_ring->next_to_use; + unsigned int data_len, size; + dma_addr_t dma; + u32 cmd_type; + + cmd_type = igc_tx_cmd_type(skb, tx_flags); + tx_desc = IGC_TX_DESC(tx_ring, i); + + igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > IGC_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IGC_MAX_DATA_PER_TXD; + size -= IGC_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGC_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, + size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IGC_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
+ */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. */ + igc_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + writel(i, tx_ring->tail); + } + + return 0; +dma_error: + netdev_err(tx_ring->netdev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; + + /* clear dma mappings for failed tx_buffer_info map */ + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + + if (i-- == 0) + i += tx_ring->count; + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + + tx_ring->next_to_use = i; + + return -1; +} + +static int igc_tso(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + __le32 launch_time, bool first_flag, + u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); + type_tucmd |= IGC_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM | + IGC_TX_FLAGS_IPV4; + } else { + ip.v6->payload_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) { + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + } else { + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* MSS L4LEN IDX */ + mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; + + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, + vlan_macip_lens, type_tucmd, mss_l4len_idx); + + return 1; +} + 
+static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *skb, u32 *flags) +{ + int i; + + for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { + struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; + + if (tstamp->skb) + continue; + + tstamp->skb = skb_get(skb); + tstamp->start = jiffies; + *flags = tstamp->flags; + + return true; + } + + return false; +} + +static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, + struct igc_ring *tx_ring) +{ + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); + bool first_flag = false, insert_empty = false; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + struct igc_tx_buffer *first; + __le32 launch_time = 0; + u32 tx_flags = 0; + unsigned short f; + ktime_t txtime; + u8 hdr_len = 0; + int tso = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size( + &skb_shinfo(skb)->frags[f])); + + if (igc_maybe_stop_tx(tx_ring, count + 5)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + if (!tx_ring->launchtime_enable) + goto done; + + txtime = skb->tstamp; + skb->tstamp = ktime_set(0, 0); + launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty); + + if (insert_empty) { + struct igc_tx_buffer *empty_info; + struct sk_buff *empty; + void *data; + + empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); + if (!empty) + goto done; + + data = skb_put(empty, IGC_EMPTY_FRAME_SIZE); + memset(data, 0, IGC_EMPTY_FRAME_SIZE); + + igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); + + if (igc_init_tx_empty_descriptor(tx_ring, + empty, + empty_info) < 0) + dev_kfree_skb_any(empty); + } + +done: + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->type = IGC_TX_BUFFER_TYPE_SKB; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + if (adapter->qbv_transition || tx_ring->oper_gate_closed) + goto out_drop; + + if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) { + adapter->stats.txdrop++; + goto out_drop; + } + + if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && + skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + /* FIXME: add support for retrieving timestamps from + * the other timer registers before skipping the + * timestamping request. 
+ */ + unsigned long flags; + u32 tstamp_flags; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags; + } else { + adapter->tx_hwtstamp_skipped++; + } + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= IGC_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT); + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + igc_tx_csum(tx_ring, first, launch_time, first_flag); + + igc_tx_map(tx_ring, first, hdr_len); + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + return NETDEV_TX_OK; +} + +static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, + struct sk_buff *skb) +{ + unsigned int r_idx = skb->queue_mapping; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static netdev_tx_t igc_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb->len < 17) { + if (skb_padto(skb, 17)) + return NETDEV_TX_OK; + skb->len = 17; + } + + return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); +} + +static void igc_rx_checksum(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set */ + if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM)) + return; + + /* Rx checksum disabled via ethtool */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (igc_test_staterr(rx_desc, + IGC_RXDEXT_STATERR_L4E | + IGC_RXDEXT_STATERR_IPE)) { + /* work around errata with sctp packets where the TCPE aka + * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) + * packets (aka let the stack check the crc32c) + */ + if (!(skb->len == 60 && + test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.csum_err++; + u64_stats_update_end(&ring->rx_syncp); + } + /* let the stack verify checksum errors */ + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS | + IGC_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + netdev_dbg(ring->netdev, "cksum success: bits %08X\n", + le32_to_cpu(rx_desc->wb.upper.status_error)); +} + +/* Mapping HW RSS Type to enum pkt_hash_types */ +static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = { + [IGC_RSS_TYPE_NO_HASH] = PKT_HASH_TYPE_L2, + [IGC_RSS_TYPE_HASH_TCP_IPV4] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_IPV4] = PKT_HASH_TYPE_L3, + [IGC_RSS_TYPE_HASH_TCP_IPV6] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_IPV6_EX] = PKT_HASH_TYPE_L3, + [IGC_RSS_TYPE_HASH_IPV6] = PKT_HASH_TYPE_L3, + [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_UDP_IPV4] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_UDP_IPV6] = PKT_HASH_TYPE_L4, + [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4, + [10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 
"Reserved" by HW */ + [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */ + [12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisons */ + [13] = PKT_HASH_TYPE_NONE, + [14] = PKT_HASH_TYPE_NONE, + [15] = PKT_HASH_TYPE_NONE, +}; + +static inline void igc_rx_hash(struct igc_ring *ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (ring->netdev->features & NETIF_F_RXHASH) { + u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + u32 rss_type = igc_rss_type(rx_desc); + + skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]); + } +} + +static void igc_rx_vlan(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + u16 vid; + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) { + if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) && + test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) + vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); + else + vid = le16_to_cpu(rx_desc->wb.upper.vlan); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } +} + +/** + * igc_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in order + * to populate the hash, checksum, VLAN, protocol, and other fields within the + * skb. + */ +static void igc_process_skb_fields(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + igc_rx_hash(rx_ring, rx_desc, skb); + + igc_rx_checksum(rx_ring, rx_desc, skb); + + igc_rx_vlan(rx_ring, rx_desc, skb); + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features) +{ + bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_CTRL); + + if (enable) { + /* enable VLAN tag insert/strip */ + ctrl |= IGC_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~IGC_CTRL_VME; + } + wr32(IGC_CTRL, ctrl); +} + +static void igc_restore_vlan(struct igc_adapter *adapter) +{ + igc_vlan_mode(adapter->netdev, adapter->netdev->features); +} + +static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, + const unsigned int size, + int *rx_buffer_pgcnt) +{ + struct igc_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + *rx_buffer_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buffer->page); +#else + 0; +#endif + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer, + unsigned int truesize) +{ +#if (PAGE_SIZE < 8192) + buffer->page_offset ^= truesize; +#else + buffer->page_offset += truesize; +#endif +} + +static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(ring) / 2; +#else + truesize = ring_uses_build_skb(ring) ? 
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + return truesize; +} + +/** + * igc_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: size of buffer to be added + * + * This function will add the data contained in rx_buffer->page to the skb. + */ +static void igc_add_rx_frag(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = igc_rx_pg_size(rx_ring) / 2; +#else + truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(IGC_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + + igc_rx_buffer_flip(rx_buffer, truesize); +} + +static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct xdp_buff *xdp) +{ + unsigned int size = xdp->data_end - xdp->data; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data_meta); + + /* build an skb around the page buffer */ + skb = napi_build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, size); + if (metasize) + skb_metadata_set(skb, metasize); + + igc_rx_buffer_flip(rx_buffer, truesize); + return skb; +} + +static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int size = xdp->data_end - xdp->data; + unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + void *va = xdp->data; + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + net_prefetch(xdp->data_meta); + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + IGC_RX_HDR_LEN + metasize); + if (unlikely(!skb)) + return NULL; + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > IGC_RX_HDR_LEN) + headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, + ALIGN(headlen + metasize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (va + headlen) - page_address(rx_buffer->page), + size, truesize); + igc_rx_buffer_flip(rx_buffer, truesize); + } else { + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +/** + * igc_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + */ +static void igc_reuse_rx_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *old_buff) +{ + u16 nta = 
rx_ring->next_to_alloc; + struct igc_rx_buffer *new_buff; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls. + */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote and pfmemalloc pages */ + if (!dev_page_is_reusable(page)) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) + return false; +#else +#define IGC_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) + + if (rx_buffer->page_offset > IGC_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * igc_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + */ +static bool igc_is_non_eop(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGC_RX_DESC(rx_ring, ntc)); + + if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))) + return false; + + return true; +} + +/** + * igc_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
+ */ +static bool igc_cleanup_headers(struct igc_ring *rx_ring, + union igc_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; + + if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { + struct net_device *netdev = rx_ring->netdev; + + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +static void igc_put_rx_buffer(struct igc_ring *rx_ring, + struct igc_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) +{ + if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { + /* hand second half of page back to the ring */ + igc_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* We are not reusing the buffer so unmap it and free + * any references we are holding to it + */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + +static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) +{ + struct igc_adapter *adapter = rx_ring->q_vector->adapter; + + if (ring_uses_build_skb(rx_ring)) + return IGC_SKB_PAD; + if (igc_xdp_is_enabled(adapter)) + return XDP_PACKET_HEADROOM; + + return 0; +} + +static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, + struct igc_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + igc_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IGC_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = igc_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +/** + * igc_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: rx descriptor ring + * @cleaned_count: number of buffers to clean + */ +static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) +{ + union igc_adv_rx_desc *rx_desc; + u16 i = rx_ring->next_to_use; + struct igc_rx_buffer *bi; + u16 bufsz; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = IGC_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + bufsz = igc_rx_bufsz(rx_ring); + + do { + if (!igc_alloc_mapped_page(rx_ring, bi)) + break; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IGC_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) +{ + union igc_adv_rx_desc *desc; + u16 i = ring->next_to_use; + struct igc_rx_buffer *bi; + dma_addr_t dma; + bool ok = true; + + if (!count) + return ok; + + XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff); + + desc = IGC_RX_DESC(ring, i); + bi = &ring->rx_buffer_info[i]; + i -= ring->count; + + do { + bi->xdp = xsk_buff_alloc(ring->xsk_pool); + if (!bi->xdp) { + ok = false; + break; + } + + dma = xsk_buff_xdp_get_dma(bi->xdp); + desc->read.pkt_addr = cpu_to_le64(dma); + + desc++; + bi++; + i++; + if (unlikely(!i)) { + desc = IGC_RX_DESC(ring, 0); + bi = ring->rx_buffer_info; + i -= ring->count; + } + + /* Clear the length for the next_to_use descriptor. */ + desc->wb.upper.length = 0; + + count--; + } while (count); + + i += ring->count; + + if (ring->next_to_use != i) { + ring->next_to_use = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, ring->tail); + } + + return ok; +} + +/* This function requires __netif_tx_lock is held by the caller. */ +static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, + struct xdp_frame *xdpf) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); + u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? 
sinfo->nr_frags : 0; + u16 count, index = ring->next_to_use; + struct igc_tx_buffer *head = &ring->tx_buffer_info[index]; + struct igc_tx_buffer *buffer = head; + union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index); + u32 olinfo_status, len = xdpf->len, cmd_type; + void *data = xdpf->data; + u16 i; + + count = TXD_USE_COUNT(len); + for (i = 0; i < nr_frags; i++) + count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); + + if (igc_maybe_stop_tx(ring, count + 3)) { + /* this is a hard error */ + return -EBUSY; + } + + i = 0; + head->bytecount = xdp_get_frame_len(xdpf); + head->type = IGC_TX_BUFFER_TYPE_XDP; + head->gso_segs = 1; + head->xdpf = xdpf; + + olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + + for (;;) { + dma_addr_t dma; + + dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, + "Failed to map DMA for TX\n"); + goto unmap; + } + + dma_unmap_len_set(buffer, len, len); + dma_unmap_addr_set(buffer, dma, dma); + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | len; + + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.buffer_addr = cpu_to_le64(dma); + + buffer->protocol = 0; + + if (++index == ring->count) + index = 0; + + if (i == nr_frags) + break; + + buffer = &ring->tx_buffer_info[index]; + desc = IGC_TX_DESC(ring, index); + desc->read.olinfo_status = 0; + + data = skb_frag_address(&sinfo->frags[i]); + len = skb_frag_size(&sinfo->frags[i]); + i++; + } + desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD); + + netdev_tx_sent_queue(txring_txq(ring), head->bytecount); + /* set the timestamp */ + head->time_stamp = jiffies; + /* set next_to_watch value indicating a packet is present */ + head->next_to_watch = desc; + ring->next_to_use = index; + + return 0; + +unmap: + for (;;) { + buffer = &ring->tx_buffer_info[index]; + if (dma_unmap_len(buffer, len)) + dma_unmap_page(ring->dev, + dma_unmap_addr(buffer, dma), + dma_unmap_len(buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(buffer, len, 0); + if (buffer == head) + break; + + if (!index) + index += ring->count; + index--; + } + + return -ENOMEM; +} + +static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, + int cpu) +{ + int index = cpu; + + if (unlikely(index < 0)) + index = 0; + + while (index >= adapter->num_tx_queues) + index -= adapter->num_tx_queues; + + return adapter->tx_ring[index]; +} + +static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int res; + + if (unlikely(!xdpf)) + return -EFAULT; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + res = igc_xdp_init_tx_descriptor(ring, xdpf); + __netif_tx_unlock(nq); + return res; +} + +/* This function assumes rcu_read_lock() is held by the caller. 
*/ +static int __igc_xdp_run_prog(struct igc_adapter *adapter, + struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + u32 act = bpf_prog_run_xdp(prog, xdp); + + switch (act) { + case XDP_PASS: + return IGC_XDP_PASS; + case XDP_TX: + if (igc_xdp_xmit_back(adapter, xdp) < 0) + goto out_failure; + return IGC_XDP_TX; + case XDP_REDIRECT: + if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) + goto out_failure; + return IGC_XDP_REDIRECT; + break; + default: + bpf_warn_invalid_xdp_action(adapter->netdev, prog, act); + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(adapter->netdev, prog, act); + fallthrough; + case XDP_DROP: + return IGC_XDP_CONSUMED; + } +} + +static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, + struct xdp_buff *xdp) +{ + struct bpf_prog *prog; + int res; + + prog = READ_ONCE(adapter->xdp_prog); + if (!prog) { + res = IGC_XDP_PASS; + goto out; + } + + res = __igc_xdp_run_prog(adapter, prog, xdp); + +out: + return ERR_PTR(-res); +} + +/* This function assumes __netif_tx_lock is held by the caller. */ +static void igc_flush_tx_descriptors(struct igc_ring *ring) +{ + /* Once tail pointer is updated, hardware can fetch the descriptors + * any time so we issue a write membar here to ensure all memory + * writes are complete before the tail pointer is updated. + */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +static void igc_finalize_xdp(struct igc_adapter *adapter, int status) +{ + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + + if (status & IGC_XDP_TX) { + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + igc_flush_tx_descriptors(ring); + __netif_tx_unlock(nq); + } + + if (status & IGC_XDP_REDIRECT) + xdp_do_flush(); +} + +static void igc_update_rx_stats(struct igc_q_vector *q_vector, + unsigned int packets, unsigned int bytes) +{ + struct igc_ring *ring = q_vector->rx.ring; + + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.packets += packets; + ring->rx_stats.bytes += bytes; + u64_stats_update_end(&ring->rx_syncp); + + q_vector->rx.total_packets += packets; + q_vector->rx.total_bytes += bytes; +} + +static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) +{ + unsigned int total_bytes = 0, total_packets = 0; + struct igc_adapter *adapter = q_vector->adapter; + struct igc_ring *rx_ring = q_vector->rx.ring; + struct sk_buff *skb = rx_ring->skb; + u16 cleaned_count = igc_desc_unused(rx_ring); + int xdp_status = 0, rx_buffer_pgcnt; + + while (likely(total_packets < budget)) { + union igc_adv_rx_desc *rx_desc; + struct igc_rx_buffer *rx_buffer; + unsigned int size, truesize; + struct igc_xdp_buff ctx; + ktime_t timestamp = 0; + int pkt_offset = 0; + void *pktbuf; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGC_RX_BUFFER_WRITE) { + igc_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); + truesize = igc_get_rx_frame_truesize(rx_ring, size); + + pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; + + if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = 
igc_ptp_rx_pktstamp(q_vector->adapter, + pktbuf); + ctx.rx_ts = timestamp; + pkt_offset = IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + if (!skb) { + xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq); + xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring), + igc_rx_offset(rx_ring) + pkt_offset, + size, true); + xdp_buff_clear_frags_flag(&ctx.xdp); + ctx.rx_desc = rx_desc; + + skb = igc_xdp_run_prog(adapter, &ctx.xdp); + } + + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + switch (xdp_res) { + case IGC_XDP_CONSUMED: + rx_buffer->pagecnt_bias++; + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + igc_rx_buffer_flip(rx_buffer, truesize); + xdp_status |= xdp_res; + break; + } + + total_packets++; + total_bytes += size; + } else if (skb) + igc_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp); + else + skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp, + timestamp); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (igc_is_non_eop(rx_ring, rx_desc)) + continue; + + /* verify the packet layout is correct */ + if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, VLAN, and protocol */ + igc_process_skb_fields(rx_ring, rx_desc, skb); + + napi_gro_receive(&q_vector->napi, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } + + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + igc_update_rx_stats(q_vector, total_packets, total_bytes); + + if (cleaned_count) + igc_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_packets; +} + +static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, + struct xdp_buff *xdp) +{ + unsigned int totalsize = xdp->data_end - xdp->data_meta; + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + + net_prefetch(xdp->data_meta); + + skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + memcpy(__skb_put(skb, totalsize), xdp->data_meta, + ALIGN(totalsize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + + return skb; +} + +static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, + union igc_adv_rx_desc *desc, + struct xdp_buff *xdp, + ktime_t timestamp) +{ + struct igc_ring *ring = q_vector->rx.ring; + struct sk_buff *skb; + + skb = igc_construct_skb_zc(ring, xdp); + if (!skb) { + ring->rx_stats.alloc_failed++; + return; + } + + if (timestamp) + skb_hwtstamps(skb)->hwtstamp = timestamp; + + if (igc_cleanup_headers(ring, desc, skb)) + return; + + igc_process_skb_fields(ring, desc, skb); + napi_gro_receive(&q_vector->napi, skb); +} + +static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp) +{ + /* xdp_buff pointer used by ZC code path is alloc as xdp_buff_xsk. 
The + * igc_xdp_buff shares its layout with xdp_buff_xsk and private + * igc_xdp_buff fields fall into xdp_buff_xsk->cb + */ + return (struct igc_xdp_buff *)xdp; +} + +static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_ring *ring = q_vector->rx.ring; + u16 cleaned_count = igc_desc_unused(ring); + int total_bytes = 0, total_packets = 0; + u16 ntc = ring->next_to_clean; + struct bpf_prog *prog; + bool failure = false; + int xdp_status = 0; + + rcu_read_lock(); + + prog = READ_ONCE(adapter->xdp_prog); + + while (likely(total_packets < budget)) { + union igc_adv_rx_desc *desc; + struct igc_rx_buffer *bi; + struct igc_xdp_buff *ctx; + ktime_t timestamp = 0; + unsigned int size; + int res; + + desc = IGC_RX_DESC(ring, ntc); + size = le16_to_cpu(desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + bi = &ring->rx_buffer_info[ntc]; + + ctx = xsk_buff_to_igc_ctx(bi->xdp); + ctx->rx_desc = desc; + + if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) { + timestamp = igc_ptp_rx_pktstamp(q_vector->adapter, + bi->xdp->data); + ctx->rx_ts = timestamp; + + bi->xdp->data += IGC_TS_HDR_LEN; + + /* HW timestamp has been copied into local variable. Metadata + * length when XDP program is called should be 0. + */ + bi->xdp->data_meta += IGC_TS_HDR_LEN; + size -= IGC_TS_HDR_LEN; + } + + bi->xdp->data_end = bi->xdp->data + size; + xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); + + res = __igc_xdp_run_prog(adapter, prog, bi->xdp); + switch (res) { + case IGC_XDP_PASS: + igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); + fallthrough; + case IGC_XDP_CONSUMED: + xsk_buff_free(bi->xdp); + break; + case IGC_XDP_TX: + case IGC_XDP_REDIRECT: + xdp_status |= res; + break; + } + + bi->xdp = NULL; + total_bytes += size; + total_packets++; + cleaned_count++; + ntc++; + if (ntc == ring->count) + ntc = 0; + } + + ring->next_to_clean = ntc; + rcu_read_unlock(); + + if (cleaned_count >= IGC_RX_BUFFER_WRITE) + failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count); + + if (xdp_status) + igc_finalize_xdp(adapter, xdp_status); + + igc_update_rx_stats(q_vector, total_packets, total_bytes); + + if (xsk_uses_need_wakeup(ring->xsk_pool)) { + if (failure || ring->next_to_clean == ring->next_to_use) + xsk_set_rx_need_wakeup(ring->xsk_pool); + else + xsk_clear_rx_need_wakeup(ring->xsk_pool); + return total_packets; + } + + return failure ? 
budget : total_packets; +} + +static void igc_update_tx_stats(struct igc_q_vector *q_vector, + unsigned int packets, unsigned int bytes) +{ + struct igc_ring *ring = q_vector->tx.ring; + + u64_stats_update_begin(&ring->tx_syncp); + ring->tx_stats.bytes += bytes; + ring->tx_stats.packets += packets; + u64_stats_update_end(&ring->tx_syncp); + + q_vector->tx.total_bytes += bytes; + q_vector->tx.total_packets += packets; +} + +static void igc_xdp_xmit_zc(struct igc_ring *ring) +{ + struct xsk_buff_pool *pool = ring->xsk_pool; + struct netdev_queue *nq = txring_txq(ring); + union igc_adv_tx_desc *tx_desc = NULL; + int cpu = smp_processor_id(); + struct xdp_desc xdp_desc; + u16 budget, ntu; + + if (!netif_carrier_ok(ring->netdev)) + return; + + __netif_tx_lock(nq, cpu); + + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + + ntu = ring->next_to_use; + budget = igc_desc_unused(ring); + + while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { + u32 cmd_type, olinfo_status; + struct igc_tx_buffer *bi; + dma_addr_t dma; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + xdp_desc.len; + olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; + + dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); + + tx_desc = IGC_TX_DESC(ring, ntu); + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + bi = &ring->tx_buffer_info[ntu]; + bi->type = IGC_TX_BUFFER_TYPE_XSK; + bi->protocol = 0; + bi->bytecount = xdp_desc.len; + bi->gso_segs = 1; + bi->time_stamp = jiffies; + bi->next_to_watch = tx_desc; + + netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len); + + ntu++; + if (ntu == ring->count) + ntu = 0; + } + + ring->next_to_use = ntu; + if (tx_desc) { + igc_flush_tx_descriptors(ring); + xsk_tx_release(pool); + } + + __netif_tx_unlock(nq); +} + +/** + * igc_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * @napi_budget: Used to determine if we are in netpoll + * + * returns true if ring is completely cleaned + */ +static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) +{ + struct igc_adapter *adapter = q_vector->adapter; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + struct igc_ring *tx_ring = q_vector->tx.ring; + unsigned int i = tx_ring->next_to_clean; + struct igc_tx_buffer *tx_buffer; + union igc_adv_tx_desc *tx_desc; + u32 xsk_frames = 0; + + if (test_bit(__IGC_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IGC_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + switch (tx_buffer->type) { + case IGC_TX_BUFFER_TYPE_XSK: + xsk_frames++; + break; + case IGC_TX_BUFFER_TYPE_XDP: + 
xdp_return_frame(tx_buffer->xdpf); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + case IGC_TX_BUFFER_TYPE_SKB: + napi_consume_skb(tx_buffer->skb, napi_budget); + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + break; + default: + netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); + break; + } + + /* clear last DMA location and unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGC_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + + igc_update_tx_stats(q_vector, total_packets, total_bytes); + + if (tx_ring->xsk_pool) { + if (xsk_frames) + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) + xsk_set_tx_need_wakeup(tx_ring->xsk_pool); + igc_xdp_xmit_zc(tx_ring); + } + + if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { + struct igc_hw *hw = &adapter->hw; + + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) && + (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) && + !tx_ring->oper_gate_closed) { + /* detected Tx unit hang */ + netdev_err(tx_ring->netdev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%p>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + rd32(IGC_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_buffer->time_stamp, + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + /* we are about to reset, no point in enabling stuff */ + return true; + } + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && + netif_carrier_ok(tx_ring->netdev) && + igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
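+ * The barrier pairs with the one issued right after the queue is
+ * stopped in the transmit path, so the stopped-queue test below
+ * cannot race with a concurrent update of next_to_clean.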
+ */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !(test_bit(__IGC_DOWN, &adapter->state))) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.restart_queue++; + u64_stats_update_end(&tx_ring->tx_syncp); + } + } + + return !!budget; +} + +static int igc_find_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) +{ + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 ral, rah; + int i; + + for (i = 0; i < max_entries; i++) { + ral = rd32(IGC_RAL(i)); + rah = rd32(IGC_RAH(i)); + + if (!(rah & IGC_RAH_AV)) + continue; + if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type) + continue; + if ((rah & IGC_RAH_RAH_MASK) != + le16_to_cpup((__le16 *)(addr + 4))) + continue; + if (ral != le32_to_cpup((__le32 *)(addr))) + continue; + + return i; + } + + return -1; +} + +static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int max_entries = hw->mac.rar_entry_count; + u32 rah; + int i; + + for (i = 0; i < max_entries; i++) { + rah = rd32(IGC_RAH(i)); + + if (!(rah & IGC_RAH_AV)) + return i; + } + + return -1; +} + +/** + * igc_add_mac_filter() - Add MAC address filter + * @adapter: Pointer to adapter where the filter should be added + * @type: MAC address filter type (source or destination) + * @addr: MAC address + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_add_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr, + int queue) +{ + struct net_device *dev = adapter->netdev; + int index; + + index = igc_find_mac_filter(adapter, type, addr); + if (index >= 0) + goto update_filter; + + index = igc_get_avail_mac_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", + index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src", + addr, queue); + +update_filter: + igc_set_mac_filter_hw(adapter, index, type, addr, queue); + return 0; +} + +/** + * igc_del_mac_filter() - Delete MAC address filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @type: MAC address filter type (source or destination) + * @addr: MAC address + */ +static void igc_del_mac_filter(struct igc_adapter *adapter, + enum igc_mac_filter_type type, const u8 *addr) +{ + struct net_device *dev = adapter->netdev; + int index; + + index = igc_find_mac_filter(adapter, type, addr); + if (index < 0) + return; + + if (index == 0) { + /* If this is the default filter, we don't actually delete it. + * We just reset to its default value i.e. disable queue + * assignment. + */ + netdev_dbg(dev, "Disable default MAC filter queue assignment"); + + igc_set_mac_filter_hw(adapter, 0, type, addr, -1); + } else { + netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", + index, + type == IGC_MAC_FILTER_TYPE_DST ? 
"dst" : "src", + addr); + + igc_clear_mac_filter_hw(adapter, index); + } +} + +/** + * igc_add_vlan_prio_filter() - Add VLAN priority filter + * @adapter: Pointer to adapter where the filter should be added + * @prio: VLAN priority value + * @queue: Queue number which matching frames are assigned to + * + * Return: 0 in case of success, negative errno code otherwise. + */ +static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, + int queue) +{ + struct net_device *dev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + if (vlanpqf & IGC_VLANPQF_VALID(prio)) { + netdev_dbg(dev, "VLAN priority filter already in use\n"); + return -EEXIST; + } + + vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); + vlanpqf |= IGC_VLANPQF_VALID(prio); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", + prio, queue); + return 0; +} + +/** + * igc_del_vlan_prio_filter() - Delete VLAN priority filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @prio: VLAN priority value + */ +static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) +{ + struct igc_hw *hw = &adapter->hw; + u32 vlanpqf; + + vlanpqf = rd32(IGC_VLANPQF); + + vlanpqf &= ~IGC_VLANPQF_VALID(prio); + vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); + + wr32(IGC_VLANPQF, vlanpqf); + + netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", + prio); +} + +static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if (!(etqf & IGC_ETQF_FILTER_ENABLE)) + return i; + } + + return -1; +} + +/** + * igc_add_etype_filter() - Add ethertype filter + * @adapter: Pointer to adapter where the filter should be added + * @etype: Ethertype value + * @queue: If non-negative, queue assignment feature is enabled and frames + * matching the filter are enqueued onto 'queue'. Otherwise, queue + * assignment is disabled. + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, + int queue) +{ + struct igc_hw *hw = &adapter->hw; + int index; + u32 etqf; + + index = igc_get_avail_etype_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + etqf = rd32(IGC_ETQF(index)); + + etqf &= ~IGC_ETQF_ETYPE_MASK; + etqf |= etype; + + if (queue >= 0) { + etqf &= ~IGC_ETQF_QUEUE_MASK; + etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); + etqf |= IGC_ETQF_QUEUE_ENABLE; + } + + etqf |= IGC_ETQF_FILTER_ENABLE; + + wr32(IGC_ETQF(index), etqf); + + netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", + etype, queue); + return 0; +} + +static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < MAX_ETYPE_FILTER; i++) { + u32 etqf = rd32(IGC_ETQF(i)); + + if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) + return i; + } + + return -1; +} + +/** + * igc_del_etype_filter() - Delete ethertype filter + * @adapter: Pointer to adapter where the filter should be deleted from + * @etype: Ethertype value + */ +static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) +{ + struct igc_hw *hw = &adapter->hw; + int index; + + index = igc_find_etype_filter(adapter, etype); + if (index < 0) + return; + + wr32(IGC_ETQF(index), 0); + + netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", + etype); +} + +static int igc_flex_filter_select(struct igc_adapter *adapter, + struct igc_flex_filter *input, + u32 *fhft) +{ + struct igc_hw *hw = &adapter->hw; + u8 fhft_index; + u32 fhftsl; + + if (input->index >= MAX_FLEX_FILTER) { + dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); + return -EINVAL; + } + + /* Indirect table select register */ + fhftsl = rd32(IGC_FHFTSL); + fhftsl &= ~IGC_FHFTSL_FTSL_MASK; + switch (input->index) { + case 0 ... 7: + fhftsl |= 0x00; + break; + case 8 ... 15: + fhftsl |= 0x01; + break; + case 16 ... 23: + fhftsl |= 0x02; + break; + case 24 ... 31: + fhftsl |= 0x03; + break; + } + wr32(IGC_FHFTSL, fhftsl); + + /* Normalize index down to host table register */ + fhft_index = input->index % 8; + + *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : + IGC_FHFT_EXT(fhft_index - 4); + + return 0; +} + +static int igc_write_flex_filter_ll(struct igc_adapter *adapter, + struct igc_flex_filter *input) +{ + struct device *dev = &adapter->pdev->dev; + struct igc_hw *hw = &adapter->hw; + u8 *data = input->data; + u8 *mask = input->mask; + u32 queuing; + u32 fhft; + u32 wufc; + int ret; + int i; + + /* Length has to be aligned to 8. Otherwise the filter will fail. Bail + * out early to avoid surprises later. + */ + if (input->length % 8 != 0) { + dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n"); + return -EINVAL; + } + + /* Select corresponding flex filter register and get base for host table. */ + ret = igc_flex_filter_select(adapter, input, &fhft); + if (ret) + return ret; + + /* When adding a filter globally disable flex filter feature. That is + * recommended within the datasheet. 
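+ * The flex filter queuing feature is re-enabled further below, once the
+ * filter data, mask and queuing options have been written.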
+ */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); + + /* Configure filter */ + queuing = input->length & IGC_FHFT_LENGTH_MASK; + queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK; + queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK; + + if (input->immediate_irq) + queuing |= IGC_FHFT_IMM_INT; + + if (input->drop) + queuing |= IGC_FHFT_DROP; + + wr32(fhft + 0xFC, queuing); + + /* Write data (128 byte) and mask (128 bit) */ + for (i = 0; i < 16; ++i) { + const size_t data_idx = i * 8; + const size_t row_idx = i * 16; + u32 dw0 = + (data[data_idx + 0] << 0) | + (data[data_idx + 1] << 8) | + (data[data_idx + 2] << 16) | + (data[data_idx + 3] << 24); + u32 dw1 = + (data[data_idx + 4] << 0) | + (data[data_idx + 5] << 8) | + (data[data_idx + 6] << 16) | + (data[data_idx + 7] << 24); + u32 tmp; + + /* Write row: dw0, dw1 and mask */ + wr32(fhft + row_idx, dw0); + wr32(fhft + row_idx + 4, dw1); + + /* mask is only valid for MASK(7, 0) */ + tmp = rd32(fhft + row_idx + 8); + tmp &= ~GENMASK(7, 0); + tmp |= mask[i]; + wr32(fhft + row_idx + 8, tmp); + } + + /* Enable filter. */ + wufc |= IGC_WUFC_FLEX_HQ; + if (input->index > 8) { + /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */ + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); + + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc |= (IGC_WUFC_FLX0 << input->index); + } + wr32(IGC_WUFC, wufc); + + dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", + input->index); + + return 0; +} + +static void igc_flex_filter_add_field(struct igc_flex_filter *flex, + const void *src, unsigned int offset, + size_t len, const void *mask) +{ + int i; + + /* data */ + memcpy(&flex->data[offset], src, len); + + /* mask */ + for (i = 0; i < len; ++i) { + const unsigned int idx = i + offset; + const u8 *ptr = mask; + + if (mask) { + if (ptr[i] & 0xff) + flex->mask[idx / 8] |= BIT(idx % 8); + + continue; + } + + flex->mask[idx / 8] |= BIT(idx % 8); + } +} + +static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + int i; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + for (i = 0; i < MAX_FLEX_FILTER; i++) { + if (i < 8) { + if (!(wufc & (IGC_WUFC_FLX0 << i))) + return i; + } else { + if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) + return i; + } + } + + return -ENOSPC; +} + +static bool igc_flex_filter_in_use(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + if (wufc & IGC_WUFC_FILTER_MASK) + return true; + + if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) + return true; + + return false; +} + +static int igc_add_flex_filter(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct igc_flex_filter flex = { }; + struct igc_nfc_filter *filter = &rule->filter; + unsigned int eth_offset, user_offset; + int ret, index; + bool vlan; + + index = igc_find_avail_flex_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + /* Construct the flex filter: + * -> dest_mac [6] + * -> src_mac [6] + * -> tpid [2] + * -> vlan tci [2] + * -> ether type [2] + * -> user data [8] + * -> = 26 bytes => 32 length + */ + flex.index = index; + flex.length = 32; + flex.rx_queue = rule->action; + + vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; + eth_offset = vlan ? 16 : 12; + user_offset = vlan ? 
18 : 14; + + /* Add destination MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, + ETH_ALEN, NULL); + + /* Add source MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->src_addr, 6, + ETH_ALEN, NULL); + + /* Add VLAN etype */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) + igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, + sizeof(filter->vlan_etype), + NULL); + + /* Add VLAN TCI */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, + sizeof(filter->vlan_tci), NULL); + + /* Add Ether type */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + __be16 etype = cpu_to_be16(filter->etype); + + igc_flex_filter_add_field(&flex, &etype, eth_offset, + sizeof(etype), NULL); + } + + /* Add user data */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) + igc_flex_filter_add_field(&flex, &filter->user_data, + user_offset, + sizeof(filter->user_data), + filter->user_mask); + + /* Add it down to the hardware and enable it. */ + ret = igc_write_flex_filter_ll(adapter, &flex); + if (ret) + return ret; + + filter->flex_index = index; + + return 0; +} + +static void igc_del_flex_filter(struct igc_adapter *adapter, + u16 reg_index) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc; + + /* Just disable the filter. The filter table itself is kept + * intact. Another flex_filter_add() should override the "old" data + * then. + */ + if (reg_index > 8) { + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc = rd32(IGC_WUFC); + + wufc &= ~(IGC_WUFC_FLX0 << reg_index); + wr32(IGC_WUFC, wufc); + } + + if (igc_flex_filter_in_use(adapter)) + return; + + /* No filters are in use, we may disable flex filters */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); +} + +static int igc_enable_nfc_rule(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + int err; + + if (rule->flex) { + return igc_add_flex_filter(adapter, rule); + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + err = igc_add_etype_filter(adapter, rule->filter.etype, + rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr, rule->action); + if (err) + return err; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + err = igc_add_vlan_prio_filter(adapter, prio, rule->action); + if (err) + return err; + } + + return 0; +} + +static void igc_disable_nfc_rule(struct igc_adapter *adapter, + const struct igc_nfc_rule *rule) +{ + if (rule->flex) { + igc_del_flex_filter(adapter, rule->filter.flex_index); + return; + } + + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) + igc_del_etype_filter(adapter, rule->filter.etype); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; + + igc_del_vlan_prio_filter(adapter, prio); + } + + if (rule->filter.match_flags & 
IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, + rule->filter.src_addr); + + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, + rule->filter.dst_addr); +} + +/** + * igc_get_nfc_rule() - Get NFC rule + * @adapter: Pointer to adapter + * @location: Rule location + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: Pointer to NFC rule at @location. If not found, NULL. + */ +struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, + u32 location) +{ + struct igc_nfc_rule *rule; + + list_for_each_entry(rule, &adapter->nfc_rule_list, list) { + if (rule->location == location) + return rule; + if (rule->location > location) + break; + } + + return NULL; +} + +/** + * igc_del_nfc_rule() - Delete NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be deleted + * + * Disable NFC rule in hardware and delete it from adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + */ +void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + igc_disable_nfc_rule(adapter, rule); + + list_del(&rule->list); + adapter->nfc_rule_count--; + + kfree(rule); +} + +static void igc_flush_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule, *tmp; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) + igc_del_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +/** + * igc_add_nfc_rule() - Add NFC rule + * @adapter: Pointer to adapter + * @rule: Pointer to rule to be added + * + * Enable NFC rule in hardware and add it to adapter. + * + * Context: Expects adapter->nfc_rule_lock to be held by caller. + * + * Return: 0 on success, negative errno on failure. + */ +int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) +{ + struct igc_nfc_rule *pred, *cur; + int err; + + err = igc_enable_nfc_rule(adapter, rule); + if (err) + return err; + + pred = NULL; + list_for_each_entry(cur, &adapter->nfc_rule_list, list) { + if (cur->location >= rule->location) + break; + pred = cur; + } + + list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); + adapter->nfc_rule_count++; + return 0; +} + +static void igc_restore_nfc_rules(struct igc_adapter *adapter) +{ + struct igc_nfc_rule *rule; + + mutex_lock(&adapter->nfc_rule_lock); + + list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) + igc_enable_nfc_rule(adapter, rule); + + mutex_unlock(&adapter->nfc_rule_lock); +} + +static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); +} + +static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr); + return 0; +} + +/** + * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. 
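+ * If the multicast or unicast address lists do not fit into the MTA or
+ * RAR tables, the corresponding promiscuous bit is set instead so that
+ * no traffic is silently dropped.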
+ */ +static void igc_set_rx_mode(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE; + int count; + + /* Check for Promiscuous and All Multicast modes */ + if (netdev->flags & IFF_PROMISC) { + rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE; + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= IGC_RCTL_MPE; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = igc_write_mc_addr_list(netdev); + if (count < 0) + rctl |= IGC_RCTL_MPE; + } + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync)) + rctl |= IGC_RCTL_UPE; + + /* update state of unicast and multicast */ + rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE); + wr32(IGC_RCTL, rctl); + +#if (PAGE_SIZE < 8192) + if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) + rlpml = IGC_MAX_FRAME_BUILD_SKB; +#endif + wr32(IGC_RLPML, rlpml); +} + +/** + * igc_configure - configure the hardware for RX and TX + * @adapter: private board structure + */ +static void igc_configure(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i = 0; + + igc_get_hw_control(adapter); + igc_set_rx_mode(netdev); + + igc_restore_vlan(adapter); + + igc_setup_tctl(adapter); + igc_setup_mrqc(adapter); + igc_setup_rctl(adapter); + + igc_set_default_mac_filter(adapter); + igc_restore_nfc_rules(adapter); + + igc_configure_tx(adapter); + igc_configure_rx(adapter); + + igc_rx_fifo_flush_base(&adapter->hw); + + /* call igc_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + + if (ring->xsk_pool) + igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); + else + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); + } +} + +/** + * igc_write_ivar - configure ivar for given MSI-X vector + * @hw: pointer to the HW structure + * @msix_vector: vector number we are allocating to a given ring + * @index: row index of IVAR register to write within IVAR table + * @offset: column offset of in IVAR, should be multiple of 8 + * + * The IVAR table consists of 2 columns, + * each containing an cause allocation for an Rx and Tx ring, and a + * variable number of rows depending on the number of queues supported. 
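+ * Each entry is 8 bits wide. For example, Rx queue 5 handled by MSI-X
+ * vector 3 ends up in IVAR row 2 (5 >> 1) at bit offset 16
+ * ((5 & 0x1) << 4), with the valid bit set.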
+ */ +static void igc_write_ivar(struct igc_hw *hw, int msix_vector, + int index, int offset) +{ + u32 ivar = array_rd32(IGC_IVAR0, index); + + /* clear any bits that are currently set */ + ivar &= ~((u32)0xFF << offset); + + /* write vector and valid bit */ + ivar |= (msix_vector | IGC_IVAR_VALID) << offset; + + array_wr32(IGC_IVAR0, index, ivar); +} + +static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + int rx_queue = IGC_N0_QUEUE; + int tx_queue = IGC_N0_QUEUE; + + if (q_vector->rx.ring) + rx_queue = q_vector->rx.ring->reg_idx; + if (q_vector->tx.ring) + tx_queue = q_vector->tx.ring->reg_idx; + + switch (hw->mac.type) { + case igc_i225: + if (rx_queue > IGC_N0_QUEUE) + igc_write_ivar(hw, msix_vector, + rx_queue >> 1, + (rx_queue & 0x1) << 4); + if (tx_queue > IGC_N0_QUEUE) + igc_write_ivar(hw, msix_vector, + tx_queue >> 1, + ((tx_queue & 0x1) << 4) + 8); + q_vector->eims_value = BIT(msix_vector); + break; + default: + WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); + break; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + + /* configure q_vector to set itr on first interrupt */ + q_vector->set_itr = 1; +} + +/** + * igc_configure_msix - Configure MSI-X hardware + * @adapter: Pointer to adapter structure + * + * igc_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + */ +static void igc_configure_msix(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i, vector = 0; + u32 tmp; + + adapter->eims_enable_mask = 0; + + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case igc_i225: + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. 
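+ * The vector reserved below as eims_other handles only the "other"
+ * causes such as link status changes; the queue vectors are assigned
+ * afterwards in the loop over adapter->num_q_vectors.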
+ */ + wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | + IGC_GPIE_PBA | IGC_GPIE_EIAME | + IGC_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = BIT(vector); + tmp = (vector++ | IGC_IVAR_VALID) << 8; + + wr32(IGC_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) + igc_assign_vector(adapter->q_vector[i], vector++); + + wrfl(); +} + +/** + * igc_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static void igc_irq_enable(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; + u32 regval = rd32(IGC_EIAC); + + wr32(IGC_EIAC, regval | adapter->eims_enable_mask); + regval = rd32(IGC_EIAM); + wr32(IGC_EIAM, regval | adapter->eims_enable_mask); + wr32(IGC_EIMS, adapter->eims_enable_mask); + wr32(IGC_IMS, ims); + } else { + wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); + } +} + +/** + * igc_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static void igc_irq_disable(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + u32 regval = rd32(IGC_EIAM); + + wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); + wr32(IGC_EIMC, adapter->eims_enable_mask); + regval = rd32(IGC_EIAC); + wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); + } + + wr32(IGC_IAM, 0); + wr32(IGC_IMC, ~0); + wrfl(); + + if (adapter->msix_entries) { + int vector = 0, i; + + synchronize_irq(adapter->msix_entries[vector++].vector); + + for (i = 0; i < adapter->num_q_vectors; i++) + synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues) +{ + /* Determine if we need to pair queues. */ + /* If rss_queues > half of max_rss_queues, pair the queues in + * order to conserve interrupts due to limited supply. + */ + if (adapter->rss_queues > (max_rss_queues / 2)) + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + else + adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; +} + +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) +{ + return IGC_MAX_RX_QUEUES; +} + +static void igc_init_queue_configuration(struct igc_adapter *adapter) +{ + u32 max_rss_queues; + + max_rss_queues = igc_get_max_rss_queues(adapter); + adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + + igc_set_flag_queue_pairs(adapter, max_rss_queues); +} + +/** + * igc_reset_q_vector - Reset config for interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be reset + * + * If NAPI is enabled it will delete any references to the + * NAPI struct. This is preparation for igc_free_q_vector. 
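+ * The adapter's tx_ring and rx_ring pointers that reference this
+ * vector's rings are cleared as well, so stale rings cannot be used
+ * after the interrupt scheme is rebuilt.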
+ */ +static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + /* if we're coming from igc_set_interrupt_capability, the vectors are + * not yet allocated + */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; + + netif_napi_del(&q_vector->napi); +} + +/** + * igc_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + */ +static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) +{ + struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; + + adapter->q_vector[v_idx] = NULL; + + /* igc_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + if (q_vector) + kfree_rcu(q_vector, rcu); +} + +/** + * igc_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + */ +static void igc_free_q_vectors(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) { + igc_reset_q_vector(adapter, v_idx); + igc_free_q_vector(adapter, v_idx); + } +} + +/** + * igc_update_itr - update the dynamic ITR value based on statistics + * @q_vector: pointer to q_vector + * @ring_container: ring info to update the itr for + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * NOTE: These calculations are only valid when operating in a single- + * queue environment. 
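+ * Example: starting from lowest_latency, 4 packets totalling 36000
+ * bytes (9000 bytes per packet, i.e. TSO or jumbo traffic) move the
+ * ring container straight to bulk_latency. The multiqueue case is
+ * handled by igc_update_ring_itr() instead.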
+ */ +static void igc_update_itr(struct igc_q_vector *q_vector, + struct igc_ring_container *ring_container) +{ + unsigned int packets = ring_container->total_packets; + unsigned int bytes = ring_container->total_bytes; + u8 itrval = ring_container->itr; + + /* no packets, exit with status unchanged */ + if (packets == 0) + return; + + switch (itrval) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + itrval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes / packets > 8000) + itrval = bulk_latency; + else if ((packets < 10) || ((bytes / packets) > 1200)) + itrval = bulk_latency; + else if ((packets > 35)) + itrval = lowest_latency; + } else if (bytes / packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + itrval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + itrval = low_latency; + } else if (bytes < 1500) { + itrval = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itrval; +} + +static void igc_set_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + u32 new_itr = q_vector->itr_val; + u8 current_itr = 0; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + current_itr = 0; + new_itr = IGC_4K_ITR; + goto set_itr_now; + default: + break; + } + + igc_update_itr(q_vector, &q_vector->tx); + igc_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (current_itr == lowest_latency && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + current_itr = low_latency; + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ + break; + case low_latency: + new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ + break; + case bulk_latency: + new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ + break; + default: + break; + } + +set_itr_now: + if (new_itr != q_vector->itr_val) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR + * value at the beginning of the next interrupt so the timing + * ends up being correct. 
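+ * The deferred update is performed by igc_write_itr() when the next
+ * interrupt for this vector fires.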
+ */ + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; + } +} + +static void igc_reset_interrupt_capability(struct igc_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & IGC_FLAG_HAS_MSI) { + pci_disable_msi(adapter->pdev); + } + + while (v_idx--) + igc_reset_q_vector(adapter, v_idx); +} + +/** + * igc_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: Pointer to adapter structure + * @msix: boolean value for MSI-X capability + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + */ +static void igc_set_interrupt_capability(struct igc_adapter *adapter, + bool msix) +{ + int numvecs, i; + int err; + + if (!msix) + goto msi_only; + adapter->flags |= IGC_FLAG_HAS_MSIX; + + /* Number of supported queues. */ + adapter->num_rx_queues = adapter->rss_queues; + + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every Rx queue */ + numvecs = adapter->num_rx_queues; + + /* if Tx handler is separate add 1 for every Tx queue */ + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + + adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), + GFP_KERNEL); + + if (!adapter->msix_entries) + return; + + /* populate entry values */ + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) + return; + + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + igc_reset_interrupt_capability(adapter); + +msi_only: + adapter->flags &= ~IGC_FLAG_HAS_MSIX; + + adapter->rss_queues = 1; + adapter->flags |= IGC_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGC_FLAG_HAS_MSI; +} + +/** + * igc_update_ring_itr - update the dynamic ITR value based on packet size + * @q_vector: pointer to q_vector + * + * Stores a new ITR value based on strictly on packet size. This + * algorithm is less sophisticated than that used in igc_update_itr, + * due to the difficulty of synchronizing statistics across multiple + * receive rings. The divisors and thresholds used by this function + * were determined based on theoretical maximum wire speed and testing + * data, in order to minimize response time while increasing bulk + * throughput. + * NOTE: This function is called only when operating in a multiqueue + * receive environment. + */ +static void igc_update_ring_itr(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + int new_val = q_vector->itr_val; + int avg_wire_size = 0; + unsigned int packets; + + /* For non-gigabit speeds, just fix the interrupt rate at 4000 + * ints/sec - ITR timer value of 120 ticks. 
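+ * In that case the avg_wire_size heuristic below is skipped and only
+ * the byte and packet counters are cleared.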
+ */ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + new_val = IGC_4K_ITR; + goto set_itr_val; + default: + break; + } + + packets = q_vector->rx.total_packets; + if (packets) + avg_wire_size = q_vector->rx.total_bytes / packets; + + packets = q_vector->tx.total_packets; + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + q_vector->tx.total_bytes / packets); + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); + + /* Give a little boost to mid-size frames */ + if (avg_wire_size > 300 && avg_wire_size < 1200) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (new_val < IGC_20K_ITR && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + new_val = IGC_20K_ITR; + +set_itr_val: + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; + } +clear_counts: + q_vector->rx.total_bytes = 0; + q_vector->rx.total_packets = 0; + q_vector->tx.total_bytes = 0; + q_vector->tx.total_packets = 0; +} + +static void igc_ring_irq_enable(struct igc_q_vector *q_vector) +{ + struct igc_adapter *adapter = q_vector->adapter; + struct igc_hw *hw = &adapter->hw; + + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { + if (adapter->num_q_vectors == 1) + igc_set_itr(q_vector); + else + igc_update_ring_itr(q_vector); + } + + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->msix_entries) + wr32(IGC_EIMS, q_vector->eims_value); + else + igc_irq_enable(adapter); + } +} + +static void igc_add_ring(struct igc_ring *ring, + struct igc_ring_container *head) +{ + head->ring = ring; + head->count++; +} + +/** + * igc_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + */ +static void igc_cache_ring_register(struct igc_adapter *adapter) +{ + int i = 0, j = 0; + + switch (adapter->hw.mac.type) { + case igc_i225: + default: + for (; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (; j < adapter->num_tx_queues; j++) + adapter->tx_ring[j]->reg_idx = j; + break; + } +} + +/** + * igc_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + */ +static int igc_poll(struct napi_struct *napi, int budget) +{ + struct igc_q_vector *q_vector = container_of(napi, + struct igc_q_vector, + napi); + struct igc_ring *rx_ring = q_vector->rx.ring; + bool clean_complete = true; + int work_done = 0; + + if (q_vector->tx.ring) + clean_complete = igc_clean_tx_irq(q_vector, budget); + + if (rx_ring) { + int cleaned = rx_ring->xsk_pool ? 
+ igc_clean_rx_irq_zc(q_vector, budget) : + igc_clean_rx_irq(q_vector, budget); + + work_done += cleaned; + if (cleaned >= budget) + clean_complete = false; + } + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + igc_ring_irq_enable(q_vector); + + return min(work_done, budget - 1); +} + +/** + * igc_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + */ +static int igc_alloc_q_vector(struct igc_adapter *adapter, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct igc_q_vector *q_vector; + struct igc_ring *ring; + int ring_count; + + /* igc only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; + if (!q_vector) + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), + GFP_KERNEL); + else + memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ + q_vector->itr_register = adapter->io_addr + IGC_EITR(0); + q_vector->itr_val = IGC_START_ITR; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* initialize ITR */ + if (rxr_count) { + /* rx or rx/tx vector */ + if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) + q_vector->itr_val = adapter->rx_itr_setting; + } else { + /* tx only vector */ + if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) + q_vector->itr_val = adapter->tx_itr_setting; + } + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + igc_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + igc_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; +} + +/** + * 
igc_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + */ +static int igc_alloc_q_vectors(struct igc_adapter *adapter) +{ + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int q_vectors = adapter->num_q_vectors; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = igc_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igc_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * @adapter: Pointer to adapter structure + * @msix: boolean for MSI-X capability + * + * This function initializes the interrupts and allocates all of the queues. + */ +static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) +{ + struct net_device *dev = adapter->netdev; + int err = 0; + + igc_set_interrupt_capability(adapter, msix); + + err = igc_alloc_q_vectors(adapter); + if (err) { + netdev_err(dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + igc_cache_ring_register(adapter); + + return 0; + +err_alloc_q_vectors: + igc_reset_interrupt_capability(adapter); + return err; +} + +/** + * igc_sw_init - Initialize general software structures (struct igc_adapter) + * @adapter: board private structure to initialize + * + * igc_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
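+ * The locks protecting the NFC rule list, the 64-bit statistics and the
+ * qbv transmit path are initialised here as well.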
+ */ +static int igc_sw_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGC_DEFAULT_TXD; + adapter->rx_ring_count = IGC_DEFAULT_RXD; + + /* set default ITR values */ + adapter->rx_itr_setting = IGC_DEFAULT_ITR; + adapter->tx_itr_setting = IGC_DEFAULT_ITR; + + /* set default work limits */ + adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; + + /* adjust max frame to be at least the size of a standard frame */ + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + mutex_init(&adapter->nfc_rule_lock); + INIT_LIST_HEAD(&adapter->nfc_rule_list); + adapter->nfc_rule_count = 0; + + spin_lock_init(&adapter->stats64_lock); + spin_lock_init(&adapter->qbv_tx_lock); + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ + adapter->flags |= IGC_FLAG_HAS_MSIX; + + igc_init_queue_configuration(adapter); + + /* This call may decrease the number of queues */ + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igc_irq_disable(adapter); + + set_bit(__IGC_DOWN, &adapter->state); + + return 0; +} + +/** + * igc_up - Open the interface and prepare it to handle traffic + * @adapter: board private structure + */ +void igc_up(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i = 0; + + /* hardware has been reset, we need to reload some things */ + igc_configure(adapter); + + clear_bit(__IGC_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + + if (adapter->msix_entries) + igc_configure_msix(adapter); + else + igc_assign_vector(adapter->q_vector[0], 0); + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + netif_tx_start_all_queues(adapter->netdev); + + /* start the watchdog. */ + hw->mac.get_link_status = true; + schedule_work(&adapter->watchdog_task); +} + +/** + * igc_update_stats - Update the board statistics counters + * @adapter: board private structure + */ +void igc_update_stats(struct igc_adapter *adapter) +{ + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + u64 _bytes, _packets; + u64 bytes, packets; + unsigned int start; + u32 mpc; + int i; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
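+ * link_speed is zeroed in igc_down(), so a reset in progress is caught
+ * by the first check without taking any additional locks.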
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + packets = 0; + bytes = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(IGC_RQDPC(i)); + + if (hw->mac.type >= igc_i225) + wr32(IGC_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + packets = 0; + bytes = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + do { + start = u64_stats_fetch_begin(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(IGC_CRCERRS); + adapter->stats.gprc += rd32(IGC_GPRC); + adapter->stats.gorc += rd32(IGC_GORCL); + rd32(IGC_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(IGC_BPRC); + adapter->stats.mprc += rd32(IGC_MPRC); + adapter->stats.roc += rd32(IGC_ROC); + + adapter->stats.prc64 += rd32(IGC_PRC64); + adapter->stats.prc127 += rd32(IGC_PRC127); + adapter->stats.prc255 += rd32(IGC_PRC255); + adapter->stats.prc511 += rd32(IGC_PRC511); + adapter->stats.prc1023 += rd32(IGC_PRC1023); + adapter->stats.prc1522 += rd32(IGC_PRC1522); + adapter->stats.tlpic += rd32(IGC_TLPIC); + adapter->stats.rlpic += rd32(IGC_RLPIC); + adapter->stats.hgptc += rd32(IGC_HGPTC); + + mpc = rd32(IGC_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(IGC_SCC); + adapter->stats.ecol += rd32(IGC_ECOL); + adapter->stats.mcc += rd32(IGC_MCC); + adapter->stats.latecol += rd32(IGC_LATECOL); + adapter->stats.dc += rd32(IGC_DC); + adapter->stats.rlec += rd32(IGC_RLEC); + adapter->stats.xonrxc += rd32(IGC_XONRXC); + adapter->stats.xontxc += rd32(IGC_XONTXC); + adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); + adapter->stats.xofftxc += rd32(IGC_XOFFTXC); + adapter->stats.fcruc += rd32(IGC_FCRUC); + adapter->stats.gptc += rd32(IGC_GPTC); + adapter->stats.gotc += rd32(IGC_GOTCL); + rd32(IGC_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(IGC_RNBC); + adapter->stats.ruc += rd32(IGC_RUC); + adapter->stats.rfc += rd32(IGC_RFC); + adapter->stats.rjc += rd32(IGC_RJC); + adapter->stats.tor += rd32(IGC_TORH); + adapter->stats.tot += rd32(IGC_TOTH); + adapter->stats.tpr += rd32(IGC_TPR); + + adapter->stats.ptc64 += rd32(IGC_PTC64); + adapter->stats.ptc127 += rd32(IGC_PTC127); + adapter->stats.ptc255 += rd32(IGC_PTC255); + adapter->stats.ptc511 += rd32(IGC_PTC511); + adapter->stats.ptc1023 += rd32(IGC_PTC1023); + adapter->stats.ptc1522 += rd32(IGC_PTC1522); + + adapter->stats.mptc += rd32(IGC_MPTC); + adapter->stats.bptc += rd32(IGC_BPTC); + + adapter->stats.tpt += rd32(IGC_TPT); + adapter->stats.colc += rd32(IGC_COLC); + adapter->stats.colc += rd32(IGC_RERC); + + adapter->stats.algnerrc += rd32(IGC_ALGNERRC); + + adapter->stats.tsctc += rd32(IGC_TSCTC); + + adapter->stats.iac += rd32(IGC_IAC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = 
adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped */ + net_stats->tx_dropped = adapter->stats.txdrop; + + /* Management Stats */ + adapter->stats.mgptc += rd32(IGC_MGTPTC); + adapter->stats.mgprc += rd32(IGC_MGTPRC); + adapter->stats.mgpdc += rd32(IGC_MGTPDC); +} + +/** + * igc_down - Close the interface + * @adapter: board private structure + */ +void igc_down(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + u32 tctl, rctl; + int i = 0; + + set_bit(__IGC_DOWN, &adapter->state); + + igc_ptp_suspend(adapter); + + if (pci_device_is_present(adapter->pdev)) { + /* disable receives in the hardware */ + rctl = rd32(IGC_RCTL); + wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); + /* flush and sleep below */ + } + /* set trans_start so we don't get spurious watchdogs during reset */ + netif_trans_update(netdev); + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + if (pci_device_is_present(adapter->pdev)) { + /* disable transmits in the hardware */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_EN; + wr32(IGC_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 20000); + + igc_irq_disable(adapter); + } + + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + + for (i = 0; i < adapter->num_q_vectors; i++) { + if (adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } + } + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + /* record the stats before reset*/ + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + igc_reset(adapter); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; + + igc_disable_all_tx_rings_hw(adapter); + igc_clean_all_tx_rings(adapter); + igc_clean_all_rx_rings(adapter); +} + +void igc_reinit_locked(struct igc_adapter *adapter) +{ + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + igc_down(adapter); + igc_up(adapter); + clear_bit(__IGC_RESETTING, &adapter->state); +} + +static void igc_reset_task(struct work_struct *work) +{ + struct igc_adapter *adapter; + + adapter = container_of(work, struct igc_adapter, reset_task); + + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGC_DOWN, &adapter->state) || + test_bit(__IGC_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + + igc_rings_dump(adapter); + igc_regs_dump(adapter); + 
netdev_err(adapter->netdev, "Reset adapter\n"); + igc_reinit_locked(adapter); + rtnl_unlock(); +} + +/** + * igc_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int igc_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct igc_adapter *adapter = netdev_priv(netdev); + + if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { + netdev_dbg(netdev, "Jumbo frames not supported with XDP"); + return -EINVAL; + } + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + /* igc_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + + if (netif_running(netdev)) + igc_down(adapter); + + netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igc_up(adapter); + else + igc_reset(adapter); + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +/** + * igc_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * @txqueue: queue number that timed out + **/ +static void igc_tx_timeout(struct net_device *netdev, + unsigned int __always_unused txqueue) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + wr32(IGC_EICS, + (adapter->eims_enable_mask & ~adapter->eims_other)); +} + +/** + * igc_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + * + * Returns the address of the device statistics structure. + * The statistics are updated here and also from the timer callback. + */ +static void igc_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + spin_lock(&adapter->stats64_lock); + if (!test_bit(__IGC_RESETTING, &adapter->state)) + igc_update_stats(adapter); + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); +} + +static netdev_features_t igc_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. 
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int igc_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct igc_adapter *adapter = netdev_priv(netdev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + igc_vlan_mode(netdev, features); + + /* Add VLAN support */ + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + if (!(features & NETIF_F_NTUPLE)) + igc_flush_nfc_rules(adapter); + + netdev->features = features; + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + + return 1; +} + +static netdev_features_t +igc_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPv4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + +static void igc_tsync_interrupt(struct igc_adapter *adapter) +{ + u32 ack, tsauxc, sec, nsec, tsicr; + struct igc_hw *hw = &adapter->hw; + struct ptp_clock_event event; + struct timespec64 ts; + + tsicr = rd32(IGC_TSICR); + ack = 0; + + if (tsicr & IGC_TSICR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_SYS_WRAP; + } + + if (tsicr & IGC_TSICR_TXTS) { + /* retrieve hardware timestamp */ + igc_ptp_tx_tstamp_event(adapter); + ack |= IGC_TSICR_TXTS; + } + + if (tsicr & IGC_TSICR_TT0) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[0].start, + adapter->perout[0].period); + wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT0; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[0].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT0; + } + + if (tsicr & IGC_TSICR_TT1) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[1].start, + adapter->perout[1].period); + wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); + wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); + tsauxc = rd32(IGC_TSAUXC); + tsauxc |= IGC_TSAUXC_EN_TT1; + wr32(IGC_TSAUXC, tsauxc); + adapter->perout[1].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= IGC_TSICR_TT1; + } + + if (tsicr & IGC_TSICR_AUTT0) { + nsec = rd32(IGC_AUXSTMPL0); + sec = rd32(IGC_AUXSTMPH0); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = sec * NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT0; + } + + if (tsicr & IGC_TSICR_AUTT1) { + nsec = rd32(IGC_AUXSTMPL1); + sec = rd32(IGC_AUXSTMPH1); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = sec * 
NSEC_PER_SEC + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= IGC_TSICR_AUTT1; + } + + /* acknowledge the interrupts */ + wr32(IGC_TSICR, ack); +} + +/** + * igc_msix_other - msix other interrupt handler + * @irq: interrupt number + * @data: pointer to a q_vector + */ +static irqreturn_t igc_msix_other(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_hw *hw = &adapter->hw; + u32 icr = rd32(IGC_ICR); + + /* reading ICR causes bit 31 of EICR to be cleared */ + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & IGC_ICR_LSC) { + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + wr32(IGC_EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static void igc_write_itr(struct igc_q_vector *q_vector) +{ + u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; + + if (!q_vector->set_itr) + return; + + if (!itr_val) + itr_val = IGC_ITR_VAL_MASK; + + itr_val |= IGC_EITR_CNT_IGNR; + + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; +} + +static irqreturn_t igc_msix_ring(int irq, void *data) +{ + struct igc_q_vector *q_vector = data; + + /* Write the ITR value calculated from the previous interrupt. */ + igc_write_itr(q_vector); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_request_msix - Initialize MSI-X interrupts + * @adapter: Pointer to adapter structure + * + * igc_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. 
+ */ +static int igc_request_msix(struct igc_adapter *adapter) +{ + unsigned int num_q_vectors = adapter->num_q_vectors; + int i = 0, err = 0, vector = 0, free_vector = 0; + struct net_device *netdev = adapter->netdev; + + err = request_irq(adapter->msix_entries[vector].vector, + &igc_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + vector++; + + q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else if (q_vector->tx.ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx.ring->queue_index); + else if (q_vector->rx.ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else + sprintf(q_vector->name, "%s-unused", netdev->name); + + err = request_irq(adapter->msix_entries[vector].vector, + igc_msix_ring, 0, q_vector->name, + q_vector); + if (err) + goto err_free; + } + + igc_configure_msix(adapter); + return 0; + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: + return err; +} + +/** + * igc_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: Pointer to adapter structure + * + * This function resets the device so that it has 0 rx queues, tx queues, and + * MSI-X interrupts allocated. + */ +static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) +{ + igc_free_q_vectors(adapter); + igc_reset_interrupt_capability(adapter); +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void igc_update_phy_info(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); + + igc_get_phy_info(&adapter->hw); +} + +/** + * igc_has_link - check shared code for link and determine up/down + * @adapter: pointer to driver private info + */ +bool igc_has_link(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay + * false until the igc_check_for_link establishes link + * for copper adapters ONLY + */ + if (!hw->mac.get_link_status) + return true; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + + if (hw->mac.type == igc_i225) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + + return link_active; +} + +/** + * igc_watchdog - Timer Call-back + * @t: timer for the watchdog + */ +static void igc_watchdog(struct timer_list *t) +{ + struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igc_watchdog_task(struct work_struct *work) +{ + struct igc_adapter *adapter = container_of(work, + struct igc_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + struct igc_phy_info *phy = &hw->phy; + u16 phy_data, retry_count = 20; + u32 link; + int i; + + link = igc_has_link(adapter); + + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + + if (link) { + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = rd32(IGC_CTRL); + /* Link status message must follow this format */ + netdev_info(netdev, + "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full" : "Half", + (ctrl & IGC_CTRL_TFCE) && + (ctrl & IGC_CTRL_RFCE) ? "RX/TX" : + (ctrl & IGC_CTRL_RFCE) ? "RX" : + (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); + + /* disable EEE if enabled */ + if ((adapter->flags & IGC_FLAG_EEE) && + adapter->link_duplex == HALF_DUPLEX) { + netdev_info(netdev, + "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n"); + adapter->hw.dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + } + + /* check if SmartSpeed worked */ + igc_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + case SPEED_1000: + case SPEED_2500: + adapter->tx_timeout_factor = 1; + break; + } + + /* Once the launch time has been set on the wire, there + * is a delay before the link speed can be determined + * based on link-up activity. Write into the register + * as soon as we know the correct link speed. 
+ */ + igc_tsn_adjust_txtime_offset(adapter); + + if (adapter->link_speed != SPEED_1000) + goto no_wait; + + /* wait for Remote receiver status OK */ +retry_read_status: + if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data)) { + if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && + retry_count) { + msleep(100); + retry_count--; + goto retry_read_status; + } else if (!retry_count) { + netdev_err(netdev, "exceed max 2 second\n"); + } + } else { + netdev_err(netdev, "read 1000Base-T Status Reg\n"); + } +no_wait: + netif_carrier_on(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* Links status message must follow this format */ + netdev_info(netdev, "NIC Link is Down\n"); + netif_carrier_off(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + pm_schedule_suspend(netdev->dev.parent, + MSEC_PER_SEC * 5); + } + } + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + } + + /* Force detection of hung controller every watchdog period */ + set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + u32 eics = 0; + + for (i = 0; i < adapter->num_q_vectors; i++) + eics |= adapter->q_vector[i]->eims_value; + wr32(IGC_EICS, eics); + } else { + wr32(IGC_ICS, IGC_ICS_RXDMT0); + } + + igc_ptp_tx_hang(adapter); + + /* Reset the timer */ + if (!test_bit(__IGC_DOWN, &adapter->state)) { + if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + HZ)); + else + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); + } +} + +/** + * igc_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr_msi(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ + u32 icr = rd32(IGC_ICR); + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = true; + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igc_intr - Legacy Interrupt Handler + * @irq: 
interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t igc_intr(int irq, void *data) +{ + struct igc_adapter *adapter = data; + struct igc_q_vector *q_vector = adapter->q_vector[0]; + struct igc_hw *hw = &adapter->hw; + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No + * need for the IMC write + */ + u32 icr = rd32(IGC_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & IGC_ICR_INT_ASSERTED)) + return IRQ_NONE; + + igc_write_itr(q_vector); + + if (icr & IGC_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & IGC_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGC_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void igc_free_irq(struct igc_adapter *adapter) +{ + if (adapter->msix_entries) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) + free_irq(adapter->msix_entries[vector++].vector, + adapter->q_vector[i]); + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * igc_request_irq - initialize interrupts + * @adapter: Pointer to adapter structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + */ +static int igc_request_irq(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + err = igc_request_msix(adapter); + if (!err) + goto request_done; + /* fall back to MSI */ + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + igc_clear_interrupt_scheme(adapter); + err = igc_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; + igc_setup_all_tx_resources(adapter); + igc_setup_all_rx_resources(adapter); + igc_configure(adapter); + } + + igc_assign_vector(adapter->q_vector[0], 0); + + if (adapter->flags & IGC_FLAG_HAS_MSI) { + err = request_irq(pdev->irq, &igc_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; + + /* fall back to legacy interrupts */ + igc_reset_interrupt_capability(adapter); + adapter->flags &= ~IGC_FLAG_HAS_MSI; + } + + err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + netdev_err(netdev, "Error %d getting interrupt\n", err); + +request_done: + return err; +} + +/** + * __igc_open - Called when a network interface is made active + * @netdev: network interface device structure + * @resuming: boolean indicating if the device is resuming + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
+ */ +static int __igc_open(struct net_device *netdev, bool resuming) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + int err = 0; + int i = 0; + + /* disallow open during test */ + + if (test_bit(__IGC_TESTING, &adapter->state)) { + WARN_ON(resuming); + return -EBUSY; + } + + if (!resuming) + pm_runtime_get_sync(&pdev->dev); + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = igc_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igc_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + igc_power_up_link(adapter); + + igc_configure(adapter); + + err = igc_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + + clear_bit(__IGC_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&adapter->q_vector[i]->napi); + + /* Clear any pending interrupts. */ + rd32(IGC_ICR); + igc_irq_enable(adapter); + + if (!resuming) + pm_runtime_put(&pdev->dev); + + netif_tx_start_all_queues(netdev); + + /* start the watchdog. */ + hw->mac.get_link_status = true; + schedule_work(&adapter->watchdog_task); + + return IGC_SUCCESS; + +err_set_queues: + igc_free_irq(adapter); +err_req_irq: + igc_release_hw_control(adapter); + igc_power_down_phy_copper_base(&adapter->hw); + igc_free_all_rx_resources(adapter); +err_setup_rx: + igc_free_all_tx_resources(adapter); +err_setup_tx: + igc_reset(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); + + return err; +} + +int igc_open(struct net_device *netdev) +{ + return __igc_open(netdev, false); +} + +/** + * __igc_close - Disables a network interface + * @netdev: network interface device structure + * @suspending: boolean indicating the device is suspending + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ */ +static int __igc_close(struct net_device *netdev, bool suspending) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); + + if (!suspending) + pm_runtime_get_sync(&pdev->dev); + + igc_down(adapter); + + igc_release_hw_control(adapter); + + igc_free_irq(adapter); + + igc_free_all_tx_resources(adapter); + igc_free_all_rx_resources(adapter); + + if (!suspending) + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +int igc_close(struct net_device *netdev) +{ + if (netif_device_present(netdev) || netdev->dismantle) + return __igc_close(netdev, false); + return 0; +} + +/** + * igc_ioctl - Access the hwtstamp interface + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command + **/ +static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGHWTSTAMP: + return igc_ptp_get_ts_config(netdev, ifr); + case SIOCSHWTSTAMP: + return igc_ptp_set_ts_config(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, + bool enable) +{ + struct igc_ring *ring; + + if (queue < 0 || queue >= adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + ring->launchtime_enable = enable; + + return 0; +} + +static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) +{ + struct timespec64 b; + + b = ktime_to_timespec64(base_time); + + return timespec64_compare(now, &b) > 0; +} + +static bool validate_schedule(struct igc_adapter *adapter, + const struct tc_taprio_qopt_offload *qopt) +{ + int queue_uses[IGC_MAX_TX_QUEUES] = { }; + struct igc_hw *hw = &adapter->hw; + struct timespec64 now; + size_t n; + + if (qopt->cycle_time_extension) + return false; + + igc_ptp_read(adapter, &now); + + /* If we program the controller's BASET registers with a time + * in the future, it will hold all the packets until that + * time, causing a lot of TX Hangs, so to avoid that, we + * reject schedules that would start in the future. + * Note: Limitation above is no longer in i226. + */ + if (!is_base_time_past(qopt->base_time, &now) && + igc_is_device_id_i225(hw)) + return false; + + for (n = 0; n < qopt->num_entries; n++) { + const struct tc_taprio_sched_entry *e, *prev; + int i; + + prev = n ? &qopt->entries[n - 1] : NULL; + e = &qopt->entries[n]; + + /* i225 only supports "global" frame preemption + * settings. + */ + if (e->command != TC_TAPRIO_CMD_SET_GATES) + return false; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (e->gate_mask & BIT(i)) { + queue_uses[i]++; + + /* There are limitations: A single queue cannot + * be opened and closed multiple times per cycle + * unless the gate stays open. Check for it. 
+ */ + if (queue_uses[i] > 1 && + !(prev->gate_mask & BIT(i))) + return false; + } + } + + return true; +} + +static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, + struct tc_etf_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_qbv_clear_schedule(struct igc_adapter *adapter) +{ + unsigned long flags; + int i; + + adapter->base_time = 0; + adapter->cycle_time = NSEC_PER_SEC; + adapter->taprio_offload_enable = false; + adapter->qbv_config_change_errors = 0; + adapter->qbv_count = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = 0; + ring->end_time = NSEC_PER_SEC; + ring->max_sdu = 0; + } + + spin_lock_irqsave(&adapter->qbv_tx_lock, flags); + + adapter->qbv_transition = false; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->oper_gate_closed = false; + ring->admin_gate_closed = false; + } + + spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); + + return 0; +} + +static int igc_tsn_clear_schedule(struct igc_adapter *adapter) +{ + igc_qbv_clear_schedule(adapter); + + return 0; +} + +static void igc_taprio_stats(struct net_device *dev, + struct tc_taprio_qopt_stats *stats) +{ + /* When Strict_End is enabled, the tx_overruns counter + * will always be zero. + */ + stats->tx_overruns = 0; +} + +static void igc_taprio_queue_stats(struct net_device *dev, + struct tc_taprio_qopt_queue_stats *queue_stats) +{ + struct tc_taprio_qopt_stats *stats = &queue_stats->stats; + + /* When Strict_End is enabled, the tx_overruns counter + * will always be zero. + */ + stats->tx_overruns = 0; +} + +static int igc_save_qbv_schedule(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + bool queue_configured[IGC_MAX_TX_QUEUES] = { }; + struct igc_hw *hw = &adapter->hw; + u32 start_time = 0, end_time = 0; + struct timespec64 now; + unsigned long flags; + size_t n; + int i; + + switch (qopt->cmd) { + case TAPRIO_CMD_REPLACE: + break; + case TAPRIO_CMD_DESTROY: + return igc_tsn_clear_schedule(adapter); + case TAPRIO_CMD_STATS: + igc_taprio_stats(adapter->netdev, &qopt->stats); + return 0; + case TAPRIO_CMD_QUEUE_STATS: + igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats); + return 0; + default: + return -EOPNOTSUPP; + } + + if (qopt->base_time < 0) + return -ERANGE; + + if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable) + return -EALREADY; + + if (!validate_schedule(adapter, qopt)) + return -EINVAL; + + adapter->cycle_time = qopt->cycle_time; + adapter->base_time = qopt->base_time; + adapter->taprio_offload_enable = true; + + igc_ptp_read(adapter, &now); + + for (n = 0; n < qopt->num_entries; n++) { + struct tc_taprio_sched_entry *e = &qopt->entries[n]; + + end_time += e->interval; + + /* If any of the conditions below are true, we need to manually + * control the end time of the cycle. + * 1. Qbv users can specify a cycle time that is not equal + * to the total GCL intervals. Hence, recalculation is + * necessary here to exclude the time interval that + * exceeds the cycle time. + * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, + * once the end of the list is reached, it will switch + * to the END_OF_CYCLE state and leave the gates in the + * same state until the next cycle is started. 
+ */ + if (end_time > adapter->cycle_time || + n + 1 == qopt->num_entries) + end_time = adapter->cycle_time; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (!(e->gate_mask & BIT(i))) + continue; + + /* Check whether a queue stays open for more than one + * entry. If so, keep the start and advance the end + * time. + */ + if (!queue_configured[i]) + ring->start_time = start_time; + ring->end_time = end_time; + + if (ring->start_time >= adapter->cycle_time) + queue_configured[i] = false; + else + queue_configured[i] = true; + } + + start_time += e->interval; + } + + spin_lock_irqsave(&adapter->qbv_tx_lock, flags); + + /* Check whether a queue gets configured. + * If not, set the start and end time to be end time. + */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (!is_base_time_past(qopt->base_time, &now)) { + ring->admin_gate_closed = false; + } else { + ring->oper_gate_closed = false; + ring->admin_gate_closed = false; + } + + if (!queue_configured[i]) { + if (!is_base_time_past(qopt->base_time, &now)) + ring->admin_gate_closed = true; + else + ring->oper_gate_closed = true; + + ring->start_time = end_time; + ring->end_time = end_time; + } + } + + spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + struct net_device *dev = adapter->netdev; + + if (qopt->max_sdu[i]) + ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN; + else + ring->max_sdu = 0; + } + + return 0; +} + +static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + err = igc_save_qbv_schedule(adapter, qopt); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_save_cbs_params(struct igc_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + bool cbs_status[IGC_MAX_SR_QUEUES] = { false }; + struct net_device *netdev = adapter->netdev; + struct igc_ring *ring; + int i; + + /* i225 has two sets of credit-based shaper logic. + * Supporting it only on the top two priority queues + */ + if (queue < 0 || queue > 1) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + for (i = 0; i < IGC_MAX_SR_QUEUES; i++) + if (adapter->tx_ring[i]) + cbs_status[i] = adapter->tx_ring[i]->cbs_enable; + + /* CBS should be enabled on the highest priority queue first in order + * for the CBS algorithm to operate as intended. 
+ */ + if (enable) { + if (queue == 1 && !cbs_status[0]) { + netdev_err(netdev, + "Enabling CBS on queue1 before queue0\n"); + return -EINVAL; + } + } else { + if (queue == 0 && cbs_status[1]) { + netdev_err(netdev, + "Disabling CBS on queue0 before queue1\n"); + return -EINVAL; + } + } + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +static int igc_tsn_enable_cbs(struct igc_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + +static int igc_tc_query_caps(struct igc_adapter *adapter, + struct tc_query_caps_base *base) +{ + struct igc_hw *hw = &adapter->hw; + + switch (base->type) { + case TC_SETUP_QDISC_TAPRIO: { + struct tc_taprio_caps *caps = base->caps; + + caps->broken_mqprio = true; + + if (hw->mac.type == igc_i225) { + caps->supports_queue_max_sdu = true; + caps->gate_mask_per_txq = true; + } + + return 0; + } + default: + return -EOPNOTSUPP; + } +} + +static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + adapter->tc_setup_type = type; + + switch (type) { + case TC_QUERY_CAPS: + return igc_tc_query_caps(adapter, type_data); + case TC_SETUP_QDISC_TAPRIO: + return igc_tsn_enable_qbv_scheduling(adapter, type_data); + + case TC_SETUP_QDISC_ETF: + return igc_tsn_enable_launchtime(adapter, type_data); + + case TC_SETUP_QDISC_CBS: + return igc_tsn_enable_cbs(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); + case XDP_SETUP_XSK_POOL: + return igc_xdp_setup_pool(adapter, bpf->xsk.pool, + bpf->xsk.queue_id); + default: + return -EOPNOTSUPP; + } +} + +static int igc_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct igc_adapter *adapter = netdev_priv(dev); + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + int i, drops; + + if (unlikely(!netif_carrier_ok(dev))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + ring = igc_xdp_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + __netif_tx_lock(nq, cpu); + + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + + drops = 0; + for (i = 0; i < num_frames; i++) { + int err; + struct xdp_frame *xdpf = frames[i]; + + err = igc_xdp_init_tx_descriptor(ring, xdpf); + if (err) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (flags & XDP_XMIT_FLUSH) + igc_flush_tx_descriptors(ring); + + __netif_tx_unlock(nq); + + return num_frames - drops; +} + +static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, + struct igc_q_vector *q_vector) +{ + struct igc_hw *hw = &adapter->hw; + u32 eics = 0; + + eics |= q_vector->eims_value; + wr32(IGC_EICS, eics); +} + +int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) +{ + struct 
igc_adapter *adapter = netdev_priv(dev); + struct igc_q_vector *q_vector; + struct igc_ring *ring; + + if (test_bit(__IGC_DOWN, &adapter->state)) + return -ENETDOWN; + + if (!igc_xdp_is_enabled(adapter)) + return -ENXIO; + + if (queue_id >= adapter->num_rx_queues) + return -EINVAL; + + ring = adapter->rx_ring[queue_id]; + + if (!ring->xsk_pool) + return -ENXIO; + + q_vector = adapter->q_vector[queue_id]; + if (!napi_if_scheduled_mark_missed(&q_vector->napi)) + igc_trigger_rxtxq_interrupt(adapter, q_vector); + + return 0; +} + +static const struct net_device_ops igc_netdev_ops = { + .ndo_open = igc_open, + .ndo_stop = igc_close, + .ndo_start_xmit = igc_xmit_frame, + .ndo_set_rx_mode = igc_set_rx_mode, + .ndo_set_mac_address = igc_set_mac, + .ndo_change_mtu = igc_change_mtu, + .ndo_tx_timeout = igc_tx_timeout, + .ndo_get_stats64 = igc_get_stats64, + .ndo_fix_features = igc_fix_features, + .ndo_set_features = igc_set_features, + .ndo_features_check = igc_features_check, + .ndo_eth_ioctl = igc_ioctl, + .ndo_setup_tc = igc_setup_tc, + .ndo_bpf = igc_bpf, + .ndo_xdp_xmit = igc_xdp_xmit, + .ndo_xsk_wakeup = igc_xsk_wakeup, +}; + +/* PCIe configuration access */ +void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + +s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + if (!pci_is_pcie(adapter->pdev)) + return -IGC_ERR_CONFIG; + + pcie_capability_read_word(adapter->pdev, reg, value); + + return IGC_SUCCESS; +} + +s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) +{ + struct igc_adapter *adapter = hw->back; + + if (!pci_is_pcie(adapter->pdev)) + return -IGC_ERR_CONFIG; + + pcie_capability_write_word(adapter->pdev, reg, *value); + + return IGC_SUCCESS; +} + +u32 igc_rd32(struct igc_hw *hw, u32 reg) +{ + struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); + u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); + u32 value = 0; + + if (IGC_REMOVED(hw_addr)) + return ~value; + + value = readl(&hw_addr[reg]); + + /* reads should not return all F's */ + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct net_device *netdev = igc->netdev; + + hw->hw_addr = NULL; + netif_device_detach(netdev); + netdev_err(netdev, "PCIe link lost, device now detached\n"); + WARN(pci_device_is_present(igc->pdev), + "igc: Failed to read reg 0x%x!\n", reg); + } + + return value; +} + +/* Mapping HW RSS Type to enum xdp_rss_hash_type */ +static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = { + [IGC_RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_L2, + [IGC_RSS_TYPE_HASH_TCP_IPV4] = XDP_RSS_TYPE_L4_IPV4_TCP, + [IGC_RSS_TYPE_HASH_IPV4] = XDP_RSS_TYPE_L3_IPV4, + [IGC_RSS_TYPE_HASH_TCP_IPV6] = XDP_RSS_TYPE_L4_IPV6_TCP, + [IGC_RSS_TYPE_HASH_IPV6_EX] = XDP_RSS_TYPE_L3_IPV6_EX, + [IGC_RSS_TYPE_HASH_IPV6] = XDP_RSS_TYPE_L3_IPV6, + [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX, + [IGC_RSS_TYPE_HASH_UDP_IPV4] = XDP_RSS_TYPE_L4_IPV4_UDP, + [IGC_RSS_TYPE_HASH_UDP_IPV6] = XDP_RSS_TYPE_L4_IPV6_UDP, + [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX, + [10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */ + [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */ + [12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisons 
*/ + [13] = XDP_RSS_TYPE_NONE, + [14] = XDP_RSS_TYPE_NONE, + [15] = XDP_RSS_TYPE_NONE, +}; + +static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type) +{ + const struct igc_xdp_buff *ctx = (void *)_ctx; + + if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)) + return -ENODATA; + + *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss); + *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)]; + + return 0; +} + +static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) +{ + const struct igc_xdp_buff *ctx = (void *)_ctx; + + if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) { + *timestamp = ctx->rx_ts; + + return 0; + } + + return -ENODATA; +} + +static const struct xdp_metadata_ops igc_xdp_metadata_ops = { + .xmo_rx_hash = igc_xdp_rx_hash, + .xmo_rx_timestamp = igc_xdp_rx_timestamp, +}; + +static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer) +{ + struct igc_adapter *adapter = container_of(timer, struct igc_adapter, + hrtimer); + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&adapter->qbv_tx_lock, flags); + + adapter->qbv_transition = true; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->admin_gate_closed) { + tx_ring->admin_gate_closed = false; + tx_ring->oper_gate_closed = true; + } else { + tx_ring->oper_gate_closed = false; + } + } + adapter->qbv_transition = false; + + spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); + + return HRTIMER_NORESTART; +} + +/** + * igc_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igc_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igc_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring the adapter private structure, + * and a hardware reset occur. 
+ */ +static int igc_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct igc_adapter *adapter; + struct net_device *netdev; + struct igc_hw *hw; + const struct igc_info *ei = igc_info_tbl[ent->driver_data]; + int err; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + + err = pci_request_mem_regions(pdev, igc_driver_name); + if (err) + goto err_pci_reg; + + err = pci_enable_ptm(pdev, NULL); + if (err < 0) + dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); + + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), + IGC_MAX_TX_QUEUES); + + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->port_num = hw->bus.func; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + err = pci_save_state(pdev); + if (err) + goto err_ioremap; + + err = -EIO; + adapter->io_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!adapter->io_addr) + goto err_ioremap; + + /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; + + netdev->netdev_ops = &igc_netdev_ops; + netdev->xdp_metadata_ops = &igc_xdp_metadata_ops; + igc_ethtool_set_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Copy the default MAC and PHY function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + + /* Initialize skew-specific constants */ + err = ei->get_invariants(hw); + if (err) + goto err_sw_init; + + /* Add supported features to the features list*/ + netdev->features |= NETIF_F_SG; + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + netdev->features |= NETIF_F_TSO_ECN; + netdev->features |= NETIF_F_RXHASH; + netdev->features |= NETIF_F_RXCSUM; + netdev->features |= NETIF_F_HW_CSUM; + netdev->features |= NETIF_F_SCTP_CRC; + netdev->features |= NETIF_F_HW_TC; + +#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; + + /* setup the private structure */ + err = igc_sw_init(adapter); + if (err) + goto err_sw_init; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + netdev->hw_features |= netdev->features; + + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; + + 
netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY; + + /* MTU range: 68 - 9216 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ + hw->mac.ops.reset_hw(hw); + + if (igc_get_flash_presence_i225(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + } + + eth_hw_addr_set(netdev, hw->mac.addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + /* configure RXPBSIZE and TXPBSIZE */ + wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + + timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); + timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); + + INIT_WORK(&adapter->reset_task, igc_reset_task); + INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); + + hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + adapter->hrtimer.function = &igc_qbv_scheduling_timer; + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = 0xaf; + + hw->fc.requested_mode = igc_fc_default; + hw->fc.current_mode = igc_fc_default; + + /* By default, support wake on port A */ + adapter->flags |= IGC_FLAG_WOL_SUPPORTED; + + /* initialize the wol settings based on the eeprom settings */ + if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) + adapter->wol |= IGC_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, + adapter->flags & IGC_FLAG_WOL_SUPPORTED); + + igc_ptp_init(adapter); + + igc_tsn_clear_schedule(adapter); + + /* reset the hardware with the new settings */ + igc_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igc_get_hw_control(adapter); + + strncpy(netdev->name, "eth%d", IFNAMSIZ); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + /* Check if Media Autosense is enabled */ + adapter->ei = *ei; + + /* print pcie link status and MAC address */ + pcie_print_link_status(pdev); + netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); + + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + /* Disable EEE for internal PHY devices */ + hw->dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + igc_set_eee_i225(hw, false, false, false); + + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +err_register: + igc_release_hw_control(adapter); +err_eeprom: + if (!igc_check_reset_block(hw)) + igc_reset_phy(hw); +err_sw_init: + igc_clear_interrupt_scheme(adapter); + iounmap(adapter->io_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * igc_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igc_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. 
This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void igc_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_get_noresume(&pdev->dev); + + igc_flush_nfc_rules(adapter); + + igc_ptp_stop(adapter); + + pci_disable_ptm(pdev); + pci_clear_master(pdev); + + set_bit(__IGC_DOWN, &adapter->state); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + hrtimer_cancel(&adapter->hrtimer); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igc_release_hw_control(adapter); + unregister_netdev(netdev); + + igc_clear_interrupt_scheme(adapter); + pci_iounmap(pdev, adapter->io_addr); + pci_release_mem_regions(pdev); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol; + struct igc_hw *hw = &adapter->hw; + u32 ctrl, rctl, status; + bool wake; + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) + __igc_close(netdev, true); + + igc_ptp_suspend(adapter); + + igc_clear_interrupt_scheme(adapter); + rtnl_unlock(); + + status = rd32(IGC_STATUS); + if (status & IGC_STATUS_LU) + wufc &= ~IGC_WUFC_LNKC; + + if (wufc) { + igc_setup_rctl(adapter); + igc_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & IGC_WUFC_MC) { + rctl = rd32(IGC_RCTL); + rctl |= IGC_RCTL_MPE; + wr32(IGC_RCTL, rctl); + } + + ctrl = rd32(IGC_CTRL); + ctrl |= IGC_CTRL_ADVD3WUC; + wr32(IGC_CTRL, ctrl); + + /* Allow time for pending master requests to run */ + igc_disable_pcie_master(hw); + + wr32(IGC_WUC, IGC_WUC_PME_EN); + wr32(IGC_WUFC, wufc); + } else { + wr32(IGC_WUC, 0); + wr32(IGC_WUFC, 0); + } + + wake = wufc || adapter->en_mng_pt; + if (!wake) + igc_power_down_phy_copper_base(&adapter->hw); + else + igc_power_up_link(adapter); + + if (enable_wake) + *enable_wake = wake; + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igc_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int __maybe_unused igc_runtime_suspend(struct device *dev) +{ + return __igc_shutdown(to_pci_dev(dev), NULL, 1); +} + +static void igc_deliver_wake_packet(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct sk_buff *skb; + u32 wupl; + + wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK; + + /* WUPM stores only the first 128 bytes of the wake packet. + * Read the packet only if we have the whole thing. 
+ */ + if (wupl == 0 || wupl > IGC_WUPM_BYTES) + return; + + skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES); + if (!skb) + return; + + skb_put(skb, wupl); + + /* Ensure reads are 32-bit aligned */ + wupl = roundup(wupl, 4); + + memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); + + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); +} + +static int __maybe_unused igc_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 err, val; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!pci_device_is_present(pdev)) + return -ENODEV; + err = pci_enable_device_mem(pdev); + if (err) { + netdev_err(netdev, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igc_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igc_get_hw_control(adapter); + + val = rd32(IGC_WUS); + if (val & WAKE_PKT_WUS) + igc_deliver_wake_packet(netdev); + + wr32(IGC_WUS, ~0); + + rtnl_lock(); + if (!err && netif_running(netdev)) + err = __igc_open(netdev, true); + + if (!err) + netif_device_attach(netdev); + rtnl_unlock(); + + return err; +} + +static int __maybe_unused igc_runtime_resume(struct device *dev) +{ + return igc_resume(dev); +} + +static int __maybe_unused igc_suspend(struct device *dev) +{ + return __igc_shutdown(to_pci_dev(dev), NULL, 0); +} + +static int __maybe_unused igc_runtime_idle(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igc_adapter *adapter = netdev_priv(netdev); + + if (!igc_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC * 5); + + return -EBUSY; +} +#endif /* CONFIG_PM */ + +static void igc_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __igc_shutdown(pdev, &wake, 0); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * igc_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current PCI connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + **/ +static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igc_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igc_io_slot_reset - called after the PCI bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the igc_resume routine. 
+ **/ +static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + pci_ers_result_t result; + + if (pci_enable_device_mem(pdev)) { + netdev_err(netdev, "Could not re-enable PCI device after reset\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + /* In case of PCI error, adapter loses its HW address + * so we should re-assign it here. + */ + hw->hw_addr = adapter->io_addr; + + igc_reset(adapter); + wr32(IGC_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + return result; +} + +/** + * igc_io_resume - called when traffic can start to flow again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the igc_resume routine. + */ +static void igc_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igc_adapter *adapter = netdev_priv(netdev); + + rtnl_lock(); + if (netif_running(netdev)) { + if (igc_open(netdev)) { + netdev_err(netdev, "igc_open failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igc_get_hw_control(adapter); + rtnl_unlock(); +} + +static const struct pci_error_handlers igc_err_handler = { + .error_detected = igc_io_error_detected, + .slot_reset = igc_io_slot_reset, + .resume = igc_io_resume, +}; + +#ifdef CONFIG_PM +static const struct dev_pm_ops igc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume) + SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume, + igc_runtime_idle) +}; +#endif + +static struct pci_driver igc_driver = { + .name = igc_driver_name, + .id_table = igc_pci_tbl, + .probe = igc_probe, + .remove = igc_remove, +#ifdef CONFIG_PM + .driver.pm = &igc_pm_ops, +#endif + .shutdown = igc_shutdown, + .err_handler = &igc_err_handler, +}; + +/** + * igc_reinit_queues - return error + * @adapter: pointer to adapter structure + */ +int igc_reinit_queues(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + + if (netif_running(netdev)) + igc_close(netdev); + + igc_reset_interrupt_capability(adapter); + + if (igc_init_interrupt_scheme(adapter, true)) { + netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + err = igc_open(netdev); + + return err; +} + +/** + * igc_get_hw_dev - return device + * @hw: pointer to hardware structure + * + * used by hardware layer to print debugging information + */ +struct net_device *igc_get_hw_dev(struct igc_hw *hw) +{ + struct igc_adapter *adapter = hw->back; + + return adapter->netdev; +} + +static void igc_disable_rx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 rxdctl; + + rxdctl = rd32(IGC_RXDCTL(idx)); + rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE; + rxdctl |= IGC_RXDCTL_SWFLUSH; + wr32(IGC_RXDCTL(idx), rxdctl); +} + +void igc_disable_rx_ring(struct igc_ring *ring) +{ + igc_disable_rx_ring_hw(ring); + igc_clean_rx_ring(ring); +} + +void igc_enable_rx_ring(struct igc_ring *ring) +{ + struct igc_adapter *adapter = ring->q_vector->adapter; + + 
igc_configure_rx_ring(adapter, ring); + + if (ring->xsk_pool) + igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); + else + igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); +} + +void igc_disable_tx_ring(struct igc_ring *ring) +{ + igc_disable_tx_ring_hw(ring); + igc_clean_tx_ring(ring); +} + +void igc_enable_tx_ring(struct igc_ring *ring) +{ + struct igc_adapter *adapter = ring->q_vector->adapter; + + igc_configure_tx_ring(adapter, ring); +} + +/** + * igc_init_module - Driver Registration Routine + * + * igc_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init igc_init_module(void) +{ + int ret; + + pr_info("%s\n", igc_driver_string); + pr_info("%s\n", igc_copyright); + + ret = pci_register_driver(&igc_driver); + return ret; +} + +module_init(igc_init_module); + +/** + * igc_exit_module - Driver Exit Cleanup Routine + * + * igc_exit_module is called just before the driver is removed + * from memory. + */ +static void __exit igc_exit_module(void) +{ + pci_unregister_driver(&igc_driver); +} + +module_exit(igc_exit_module); +/* igc_main.c */ diff --git a/devices/igc/igc_nvm-6.4-ethercat.c b/devices/igc/igc_nvm-6.4-ethercat.c new file mode 100644 index 00000000..8b4020f5 --- /dev/null +++ b/devices/igc/igc_nvm-6.4-ethercat.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_mac-6.4-ethercat.h" +#include "igc_nvm-6.4-ethercat.h" + +/** + * igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + */ +static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == IGC_NVM_POLL_READ) + reg = rd32(IGC_EERD); + else + reg = rd32(IGC_EEWR); + + if (reg & IGC_NVM_RW_REG_DONE) { + ret_val = 0; + break; + } + + udelay(5); + } + + return ret_val; +} + +/** + * igc_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). + */ +s32 igc_acquire_nvm(struct igc_hw *hw) +{ + s32 timeout = IGC_NVM_GRANT_ATTEMPTS; + u32 eecd = rd32(IGC_EECD); + s32 ret_val = 0; + + wr32(IGC_EECD, eecd | IGC_EECD_REQ); + eecd = rd32(IGC_EECD); + + while (timeout) { + if (eecd & IGC_EECD_GNT) + break; + udelay(5); + eecd = rd32(IGC_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); + hw_dbg("Could not acquire NVM grant\n"); + ret_val = -IGC_ERR_NVM; + } + + return ret_val; +} + +/** + * igc_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + */ +void igc_release_nvm(struct igc_hw *hw) +{ + u32 eecd; + + eecd = rd32(IGC_EECD); + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); +} + +/** + * igc_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. 
+ */ +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) + + IGC_NVM_RW_REG_START; + + wr32(IGC_EERD, eerd); + ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (rd32(IGC_EERD) >> IGC_NVM_RW_REG_DATA); + } + +out: + return ret_val; +} + +/** + * igc_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + */ +s32 igc_read_mac_addr(struct igc_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = rd32(IGC_RAH(0)); + rar_low = rd32(IGC_RAL(0)); + + for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8)); + + for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * igc_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +s32 igc_validate_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val = 0; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16)NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. 
+ */ +s32 igc_update_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} diff --git a/devices/igc/igc_nvm-6.4-ethercat.h b/devices/igc/igc_nvm-6.4-ethercat.h new file mode 100644 index 00000000..f9fc2e9c --- /dev/null +++ b/devices/igc/igc_nvm-6.4-ethercat.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_NVM_H_ +#define _IGC_NVM_H_ + +s32 igc_acquire_nvm(struct igc_hw *hw); +void igc_release_nvm(struct igc_hw *hw); +s32 igc_read_mac_addr(struct igc_hw *hw); +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data); +s32 igc_validate_nvm_checksum(struct igc_hw *hw); +s32 igc_update_nvm_checksum(struct igc_hw *hw); + +#endif diff --git a/devices/igc/igc_nvm-6.4-orig.c b/devices/igc/igc_nvm-6.4-orig.c new file mode 100644 index 00000000..58f81aba --- /dev/null +++ b/devices/igc/igc_nvm-6.4-orig.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_mac.h" +#include "igc_nvm.h" + +/** + * igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + */ +static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == IGC_NVM_POLL_READ) + reg = rd32(IGC_EERD); + else + reg = rd32(IGC_EEWR); + + if (reg & IGC_NVM_RW_REG_DONE) { + ret_val = 0; + break; + } + + udelay(5); + } + + return ret_val; +} + +/** + * igc_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). + */ +s32 igc_acquire_nvm(struct igc_hw *hw) +{ + s32 timeout = IGC_NVM_GRANT_ATTEMPTS; + u32 eecd = rd32(IGC_EECD); + s32 ret_val = 0; + + wr32(IGC_EECD, eecd | IGC_EECD_REQ); + eecd = rd32(IGC_EECD); + + while (timeout) { + if (eecd & IGC_EECD_GNT) + break; + udelay(5); + eecd = rd32(IGC_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); + hw_dbg("Could not acquire NVM grant\n"); + ret_val = -IGC_ERR_NVM; + } + + return ret_val; +} + +/** + * igc_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + */ +void igc_release_nvm(struct igc_hw *hw) +{ + u32 eecd; + + eecd = rd32(IGC_EECD); + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); +} + +/** + * igc_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. 
+ */ +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) + + IGC_NVM_RW_REG_START; + + wr32(IGC_EERD, eerd); + ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (rd32(IGC_EERD) >> IGC_NVM_RW_REG_DATA); + } + +out: + return ret_val; +} + +/** + * igc_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + */ +s32 igc_read_mac_addr(struct igc_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = rd32(IGC_RAH(0)); + rar_low = rd32(IGC_RAL(0)); + + for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8)); + + for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * igc_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +s32 igc_validate_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val = 0; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16)NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. 
+ */ +s32 igc_update_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} diff --git a/devices/igc/igc_nvm-6.4-orig.h b/devices/igc/igc_nvm-6.4-orig.h new file mode 100644 index 00000000..f9fc2e9c --- /dev/null +++ b/devices/igc/igc_nvm-6.4-orig.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_NVM_H_ +#define _IGC_NVM_H_ + +s32 igc_acquire_nvm(struct igc_hw *hw); +void igc_release_nvm(struct igc_hw *hw); +s32 igc_read_mac_addr(struct igc_hw *hw); +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data); +s32 igc_validate_nvm_checksum(struct igc_hw *hw); +s32 igc_update_nvm_checksum(struct igc_hw *hw); + +#endif diff --git a/devices/igc/igc_nvm-6.6-ethercat.c b/devices/igc/igc_nvm-6.6-ethercat.c new file mode 100644 index 00000000..1b26dd4f --- /dev/null +++ b/devices/igc/igc_nvm-6.6-ethercat.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_mac-6.6-ethercat.h" +#include "igc_nvm-6.6-ethercat.h" + +/** + * igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + */ +static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == IGC_NVM_POLL_READ) + reg = rd32(IGC_EERD); + else + reg = rd32(IGC_EEWR); + + if (reg & IGC_NVM_RW_REG_DONE) { + ret_val = 0; + break; + } + + udelay(5); + } + + return ret_val; +} + +/** + * igc_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). + */ +s32 igc_acquire_nvm(struct igc_hw *hw) +{ + s32 timeout = IGC_NVM_GRANT_ATTEMPTS; + u32 eecd = rd32(IGC_EECD); + s32 ret_val = 0; + + wr32(IGC_EECD, eecd | IGC_EECD_REQ); + eecd = rd32(IGC_EECD); + + while (timeout) { + if (eecd & IGC_EECD_GNT) + break; + udelay(5); + eecd = rd32(IGC_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); + hw_dbg("Could not acquire NVM grant\n"); + ret_val = -IGC_ERR_NVM; + } + + return ret_val; +} + +/** + * igc_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. 
+ */ +void igc_release_nvm(struct igc_hw *hw) +{ + u32 eecd; + + eecd = rd32(IGC_EECD); + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); +} + +/** + * igc_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + */ +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) + + IGC_NVM_RW_REG_START; + + wr32(IGC_EERD, eerd); + ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (rd32(IGC_EERD) >> IGC_NVM_RW_REG_DATA); + } + +out: + return ret_val; +} + +/** + * igc_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + */ +s32 igc_read_mac_addr(struct igc_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = rd32(IGC_RAH(0)); + rar_low = rd32(IGC_RAL(0)); + + for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8)); + + for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * igc_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +s32 igc_validate_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val = 0; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16)NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. 
+ */ +s32 igc_update_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} diff --git a/devices/igc/igc_nvm-6.6-ethercat.h b/devices/igc/igc_nvm-6.6-ethercat.h new file mode 100644 index 00000000..f9fc2e9c --- /dev/null +++ b/devices/igc/igc_nvm-6.6-ethercat.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_NVM_H_ +#define _IGC_NVM_H_ + +s32 igc_acquire_nvm(struct igc_hw *hw); +void igc_release_nvm(struct igc_hw *hw); +s32 igc_read_mac_addr(struct igc_hw *hw); +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data); +s32 igc_validate_nvm_checksum(struct igc_hw *hw); +s32 igc_update_nvm_checksum(struct igc_hw *hw); + +#endif diff --git a/devices/igc/igc_nvm-6.6-orig.c b/devices/igc/igc_nvm-6.6-orig.c new file mode 100644 index 00000000..58f81aba --- /dev/null +++ b/devices/igc/igc_nvm-6.6-orig.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_mac.h" +#include "igc_nvm.h" + +/** + * igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + */ +static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg) +{ + s32 ret_val = -IGC_ERR_NVM; + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == IGC_NVM_POLL_READ) + reg = rd32(IGC_EERD); + else + reg = rd32(IGC_EEWR); + + if (reg & IGC_NVM_RW_REG_DONE) { + ret_val = 0; + break; + } + + udelay(5); + } + + return ret_val; +} + +/** + * igc_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -IGC_ERR_NVM (-1). + */ +s32 igc_acquire_nvm(struct igc_hw *hw) +{ + s32 timeout = IGC_NVM_GRANT_ATTEMPTS; + u32 eecd = rd32(IGC_EECD); + s32 ret_val = 0; + + wr32(IGC_EECD, eecd | IGC_EECD_REQ); + eecd = rd32(IGC_EECD); + + while (timeout) { + if (eecd & IGC_EECD_GNT) + break; + udelay(5); + eecd = rd32(IGC_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); + hw_dbg("Could not acquire NVM grant\n"); + ret_val = -IGC_ERR_NVM; + } + + return ret_val; +} + +/** + * igc_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + */ +void igc_release_nvm(struct igc_hw *hw) +{ + u32 eecd; + + eecd = rd32(IGC_EECD); + eecd &= ~IGC_EECD_REQ; + wr32(IGC_EECD, eecd); +} + +/** + * igc_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. 
+ */ +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct igc_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || + words == 0) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) + + IGC_NVM_RW_REG_START; + + wr32(IGC_EERD, eerd); + ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (rd32(IGC_EERD) >> IGC_NVM_RW_REG_DATA); + } + +out: + return ret_val; +} + +/** + * igc_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + */ +s32 igc_read_mac_addr(struct igc_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = rd32(IGC_RAH(0)); + rar_low = rd32(IGC_RAL(0)); + + for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8)); + + for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * igc_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + */ +s32 igc_validate_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val = 0; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16)NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -IGC_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igc_update_nvm_checksum - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. 
+ */ +s32 igc_update_nvm_checksum(struct igc_hw *hw) +{ + u16 checksum = 0; + u16 i, nvm_data; + s32 ret_val; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} diff --git a/devices/igc/igc_nvm-6.6-orig.h b/devices/igc/igc_nvm-6.6-orig.h new file mode 100644 index 00000000..f9fc2e9c --- /dev/null +++ b/devices/igc/igc_nvm-6.6-orig.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_NVM_H_ +#define _IGC_NVM_H_ + +s32 igc_acquire_nvm(struct igc_hw *hw); +void igc_release_nvm(struct igc_hw *hw); +s32 igc_read_mac_addr(struct igc_hw *hw); +s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data); +s32 igc_validate_nvm_checksum(struct igc_hw *hw); +s32 igc_update_nvm_checksum(struct igc_hw *hw); + +#endif diff --git a/devices/igc/igc_phy-6.4-ethercat.c b/devices/igc/igc_phy-6.4-ethercat.c new file mode 100644 index 00000000..3f966d20 --- /dev/null +++ b/devices/igc/igc_phy-6.4-ethercat.c @@ -0,0 +1,795 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_phy-6.4-ethercat.h" + +/** + * igc_check_reset_block - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return IGC_ERR_BLK_PHY_RESET (12). + */ +s32 igc_check_reset_block(struct igc_hw *hw) +{ + u32 manc; + + manc = rd32(IGC_MANC); + + return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ? + IGC_ERR_BLK_PHY_RESET : 0; +} + +/** + * igc_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + */ +s32 igc_get_phy_id(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + usleep_range(200, 500); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return ret_val; +} + +/** + * igc_phy_has_link - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + */ +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + u16 i, phy_status; + s32 ret_val = 0; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val && usec_interval > 0) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. 
+ */ + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + + *success = (i < iterations) ? true : false; + + return ret_val; +} + +/** + * igc_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, restore the link to previous settings. + */ +void igc_power_up_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * igc_power_down_phy_copper - Power down copper PHY + * @hw: pointer to the HW structure + * + * Power down PHY to save power when interface is down and wake on lan + * is not enabled. + */ +void igc_power_down_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + + /* Temporary workaround - should be removed when PHY will implement + * IEEE registers as properly + */ + /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/ + usleep_range(1000, 2000); +} + +/** + * igc_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * A downshift is detected by querying the PHY link health. + */ +void igc_check_downshift(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + + /* speed downshift not supported */ + phy->speed_downgraded = false; +} + +/** + * igc_phy_hw_reset - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + */ +s32 igc_phy_hw_reset(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u32 phpm = 0, timeout = 10000; + s32 ret_val; + u32 ctrl; + + ret_val = igc_check_reset_block(hw); + if (ret_val) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; + + phpm = rd32(IGC_I225_PHPM); + + ctrl = rd32(IGC_CTRL); + wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST); + wrfl(); + + udelay(phy->reset_delay_us); + + wr32(IGC_CTRL, ctrl); + wrfl(); + + /* SW should guarantee 100us for the completion of the PHY reset */ + usleep_range(100, 150); + do { + phpm = rd32(IGC_I225_PHPM); + timeout--; + udelay(1); + } while (!(phpm & IGC_PHY_RST_COMP) && timeout); + + if (!timeout) + hw_dbg("Timeout is expired after a phy reset\n"); + + usleep_range(100, 150); + + phy->ops.release(hw); + +out: + return ret_val; +} + +/** + * igc_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. 
+ */ +static s32 igc_phy_setup_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 aneg_multigbt_an_ctrl = 0; + u16 mii_1000t_ctrl_reg = 0; + u16 mii_autoneg_adv_reg; + s32 ret_val; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + if (phy->autoneg_mask & ADVERTISE_2500_FULL) { + /* Read the MULTI GBT AN Control Register - reg 7.32 */ + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + &aneg_multigbt_an_ctrl); + + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* We do not allow the Phy to advertise 2500 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_2500_HALF) + hw_dbg("Advertise 2500mb Half duplex request denied!\n"); + + /* Do we want to advertise 2500 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_2500_FULL) { + hw_dbg("Advertise 2500mb Full duplex\n"); + aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS; + } else { + aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. 
If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case igc_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in igc_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case igc_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + return -IGC_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + + if (phy->autoneg_mask & ADVERTISE_2500_FULL) + ret_val = phy->ops.write_reg(hw, + (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + aneg_multigbt_an_ctrl); + + return ret_val; +} + +/** + * igc_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + */ +static s32 igc_wait_autoneg(struct igc_hw *hw) +{ + u16 i, phy_status; + s32 ret_val = 0; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * igc_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. 
+ */ +static s32 igc_copper_link_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 phy_ctrl; + s32 ret_val; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igc_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = igc_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** + * igc_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -IGC_ERR_PHY (-2). + */ +s32 igc_setup_copper_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = igc_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igc_config_collision_dist(hw); + ret_val = igc_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * igc_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + */ +static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
+ */ + mdic = ((offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_READ)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + udelay(50); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Read did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + *data = (u16)mdic; + +out: + return ret_val; +} + +/** + * igc_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + */ +static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to write the desired data. + */ + mdic = (((u32)data) | + (offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_WRITE)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + udelay(50); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Write did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * __igc_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + */ +static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * igc_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + */ +static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 *data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * igc_write_xmdio_reg - Write XMDIO register 
+ * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + */ +static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * igc_write_phy_reg_gpy - Write GPY PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_write_phy_reg_mdic(hw, offset, data); + hw->phy.ops.release(hw); + } else { + ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_reg_gpy - Read GPY PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is MMD to use. + * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_read_phy_reg_mdic(hw, offset, data); + hw->phy.ops.release(hw); + } else { + ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_fw_version - Read gPHY firmware version + * @hw: pointer to the HW structure + */ +u16 igc_read_phy_fw_version(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 gphy_version = 0; + u16 ret_val; + + /* NVM image version is reported as firmware version for i225 device */ + ret_val = phy->ops.read_reg(hw, IGC_GPHY_VERSION, &gphy_version); + if (ret_val) + hw_dbg("igc_phy: read wrong gphy version\n"); + + return gphy_version; +} diff --git a/devices/igc/igc_phy-6.4-ethercat.h b/devices/igc/igc_phy-6.4-ethercat.h new file mode 100644 index 00000000..06848754 --- /dev/null +++ b/devices/igc/igc_phy-6.4-ethercat.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_PHY_H_ +#define _IGC_PHY_H_ + +#include "igc_mac-6.4-ethercat.h" + +s32 igc_check_reset_block(struct igc_hw *hw); +s32 igc_phy_hw_reset(struct igc_hw *hw); +s32 igc_get_phy_id(struct igc_hw *hw); +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +void igc_check_downshift(struct igc_hw *hw); +s32 igc_setup_copper_link(struct igc_hw *hw); +void igc_power_up_phy_copper(struct igc_hw *hw); +void igc_power_down_phy_copper(struct igc_hw *hw); +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data); +u16 igc_read_phy_fw_version(struct igc_hw *hw); + +#endif diff --git a/devices/igc/igc_phy-6.4-orig.c b/devices/igc/igc_phy-6.4-orig.c new file mode 100644 index 00000000..53b77c96 --- /dev/null +++ 
b/devices/igc/igc_phy-6.4-orig.c @@ -0,0 +1,795 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_phy.h" + +/** + * igc_check_reset_block - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return IGC_ERR_BLK_PHY_RESET (12). + */ +s32 igc_check_reset_block(struct igc_hw *hw) +{ + u32 manc; + + manc = rd32(IGC_MANC); + + return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ? + IGC_ERR_BLK_PHY_RESET : 0; +} + +/** + * igc_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + */ +s32 igc_get_phy_id(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + usleep_range(200, 500); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return ret_val; +} + +/** + * igc_phy_has_link - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + */ +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + u16 i, phy_status; + s32 ret_val = 0; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val && usec_interval > 0) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + + *success = (i < iterations) ? true : false; + + return ret_val; +} + +/** + * igc_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, restore the link to previous settings. + */ +void igc_power_up_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * igc_power_down_phy_copper - Power down copper PHY + * @hw: pointer to the HW structure + * + * Power down PHY to save power when interface is down and wake on lan + * is not enabled. 
+ */ +void igc_power_down_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + + /* Temporary workaround - should be removed when PHY will implement + * IEEE registers as properly + */ + /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/ + usleep_range(1000, 2000); +} + +/** + * igc_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * A downshift is detected by querying the PHY link health. + */ +void igc_check_downshift(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + + /* speed downshift not supported */ + phy->speed_downgraded = false; +} + +/** + * igc_phy_hw_reset - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + */ +s32 igc_phy_hw_reset(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u32 phpm = 0, timeout = 10000; + s32 ret_val; + u32 ctrl; + + ret_val = igc_check_reset_block(hw); + if (ret_val) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; + + phpm = rd32(IGC_I225_PHPM); + + ctrl = rd32(IGC_CTRL); + wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST); + wrfl(); + + udelay(phy->reset_delay_us); + + wr32(IGC_CTRL, ctrl); + wrfl(); + + /* SW should guarantee 100us for the completion of the PHY reset */ + usleep_range(100, 150); + do { + phpm = rd32(IGC_I225_PHPM); + timeout--; + udelay(1); + } while (!(phpm & IGC_PHY_RST_COMP) && timeout); + + if (!timeout) + hw_dbg("Timeout is expired after a phy reset\n"); + + usleep_range(100, 150); + + phy->ops.release(hw); + +out: + return ret_val; +} + +/** + * igc_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + */ +static s32 igc_phy_setup_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 aneg_multigbt_an_ctrl = 0; + u16 mii_1000t_ctrl_reg = 0; + u16 mii_autoneg_adv_reg; + s32 ret_val; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + if (phy->autoneg_mask & ADVERTISE_2500_FULL) { + /* Read the MULTI GBT AN Control Register - reg 7.32 */ + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + &aneg_multigbt_an_ctrl); + + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. 
Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* We do not allow the Phy to advertise 2500 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_2500_HALF) + hw_dbg("Advertise 2500mb Half duplex request denied!\n"); + + /* Do we want to advertise 2500 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_2500_FULL) { + hw_dbg("Advertise 2500mb Full duplex\n"); + aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS; + } else { + aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case igc_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. 
Later + * (in igc_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case igc_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + return -IGC_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + + if (phy->autoneg_mask & ADVERTISE_2500_FULL) + ret_val = phy->ops.write_reg(hw, + (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + aneg_multigbt_an_ctrl); + + return ret_val; +} + +/** + * igc_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + */ +static s32 igc_wait_autoneg(struct igc_hw *hw) +{ + u16 i, phy_status; + s32 ret_val = 0; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * igc_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + */ +static s32 igc_copper_link_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 phy_ctrl; + s32 ret_val; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igc_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. 
+ */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = igc_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** + * igc_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -IGC_ERR_PHY (-2). + */ +s32 igc_setup_copper_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = igc_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igc_config_collision_dist(hw); + ret_val = igc_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * igc_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + */ +static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
+ */ + mdic = ((offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_READ)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + udelay(50); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Read did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + *data = (u16)mdic; + +out: + return ret_val; +} + +/** + * igc_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + */ +static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to write the desired data. + */ + mdic = (((u32)data) | + (offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_WRITE)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + udelay(50); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Write did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * __igc_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + */ +static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * igc_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + */ +static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 *data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * igc_write_xmdio_reg - Write XMDIO register 
+ * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + */ +static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * igc_write_phy_reg_gpy - Write GPY PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_write_phy_reg_mdic(hw, offset, data); + hw->phy.ops.release(hw); + } else { + ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_reg_gpy - Read GPY PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is MMD to use. + * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_read_phy_reg_mdic(hw, offset, data); + hw->phy.ops.release(hw); + } else { + ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_fw_version - Read gPHY firmware version + * @hw: pointer to the HW structure + */ +u16 igc_read_phy_fw_version(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 gphy_version = 0; + u16 ret_val; + + /* NVM image version is reported as firmware version for i225 device */ + ret_val = phy->ops.read_reg(hw, IGC_GPHY_VERSION, &gphy_version); + if (ret_val) + hw_dbg("igc_phy: read wrong gphy version\n"); + + return gphy_version; +} diff --git a/devices/igc/igc_phy-6.4-orig.h b/devices/igc/igc_phy-6.4-orig.h new file mode 100644 index 00000000..832a7e35 --- /dev/null +++ b/devices/igc/igc_phy-6.4-orig.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_PHY_H_ +#define _IGC_PHY_H_ + +#include "igc_mac.h" + +s32 igc_check_reset_block(struct igc_hw *hw); +s32 igc_phy_hw_reset(struct igc_hw *hw); +s32 igc_get_phy_id(struct igc_hw *hw); +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +void igc_check_downshift(struct igc_hw *hw); +s32 igc_setup_copper_link(struct igc_hw *hw); +void igc_power_up_phy_copper(struct igc_hw *hw); +void igc_power_down_phy_copper(struct igc_hw *hw); +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data); +u16 igc_read_phy_fw_version(struct igc_hw *hw); + +#endif diff --git a/devices/igc/igc_phy-6.6-ethercat.c b/devices/igc/igc_phy-6.6-ethercat.c new file mode 100644 index 00000000..c1f4b34e --- /dev/null +++ 
b/devices/igc/igc_phy-6.6-ethercat.c @@ -0,0 +1,795 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_phy-6.6-ethercat.h" + +/** + * igc_check_reset_block - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return IGC_ERR_BLK_PHY_RESET (12). + */ +s32 igc_check_reset_block(struct igc_hw *hw) +{ + u32 manc; + + manc = rd32(IGC_MANC); + + return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ? + IGC_ERR_BLK_PHY_RESET : 0; +} + +/** + * igc_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + */ +s32 igc_get_phy_id(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + usleep_range(200, 500); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return ret_val; +} + +/** + * igc_phy_has_link - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + */ +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + u16 i, phy_status; + s32 ret_val = 0; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val && usec_interval > 0) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + + *success = (i < iterations) ? true : false; + + return ret_val; +} + +/** + * igc_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, restore the link to previous settings. + */ +void igc_power_up_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * igc_power_down_phy_copper - Power down copper PHY + * @hw: pointer to the HW structure + * + * Power down PHY to save power when interface is down and wake on lan + * is not enabled. 
+ */ +void igc_power_down_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + + /* Temporary workaround - should be removed when PHY will implement + * IEEE registers as properly + */ + /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/ + usleep_range(1000, 2000); +} + +/** + * igc_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * A downshift is detected by querying the PHY link health. + */ +void igc_check_downshift(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + + /* speed downshift not supported */ + phy->speed_downgraded = false; +} + +/** + * igc_phy_hw_reset - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + */ +s32 igc_phy_hw_reset(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u32 phpm = 0, timeout = 10000; + s32 ret_val; + u32 ctrl; + + ret_val = igc_check_reset_block(hw); + if (ret_val) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; + + phpm = rd32(IGC_I225_PHPM); + + ctrl = rd32(IGC_CTRL); + wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST); + wrfl(); + + udelay(phy->reset_delay_us); + + wr32(IGC_CTRL, ctrl); + wrfl(); + + /* SW should guarantee 100us for the completion of the PHY reset */ + usleep_range(100, 150); + do { + phpm = rd32(IGC_I225_PHPM); + timeout--; + udelay(1); + } while (!(phpm & IGC_PHY_RST_COMP) && timeout); + + if (!timeout) + hw_dbg("Timeout is expired after a phy reset\n"); + + usleep_range(100, 150); + + phy->ops.release(hw); + +out: + return ret_val; +} + +/** + * igc_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + */ +static s32 igc_phy_setup_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 aneg_multigbt_an_ctrl = 0; + u16 mii_1000t_ctrl_reg = 0; + u16 mii_autoneg_adv_reg; + s32 ret_val; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + if (phy->autoneg_mask & ADVERTISE_2500_FULL) { + /* Read the MULTI GBT AN Control Register - reg 7.32 */ + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + &aneg_multigbt_an_ctrl); + + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. 
Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* We do not allow the Phy to advertise 2500 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_2500_HALF) + hw_dbg("Advertise 2500mb Half duplex request denied!\n"); + + /* Do we want to advertise 2500 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_2500_FULL) { + hw_dbg("Advertise 2500mb Full duplex\n"); + aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS; + } else { + aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case igc_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. 
Later + * (in igc_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case igc_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + return -IGC_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + + if (phy->autoneg_mask & ADVERTISE_2500_FULL) + ret_val = phy->ops.write_reg(hw, + (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + aneg_multigbt_an_ctrl); + + return ret_val; +} + +/** + * igc_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + */ +static s32 igc_wait_autoneg(struct igc_hw *hw) +{ + u16 i, phy_status; + s32 ret_val = 0; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * igc_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + */ +static s32 igc_copper_link_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 phy_ctrl; + s32 ret_val; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igc_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. 
+ */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = igc_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** + * igc_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -IGC_ERR_PHY (-2). + */ +s32 igc_setup_copper_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = igc_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igc_config_collision_dist(hw); + ret_val = igc_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * igc_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + */ +static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
+ */ + mdic = ((offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_READ)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + udelay(50); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Read did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + *data = (u16)mdic; + +out: + return ret_val; +} + +/** + * igc_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + */ +static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to write the desired data. + */ + mdic = (((u32)data) | + (offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_WRITE)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + udelay(50); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Write did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * __igc_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + */ +static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * igc_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + */ +static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 *data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * igc_write_xmdio_reg - Write XMDIO register 
+ * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + */ +static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * igc_write_phy_reg_gpy - Write GPY PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_write_phy_reg_mdic(hw, offset, data); + hw->phy.ops.release(hw); + } else { + ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_reg_gpy - Read GPY PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is MMD to use. + * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_read_phy_reg_mdic(hw, offset, data); + hw->phy.ops.release(hw); + } else { + ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_fw_version - Read gPHY firmware version + * @hw: pointer to the HW structure + */ +u16 igc_read_phy_fw_version(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 gphy_version = 0; + u16 ret_val; + + /* NVM image version is reported as firmware version for i225 device */ + ret_val = phy->ops.read_reg(hw, IGC_GPHY_VERSION, &gphy_version); + if (ret_val) + hw_dbg("igc_phy: read wrong gphy version\n"); + + return gphy_version; +} diff --git a/devices/igc/igc_phy-6.6-ethercat.h b/devices/igc/igc_phy-6.6-ethercat.h new file mode 100644 index 00000000..f8bf9cf5 --- /dev/null +++ b/devices/igc/igc_phy-6.6-ethercat.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_PHY_H_ +#define _IGC_PHY_H_ + +#include "igc_mac-6.6-ethercat.h" + +s32 igc_check_reset_block(struct igc_hw *hw); +s32 igc_phy_hw_reset(struct igc_hw *hw); +s32 igc_get_phy_id(struct igc_hw *hw); +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +void igc_check_downshift(struct igc_hw *hw); +s32 igc_setup_copper_link(struct igc_hw *hw); +void igc_power_up_phy_copper(struct igc_hw *hw); +void igc_power_down_phy_copper(struct igc_hw *hw); +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data); +u16 igc_read_phy_fw_version(struct igc_hw *hw); + +#endif diff --git a/devices/igc/igc_phy-6.6-orig.c b/devices/igc/igc_phy-6.6-orig.c new file mode 100644 index 00000000..53b77c96 --- /dev/null +++ 
b/devices/igc/igc_phy-6.6-orig.c @@ -0,0 +1,795 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +#include "igc_phy.h" + +/** + * igc_check_reset_block - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return IGC_ERR_BLK_PHY_RESET (12). + */ +s32 igc_check_reset_block(struct igc_hw *hw) +{ + u32 manc; + + manc = rd32(IGC_MANC); + + return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ? + IGC_ERR_BLK_PHY_RESET : 0; +} + +/** + * igc_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + */ +s32 igc_get_phy_id(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + usleep_range(200, 500); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return ret_val; +} + +/** + * igc_phy_has_link - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + */ +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + u16 i, phy_status; + s32 ret_val = 0; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val && usec_interval > 0) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval / 1000); + else + udelay(usec_interval); + } + + *success = (i < iterations) ? true : false; + + return ret_val; +} + +/** + * igc_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, restore the link to previous settings. + */ +void igc_power_up_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * igc_power_down_phy_copper - Power down copper PHY + * @hw: pointer to the HW structure + * + * Power down PHY to save power when interface is down and wake on lan + * is not enabled. 
+ */ +void igc_power_down_phy_copper(struct igc_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + + /* Temporary workaround - should be removed when PHY will implement + * IEEE registers as properly + */ + /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/ + usleep_range(1000, 2000); +} + +/** + * igc_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * A downshift is detected by querying the PHY link health. + */ +void igc_check_downshift(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + + /* speed downshift not supported */ + phy->speed_downgraded = false; +} + +/** + * igc_phy_hw_reset - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + */ +s32 igc_phy_hw_reset(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u32 phpm = 0, timeout = 10000; + s32 ret_val; + u32 ctrl; + + ret_val = igc_check_reset_block(hw); + if (ret_val) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; + + phpm = rd32(IGC_I225_PHPM); + + ctrl = rd32(IGC_CTRL); + wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST); + wrfl(); + + udelay(phy->reset_delay_us); + + wr32(IGC_CTRL, ctrl); + wrfl(); + + /* SW should guarantee 100us for the completion of the PHY reset */ + usleep_range(100, 150); + do { + phpm = rd32(IGC_I225_PHPM); + timeout--; + udelay(1); + } while (!(phpm & IGC_PHY_RST_COMP) && timeout); + + if (!timeout) + hw_dbg("Timeout is expired after a phy reset\n"); + + usleep_range(100, 150); + + phy->ops.release(hw); + +out: + return ret_val; +} + +/** + * igc_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + */ +static s32 igc_phy_setup_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 aneg_multigbt_an_ctrl = 0; + u16 mii_1000t_ctrl_reg = 0; + u16 mii_autoneg_adv_reg; + s32 ret_val; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + if (phy->autoneg_mask & ADVERTISE_2500_FULL) { + /* Read the MULTI GBT AN Control Register - reg 7.32 */ + ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + &aneg_multigbt_an_ctrl); + + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. 
Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* We do not allow the Phy to advertise 2500 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_2500_HALF) + hw_dbg("Advertise 2500mb Half duplex request denied!\n"); + + /* Do we want to advertise 2500 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_2500_FULL) { + hw_dbg("Advertise 2500mb Full duplex\n"); + aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS; + } else { + aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case igc_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. 
Later + * (in igc_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case igc_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case igc_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + return -IGC_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + + if (phy->autoneg_mask & ADVERTISE_2500_FULL) + ret_val = phy->ops.write_reg(hw, + (STANDARD_AN_REG_MASK << + MMD_DEVADDR_SHIFT) | + ANEG_MULTIGBT_AN_CTRL, + aneg_multigbt_an_ctrl); + + return ret_val; +} + +/** + * igc_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + */ +static s32 igc_wait_autoneg(struct igc_hw *hw) +{ + u16 i, phy_status; + s32 ret_val = 0; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * igc_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + */ +static s32 igc_copper_link_autoneg(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 phy_ctrl; + s32 ret_val; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igc_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. 
+ */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = igc_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** + * igc_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -IGC_ERR_PHY (-2). + */ +s32 igc_setup_copper_link(struct igc_hw *hw) +{ + s32 ret_val = 0; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = igc_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igc_config_collision_dist(hw); + ret_val = igc_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * igc_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + */ +static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
+ */ + mdic = ((offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_READ)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + udelay(50); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Read did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + *data = (u16)mdic; + +out: + return ret_val; +} + +/** + * igc_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + */ +static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data) +{ + struct igc_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -IGC_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to write the desired data. + */ + mdic = (((u32)data) | + (offset << IGC_MDIC_REG_SHIFT) | + (phy->addr << IGC_MDIC_PHY_SHIFT) | + (IGC_MDIC_OP_WRITE)); + + wr32(IGC_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) { + udelay(50); + mdic = rd32(IGC_MDIC); + if (mdic & IGC_MDIC_READY) + break; + } + if (!(mdic & IGC_MDIC_READY)) { + hw_dbg("MDI Write did not complete\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + if (mdic & IGC_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -IGC_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * __igc_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + */ +static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * igc_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + */ +static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 *data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * igc_write_xmdio_reg - Write XMDIO register 
+ * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + */ +static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr, + u8 dev_addr, u16 data) +{ + return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * igc_write_phy_reg_gpy - Write GPY PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_write_phy_reg_mdic(hw, offset, data); + hw->phy.ops.release(hw); + } else { + ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_reg_gpy - Read GPY PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is MMD to use. + * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. + */ +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) +{ + u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT; + s32 ret_val; + + offset = offset & GPY_REG_MASK; + + if (!dev_addr) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = igc_read_phy_reg_mdic(hw, offset, data); + hw->phy.ops.release(hw); + } else { + ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr, + data); + } + + return ret_val; +} + +/** + * igc_read_phy_fw_version - Read gPHY firmware version + * @hw: pointer to the HW structure + */ +u16 igc_read_phy_fw_version(struct igc_hw *hw) +{ + struct igc_phy_info *phy = &hw->phy; + u16 gphy_version = 0; + u16 ret_val; + + /* NVM image version is reported as firmware version for i225 device */ + ret_val = phy->ops.read_reg(hw, IGC_GPHY_VERSION, &gphy_version); + if (ret_val) + hw_dbg("igc_phy: read wrong gphy version\n"); + + return gphy_version; +} diff --git a/devices/igc/igc_phy-6.6-orig.h b/devices/igc/igc_phy-6.6-orig.h new file mode 100644 index 00000000..832a7e35 --- /dev/null +++ b/devices/igc/igc_phy-6.6-orig.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_PHY_H_ +#define _IGC_PHY_H_ + +#include "igc_mac.h" + +s32 igc_check_reset_block(struct igc_hw *hw); +s32 igc_phy_hw_reset(struct igc_hw *hw); +s32 igc_get_phy_id(struct igc_hw *hw); +s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +void igc_check_downshift(struct igc_hw *hw); +s32 igc_setup_copper_link(struct igc_hw *hw); +void igc_power_up_phy_copper(struct igc_hw *hw); +void igc_power_down_phy_copper(struct igc_hw *hw); +s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data); +s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data); +u16 igc_read_phy_fw_version(struct igc_hw *hw); + +#endif diff --git a/devices/igc/igc_ptp-6.4-ethercat.c b/devices/igc/igc_ptp-6.4-ethercat.c new file mode 100644 index 00000000..79b42efe --- /dev/null +++ 
b/devices/igc/igc_ptp-6.4-ethercat.c @@ -0,0 +1,1201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Intel Corporation */ + +#include "igc-6.4-ethercat.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#define INCVALUE_MASK 0x7fffffff +#define ISGN 0x80000000 + +#define IGC_PTP_TX_TIMEOUT (HZ * 15) + +#define IGC_PTM_STAT_SLEEP 2 +#define IGC_PTM_STAT_TIMEOUT 100 + +/* SYSTIM read access for I225 */ +void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts) +{ + struct igc_hw *hw = &adapter->hw; + u32 sec, nsec; + + /* The timestamp is latched when SYSTIML is read. */ + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} + +static void igc_ptp_write_i225(struct igc_adapter *adapter, + const struct timespec64 *ts) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_SYSTIML, ts->tv_nsec); + wr32(IGC_SYSTIMH, ts->tv_sec); +} + +static int igc_ptp_adjfine_i225(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct igc_hw *hw = &igc->hw; + int neg_adj = 0; + u64 rate; + u32 inca; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + rate = scaled_ppm; + rate <<= 14; + rate = div_u64(rate, 78125); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + wr32(IGC_TIMINCA, inca); + + return 0; +} + +static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct timespec64 now, then = ns_to_timespec64(delta); + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + igc_ptp_read(igc, &now); + now = timespec64_add(now, then); + igc_ptp_write_i225(igc, (const struct timespec64 *)&now); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + ptp_read_system_prets(sts); + ts->tv_nsec = rd32(IGC_SYSTIML); + ts->tv_sec = rd32(IGC_SYSTIMH); + ptp_read_system_postts(sts); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static int igc_ptp_settime_i225(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + igc_ptp_write_i225(igc, ts); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static void igc_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) +{ + u32 *ptr = pin < 2 ? 
ctrl : ctrl_ext; + static const u32 mask[IGC_N_SDP] = { + IGC_CTRL_SDP0_DIR, + IGC_CTRL_SDP1_DIR, + IGC_CTRL_EXT_SDP2_DIR, + IGC_CTRL_EXT_SDP3_DIR, + }; + + if (input) + *ptr &= ~mask[pin]; + else + *ptr |= mask[pin]; +} + +static void igc_pin_perout(struct igc_adapter *igc, int chan, int pin, int freq) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + static const u32 igc_ts_sdp_sel_tt0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT0, IGC_TS_SDP1_SEL_TT0, + IGC_TS_SDP2_SEL_TT0, IGC_TS_SDP3_SEL_TT0, + }; + static const u32 igc_ts_sdp_sel_tt1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT1, IGC_TS_SDP1_SEL_TT1, + IGC_TS_SDP2_SEL_TT1, IGC_TS_SDP3_SEL_TT1, + }; + static const u32 igc_ts_sdp_sel_fc0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC0, IGC_TS_SDP1_SEL_FC0, + IGC_TS_SDP2_SEL_FC0, IGC_TS_SDP3_SEL_FC0, + }; + static const u32 igc_ts_sdp_sel_fc1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + static const u32 igc_ts_sdp_sel_clr[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 0, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an input. */ + if ((tssdp & IGC_AUX0_SEL_SDP3) == igc_aux0_sel_sdp[pin]) + tssdp &= ~IGC_AUX0_TS_SDP_EN; + + if ((tssdp & IGC_AUX1_SEL_SDP3) == igc_aux1_sel_sdp[pin]) + tssdp &= ~IGC_AUX1_TS_SDP_EN; + + tssdp &= ~igc_ts_sdp_sel_clr[pin]; + if (freq) { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_fc1[pin]; + else + tssdp |= igc_ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_tt1[pin]; + else + tssdp |= igc_ts_sdp_sel_tt0[pin]; + } + tssdp |= igc_ts_sdp_en[pin]; + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static void igc_pin_extts(struct igc_adapter *igc, int chan, int pin) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 1, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an output. 
*/ + tssdp &= ~igc_ts_sdp_en[pin]; + + if (chan == 1) { + tssdp &= ~IGC_AUX1_SEL_SDP3; + tssdp |= igc_aux1_sel_sdp[pin] | IGC_AUX1_TS_SDP_EN; + } else { + tssdp &= ~IGC_AUX0_SEL_SDP3; + tssdp |= igc_aux0_sel_sdp[pin] | IGC_AUX0_TS_SDP_EN; + } + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct igc_adapter *igc = + container_of(ptp, struct igc_adapter, ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + struct timespec64 ts; + int use_freq = 0, pin = -1; + u32 tsim, tsauxc, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests failing to enable both edges. */ + if ((rq->extts.flags & PTP_STRICT_FLAGS) && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = IGC_TSAUXC_EN_TS1; + tsim_mask = IGC_TSICR_AUTT1; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TS0; + tsim_mask = IGC_TSICR_AUTT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (on) { + igc_pin_extts(igc, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && (ns <= 70000000LL || ns == 125000000LL || + ns == 250000000LL || ns == 500000000LL)) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; + } + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK1 | IGC_TSAUXC_ST1; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT1; + tsim_mask = IGC_TSICR_TT1; + } + trgttiml = IGC_TRGTTIML1; + trgttimh = IGC_TRGTTIMH1; + freqout = IGC_FREQOUT1; + } else { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK0 | IGC_TSAUXC_ST0; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT0; + tsim_mask = IGC_TSICR_TT0; + } + trgttiml = IGC_TRGTTIML0; + trgttimh = IGC_TRGTTIMH0; + freqout = IGC_FREQOUT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1 | + IGC_TSAUXC_ST1); + tsim &= ~IGC_TSICR_TT1; + } else { + tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0 | + IGC_TSAUXC_ST0); + tsim &= ~IGC_TSICR_TT0; + } + if (on) { + struct timespec64 safe_start; + int i = rq->perout.index; + + igc_pin_perout(igc, i, pin, use_freq); + igc_ptp_read(igc, &safe_start); + + /* PPS output start time is triggered by Target time(TT) + * register. 
Programming any past time value into TT + * register will cause PPS to never start. Need to make + * sure we program the TT register a time ahead in + * future. There isn't a stringent need to fire PPS out + * right away. Adding +2 seconds should take care of + * corner cases. Let's say if the SYSTIML is close to + * wrap up and the timer keeps ticking as we program the + * register, adding +2seconds is safe bet. + */ + safe_start.tv_sec += 2; + + if (rq->perout.start.sec < safe_start.tv_sec) + igc->perout[i].start.tv_sec = safe_start.tv_sec; + else + igc->perout[i].start.tv_sec = rq->perout.start.sec; + igc->perout[i].start.tv_nsec = rq->perout.start.nsec; + igc->perout[i].period.tv_sec = ts.tv_sec; + igc->perout[i].period.tv_nsec = ts.tv_nsec; + wr32(trgttimh, (u32)igc->perout[i].start.tv_sec); + /* For now, always select timer 0 as source. */ + wr32(trgttiml, (u32)(igc->perout[i].start.tv_nsec | + IGC_TT_IO_TIMER_SEL_SYSTIM0)); + if (use_freq) + wr32(freqout, ns); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsim = rd32(IGC_TSIM); + if (on) + tsim |= IGC_TSICR_SYS_WRAP; + else + tsim &= ~IGC_TSICR_SYS_WRAP; + igc->pps_sys_wrap_on = on; + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +/** + * igc_ptp_systim_to_hwtstamp - convert system time value to HW timestamp + * @adapter: board private structure + * @hwtstamps: timestamp structure to update + * @systim: unsigned 64bit system time value + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions. + * + * Returns 0 on success. + **/ +static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + switch (adapter->hw.mac.type) { + case igc_i225: + memset(hwtstamps, 0, sizeof(*hwtstamps)); + /* Upper 32 bits contain s, lower 32 bits contain ns. */ + hwtstamps->hwtstamp = ktime_set(systim >> 32, + systim & 0xFFFFFFFF); + break; + default: + return -EINVAL; + } + return 0; +} + +/** + * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer + * @adapter: Pointer to adapter the packet buffer belongs to + * @buf: Pointer to packet buffer + * + * This function retrieves the timestamp saved in the beginning of packet + * buffer. While two timestamps are available, one in timer0 reference and the + * other in timer1 reference, this function considers only the timestamp in + * timer0 reference. + * + * Returns timestamp value. + */ +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf) +{ + ktime_t timestamp; + u32 secs, nsecs; + int adjust; + + /* Timestamps are saved in little endian at the beginning of the packet + * buffer following the layout: + * + * DWORD: | 0 | 1 | 2 | 3 | + * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH | + * + * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds + * part of the timestamp. 
+ */ + nsecs = le32_to_cpu(buf[2]); + secs = le32_to_cpu(buf[3]); + + timestamp = ktime_set(secs, nsecs); + + /* Adjust timestamp for the RX latency based on link speed */ + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_RX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_RX_LATENCY_2500; + break; + default: + adjust = 0; + netdev_warn_once(adapter->netdev, "Imprecise timestamp\n"); + break; + } + + return ktime_sub_ns(timestamp, adjust); +} + +static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + int i; + + wr32(IGC_TSYNCRXCTL, 0); + + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + val &= ~IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + + val = rd32(IGC_RXPBS); + val &= ~IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); +} + +static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + int i; + + val = rd32(IGC_RXPBS); + val |= IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); + + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + /* FIXME: For now, only support retrieving RX timestamps from + * timer 0. + */ + val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) | + IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + + val = IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_ALL | + IGC_TSYNCRXCTL_RXSYNSIG; + wr32(IGC_TSYNCRXCTL, val); +} + +static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); +} + +static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + /* Clear the flags first to avoid new packets to be enqueued + * for TX timestamping. + */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + clear_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags); + } + + /* Now we can clean the pending TX timestamp requests. */ + igc_ptp_clear_tx_tstamp(adapter); + + wr32(IGC_TSYNCTXCTL, 0); +} + +static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + wr32(IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED | IGC_TSYNCTXCTL_TXSYNSIG); + + /* Read TXSTMP registers to discard any timestamp previously stored. */ + rd32(IGC_TXSTMPL); + rd32(IGC_TXSTMPH); + + /* The hardware is ready to accept TX timestamp requests, + * notify the transmit path. + */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + set_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags); + } + +} + +/** + * igc_ptp_set_timestamp_mode - setup hardware for timestamping + * @adapter: networking device structure + * @config: hwtstamp configuration + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, + struct hwtstamp_config *config) +{ + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + igc_ptp_disable_tx_timestamp(adapter); + break; + case HWTSTAMP_TX_ON: + igc_ptp_enable_tx_timestamp(adapter); + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + igc_ptp_disable_rx_timestamp(adapter); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_NTP_ALL: + case HWTSTAMP_FILTER_ALL: + igc_ptp_enable_rx_timestamp(adapter); + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + return 0; +} + +/* Requires adapter->ptp_tx_lock held by caller. */ +static void igc_ptp_tx_timeout(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + adapter->tx_hwtstamp_timeouts++; + /* Clear the tx valid bit in TSYNCTXCTL register to enable interrupt. */ + rd32(IGC_TXSTMPH); + netdev_warn(adapter->netdev, "Tx timestamp timeout\n"); +} + +void igc_ptp_tx_hang(struct igc_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + + if (!adapter->ptp_tx_skb) + goto unlock; + + if (time_is_after_jiffies(adapter->ptp_tx_start + IGC_PTP_TX_TIMEOUT)) + goto unlock; + + igc_ptp_tx_timeout(adapter); + +unlock: + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); +} + +/** + * igc_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: Board private structure + * + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we only + * allow only one such packet into the queue. + * + * Context: Expects adapter->ptp_tx_lock to be held by caller. + */ +static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) +{ + struct sk_buff *skb = adapter->ptp_tx_skb; + struct skb_shared_hwtstamps shhwtstamps; + struct igc_hw *hw = &adapter->hw; + u32 tsynctxctl; + int adjust = 0; + u64 regval; + + if (WARN_ON_ONCE(!skb)) + return; + + tsynctxctl = rd32(IGC_TSYNCTXCTL); + tsynctxctl &= IGC_TSYNCTXCTL_TXTT_0; + if (tsynctxctl) { + regval = rd32(IGC_TXSTMPL); + regval |= (u64)rd32(IGC_TXSTMPH) << 32; + } else { + /* There's a bug in the hardware that could cause + * missing interrupts for TX timestamping. The issue + * is that for new interrupts to be triggered, the + * IGC_TXSTMPH_0 register must be read. + * + * To avoid discarding a valid timestamp that just + * happened at the "wrong" time, we need to confirm + * that there was no timestamp captured, we do that by + * assuming that no two timestamps in sequence have + * the same nanosecond value. + * + * So, we read the "low" register, read the "high" + * register (to latch a new timestamp) and read the + * "low" register again, if "old" and "new" versions + * of the "low" register are different, a valid + * timestamp was captured, we can read the "high" + * register again. 
+ */ + u32 txstmpl_old, txstmpl_new; + + txstmpl_old = rd32(IGC_TXSTMPL); + rd32(IGC_TXSTMPH); + txstmpl_new = rd32(IGC_TXSTMPL); + + if (txstmpl_old == txstmpl_new) + return; + + regval = txstmpl_new; + regval |= (u64)rd32(IGC_TXSTMPH) << 32; + } + if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval)) + return; + + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_TX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_TX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_TX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_TX_LATENCY_2500; + break; + } + + shhwtstamps.hwtstamp = + ktime_add_ns(shhwtstamps.hwtstamp, adjust); + + adapter->ptp_tx_skb = NULL; + + /* Notify the stack and free the skb after we've unlocked */ + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); +} + +/** + * igc_ptp_tx_tstamp_event + * @adapter: board private structure + * + * Called when a TX timestamp interrupt happens to retrieve the + * timestamp and send it up to the socket. + */ +void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + + if (!adapter->ptp_tx_skb) + goto unlock; + + igc_ptp_tx_hwtstamp(adapter); + +unlock: + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); +} + +/** + * igc_ptp_set_ts_config - set hardware time stamping config + * @netdev: network interface device structure + * @ifr: interface request data + * + **/ +int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = igc_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * igc_ptp_get_ts_config - get hardware time stamping config + * @netdev: network interface device structure + * @ifr: interface request data + * + * Get the hwtstamp_config settings to return to the user. Rather than attempt + * to deconstruct the settings from the registers, just return a shadow copy + * of the last known settings. + **/ +int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/* The two conditions below must be met for cross timestamping via + * PCIe PTM: + * + * 1. We have an way to convert the timestamps in the PTM messages + * to something related to the system clocks (right now, only + * X86 systems with support for the Always Running Timer allow that); + * + * 2. We have PTM enabled in the path from the device to the PCIe root port. + */ +static bool igc_is_crosststamp_supported(struct igc_adapter *adapter) +{ + if (!IS_ENABLED(CONFIG_X86_TSC)) + return false; + + /* FIXME: it was noticed that enabling support for PCIe PTM in + * some i225-V models could cause lockups when bringing the + * interface up/down. There should be no downsides to + * disabling crosstimestamping support for i225-V, as it + * doesn't have any PTP support. That way we gain some time + * while root causing the issue. 
+ */ + if (adapter->pdev->device == IGC_DEV_ID_I225_V) + return false; + + return pcie_ptm_enabled(adapter->pdev); +} + +static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp) +{ +#if IS_ENABLED(CONFIG_X86_TSC) && !defined(CONFIG_UML) + return convert_art_ns_to_tsc(tstamp); +#else + return (struct system_counterval_t) { }; +#endif +} + +static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat) +{ + struct net_device *netdev = adapter->netdev; + + switch (ptm_stat) { + case IGC_PTM_STAT_RET_ERR: + netdev_err(netdev, "PTM Error: Root port timeout\n"); + break; + case IGC_PTM_STAT_BAD_PTM_RES: + netdev_err(netdev, "PTM Error: Bad response, PTM Response Data expected\n"); + break; + case IGC_PTM_STAT_T4M1_OVFL: + netdev_err(netdev, "PTM Error: T4 minus T1 overflow\n"); + break; + case IGC_PTM_STAT_ADJUST_1ST: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during first PTM cycle\n"); + break; + case IGC_PTM_STAT_ADJUST_CYC: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during non-first PTM cycle\n"); + break; + default: + netdev_err(netdev, "PTM Error: Unknown error (%#x)\n", ptm_stat); + break; + } +} + +static int igc_phc_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + u32 stat, t2_curr_h, t2_curr_l, ctrl; + struct igc_adapter *adapter = ctx; + struct igc_hw *hw = &adapter->hw; + int err, count = 100; + ktime_t t1, t2_curr; + + /* Get a snapshot of system clocks to use as historic value. */ + ktime_get_snapshot(&adapter->snapshot); + + do { + /* Doing this in a loop because in the event of a + * badly timed (ha!) system clock adjustment, we may + * get PTM errors from the PCI root, but these errors + * are transitory. Repeating the process returns valid + * data eventually. + */ + + /* To "manually" start the PTM cycle we need to clear and + * then set again the TRIG bit. + */ + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + ctrl |= IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + + /* The cycle only starts "for real" when software notifies + * that it has read the registers, this is done by setting + * VALID bit. + */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + + err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat, + stat, IGC_PTM_STAT_SLEEP, + IGC_PTM_STAT_TIMEOUT); + if (err < 0) { + netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); + return err; + } + + if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID) + break; + + if (stat & ~IGC_PTM_STAT_VALID) { + /* An error occurred, log it. */ + igc_ptm_log_error(adapter, stat); + /* The STAT register is write-1-to-clear (W1C), + * so write the previous error status to clear it. + */ + wr32(IGC_PTM_STAT, stat); + continue; + } + } while (--count); + + if (!count) { + netdev_err(adapter->netdev, "Exceeded number of tries for PTM cycle\n"); + return -ETIMEDOUT; + } + + t1 = ktime_set(rd32(IGC_PTM_T1_TIM0_H), rd32(IGC_PTM_T1_TIM0_L)); + + t2_curr_l = rd32(IGC_PTM_CURR_T2_L); + t2_curr_h = rd32(IGC_PTM_CURR_T2_H); + + /* FIXME: When the register that tells the endianness of the + * PTM registers are implemented, check them here and add the + * appropriate conversion. 
+ */ + t2_curr_h = swab32(t2_curr_h); + + t2_curr = ((s64)t2_curr_h << 32 | t2_curr_l); + + *device = t1; + *system = igc_device_tstamp_to_system(t2_curr); + + return 0; +} + +static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *cts) +{ + struct igc_adapter *adapter = container_of(ptp, struct igc_adapter, + ptp_caps); + + return get_device_system_crosststamp(igc_phc_get_syncdevicetime, + adapter, &adapter->snapshot, cts); +} + +/** + * igc_ptp_init - Initialize PTP functionality + * @adapter: Board private structure + * + * This function is called at device probe to initialize the PTP + * functionality. + */ +void igc_ptp_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + int i; + + switch (hw->mac.type) { + case igc_i225: + for (i = 0; i < IGC_N_SDP; i++) { + struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225; + adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225; + adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225; + adapter->ptp_caps.settime64 = igc_ptp_settime_i225; + adapter->ptp_caps.enable = igc_ptp_feature_enable_i225; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS; + adapter->ptp_caps.n_per_out = IGC_N_PEROUT; + adapter->ptp_caps.n_pins = IGC_N_SDP; + adapter->ptp_caps.verify = igc_ptp_verify_pin; + + if (!igc_is_crosststamp_supported(adapter)) + break; + + adapter->ptp_caps.getcrosststamp = igc_ptp_getcrosststamp; + break; + default: + adapter->ptp_clock = NULL; + return; + } + + spin_lock_init(&adapter->ptp_tx_lock); + spin_lock_init(&adapter->tmreg_lock); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + adapter->prev_ptp_time = ktime_to_timespec64(ktime_get_real()); + adapter->ptp_reset_start = ktime_get(); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + netdev_err(netdev, "ptp_clock_register failed\n"); + } else if (adapter->ptp_clock) { + netdev_info(netdev, "PHC added\n"); + adapter->ptp_flags |= IGC_PTP_ENABLED; + } +} + +static void igc_ptp_time_save(struct igc_adapter *adapter) +{ + igc_ptp_read(adapter, &adapter->prev_ptp_time); + adapter->ptp_reset_start = ktime_get(); +} + +static void igc_ptp_time_restore(struct igc_adapter *adapter) +{ + struct timespec64 ts = adapter->prev_ptp_time; + ktime_t delta; + + delta = ktime_sub(ktime_get(), adapter->ptp_reset_start); + + timespec64_add_ns(&ts, ktime_to_ns(delta)); + + igc_ptp_write_i225(adapter, &ts); +} + +static void igc_ptm_stop(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_EN; + + wr32(IGC_PTM_CTRL, ctrl); +} + +/** + * igc_ptp_suspend - Disable PTP work items and prepare for suspend + * @adapter: Board private structure + * + * This function stops the overflow check work and PTP Tx timestamp work, and + * will prepare the device for OS suspend. 
+ */ +void igc_ptp_suspend(struct igc_adapter *adapter) +{ + if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) + return; + + igc_ptp_clear_tx_tstamp(adapter); + + if (pci_device_is_present(adapter->pdev)) { + igc_ptp_time_save(adapter); + igc_ptm_stop(adapter); + } +} + +/** + * igc_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. + **/ +void igc_ptp_stop(struct igc_adapter *adapter) +{ + igc_ptp_suspend(adapter); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + netdev_info(adapter->netdev, "PHC removed\n"); + adapter->ptp_flags &= ~IGC_PTP_ENABLED; + } +} + +/** + * igc_ptp_reset - Re-enable the adapter for PTP following a reset. + * @adapter: Board private structure. + * + * This function handles the reset work required to re-enable the PTP device. + **/ +void igc_ptp_reset(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 cycle_ctrl, ctrl; + unsigned long flags; + u32 timadj; + + /* reset the tstamp_config */ + igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + switch (adapter->hw.mac.type) { + case igc_i225: + timadj = rd32(IGC_TIMADJ); + timadj |= IGC_TIMADJ_ADJUST_METH; + wr32(IGC_TIMADJ, timadj); + + wr32(IGC_TSAUXC, 0x0); + wr32(IGC_TSSDP, 0x0); + wr32(IGC_TSIM, + IGC_TSICR_INTERRUPTS | + (adapter->pps_sys_wrap_on ? IGC_TSICR_SYS_WRAP : 0)); + wr32(IGC_IMS, IGC_IMS_TS); + + if (!igc_is_crosststamp_supported(adapter)) + break; + + wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT); + wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT); + + cycle_ctrl = IGC_PTM_CYCLE_CTRL_CYC_TIME(IGC_PTM_CYC_TIME_DEFAULT); + + wr32(IGC_PTM_CYCLE_CTRL, cycle_ctrl); + + ctrl = IGC_PTM_CTRL_EN | + IGC_PTM_CTRL_START_NOW | + IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) | + IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) | + IGC_PTM_CTRL_TRIG; + + wr32(IGC_PTM_CTRL, ctrl); + + /* Force the first cycle to run. */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + + break; + default: + /* No work to do. */ + goto out; + } + + /* Re-initialize the timer. */ + if (hw->mac.type == igc_i225) { + igc_ptp_time_restore(adapter); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } +out: + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + wrfl(); +} diff --git a/devices/igc/igc_ptp-6.4-orig.c b/devices/igc/igc_ptp-6.4-orig.c new file mode 100644 index 00000000..f0b979a7 --- /dev/null +++ b/devices/igc/igc_ptp-6.4-orig.c @@ -0,0 +1,1201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Intel Corporation */ + +#include "igc.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#define INCVALUE_MASK 0x7fffffff +#define ISGN 0x80000000 + +#define IGC_PTP_TX_TIMEOUT (HZ * 15) + +#define IGC_PTM_STAT_SLEEP 2 +#define IGC_PTM_STAT_TIMEOUT 100 + +/* SYSTIM read access for I225 */ +void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts) +{ + struct igc_hw *hw = &adapter->hw; + u32 sec, nsec; + + /* The timestamp is latched when SYSTIML is read. 
*/ + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} + +static void igc_ptp_write_i225(struct igc_adapter *adapter, + const struct timespec64 *ts) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_SYSTIML, ts->tv_nsec); + wr32(IGC_SYSTIMH, ts->tv_sec); +} + +static int igc_ptp_adjfine_i225(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct igc_hw *hw = &igc->hw; + int neg_adj = 0; + u64 rate; + u32 inca; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + rate = scaled_ppm; + rate <<= 14; + rate = div_u64(rate, 78125); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + wr32(IGC_TIMINCA, inca); + + return 0; +} + +static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct timespec64 now, then = ns_to_timespec64(delta); + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + igc_ptp_read(igc, &now); + now = timespec64_add(now, then); + igc_ptp_write_i225(igc, (const struct timespec64 *)&now); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + ptp_read_system_prets(sts); + ts->tv_nsec = rd32(IGC_SYSTIML); + ts->tv_sec = rd32(IGC_SYSTIMH); + ptp_read_system_postts(sts); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static int igc_ptp_settime_i225(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + igc_ptp_write_i225(igc, ts); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static void igc_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) +{ + u32 *ptr = pin < 2 ? 
ctrl : ctrl_ext; + static const u32 mask[IGC_N_SDP] = { + IGC_CTRL_SDP0_DIR, + IGC_CTRL_SDP1_DIR, + IGC_CTRL_EXT_SDP2_DIR, + IGC_CTRL_EXT_SDP3_DIR, + }; + + if (input) + *ptr &= ~mask[pin]; + else + *ptr |= mask[pin]; +} + +static void igc_pin_perout(struct igc_adapter *igc, int chan, int pin, int freq) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + static const u32 igc_ts_sdp_sel_tt0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT0, IGC_TS_SDP1_SEL_TT0, + IGC_TS_SDP2_SEL_TT0, IGC_TS_SDP3_SEL_TT0, + }; + static const u32 igc_ts_sdp_sel_tt1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT1, IGC_TS_SDP1_SEL_TT1, + IGC_TS_SDP2_SEL_TT1, IGC_TS_SDP3_SEL_TT1, + }; + static const u32 igc_ts_sdp_sel_fc0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC0, IGC_TS_SDP1_SEL_FC0, + IGC_TS_SDP2_SEL_FC0, IGC_TS_SDP3_SEL_FC0, + }; + static const u32 igc_ts_sdp_sel_fc1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + static const u32 igc_ts_sdp_sel_clr[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 0, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an input. */ + if ((tssdp & IGC_AUX0_SEL_SDP3) == igc_aux0_sel_sdp[pin]) + tssdp &= ~IGC_AUX0_TS_SDP_EN; + + if ((tssdp & IGC_AUX1_SEL_SDP3) == igc_aux1_sel_sdp[pin]) + tssdp &= ~IGC_AUX1_TS_SDP_EN; + + tssdp &= ~igc_ts_sdp_sel_clr[pin]; + if (freq) { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_fc1[pin]; + else + tssdp |= igc_ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_tt1[pin]; + else + tssdp |= igc_ts_sdp_sel_tt0[pin]; + } + tssdp |= igc_ts_sdp_en[pin]; + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static void igc_pin_extts(struct igc_adapter *igc, int chan, int pin) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 1, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an output. 
*/ + tssdp &= ~igc_ts_sdp_en[pin]; + + if (chan == 1) { + tssdp &= ~IGC_AUX1_SEL_SDP3; + tssdp |= igc_aux1_sel_sdp[pin] | IGC_AUX1_TS_SDP_EN; + } else { + tssdp &= ~IGC_AUX0_SEL_SDP3; + tssdp |= igc_aux0_sel_sdp[pin] | IGC_AUX0_TS_SDP_EN; + } + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct igc_adapter *igc = + container_of(ptp, struct igc_adapter, ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + struct timespec64 ts; + int use_freq = 0, pin = -1; + u32 tsim, tsauxc, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests failing to enable both edges. */ + if ((rq->extts.flags & PTP_STRICT_FLAGS) && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = IGC_TSAUXC_EN_TS1; + tsim_mask = IGC_TSICR_AUTT1; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TS0; + tsim_mask = IGC_TSICR_AUTT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (on) { + igc_pin_extts(igc, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && (ns <= 70000000LL || ns == 125000000LL || + ns == 250000000LL || ns == 500000000LL)) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; + } + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK1 | IGC_TSAUXC_ST1; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT1; + tsim_mask = IGC_TSICR_TT1; + } + trgttiml = IGC_TRGTTIML1; + trgttimh = IGC_TRGTTIMH1; + freqout = IGC_FREQOUT1; + } else { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK0 | IGC_TSAUXC_ST0; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT0; + tsim_mask = IGC_TSICR_TT0; + } + trgttiml = IGC_TRGTTIML0; + trgttimh = IGC_TRGTTIMH0; + freqout = IGC_FREQOUT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1 | + IGC_TSAUXC_ST1); + tsim &= ~IGC_TSICR_TT1; + } else { + tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0 | + IGC_TSAUXC_ST0); + tsim &= ~IGC_TSICR_TT0; + } + if (on) { + struct timespec64 safe_start; + int i = rq->perout.index; + + igc_pin_perout(igc, i, pin, use_freq); + igc_ptp_read(igc, &safe_start); + + /* PPS output start time is triggered by Target time(TT) + * register. 
Programming any past time value into TT + * register will cause PPS to never start. Need to make + * sure we program the TT register a time ahead in + * future. There isn't a stringent need to fire PPS out + * right away. Adding +2 seconds should take care of + * corner cases. Let's say if the SYSTIML is close to + * wrap up and the timer keeps ticking as we program the + * register, adding +2seconds is safe bet. + */ + safe_start.tv_sec += 2; + + if (rq->perout.start.sec < safe_start.tv_sec) + igc->perout[i].start.tv_sec = safe_start.tv_sec; + else + igc->perout[i].start.tv_sec = rq->perout.start.sec; + igc->perout[i].start.tv_nsec = rq->perout.start.nsec; + igc->perout[i].period.tv_sec = ts.tv_sec; + igc->perout[i].period.tv_nsec = ts.tv_nsec; + wr32(trgttimh, (u32)igc->perout[i].start.tv_sec); + /* For now, always select timer 0 as source. */ + wr32(trgttiml, (u32)(igc->perout[i].start.tv_nsec | + IGC_TT_IO_TIMER_SEL_SYSTIM0)); + if (use_freq) + wr32(freqout, ns); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsim = rd32(IGC_TSIM); + if (on) + tsim |= IGC_TSICR_SYS_WRAP; + else + tsim &= ~IGC_TSICR_SYS_WRAP; + igc->pps_sys_wrap_on = on; + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +/** + * igc_ptp_systim_to_hwtstamp - convert system time value to HW timestamp + * @adapter: board private structure + * @hwtstamps: timestamp structure to update + * @systim: unsigned 64bit system time value + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions. + * + * Returns 0 on success. + **/ +static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + switch (adapter->hw.mac.type) { + case igc_i225: + memset(hwtstamps, 0, sizeof(*hwtstamps)); + /* Upper 32 bits contain s, lower 32 bits contain ns. */ + hwtstamps->hwtstamp = ktime_set(systim >> 32, + systim & 0xFFFFFFFF); + break; + default: + return -EINVAL; + } + return 0; +} + +/** + * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer + * @adapter: Pointer to adapter the packet buffer belongs to + * @buf: Pointer to packet buffer + * + * This function retrieves the timestamp saved in the beginning of packet + * buffer. While two timestamps are available, one in timer0 reference and the + * other in timer1 reference, this function considers only the timestamp in + * timer0 reference. + * + * Returns timestamp value. + */ +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf) +{ + ktime_t timestamp; + u32 secs, nsecs; + int adjust; + + /* Timestamps are saved in little endian at the beginning of the packet + * buffer following the layout: + * + * DWORD: | 0 | 1 | 2 | 3 | + * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH | + * + * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds + * part of the timestamp. 
+ */ + nsecs = le32_to_cpu(buf[2]); + secs = le32_to_cpu(buf[3]); + + timestamp = ktime_set(secs, nsecs); + + /* Adjust timestamp for the RX latency based on link speed */ + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_RX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_RX_LATENCY_2500; + break; + default: + adjust = 0; + netdev_warn_once(adapter->netdev, "Imprecise timestamp\n"); + break; + } + + return ktime_sub_ns(timestamp, adjust); +} + +static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + int i; + + wr32(IGC_TSYNCRXCTL, 0); + + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + val &= ~IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + + val = rd32(IGC_RXPBS); + val &= ~IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); +} + +static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + int i; + + val = rd32(IGC_RXPBS); + val |= IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); + + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + /* FIXME: For now, only support retrieving RX timestamps from + * timer 0. + */ + val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) | + IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + + val = IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_ALL | + IGC_TSYNCRXCTL_RXSYNSIG; + wr32(IGC_TSYNCRXCTL, val); +} + +static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); +} + +static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + /* Clear the flags first to avoid new packets to be enqueued + * for TX timestamping. + */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + clear_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags); + } + + /* Now we can clean the pending TX timestamp requests. */ + igc_ptp_clear_tx_tstamp(adapter); + + wr32(IGC_TSYNCTXCTL, 0); +} + +static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + wr32(IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED | IGC_TSYNCTXCTL_TXSYNSIG); + + /* Read TXSTMP registers to discard any timestamp previously stored. */ + rd32(IGC_TXSTMPL); + rd32(IGC_TXSTMPH); + + /* The hardware is ready to accept TX timestamp requests, + * notify the transmit path. + */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + set_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags); + } + +} + +/** + * igc_ptp_set_timestamp_mode - setup hardware for timestamping + * @adapter: networking device structure + * @config: hwtstamp configuration + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, + struct hwtstamp_config *config) +{ + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + igc_ptp_disable_tx_timestamp(adapter); + break; + case HWTSTAMP_TX_ON: + igc_ptp_enable_tx_timestamp(adapter); + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + igc_ptp_disable_rx_timestamp(adapter); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_NTP_ALL: + case HWTSTAMP_FILTER_ALL: + igc_ptp_enable_rx_timestamp(adapter); + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + return 0; +} + +/* Requires adapter->ptp_tx_lock held by caller. */ +static void igc_ptp_tx_timeout(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + adapter->tx_hwtstamp_timeouts++; + /* Clear the tx valid bit in TSYNCTXCTL register to enable interrupt. */ + rd32(IGC_TXSTMPH); + netdev_warn(adapter->netdev, "Tx timestamp timeout\n"); +} + +void igc_ptp_tx_hang(struct igc_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + + if (!adapter->ptp_tx_skb) + goto unlock; + + if (time_is_after_jiffies(adapter->ptp_tx_start + IGC_PTP_TX_TIMEOUT)) + goto unlock; + + igc_ptp_tx_timeout(adapter); + +unlock: + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); +} + +/** + * igc_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: Board private structure + * + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we only + * allow only one such packet into the queue. + * + * Context: Expects adapter->ptp_tx_lock to be held by caller. + */ +static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) +{ + struct sk_buff *skb = adapter->ptp_tx_skb; + struct skb_shared_hwtstamps shhwtstamps; + struct igc_hw *hw = &adapter->hw; + u32 tsynctxctl; + int adjust = 0; + u64 regval; + + if (WARN_ON_ONCE(!skb)) + return; + + tsynctxctl = rd32(IGC_TSYNCTXCTL); + tsynctxctl &= IGC_TSYNCTXCTL_TXTT_0; + if (tsynctxctl) { + regval = rd32(IGC_TXSTMPL); + regval |= (u64)rd32(IGC_TXSTMPH) << 32; + } else { + /* There's a bug in the hardware that could cause + * missing interrupts for TX timestamping. The issue + * is that for new interrupts to be triggered, the + * IGC_TXSTMPH_0 register must be read. + * + * To avoid discarding a valid timestamp that just + * happened at the "wrong" time, we need to confirm + * that there was no timestamp captured, we do that by + * assuming that no two timestamps in sequence have + * the same nanosecond value. + * + * So, we read the "low" register, read the "high" + * register (to latch a new timestamp) and read the + * "low" register again, if "old" and "new" versions + * of the "low" register are different, a valid + * timestamp was captured, we can read the "high" + * register again. 
+ */ + u32 txstmpl_old, txstmpl_new; + + txstmpl_old = rd32(IGC_TXSTMPL); + rd32(IGC_TXSTMPH); + txstmpl_new = rd32(IGC_TXSTMPL); + + if (txstmpl_old == txstmpl_new) + return; + + regval = txstmpl_new; + regval |= (u64)rd32(IGC_TXSTMPH) << 32; + } + if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval)) + return; + + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_TX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_TX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_TX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_TX_LATENCY_2500; + break; + } + + shhwtstamps.hwtstamp = + ktime_add_ns(shhwtstamps.hwtstamp, adjust); + + adapter->ptp_tx_skb = NULL; + + /* Notify the stack and free the skb after we've unlocked */ + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); +} + +/** + * igc_ptp_tx_tstamp_event + * @adapter: board private structure + * + * Called when a TX timestamp interrupt happens to retrieve the + * timestamp and send it up to the socket. + */ +void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + + if (!adapter->ptp_tx_skb) + goto unlock; + + igc_ptp_tx_hwtstamp(adapter); + +unlock: + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); +} + +/** + * igc_ptp_set_ts_config - set hardware time stamping config + * @netdev: network interface device structure + * @ifr: interface request data + * + **/ +int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = igc_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * igc_ptp_get_ts_config - get hardware time stamping config + * @netdev: network interface device structure + * @ifr: interface request data + * + * Get the hwtstamp_config settings to return to the user. Rather than attempt + * to deconstruct the settings from the registers, just return a shadow copy + * of the last known settings. + **/ +int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/* The two conditions below must be met for cross timestamping via + * PCIe PTM: + * + * 1. We have an way to convert the timestamps in the PTM messages + * to something related to the system clocks (right now, only + * X86 systems with support for the Always Running Timer allow that); + * + * 2. We have PTM enabled in the path from the device to the PCIe root port. + */ +static bool igc_is_crosststamp_supported(struct igc_adapter *adapter) +{ + if (!IS_ENABLED(CONFIG_X86_TSC)) + return false; + + /* FIXME: it was noticed that enabling support for PCIe PTM in + * some i225-V models could cause lockups when bringing the + * interface up/down. There should be no downsides to + * disabling crosstimestamping support for i225-V, as it + * doesn't have any PTP support. That way we gain some time + * while root causing the issue. 
+ */ + if (adapter->pdev->device == IGC_DEV_ID_I225_V) + return false; + + return pcie_ptm_enabled(adapter->pdev); +} + +static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp) +{ +#if IS_ENABLED(CONFIG_X86_TSC) && !defined(CONFIG_UML) + return convert_art_ns_to_tsc(tstamp); +#else + return (struct system_counterval_t) { }; +#endif +} + +static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat) +{ + struct net_device *netdev = adapter->netdev; + + switch (ptm_stat) { + case IGC_PTM_STAT_RET_ERR: + netdev_err(netdev, "PTM Error: Root port timeout\n"); + break; + case IGC_PTM_STAT_BAD_PTM_RES: + netdev_err(netdev, "PTM Error: Bad response, PTM Response Data expected\n"); + break; + case IGC_PTM_STAT_T4M1_OVFL: + netdev_err(netdev, "PTM Error: T4 minus T1 overflow\n"); + break; + case IGC_PTM_STAT_ADJUST_1ST: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during first PTM cycle\n"); + break; + case IGC_PTM_STAT_ADJUST_CYC: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during non-first PTM cycle\n"); + break; + default: + netdev_err(netdev, "PTM Error: Unknown error (%#x)\n", ptm_stat); + break; + } +} + +static int igc_phc_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + u32 stat, t2_curr_h, t2_curr_l, ctrl; + struct igc_adapter *adapter = ctx; + struct igc_hw *hw = &adapter->hw; + int err, count = 100; + ktime_t t1, t2_curr; + + /* Get a snapshot of system clocks to use as historic value. */ + ktime_get_snapshot(&adapter->snapshot); + + do { + /* Doing this in a loop because in the event of a + * badly timed (ha!) system clock adjustment, we may + * get PTM errors from the PCI root, but these errors + * are transitory. Repeating the process returns valid + * data eventually. + */ + + /* To "manually" start the PTM cycle we need to clear and + * then set again the TRIG bit. + */ + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + ctrl |= IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + + /* The cycle only starts "for real" when software notifies + * that it has read the registers, this is done by setting + * VALID bit. + */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + + err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat, + stat, IGC_PTM_STAT_SLEEP, + IGC_PTM_STAT_TIMEOUT); + if (err < 0) { + netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); + return err; + } + + if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID) + break; + + if (stat & ~IGC_PTM_STAT_VALID) { + /* An error occurred, log it. */ + igc_ptm_log_error(adapter, stat); + /* The STAT register is write-1-to-clear (W1C), + * so write the previous error status to clear it. + */ + wr32(IGC_PTM_STAT, stat); + continue; + } + } while (--count); + + if (!count) { + netdev_err(adapter->netdev, "Exceeded number of tries for PTM cycle\n"); + return -ETIMEDOUT; + } + + t1 = ktime_set(rd32(IGC_PTM_T1_TIM0_H), rd32(IGC_PTM_T1_TIM0_L)); + + t2_curr_l = rd32(IGC_PTM_CURR_T2_L); + t2_curr_h = rd32(IGC_PTM_CURR_T2_H); + + /* FIXME: When the register that tells the endianness of the + * PTM registers are implemented, check them here and add the + * appropriate conversion. 
+ */ + t2_curr_h = swab32(t2_curr_h); + + t2_curr = ((s64)t2_curr_h << 32 | t2_curr_l); + + *device = t1; + *system = igc_device_tstamp_to_system(t2_curr); + + return 0; +} + +static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *cts) +{ + struct igc_adapter *adapter = container_of(ptp, struct igc_adapter, + ptp_caps); + + return get_device_system_crosststamp(igc_phc_get_syncdevicetime, + adapter, &adapter->snapshot, cts); +} + +/** + * igc_ptp_init - Initialize PTP functionality + * @adapter: Board private structure + * + * This function is called at device probe to initialize the PTP + * functionality. + */ +void igc_ptp_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_hw *hw = &adapter->hw; + int i; + + switch (hw->mac.type) { + case igc_i225: + for (i = 0; i < IGC_N_SDP; i++) { + struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225; + adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225; + adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225; + adapter->ptp_caps.settime64 = igc_ptp_settime_i225; + adapter->ptp_caps.enable = igc_ptp_feature_enable_i225; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS; + adapter->ptp_caps.n_per_out = IGC_N_PEROUT; + adapter->ptp_caps.n_pins = IGC_N_SDP; + adapter->ptp_caps.verify = igc_ptp_verify_pin; + + if (!igc_is_crosststamp_supported(adapter)) + break; + + adapter->ptp_caps.getcrosststamp = igc_ptp_getcrosststamp; + break; + default: + adapter->ptp_clock = NULL; + return; + } + + spin_lock_init(&adapter->ptp_tx_lock); + spin_lock_init(&adapter->tmreg_lock); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + adapter->prev_ptp_time = ktime_to_timespec64(ktime_get_real()); + adapter->ptp_reset_start = ktime_get(); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + netdev_err(netdev, "ptp_clock_register failed\n"); + } else if (adapter->ptp_clock) { + netdev_info(netdev, "PHC added\n"); + adapter->ptp_flags |= IGC_PTP_ENABLED; + } +} + +static void igc_ptp_time_save(struct igc_adapter *adapter) +{ + igc_ptp_read(adapter, &adapter->prev_ptp_time); + adapter->ptp_reset_start = ktime_get(); +} + +static void igc_ptp_time_restore(struct igc_adapter *adapter) +{ + struct timespec64 ts = adapter->prev_ptp_time; + ktime_t delta; + + delta = ktime_sub(ktime_get(), adapter->ptp_reset_start); + + timespec64_add_ns(&ts, ktime_to_ns(delta)); + + igc_ptp_write_i225(adapter, &ts); +} + +static void igc_ptm_stop(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_EN; + + wr32(IGC_PTM_CTRL, ctrl); +} + +/** + * igc_ptp_suspend - Disable PTP work items and prepare for suspend + * @adapter: Board private structure + * + * This function stops the overflow check work and PTP Tx timestamp work, and + * will prepare the device for OS suspend. 
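+ * Pending TX timestamp skbs are released; if the device is still
+ * present, the current PHC time is saved and PTM is stopped.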
+ */ +void igc_ptp_suspend(struct igc_adapter *adapter) +{ + if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) + return; + + igc_ptp_clear_tx_tstamp(adapter); + + if (pci_device_is_present(adapter->pdev)) { + igc_ptp_time_save(adapter); + igc_ptm_stop(adapter); + } +} + +/** + * igc_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. + **/ +void igc_ptp_stop(struct igc_adapter *adapter) +{ + igc_ptp_suspend(adapter); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + netdev_info(adapter->netdev, "PHC removed\n"); + adapter->ptp_flags &= ~IGC_PTP_ENABLED; + } +} + +/** + * igc_ptp_reset - Re-enable the adapter for PTP following a reset. + * @adapter: Board private structure. + * + * This function handles the reset work required to re-enable the PTP device. + **/ +void igc_ptp_reset(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 cycle_ctrl, ctrl; + unsigned long flags; + u32 timadj; + + /* reset the tstamp_config */ + igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + switch (adapter->hw.mac.type) { + case igc_i225: + timadj = rd32(IGC_TIMADJ); + timadj |= IGC_TIMADJ_ADJUST_METH; + wr32(IGC_TIMADJ, timadj); + + wr32(IGC_TSAUXC, 0x0); + wr32(IGC_TSSDP, 0x0); + wr32(IGC_TSIM, + IGC_TSICR_INTERRUPTS | + (adapter->pps_sys_wrap_on ? IGC_TSICR_SYS_WRAP : 0)); + wr32(IGC_IMS, IGC_IMS_TS); + + if (!igc_is_crosststamp_supported(adapter)) + break; + + wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT); + wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT); + + cycle_ctrl = IGC_PTM_CYCLE_CTRL_CYC_TIME(IGC_PTM_CYC_TIME_DEFAULT); + + wr32(IGC_PTM_CYCLE_CTRL, cycle_ctrl); + + ctrl = IGC_PTM_CTRL_EN | + IGC_PTM_CTRL_START_NOW | + IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) | + IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) | + IGC_PTM_CTRL_TRIG; + + wr32(IGC_PTM_CTRL, ctrl); + + /* Force the first cycle to run. */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + + break; + default: + /* No work to do. */ + goto out; + } + + /* Re-initialize the timer. */ + if (hw->mac.type == igc_i225) { + igc_ptp_time_restore(adapter); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } +out: + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + wrfl(); +} diff --git a/devices/igc/igc_ptp-6.6-ethercat.c b/devices/igc/igc_ptp-6.6-ethercat.c new file mode 100644 index 00000000..e2cee879 --- /dev/null +++ b/devices/igc/igc_ptp-6.6-ethercat.c @@ -0,0 +1,1265 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Intel Corporation */ + +#include "igc-6.6-ethercat.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#define INCVALUE_MASK 0x7fffffff +#define ISGN 0x80000000 + +#define IGC_PTP_TX_TIMEOUT (HZ * 15) + +#define IGC_PTM_STAT_SLEEP 2 +#define IGC_PTM_STAT_TIMEOUT 100 + +/* SYSTIM read access for I225 */ +void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts) +{ + struct igc_hw *hw = &adapter->hw; + u32 sec, nsec; + + /* The timestamp is latched when SYSTIML is read. 
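+ * Reading SYSTIML before SYSTIMH therefore returns a coherent pair.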
*/ + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} + +static void igc_ptp_write_i225(struct igc_adapter *adapter, + const struct timespec64 *ts) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_SYSTIML, ts->tv_nsec); + wr32(IGC_SYSTIMH, ts->tv_sec); +} + +static int igc_ptp_adjfine_i225(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct igc_hw *hw = &igc->hw; + int neg_adj = 0; + u64 rate; + u32 inca; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + rate = scaled_ppm; + rate <<= 14; + rate = div_u64(rate, 78125); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + wr32(IGC_TIMINCA, inca); + + return 0; +} + +static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct timespec64 now, then = ns_to_timespec64(delta); + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + igc_ptp_read(igc, &now); + now = timespec64_add(now, then); + igc_ptp_write_i225(igc, (const struct timespec64 *)&now); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + ptp_read_system_prets(sts); + ts->tv_nsec = rd32(IGC_SYSTIML); + ts->tv_sec = rd32(IGC_SYSTIMH); + ptp_read_system_postts(sts); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static int igc_ptp_settime_i225(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + igc_ptp_write_i225(igc, ts); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static void igc_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) +{ + u32 *ptr = pin < 2 ? 
ctrl : ctrl_ext; + static const u32 mask[IGC_N_SDP] = { + IGC_CTRL_SDP0_DIR, + IGC_CTRL_SDP1_DIR, + IGC_CTRL_EXT_SDP2_DIR, + IGC_CTRL_EXT_SDP3_DIR, + }; + + if (input) + *ptr &= ~mask[pin]; + else + *ptr |= mask[pin]; +} + +static void igc_pin_perout(struct igc_adapter *igc, int chan, int pin, int freq) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + static const u32 igc_ts_sdp_sel_tt0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT0, IGC_TS_SDP1_SEL_TT0, + IGC_TS_SDP2_SEL_TT0, IGC_TS_SDP3_SEL_TT0, + }; + static const u32 igc_ts_sdp_sel_tt1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT1, IGC_TS_SDP1_SEL_TT1, + IGC_TS_SDP2_SEL_TT1, IGC_TS_SDP3_SEL_TT1, + }; + static const u32 igc_ts_sdp_sel_fc0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC0, IGC_TS_SDP1_SEL_FC0, + IGC_TS_SDP2_SEL_FC0, IGC_TS_SDP3_SEL_FC0, + }; + static const u32 igc_ts_sdp_sel_fc1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + static const u32 igc_ts_sdp_sel_clr[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 0, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an input. */ + if ((tssdp & IGC_AUX0_SEL_SDP3) == igc_aux0_sel_sdp[pin]) + tssdp &= ~IGC_AUX0_TS_SDP_EN; + + if ((tssdp & IGC_AUX1_SEL_SDP3) == igc_aux1_sel_sdp[pin]) + tssdp &= ~IGC_AUX1_TS_SDP_EN; + + tssdp &= ~igc_ts_sdp_sel_clr[pin]; + if (freq) { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_fc1[pin]; + else + tssdp |= igc_ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_tt1[pin]; + else + tssdp |= igc_ts_sdp_sel_tt0[pin]; + } + tssdp |= igc_ts_sdp_en[pin]; + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static void igc_pin_extts(struct igc_adapter *igc, int chan, int pin) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 1, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an output. 
*/ + tssdp &= ~igc_ts_sdp_en[pin]; + + if (chan == 1) { + tssdp &= ~IGC_AUX1_SEL_SDP3; + tssdp |= igc_aux1_sel_sdp[pin] | IGC_AUX1_TS_SDP_EN; + } else { + tssdp &= ~IGC_AUX0_SEL_SDP3; + tssdp |= igc_aux0_sel_sdp[pin] | IGC_AUX0_TS_SDP_EN; + } + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct igc_adapter *igc = + container_of(ptp, struct igc_adapter, ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + struct timespec64 ts; + int use_freq = 0, pin = -1; + u32 tsim, tsauxc, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests failing to enable both edges. */ + if ((rq->extts.flags & PTP_STRICT_FLAGS) && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = IGC_TSAUXC_EN_TS1; + tsim_mask = IGC_TSICR_AUTT1; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TS0; + tsim_mask = IGC_TSICR_AUTT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (on) { + igc_pin_extts(igc, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && (ns <= 70000000LL || ns == 125000000LL || + ns == 250000000LL || ns == 500000000LL)) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; + } + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK1 | IGC_TSAUXC_ST1; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT1; + tsim_mask = IGC_TSICR_TT1; + } + trgttiml = IGC_TRGTTIML1; + trgttimh = IGC_TRGTTIMH1; + freqout = IGC_FREQOUT1; + } else { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK0 | IGC_TSAUXC_ST0; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT0; + tsim_mask = IGC_TSICR_TT0; + } + trgttiml = IGC_TRGTTIML0; + trgttimh = IGC_TRGTTIMH0; + freqout = IGC_FREQOUT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1 | + IGC_TSAUXC_ST1); + tsim &= ~IGC_TSICR_TT1; + } else { + tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0 | + IGC_TSAUXC_ST0); + tsim &= ~IGC_TSICR_TT0; + } + if (on) { + struct timespec64 safe_start; + int i = rq->perout.index; + + igc_pin_perout(igc, i, pin, use_freq); + igc_ptp_read(igc, &safe_start); + + /* PPS output start time is triggered by Target time(TT) + * register. 
Programming any past time value into TT + * register will cause PPS to never start. Need to make + * sure we program the TT register a time ahead in + * future. There isn't a stringent need to fire PPS out + * right away. Adding +2 seconds should take care of + * corner cases. Let's say if the SYSTIML is close to + * wrap up and the timer keeps ticking as we program the + * register, adding +2seconds is safe bet. + */ + safe_start.tv_sec += 2; + + if (rq->perout.start.sec < safe_start.tv_sec) + igc->perout[i].start.tv_sec = safe_start.tv_sec; + else + igc->perout[i].start.tv_sec = rq->perout.start.sec; + igc->perout[i].start.tv_nsec = rq->perout.start.nsec; + igc->perout[i].period.tv_sec = ts.tv_sec; + igc->perout[i].period.tv_nsec = ts.tv_nsec; + wr32(trgttimh, (u32)igc->perout[i].start.tv_sec); + /* For now, always select timer 0 as source. */ + wr32(trgttiml, (u32)(igc->perout[i].start.tv_nsec | + IGC_TT_IO_TIMER_SEL_SYSTIM0)); + if (use_freq) + wr32(freqout, ns); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsim = rd32(IGC_TSIM); + if (on) + tsim |= IGC_TSICR_SYS_WRAP; + else + tsim &= ~IGC_TSICR_SYS_WRAP; + igc->pps_sys_wrap_on = on; + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +/** + * igc_ptp_systim_to_hwtstamp - convert system time value to HW timestamp + * @adapter: board private structure + * @hwtstamps: timestamp structure to update + * @systim: unsigned 64bit system time value + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions. + * + * Returns 0 on success. + **/ +static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + switch (adapter->hw.mac.type) { + case igc_i225: + memset(hwtstamps, 0, sizeof(*hwtstamps)); + /* Upper 32 bits contain s, lower 32 bits contain ns. */ + hwtstamps->hwtstamp = ktime_set(systim >> 32, + systim & 0xFFFFFFFF); + break; + default: + return -EINVAL; + } + return 0; +} + +/** + * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer + * @adapter: Pointer to adapter the packet buffer belongs to + * @buf: Pointer to packet buffer + * + * This function retrieves the timestamp saved in the beginning of packet + * buffer. While two timestamps are available, one in timer0 reference and the + * other in timer1 reference, this function considers only the timestamp in + * timer0 reference. + * + * Returns timestamp value. + */ +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf) +{ + ktime_t timestamp; + u32 secs, nsecs; + int adjust; + + /* Timestamps are saved in little endian at the beginning of the packet + * buffer following the layout: + * + * DWORD: | 0 | 1 | 2 | 3 | + * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH | + * + * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds + * part of the timestamp. 
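+ * Hence buf[2] and buf[3] below carry the Timer0 nanoseconds and
+ * seconds values, respectively.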
+ */ + nsecs = le32_to_cpu(buf[2]); + secs = le32_to_cpu(buf[3]); + + timestamp = ktime_set(secs, nsecs); + + /* Adjust timestamp for the RX latency based on link speed */ + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_RX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_RX_LATENCY_2500; + break; + default: + adjust = 0; + netdev_warn_once(adapter->netdev, "Imprecise timestamp\n"); + break; + } + + return ktime_sub_ns(timestamp, adjust); +} + +static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + int i; + + wr32(IGC_TSYNCRXCTL, 0); + + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + val &= ~IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + + val = rd32(IGC_RXPBS); + val &= ~IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); +} + +static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + int i; + + val = rd32(IGC_RXPBS); + val |= IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); + + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + /* FIXME: For now, only support retrieving RX timestamps from + * timer 0. + */ + val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) | + IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + + val = IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_ALL | + IGC_TSYNCRXCTL_RXSYNSIG; + wr32(IGC_TSYNCRXCTL, val); +} + +static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + + for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { + struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; + + dev_kfree_skb_any(tstamp->skb); + tstamp->skb = NULL; + } + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); +} + +static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + /* Clear the flags first to avoid new packets to be enqueued + * for TX timestamping. + */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + clear_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags); + } + + /* Now we can clean the pending TX timestamp requests. */ + igc_ptp_clear_tx_tstamp(adapter); + + wr32(IGC_TSYNCTXCTL, 0); +} + +static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + wr32(IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED | IGC_TSYNCTXCTL_TXSYNSIG); + + /* Read TXSTMP registers to discard any timestamp previously stored. */ + rd32(IGC_TXSTMPL); + rd32(IGC_TXSTMPH); + + /* The hardware is ready to accept TX timestamp requests, + * notify the transmit path. + */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + set_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags); + } + +} + +/** + * igc_ptp_set_timestamp_mode - setup hardware for timestamping + * @adapter: networking device structure + * @config: hwtstamp configuration + * + * Return: 0 in case of success, negative errno code otherwise. 
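+ * Any PTP RX filter is widened to HWTSTAMP_FILTER_ALL, since the
+ * hardware timestamps all received packets once RX timestamping is on.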
+ */ +static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, + struct hwtstamp_config *config) +{ + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + igc_ptp_disable_tx_timestamp(adapter); + break; + case HWTSTAMP_TX_ON: + igc_ptp_enable_tx_timestamp(adapter); + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + igc_ptp_disable_rx_timestamp(adapter); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_NTP_ALL: + case HWTSTAMP_FILTER_ALL: + igc_ptp_enable_rx_timestamp(adapter); + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + return 0; +} + +/* Requires adapter->ptp_tx_lock held by caller. */ +static void igc_ptp_tx_timeout(struct igc_adapter *adapter, + struct igc_tx_timestamp_request *tstamp) +{ + dev_kfree_skb_any(tstamp->skb); + tstamp->skb = NULL; + adapter->tx_hwtstamp_timeouts++; + + netdev_warn(adapter->netdev, "Tx timestamp timeout\n"); +} + +void igc_ptp_tx_hang(struct igc_adapter *adapter) +{ + struct igc_tx_timestamp_request *tstamp; + struct igc_hw *hw = &adapter->hw; + unsigned long flags; + bool found = false; + int i; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + + for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { + tstamp = &adapter->tx_tstamp[i]; + + if (!tstamp->skb) + continue; + + if (time_is_after_jiffies(tstamp->start + IGC_PTP_TX_TIMEOUT)) + continue; + + igc_ptp_tx_timeout(adapter, tstamp); + found = true; + } + + if (found) { + /* Reading the high register of the first set of timestamp registers + * clears all the equivalent bits in the TSYNCTXCTL register. + */ + rd32(IGC_TXSTMPH_0); + } + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); +} + +static void igc_ptp_tx_reg_to_stamp(struct igc_adapter *adapter, + struct igc_tx_timestamp_request *tstamp, u64 regval) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct sk_buff *skb; + int adjust = 0; + + skb = tstamp->skb; + if (!skb) + return; + + if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval)) + return; + + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_TX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_TX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_TX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_TX_LATENCY_2500; + break; + } + + shhwtstamps.hwtstamp = + ktime_add_ns(shhwtstamps.hwtstamp, adjust); + + tstamp->skb = NULL; + + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); +} + +/** + * igc_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: Board private structure + * + * Check against the ready mask for which of the timestamp register + * sets are ready to be retrieved, then retrieve that and notify the + * rest of the stack. + * + * Context: Expects adapter->ptp_tx_lock to be held by caller. 
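+ * Timestamp register set 0 is handled separately first, to cope with
+ * the missed-interrupt erratum described in the function body.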
+ */ +static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u64 regval; + u32 mask; + int i; + + mask = rd32(IGC_TSYNCTXCTL) & IGC_TSYNCTXCTL_TXTT_ANY; + if (mask & IGC_TSYNCTXCTL_TXTT_0) { + regval = rd32(IGC_TXSTMPL); + regval |= (u64)rd32(IGC_TXSTMPH) << 32; + } else { + /* There's a bug in the hardware that could cause + * missing interrupts for TX timestamping. The issue + * is that for new interrupts to be triggered, the + * IGC_TXSTMPH_0 register must be read. + * + * To avoid discarding a valid timestamp that just + * happened at the "wrong" time, we need to confirm + * that there was no timestamp captured, we do that by + * assuming that no two timestamps in sequence have + * the same nanosecond value. + * + * So, we read the "low" register, read the "high" + * register (to latch a new timestamp) and read the + * "low" register again, if "old" and "new" versions + * of the "low" register are different, a valid + * timestamp was captured, we can read the "high" + * register again. + */ + u32 txstmpl_old, txstmpl_new; + + txstmpl_old = rd32(IGC_TXSTMPL); + rd32(IGC_TXSTMPH); + txstmpl_new = rd32(IGC_TXSTMPL); + + if (txstmpl_old == txstmpl_new) + goto done; + + regval = txstmpl_new; + regval |= (u64)rd32(IGC_TXSTMPH) << 32; + } + + igc_ptp_tx_reg_to_stamp(adapter, &adapter->tx_tstamp[0], regval); + +done: + /* Now that the problematic first register was handled, we can + * use retrieve the timestamps from the other registers + * (starting from '1') with less complications. + */ + for (i = 1; i < IGC_MAX_TX_TSTAMP_REGS; i++) { + struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; + + if (!(tstamp->mask & mask)) + continue; + + regval = rd32(tstamp->regl); + regval |= (u64)rd32(tstamp->regh) << 32; + + igc_ptp_tx_reg_to_stamp(adapter, tstamp, regval); + } +} + +/** + * igc_ptp_tx_tstamp_event + * @adapter: board private structure + * + * Called when a TX timestamp interrupt happens to retrieve the + * timestamp and send it up to the socket. + */ +void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + + igc_ptp_tx_hwtstamp(adapter); + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); +} + +/** + * igc_ptp_set_ts_config - set hardware time stamping config + * @netdev: network interface device structure + * @ifr: interface request data + * + **/ +int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = igc_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * igc_ptp_get_ts_config - get hardware time stamping config + * @netdev: network interface device structure + * @ifr: interface request data + * + * Get the hwtstamp_config settings to return to the user. Rather than attempt + * to deconstruct the settings from the registers, just return a shadow copy + * of the last known settings. 
+ **/ +int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/* The two conditions below must be met for cross timestamping via + * PCIe PTM: + * + * 1. We have an way to convert the timestamps in the PTM messages + * to something related to the system clocks (right now, only + * X86 systems with support for the Always Running Timer allow that); + * + * 2. We have PTM enabled in the path from the device to the PCIe root port. + */ +static bool igc_is_crosststamp_supported(struct igc_adapter *adapter) +{ + if (!IS_ENABLED(CONFIG_X86_TSC)) + return false; + + /* FIXME: it was noticed that enabling support for PCIe PTM in + * some i225-V models could cause lockups when bringing the + * interface up/down. There should be no downsides to + * disabling crosstimestamping support for i225-V, as it + * doesn't have any PTP support. That way we gain some time + * while root causing the issue. + */ + if (adapter->pdev->device == IGC_DEV_ID_I225_V) + return false; + + return pcie_ptm_enabled(adapter->pdev); +} + +static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp) +{ +#if IS_ENABLED(CONFIG_X86_TSC) && !defined(CONFIG_UML) + return convert_art_ns_to_tsc(tstamp); +#else + return (struct system_counterval_t) { }; +#endif +} + +static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat) +{ + struct net_device *netdev = adapter->netdev; + + switch (ptm_stat) { + case IGC_PTM_STAT_RET_ERR: + netdev_err(netdev, "PTM Error: Root port timeout\n"); + break; + case IGC_PTM_STAT_BAD_PTM_RES: + netdev_err(netdev, "PTM Error: Bad response, PTM Response Data expected\n"); + break; + case IGC_PTM_STAT_T4M1_OVFL: + netdev_err(netdev, "PTM Error: T4 minus T1 overflow\n"); + break; + case IGC_PTM_STAT_ADJUST_1ST: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during first PTM cycle\n"); + break; + case IGC_PTM_STAT_ADJUST_CYC: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during non-first PTM cycle\n"); + break; + default: + netdev_err(netdev, "PTM Error: Unknown error (%#x)\n", ptm_stat); + break; + } +} + +static int igc_phc_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + u32 stat, t2_curr_h, t2_curr_l, ctrl; + struct igc_adapter *adapter = ctx; + struct igc_hw *hw = &adapter->hw; + int err, count = 100; + ktime_t t1, t2_curr; + + /* Get a snapshot of system clocks to use as historic value. */ + ktime_get_snapshot(&adapter->snapshot); + + do { + /* Doing this in a loop because in the event of a + * badly timed (ha!) system clock adjustment, we may + * get PTM errors from the PCI root, but these errors + * are transitory. Repeating the process returns valid + * data eventually. + */ + + /* To "manually" start the PTM cycle we need to clear and + * then set again the TRIG bit. + */ + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + ctrl |= IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + + /* The cycle only starts "for real" when software notifies + * that it has read the registers, this is done by setting + * VALID bit. 
+ */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + + err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat, + stat, IGC_PTM_STAT_SLEEP, + IGC_PTM_STAT_TIMEOUT); + if (err < 0) { + netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); + return err; + } + + if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID) + break; + + if (stat & ~IGC_PTM_STAT_VALID) { + /* An error occurred, log it. */ + igc_ptm_log_error(adapter, stat); + /* The STAT register is write-1-to-clear (W1C), + * so write the previous error status to clear it. + */ + wr32(IGC_PTM_STAT, stat); + continue; + } + } while (--count); + + if (!count) { + netdev_err(adapter->netdev, "Exceeded number of tries for PTM cycle\n"); + return -ETIMEDOUT; + } + + t1 = ktime_set(rd32(IGC_PTM_T1_TIM0_H), rd32(IGC_PTM_T1_TIM0_L)); + + t2_curr_l = rd32(IGC_PTM_CURR_T2_L); + t2_curr_h = rd32(IGC_PTM_CURR_T2_H); + + /* FIXME: When the register that tells the endianness of the + * PTM registers are implemented, check them here and add the + * appropriate conversion. + */ + t2_curr_h = swab32(t2_curr_h); + + t2_curr = ((s64)t2_curr_h << 32 | t2_curr_l); + + *device = t1; + *system = igc_device_tstamp_to_system(t2_curr); + + return 0; +} + +static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *cts) +{ + struct igc_adapter *adapter = container_of(ptp, struct igc_adapter, + ptp_caps); + + return get_device_system_crosststamp(igc_phc_get_syncdevicetime, + adapter, &adapter->snapshot, cts); +} + +/** + * igc_ptp_init - Initialize PTP functionality + * @adapter: Board private structure + * + * This function is called at device probe to initialize the PTP + * functionality. + */ +void igc_ptp_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_tx_timestamp_request *tstamp; + struct igc_hw *hw = &adapter->hw; + int i; + + tstamp = &adapter->tx_tstamp[0]; + tstamp->mask = IGC_TSYNCTXCTL_TXTT_0; + tstamp->regl = IGC_TXSTMPL_0; + tstamp->regh = IGC_TXSTMPH_0; + tstamp->flags = 0; + + tstamp = &adapter->tx_tstamp[1]; + tstamp->mask = IGC_TSYNCTXCTL_TXTT_1; + tstamp->regl = IGC_TXSTMPL_1; + tstamp->regh = IGC_TXSTMPH_1; + tstamp->flags = IGC_TX_FLAGS_TSTAMP_1; + + tstamp = &adapter->tx_tstamp[2]; + tstamp->mask = IGC_TSYNCTXCTL_TXTT_2; + tstamp->regl = IGC_TXSTMPL_2; + tstamp->regh = IGC_TXSTMPH_2; + tstamp->flags = IGC_TX_FLAGS_TSTAMP_2; + + tstamp = &adapter->tx_tstamp[3]; + tstamp->mask = IGC_TSYNCTXCTL_TXTT_3; + tstamp->regl = IGC_TXSTMPL_3; + tstamp->regh = IGC_TXSTMPH_3; + tstamp->flags = IGC_TX_FLAGS_TSTAMP_3; + + switch (hw->mac.type) { + case igc_i225: + for (i = 0; i < IGC_N_SDP; i++) { + struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225; + adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225; + adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225; + adapter->ptp_caps.settime64 = igc_ptp_settime_i225; + adapter->ptp_caps.enable = igc_ptp_feature_enable_i225; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS; + adapter->ptp_caps.n_per_out = IGC_N_PEROUT; + adapter->ptp_caps.n_pins = IGC_N_SDP; + adapter->ptp_caps.verify = igc_ptp_verify_pin; + + if 
(!igc_is_crosststamp_supported(adapter)) + break; + + adapter->ptp_caps.getcrosststamp = igc_ptp_getcrosststamp; + break; + default: + adapter->ptp_clock = NULL; + return; + } + + spin_lock_init(&adapter->ptp_tx_lock); + spin_lock_init(&adapter->tmreg_lock); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + adapter->prev_ptp_time = ktime_to_timespec64(ktime_get_real()); + adapter->ptp_reset_start = ktime_get(); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + netdev_err(netdev, "ptp_clock_register failed\n"); + } else if (adapter->ptp_clock) { + netdev_info(netdev, "PHC added\n"); + adapter->ptp_flags |= IGC_PTP_ENABLED; + } +} + +static void igc_ptp_time_save(struct igc_adapter *adapter) +{ + igc_ptp_read(adapter, &adapter->prev_ptp_time); + adapter->ptp_reset_start = ktime_get(); +} + +static void igc_ptp_time_restore(struct igc_adapter *adapter) +{ + struct timespec64 ts = adapter->prev_ptp_time; + ktime_t delta; + + delta = ktime_sub(ktime_get(), adapter->ptp_reset_start); + + timespec64_add_ns(&ts, ktime_to_ns(delta)); + + igc_ptp_write_i225(adapter, &ts); +} + +static void igc_ptm_stop(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_EN; + + wr32(IGC_PTM_CTRL, ctrl); +} + +/** + * igc_ptp_suspend - Disable PTP work items and prepare for suspend + * @adapter: Board private structure + * + * This function stops the overflow check work and PTP Tx timestamp work, and + * will prepare the device for OS suspend. + */ +void igc_ptp_suspend(struct igc_adapter *adapter) +{ + if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) + return; + + igc_ptp_clear_tx_tstamp(adapter); + + if (pci_device_is_present(adapter->pdev)) { + igc_ptp_time_save(adapter); + igc_ptm_stop(adapter); + } +} + +/** + * igc_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. + **/ +void igc_ptp_stop(struct igc_adapter *adapter) +{ + igc_ptp_suspend(adapter); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + netdev_info(adapter->netdev, "PHC removed\n"); + adapter->ptp_flags &= ~IGC_PTP_ENABLED; + } +} + +/** + * igc_ptp_reset - Re-enable the adapter for PTP following a reset. + * @adapter: Board private structure. + * + * This function handles the reset work required to re-enable the PTP device. + **/ +void igc_ptp_reset(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 cycle_ctrl, ctrl; + unsigned long flags; + u32 timadj; + + /* reset the tstamp_config */ + igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + switch (adapter->hw.mac.type) { + case igc_i225: + timadj = rd32(IGC_TIMADJ); + timadj |= IGC_TIMADJ_ADJUST_METH; + wr32(IGC_TIMADJ, timadj); + + wr32(IGC_TSAUXC, 0x0); + wr32(IGC_TSSDP, 0x0); + wr32(IGC_TSIM, + IGC_TSICR_INTERRUPTS | + (adapter->pps_sys_wrap_on ? 
IGC_TSICR_SYS_WRAP : 0)); + wr32(IGC_IMS, IGC_IMS_TS); + + if (!igc_is_crosststamp_supported(adapter)) + break; + + wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT); + wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT); + + cycle_ctrl = IGC_PTM_CYCLE_CTRL_CYC_TIME(IGC_PTM_CYC_TIME_DEFAULT); + + wr32(IGC_PTM_CYCLE_CTRL, cycle_ctrl); + + ctrl = IGC_PTM_CTRL_EN | + IGC_PTM_CTRL_START_NOW | + IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) | + IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) | + IGC_PTM_CTRL_TRIG; + + wr32(IGC_PTM_CTRL, ctrl); + + /* Force the first cycle to run. */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + + break; + default: + /* No work to do. */ + goto out; + } + + /* Re-initialize the timer. */ + if (hw->mac.type == igc_i225) { + igc_ptp_time_restore(adapter); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } +out: + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + wrfl(); +} diff --git a/devices/igc/igc_ptp-6.6-orig.c b/devices/igc/igc_ptp-6.6-orig.c new file mode 100644 index 00000000..928f3879 --- /dev/null +++ b/devices/igc/igc_ptp-6.6-orig.c @@ -0,0 +1,1265 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Intel Corporation */ + +#include "igc.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#define INCVALUE_MASK 0x7fffffff +#define ISGN 0x80000000 + +#define IGC_PTP_TX_TIMEOUT (HZ * 15) + +#define IGC_PTM_STAT_SLEEP 2 +#define IGC_PTM_STAT_TIMEOUT 100 + +/* SYSTIM read access for I225 */ +void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts) +{ + struct igc_hw *hw = &adapter->hw; + u32 sec, nsec; + + /* The timestamp is latched when SYSTIML is read. */ + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} + +static void igc_ptp_write_i225(struct igc_adapter *adapter, + const struct timespec64 *ts) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_SYSTIML, ts->tv_nsec); + wr32(IGC_SYSTIMH, ts->tv_sec); +} + +static int igc_ptp_adjfine_i225(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct igc_hw *hw = &igc->hw; + int neg_adj = 0; + u64 rate; + u32 inca; + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + rate = scaled_ppm; + rate <<= 14; + rate = div_u64(rate, 78125); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + wr32(IGC_TIMINCA, inca); + + return 0; +} + +static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct timespec64 now, then = ns_to_timespec64(delta); + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + igc_ptp_read(igc, &now); + now = timespec64_add(now, then); + igc_ptp_write_i225(igc, (const struct timespec64 *)&now); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + ptp_read_system_prets(sts); + ts->tv_nsec = rd32(IGC_SYSTIML); + ts->tv_sec = rd32(IGC_SYSTIMH); + ptp_read_system_postts(sts); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static int 
igc_ptp_settime_i225(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igc_adapter *igc = container_of(ptp, struct igc_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igc->tmreg_lock, flags); + + igc_ptp_write_i225(igc, ts); + + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + + return 0; +} + +static void igc_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) +{ + u32 *ptr = pin < 2 ? ctrl : ctrl_ext; + static const u32 mask[IGC_N_SDP] = { + IGC_CTRL_SDP0_DIR, + IGC_CTRL_SDP1_DIR, + IGC_CTRL_EXT_SDP2_DIR, + IGC_CTRL_EXT_SDP3_DIR, + }; + + if (input) + *ptr &= ~mask[pin]; + else + *ptr |= mask[pin]; +} + +static void igc_pin_perout(struct igc_adapter *igc, int chan, int pin, int freq) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + static const u32 igc_ts_sdp_sel_tt0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT0, IGC_TS_SDP1_SEL_TT0, + IGC_TS_SDP2_SEL_TT0, IGC_TS_SDP3_SEL_TT0, + }; + static const u32 igc_ts_sdp_sel_tt1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_TT1, IGC_TS_SDP1_SEL_TT1, + IGC_TS_SDP2_SEL_TT1, IGC_TS_SDP3_SEL_TT1, + }; + static const u32 igc_ts_sdp_sel_fc0[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC0, IGC_TS_SDP1_SEL_FC0, + IGC_TS_SDP2_SEL_FC0, IGC_TS_SDP3_SEL_FC0, + }; + static const u32 igc_ts_sdp_sel_fc1[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + static const u32 igc_ts_sdp_sel_clr[IGC_N_SDP] = { + IGC_TS_SDP0_SEL_FC1, IGC_TS_SDP1_SEL_FC1, + IGC_TS_SDP2_SEL_FC1, IGC_TS_SDP3_SEL_FC1, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 0, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an input. */ + if ((tssdp & IGC_AUX0_SEL_SDP3) == igc_aux0_sel_sdp[pin]) + tssdp &= ~IGC_AUX0_TS_SDP_EN; + + if ((tssdp & IGC_AUX1_SEL_SDP3) == igc_aux1_sel_sdp[pin]) + tssdp &= ~IGC_AUX1_TS_SDP_EN; + + tssdp &= ~igc_ts_sdp_sel_clr[pin]; + if (freq) { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_fc1[pin]; + else + tssdp |= igc_ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= igc_ts_sdp_sel_tt1[pin]; + else + tssdp |= igc_ts_sdp_sel_tt0[pin]; + } + tssdp |= igc_ts_sdp_en[pin]; + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static void igc_pin_extts(struct igc_adapter *igc, int chan, int pin) +{ + static const u32 igc_aux0_sel_sdp[IGC_N_SDP] = { + IGC_AUX0_SEL_SDP0, IGC_AUX0_SEL_SDP1, IGC_AUX0_SEL_SDP2, IGC_AUX0_SEL_SDP3, + }; + static const u32 igc_aux1_sel_sdp[IGC_N_SDP] = { + IGC_AUX1_SEL_SDP0, IGC_AUX1_SEL_SDP1, IGC_AUX1_SEL_SDP2, IGC_AUX1_SEL_SDP3, + }; + static const u32 igc_ts_sdp_en[IGC_N_SDP] = { + IGC_TS_SDP0_EN, IGC_TS_SDP1_EN, IGC_TS_SDP2_EN, IGC_TS_SDP3_EN, + }; + struct igc_hw *hw = &igc->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(IGC_CTRL); + ctrl_ext = rd32(IGC_CTRL_EXT); + tssdp = rd32(IGC_TSSDP); + + igc_pin_direction(pin, 1, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an output. 
*/ + tssdp &= ~igc_ts_sdp_en[pin]; + + if (chan == 1) { + tssdp &= ~IGC_AUX1_SEL_SDP3; + tssdp |= igc_aux1_sel_sdp[pin] | IGC_AUX1_TS_SDP_EN; + } else { + tssdp &= ~IGC_AUX0_SEL_SDP3; + tssdp |= igc_aux0_sel_sdp[pin] | IGC_AUX0_TS_SDP_EN; + } + + wr32(IGC_TSSDP, tssdp); + wr32(IGC_CTRL, ctrl); + wr32(IGC_CTRL_EXT, ctrl_ext); +} + +static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct igc_adapter *igc = + container_of(ptp, struct igc_adapter, ptp_caps); + struct igc_hw *hw = &igc->hw; + unsigned long flags; + struct timespec64 ts; + int use_freq = 0, pin = -1; + u32 tsim, tsauxc, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests failing to enable both edges. */ + if ((rq->extts.flags & PTP_STRICT_FLAGS) && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = IGC_TSAUXC_EN_TS1; + tsim_mask = IGC_TSICR_AUTT1; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TS0; + tsim_mask = IGC_TSICR_AUTT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (on) { + igc_pin_extts(igc, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && (ns <= 70000000LL || ns == 125000000LL || + ns == 250000000LL || ns == 500000000LL)) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; + } + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK1 | IGC_TSAUXC_ST1; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT1; + tsim_mask = IGC_TSICR_TT1; + } + trgttiml = IGC_TRGTTIML1; + trgttimh = IGC_TRGTTIMH1; + freqout = IGC_FREQOUT1; + } else { + if (use_freq) { + tsauxc_mask = IGC_TSAUXC_EN_CLK0 | IGC_TSAUXC_ST0; + tsim_mask = 0; + } else { + tsauxc_mask = IGC_TSAUXC_EN_TT0; + tsim_mask = IGC_TSICR_TT0; + } + trgttiml = IGC_TRGTTIML0; + trgttimh = IGC_TRGTTIMH0; + freqout = IGC_FREQOUT0; + } + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsauxc = rd32(IGC_TSAUXC); + tsim = rd32(IGC_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1 | + IGC_TSAUXC_ST1); + tsim &= ~IGC_TSICR_TT1; + } else { + tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0 | + IGC_TSAUXC_ST0); + tsim &= ~IGC_TSICR_TT0; + } + if (on) { + struct timespec64 safe_start; + int i = rq->perout.index; + + igc_pin_perout(igc, i, pin, use_freq); + igc_ptp_read(igc, &safe_start); + + /* PPS output start time is triggered by Target time(TT) + * register. 
Programming any past time value into TT + * register will cause PPS to never start. Need to make + * sure we program the TT register a time ahead in + * future. There isn't a stringent need to fire PPS out + * right away. Adding +2 seconds should take care of + * corner cases. Let's say if the SYSTIML is close to + * wrap up and the timer keeps ticking as we program the + * register, adding +2seconds is safe bet. + */ + safe_start.tv_sec += 2; + + if (rq->perout.start.sec < safe_start.tv_sec) + igc->perout[i].start.tv_sec = safe_start.tv_sec; + else + igc->perout[i].start.tv_sec = rq->perout.start.sec; + igc->perout[i].start.tv_nsec = rq->perout.start.nsec; + igc->perout[i].period.tv_sec = ts.tv_sec; + igc->perout[i].period.tv_nsec = ts.tv_nsec; + wr32(trgttimh, (u32)igc->perout[i].start.tv_sec); + /* For now, always select timer 0 as source. */ + wr32(trgttiml, (u32)(igc->perout[i].start.tv_nsec | + IGC_TT_IO_TIMER_SEL_SYSTIM0)); + if (use_freq) + wr32(freqout, ns); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(IGC_TSAUXC, tsauxc); + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&igc->tmreg_lock, flags); + tsim = rd32(IGC_TSIM); + if (on) + tsim |= IGC_TSICR_SYS_WRAP; + else + tsim &= ~IGC_TSICR_SYS_WRAP; + igc->pps_sys_wrap_on = on; + wr32(IGC_TSIM, tsim); + spin_unlock_irqrestore(&igc->tmreg_lock, flags); + return 0; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +/** + * igc_ptp_systim_to_hwtstamp - convert system time value to HW timestamp + * @adapter: board private structure + * @hwtstamps: timestamp structure to update + * @systim: unsigned 64bit system time value + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions. + * + * Returns 0 on success. + **/ +static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + switch (adapter->hw.mac.type) { + case igc_i225: + memset(hwtstamps, 0, sizeof(*hwtstamps)); + /* Upper 32 bits contain s, lower 32 bits contain ns. */ + hwtstamps->hwtstamp = ktime_set(systim >> 32, + systim & 0xFFFFFFFF); + break; + default: + return -EINVAL; + } + return 0; +} + +/** + * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer + * @adapter: Pointer to adapter the packet buffer belongs to + * @buf: Pointer to packet buffer + * + * This function retrieves the timestamp saved in the beginning of packet + * buffer. While two timestamps are available, one in timer0 reference and the + * other in timer1 reference, this function considers only the timestamp in + * timer0 reference. + * + * Returns timestamp value. + */ +ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf) +{ + ktime_t timestamp; + u32 secs, nsecs; + int adjust; + + /* Timestamps are saved in little endian at the beginning of the packet + * buffer following the layout: + * + * DWORD: | 0 | 1 | 2 | 3 | + * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH | + * + * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds + * part of the timestamp. 
+ */ + nsecs = le32_to_cpu(buf[2]); + secs = le32_to_cpu(buf[3]); + + timestamp = ktime_set(secs, nsecs); + + /* Adjust timestamp for the RX latency based on link speed */ + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_RX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_RX_LATENCY_2500; + break; + default: + adjust = 0; + netdev_warn_once(adapter->netdev, "Imprecise timestamp\n"); + break; + } + + return ktime_sub_ns(timestamp, adjust); +} + +static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + int i; + + wr32(IGC_TSYNCRXCTL, 0); + + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + val &= ~IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + + val = rd32(IGC_RXPBS); + val &= ~IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); +} + +static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + int i; + + val = rd32(IGC_RXPBS); + val |= IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); + + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + /* FIXME: For now, only support retrieving RX timestamps from + * timer 0. + */ + val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) | + IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + + val = IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_ALL | + IGC_TSYNCRXCTL_RXSYNSIG; + wr32(IGC_TSYNCRXCTL, val); +} + +static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + + for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { + struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; + + dev_kfree_skb_any(tstamp->skb); + tstamp->skb = NULL; + } + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); +} + +static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + /* Clear the flags first to avoid new packets to be enqueued + * for TX timestamping. + */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + clear_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags); + } + + /* Now we can clean the pending TX timestamp requests. */ + igc_ptp_clear_tx_tstamp(adapter); + + wr32(IGC_TSYNCTXCTL, 0); +} + +static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + int i; + + wr32(IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED | IGC_TSYNCTXCTL_TXSYNSIG); + + /* Read TXSTMP registers to discard any timestamp previously stored. */ + rd32(IGC_TXSTMPL); + rd32(IGC_TXSTMPH); + + /* The hardware is ready to accept TX timestamp requests, + * notify the transmit path. + */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + set_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags); + } + +} + +/** + * igc_ptp_set_timestamp_mode - setup hardware for timestamping + * @adapter: networking device structure + * @config: hwtstamp configuration + * + * Return: 0 in case of success, negative errno code otherwise. 
+ */ +static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, + struct hwtstamp_config *config) +{ + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + igc_ptp_disable_tx_timestamp(adapter); + break; + case HWTSTAMP_TX_ON: + igc_ptp_enable_tx_timestamp(adapter); + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + igc_ptp_disable_rx_timestamp(adapter); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_NTP_ALL: + case HWTSTAMP_FILTER_ALL: + igc_ptp_enable_rx_timestamp(adapter); + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + return 0; +} + +/* Requires adapter->ptp_tx_lock held by caller. */ +static void igc_ptp_tx_timeout(struct igc_adapter *adapter, + struct igc_tx_timestamp_request *tstamp) +{ + dev_kfree_skb_any(tstamp->skb); + tstamp->skb = NULL; + adapter->tx_hwtstamp_timeouts++; + + netdev_warn(adapter->netdev, "Tx timestamp timeout\n"); +} + +void igc_ptp_tx_hang(struct igc_adapter *adapter) +{ + struct igc_tx_timestamp_request *tstamp; + struct igc_hw *hw = &adapter->hw; + unsigned long flags; + bool found = false; + int i; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + + for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { + tstamp = &adapter->tx_tstamp[i]; + + if (!tstamp->skb) + continue; + + if (time_is_after_jiffies(tstamp->start + IGC_PTP_TX_TIMEOUT)) + continue; + + igc_ptp_tx_timeout(adapter, tstamp); + found = true; + } + + if (found) { + /* Reading the high register of the first set of timestamp registers + * clears all the equivalent bits in the TSYNCTXCTL register. + */ + rd32(IGC_TXSTMPH_0); + } + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); +} + +static void igc_ptp_tx_reg_to_stamp(struct igc_adapter *adapter, + struct igc_tx_timestamp_request *tstamp, u64 regval) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct sk_buff *skb; + int adjust = 0; + + skb = tstamp->skb; + if (!skb) + return; + + if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval)) + return; + + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_TX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_TX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_TX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_TX_LATENCY_2500; + break; + } + + shhwtstamps.hwtstamp = + ktime_add_ns(shhwtstamps.hwtstamp, adjust); + + tstamp->skb = NULL; + + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); +} + +/** + * igc_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: Board private structure + * + * Check against the ready mask for which of the timestamp register + * sets are ready to be retrieved, then retrieve that and notify the + * rest of the stack. + * + * Context: Expects adapter->ptp_tx_lock to be held by caller. 
+ */ +static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u64 regval; + u32 mask; + int i; + + mask = rd32(IGC_TSYNCTXCTL) & IGC_TSYNCTXCTL_TXTT_ANY; + if (mask & IGC_TSYNCTXCTL_TXTT_0) { + regval = rd32(IGC_TXSTMPL); + regval |= (u64)rd32(IGC_TXSTMPH) << 32; + } else { + /* There's a bug in the hardware that could cause + * missing interrupts for TX timestamping. The issue + * is that for new interrupts to be triggered, the + * IGC_TXSTMPH_0 register must be read. + * + * To avoid discarding a valid timestamp that just + * happened at the "wrong" time, we need to confirm + * that there was no timestamp captured, we do that by + * assuming that no two timestamps in sequence have + * the same nanosecond value. + * + * So, we read the "low" register, read the "high" + * register (to latch a new timestamp) and read the + * "low" register again, if "old" and "new" versions + * of the "low" register are different, a valid + * timestamp was captured, we can read the "high" + * register again. + */ + u32 txstmpl_old, txstmpl_new; + + txstmpl_old = rd32(IGC_TXSTMPL); + rd32(IGC_TXSTMPH); + txstmpl_new = rd32(IGC_TXSTMPL); + + if (txstmpl_old == txstmpl_new) + goto done; + + regval = txstmpl_new; + regval |= (u64)rd32(IGC_TXSTMPH) << 32; + } + + igc_ptp_tx_reg_to_stamp(adapter, &adapter->tx_tstamp[0], regval); + +done: + /* Now that the problematic first register was handled, we can + * use retrieve the timestamps from the other registers + * (starting from '1') with less complications. + */ + for (i = 1; i < IGC_MAX_TX_TSTAMP_REGS; i++) { + struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; + + if (!(tstamp->mask & mask)) + continue; + + regval = rd32(tstamp->regl); + regval |= (u64)rd32(tstamp->regh) << 32; + + igc_ptp_tx_reg_to_stamp(adapter, tstamp, regval); + } +} + +/** + * igc_ptp_tx_tstamp_event + * @adapter: board private structure + * + * Called when a TX timestamp interrupt happens to retrieve the + * timestamp and send it up to the socket. + */ +void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + + igc_ptp_tx_hwtstamp(adapter); + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); +} + +/** + * igc_ptp_set_ts_config - set hardware time stamping config + * @netdev: network interface device structure + * @ifr: interface request data + * + **/ +int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = igc_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * igc_ptp_get_ts_config - get hardware time stamping config + * @netdev: network interface device structure + * @ifr: interface request data + * + * Get the hwtstamp_config settings to return to the user. Rather than attempt + * to deconstruct the settings from the registers, just return a shadow copy + * of the last known settings. 
+ **/ +int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/* The two conditions below must be met for cross timestamping via + * PCIe PTM: + * + * 1. We have an way to convert the timestamps in the PTM messages + * to something related to the system clocks (right now, only + * X86 systems with support for the Always Running Timer allow that); + * + * 2. We have PTM enabled in the path from the device to the PCIe root port. + */ +static bool igc_is_crosststamp_supported(struct igc_adapter *adapter) +{ + if (!IS_ENABLED(CONFIG_X86_TSC)) + return false; + + /* FIXME: it was noticed that enabling support for PCIe PTM in + * some i225-V models could cause lockups when bringing the + * interface up/down. There should be no downsides to + * disabling crosstimestamping support for i225-V, as it + * doesn't have any PTP support. That way we gain some time + * while root causing the issue. + */ + if (adapter->pdev->device == IGC_DEV_ID_I225_V) + return false; + + return pcie_ptm_enabled(adapter->pdev); +} + +static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp) +{ +#if IS_ENABLED(CONFIG_X86_TSC) && !defined(CONFIG_UML) + return convert_art_ns_to_tsc(tstamp); +#else + return (struct system_counterval_t) { }; +#endif +} + +static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat) +{ + struct net_device *netdev = adapter->netdev; + + switch (ptm_stat) { + case IGC_PTM_STAT_RET_ERR: + netdev_err(netdev, "PTM Error: Root port timeout\n"); + break; + case IGC_PTM_STAT_BAD_PTM_RES: + netdev_err(netdev, "PTM Error: Bad response, PTM Response Data expected\n"); + break; + case IGC_PTM_STAT_T4M1_OVFL: + netdev_err(netdev, "PTM Error: T4 minus T1 overflow\n"); + break; + case IGC_PTM_STAT_ADJUST_1ST: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during first PTM cycle\n"); + break; + case IGC_PTM_STAT_ADJUST_CYC: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during non-first PTM cycle\n"); + break; + default: + netdev_err(netdev, "PTM Error: Unknown error (%#x)\n", ptm_stat); + break; + } +} + +static int igc_phc_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + u32 stat, t2_curr_h, t2_curr_l, ctrl; + struct igc_adapter *adapter = ctx; + struct igc_hw *hw = &adapter->hw; + int err, count = 100; + ktime_t t1, t2_curr; + + /* Get a snapshot of system clocks to use as historic value. */ + ktime_get_snapshot(&adapter->snapshot); + + do { + /* Doing this in a loop because in the event of a + * badly timed (ha!) system clock adjustment, we may + * get PTM errors from the PCI root, but these errors + * are transitory. Repeating the process returns valid + * data eventually. + */ + + /* To "manually" start the PTM cycle we need to clear and + * then set again the TRIG bit. + */ + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + ctrl |= IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + + /* The cycle only starts "for real" when software notifies + * that it has read the registers, this is done by setting + * VALID bit. 
+ */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + + err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat, + stat, IGC_PTM_STAT_SLEEP, + IGC_PTM_STAT_TIMEOUT); + if (err < 0) { + netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); + return err; + } + + if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID) + break; + + if (stat & ~IGC_PTM_STAT_VALID) { + /* An error occurred, log it. */ + igc_ptm_log_error(adapter, stat); + /* The STAT register is write-1-to-clear (W1C), + * so write the previous error status to clear it. + */ + wr32(IGC_PTM_STAT, stat); + continue; + } + } while (--count); + + if (!count) { + netdev_err(adapter->netdev, "Exceeded number of tries for PTM cycle\n"); + return -ETIMEDOUT; + } + + t1 = ktime_set(rd32(IGC_PTM_T1_TIM0_H), rd32(IGC_PTM_T1_TIM0_L)); + + t2_curr_l = rd32(IGC_PTM_CURR_T2_L); + t2_curr_h = rd32(IGC_PTM_CURR_T2_H); + + /* FIXME: When the register that tells the endianness of the + * PTM registers are implemented, check them here and add the + * appropriate conversion. + */ + t2_curr_h = swab32(t2_curr_h); + + t2_curr = ((s64)t2_curr_h << 32 | t2_curr_l); + + *device = t1; + *system = igc_device_tstamp_to_system(t2_curr); + + return 0; +} + +static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *cts) +{ + struct igc_adapter *adapter = container_of(ptp, struct igc_adapter, + ptp_caps); + + return get_device_system_crosststamp(igc_phc_get_syncdevicetime, + adapter, &adapter->snapshot, cts); +} + +/** + * igc_ptp_init - Initialize PTP functionality + * @adapter: Board private structure + * + * This function is called at device probe to initialize the PTP + * functionality. + */ +void igc_ptp_init(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct igc_tx_timestamp_request *tstamp; + struct igc_hw *hw = &adapter->hw; + int i; + + tstamp = &adapter->tx_tstamp[0]; + tstamp->mask = IGC_TSYNCTXCTL_TXTT_0; + tstamp->regl = IGC_TXSTMPL_0; + tstamp->regh = IGC_TXSTMPH_0; + tstamp->flags = 0; + + tstamp = &adapter->tx_tstamp[1]; + tstamp->mask = IGC_TSYNCTXCTL_TXTT_1; + tstamp->regl = IGC_TXSTMPL_1; + tstamp->regh = IGC_TXSTMPH_1; + tstamp->flags = IGC_TX_FLAGS_TSTAMP_1; + + tstamp = &adapter->tx_tstamp[2]; + tstamp->mask = IGC_TSYNCTXCTL_TXTT_2; + tstamp->regl = IGC_TXSTMPL_2; + tstamp->regh = IGC_TXSTMPH_2; + tstamp->flags = IGC_TX_FLAGS_TSTAMP_2; + + tstamp = &adapter->tx_tstamp[3]; + tstamp->mask = IGC_TSYNCTXCTL_TXTT_3; + tstamp->regl = IGC_TXSTMPL_3; + tstamp->regh = IGC_TXSTMPH_3; + tstamp->flags = IGC_TX_FLAGS_TSTAMP_3; + + switch (hw->mac.type) { + case igc_i225: + for (i = 0; i < IGC_N_SDP; i++) { + struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225; + adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225; + adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225; + adapter->ptp_caps.settime64 = igc_ptp_settime_i225; + adapter->ptp_caps.enable = igc_ptp_feature_enable_i225; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS; + adapter->ptp_caps.n_per_out = IGC_N_PEROUT; + adapter->ptp_caps.n_pins = IGC_N_SDP; + adapter->ptp_caps.verify = igc_ptp_verify_pin; + + if 
(!igc_is_crosststamp_supported(adapter)) + break; + + adapter->ptp_caps.getcrosststamp = igc_ptp_getcrosststamp; + break; + default: + adapter->ptp_clock = NULL; + return; + } + + spin_lock_init(&adapter->ptp_tx_lock); + spin_lock_init(&adapter->tmreg_lock); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + adapter->prev_ptp_time = ktime_to_timespec64(ktime_get_real()); + adapter->ptp_reset_start = ktime_get(); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + netdev_err(netdev, "ptp_clock_register failed\n"); + } else if (adapter->ptp_clock) { + netdev_info(netdev, "PHC added\n"); + adapter->ptp_flags |= IGC_PTP_ENABLED; + } +} + +static void igc_ptp_time_save(struct igc_adapter *adapter) +{ + igc_ptp_read(adapter, &adapter->prev_ptp_time); + adapter->ptp_reset_start = ktime_get(); +} + +static void igc_ptp_time_restore(struct igc_adapter *adapter) +{ + struct timespec64 ts = adapter->prev_ptp_time; + ktime_t delta; + + delta = ktime_sub(ktime_get(), adapter->ptp_reset_start); + + timespec64_add_ns(&ts, ktime_to_ns(delta)); + + igc_ptp_write_i225(adapter, &ts); +} + +static void igc_ptm_stop(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_EN; + + wr32(IGC_PTM_CTRL, ctrl); +} + +/** + * igc_ptp_suspend - Disable PTP work items and prepare for suspend + * @adapter: Board private structure + * + * This function stops the overflow check work and PTP Tx timestamp work, and + * will prepare the device for OS suspend. + */ +void igc_ptp_suspend(struct igc_adapter *adapter) +{ + if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) + return; + + igc_ptp_clear_tx_tstamp(adapter); + + if (pci_device_is_present(adapter->pdev)) { + igc_ptp_time_save(adapter); + igc_ptm_stop(adapter); + } +} + +/** + * igc_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. + **/ +void igc_ptp_stop(struct igc_adapter *adapter) +{ + igc_ptp_suspend(adapter); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + netdev_info(adapter->netdev, "PHC removed\n"); + adapter->ptp_flags &= ~IGC_PTP_ENABLED; + } +} + +/** + * igc_ptp_reset - Re-enable the adapter for PTP following a reset. + * @adapter: Board private structure. + * + * This function handles the reset work required to re-enable the PTP device. + **/ +void igc_ptp_reset(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 cycle_ctrl, ctrl; + unsigned long flags; + u32 timadj; + + /* reset the tstamp_config */ + igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + switch (adapter->hw.mac.type) { + case igc_i225: + timadj = rd32(IGC_TIMADJ); + timadj |= IGC_TIMADJ_ADJUST_METH; + wr32(IGC_TIMADJ, timadj); + + wr32(IGC_TSAUXC, 0x0); + wr32(IGC_TSSDP, 0x0); + wr32(IGC_TSIM, + IGC_TSICR_INTERRUPTS | + (adapter->pps_sys_wrap_on ? 
IGC_TSICR_SYS_WRAP : 0)); + wr32(IGC_IMS, IGC_IMS_TS); + + if (!igc_is_crosststamp_supported(adapter)) + break; + + wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT); + wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT); + + cycle_ctrl = IGC_PTM_CYCLE_CTRL_CYC_TIME(IGC_PTM_CYC_TIME_DEFAULT); + + wr32(IGC_PTM_CYCLE_CTRL, cycle_ctrl); + + ctrl = IGC_PTM_CTRL_EN | + IGC_PTM_CTRL_START_NOW | + IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) | + IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) | + IGC_PTM_CTRL_TRIG; + + wr32(IGC_PTM_CTRL, ctrl); + + /* Force the first cycle to run. */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + + break; + default: + /* No work to do. */ + goto out; + } + + /* Re-initialize the timer. */ + if (hw->mac.type == igc_i225) { + igc_ptp_time_restore(adapter); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } +out: + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + wrfl(); +} diff --git a/devices/igc/igc_regs-6.4-ethercat.h b/devices/igc/igc_regs-6.4-ethercat.h new file mode 100644 index 00000000..dba5a575 --- /dev/null +++ b/devices/igc/igc_regs-6.4-ethercat.h @@ -0,0 +1,321 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_REGS_H_ +#define _IGC_REGS_H_ + +/* General Register Descriptions */ +#define IGC_CTRL 0x00000 /* Device Control - RW */ +#define IGC_STATUS 0x00008 /* Device Status - RO */ +#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define IGC_MDIC 0x00020 /* MDI Control - RW */ +#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define IGC_VET 0x00038 /* VLAN Ether Type - RW */ +#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */ +#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */ + +/* Internal Packet Buffer Size Registers */ +#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ + +/* NVM Register Descriptions */ +#define IGC_EERD 0x12014 /* EEprom mode read - RW */ +#define IGC_EEWR 0x12018 /* EEprom mode write - RW */ + +/* Flow Control Register Descriptions */ +#define IGC_FCAL 0x00028 /* FC Address Low - RW */ +#define IGC_FCAH 0x0002C /* FC Address High - RW */ +#define IGC_FCT 0x00030 /* FC Type - RW */ +#define IGC_FCTTV 0x00170 /* FC Transmit Timer - RW */ +#define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */ +#define IGC_FCRTH 0x02168 /* FC Receive Threshold High - RW */ +#define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */ + +/* Semaphore registers */ +#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define IGC_SWSM 0x05B50 /* SW Semaphore */ +#define IGC_FWSM 0x05B54 /* FW Semaphore */ + +/* Function Active and Power State to MNG */ +#define IGC_FACTPS 0x05B30 + +/* Interrupt Register Description */ +#define IGC_EICR 0x01580 /* Ext. Interrupt Cause read - W0 */ +#define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define IGC_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define IGC_EIAM 0x01530 /* Ext. 
Interrupt Auto Mask - RW */ +#define IGC_ICR 0x01500 /* Intr Cause Read - RC/W1C */ +#define IGC_ICS 0x01504 /* Intr Cause Set - WO */ +#define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */ +#define IGC_IMC 0x0150C /* Intr Mask Clear - WO */ +#define IGC_IAM 0x01510 /* Intr Ack Auto Mask- RW */ +/* Intr Throttle - RW */ +#define IGC_EITR(_n) (0x01680 + (0x4 * (_n))) +/* Interrupt Vector Allocation - RW */ +#define IGC_IVAR0 0x01700 +#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */ + +/* RSS registers */ +#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */ + +/* Filtering Registers */ +#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ +#define IGC_FHFT(_n) (0x09000 + (256 * (_n))) /* Flexible Host Filter */ +#define IGC_FHFT_EXT(_n) (0x09A00 + (256 * (_n))) /* Flexible Host Filter Extended */ +#define IGC_FHFTSL 0x05804 /* Flex Filter indirect table select */ + +/* ETQF register bit definitions */ +#define IGC_ETQF_FILTER_ENABLE BIT(26) +#define IGC_ETQF_QUEUE_ENABLE BIT(31) +#define IGC_ETQF_QUEUE_SHIFT 16 +#define IGC_ETQF_QUEUE_MASK 0x00070000 +#define IGC_ETQF_ETYPE_MASK 0x0000FFFF + +/* FHFT register bit definitions */ +#define IGC_FHFT_LENGTH_MASK GENMASK(7, 0) +#define IGC_FHFT_QUEUE_SHIFT 8 +#define IGC_FHFT_QUEUE_MASK GENMASK(10, 8) +#define IGC_FHFT_PRIO_SHIFT 16 +#define IGC_FHFT_PRIO_MASK GENMASK(18, 16) +#define IGC_FHFT_IMM_INT BIT(24) +#define IGC_FHFT_DROP BIT(25) + +/* FHFTSL register bit definitions */ +#define IGC_FHFTSL_FTSL_SHIFT 0 +#define IGC_FHFTSL_FTSL_MASK GENMASK(1, 0) + +/* Redirection Table - RW Array */ +#define IGC_RETA(_i) (0x05C00 + ((_i) * 4)) +/* RSS Random Key - RW Array */ +#define IGC_RSSRK(_i) (0x05C80 + ((_i) * 4)) + +/* Receive Register Descriptions */ +#define IGC_RCTL 0x00100 /* Rx Control - RW */ +#define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40)) +#define IGC_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define IGC_RDBAL(_n) (0x0C000 + ((_n) * 0x40)) +#define IGC_RDBAH(_n) (0x0C004 + ((_n) * 0x40)) +#define IGC_RDLEN(_n) (0x0C008 + ((_n) * 0x40)) +#define IGC_RDH(_n) (0x0C010 + ((_n) * 0x40)) +#define IGC_RDT(_n) (0x0C018 + ((_n) * 0x40)) +#define IGC_RXDCTL(_n) (0x0C028 + ((_n) * 0x40)) +#define IGC_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) +#define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define IGC_RFCTL 0x05008 /* Receive Filter Control*/ +#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define IGC_RA 0x05400 /* Receive Address - RW Array */ +#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */ +#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08)) +#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08)) +#define IGC_VLANPQF 0x055B0 /* VLAN Priority Queue Filter - RW */ + +/* Transmit Register Descriptions */ +#define IGC_TCTL 0x00400 /* Tx Control - RW */ +#define IGC_TIPG 0x00410 /* Tx Inter-packet gap - RW */ +#define IGC_TDBAL(_n) (0x0E000 + ((_n) * 0x40)) +#define IGC_TDBAH(_n) (0x0E004 + ((_n) * 0x40)) +#define IGC_TDLEN(_n) (0x0E008 + ((_n) * 0x40)) +#define IGC_TDH(_n) (0x0E010 + ((_n) * 0x40)) +#define IGC_TDT(_n) (0x0E018 + ((_n) * 0x40)) +#define IGC_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) + +/* MMD Register Descriptions */ +#define IGC_MMDAC 13 /* MMD Access Control */ +#define IGC_MMDAAD 14 /* MMD Access Address/Data */ + +/* Statistics Register Descriptions */ +#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - 
R/clr */ +#define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */ +#define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define IGC_COLC 0x04028 /* Collision Count - R/clr */ +#define IGC_RERC 0x0402C /* Receive Error Count - R/clr */ +#define IGC_DC 0x04030 /* Defer Count - R/clr */ +#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded by MAC - R/clr */ +#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define IGC_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define IGC_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define IGC_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define IGC_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define IGC_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define IGC_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define IGC_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define IGC_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define IGC_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define IGC_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define IGC_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define IGC_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define IGC_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define IGC_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define IGC_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define IGC_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define IGC_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define IGC_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define IGC_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define IGC_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define IGC_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define IGC_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define IGC_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define IGC_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define IGC_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define IGC_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define IGC_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define IGC_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define IGC_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define IGC_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define IGC_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define IGC_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define IGC_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define IGC_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define IGC_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define IGC_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define IGC_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define IGC_IAC 0x04100 /* Interrupt Assertion Count */ +#define IGC_RPTHC 0x04104 /* Rx Packets To Host 
*/ +#define IGC_TLPIC 0x04148 /* EEE Tx LPI Count */ +#define IGC_RLPIC 0x0414C /* EEE Rx LPI Count */ +#define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */ +#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define IGC_LENERRS 0x04138 /* Length Errors Count */ + +/* Time sync registers */ +#define IGC_TSICR 0x0B66C /* Time Sync Interrupt Cause */ +#define IGC_TSIM 0x0B674 /* Time Sync Interrupt Mask Register */ +#define IGC_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define IGC_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define IGC_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define IGC_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define IGC_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define IGC_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define IGC_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define IGC_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define IGC_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define IGC_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define IGC_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define IGC_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ + +#define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ + +#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ + +/* Transmit Scheduling Registers */ +#define IGC_TQAVCTRL 0x3570 +#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n)) +#define IGC_GTXOFFSET 0x3310 +#define IGC_BASET_L 0x3314 +#define IGC_BASET_H 0x3318 +#define IGC_QBVCYCLET 0x331C +#define IGC_QBVCYCLET_S 0x3320 + +#define IGC_STQT(_n) (0x3324 + 0x4 * (_n)) +#define IGC_ENDQT(_n) (0x3334 + 0x4 * (_n)) +#define IGC_DTXMXPKTSZ 0x355C + +#define IGC_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) +#define IGC_TQAVHC(_n) (0x300C + ((_n) * 0x40)) + +/* System Time Registers */ +#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */ +#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */ +#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */ + +#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ + +#define IGC_TIMADJ 0x0B60C /* Time Adjustment Offset Register */ + +/* PCIe Registers */ +#define IGC_PTM_CTRL 0x12540 /* PTM Control */ +#define IGC_PTM_STAT 0x12544 /* PTM Status */ +#define IGC_PTM_CYCLE_CTRL 0x1254C /* PTM Cycle Control */ + +/* PTM Time registers */ +#define IGC_PTM_T1_TIM0_L 0x12558 /* T1 on Timer 0 Low */ +#define IGC_PTM_T1_TIM0_H 0x1255C /* T1 on Timer 0 High */ + +#define IGC_PTM_CURR_T2_L 0x1258C /* Current T2 Low */ +#define IGC_PTM_CURR_T2_H 0x12590 /* Current T2 High */ +#define IGC_PTM_PREV_T2_L 0x12584 /* Previous T2 Low */ +#define IGC_PTM_PREV_T2_H 0x12588 
/* Previous T2 High */ +#define IGC_PTM_PREV_T4M1 0x12578 /* T4 Minus T1 on previous PTM Cycle */ +#define IGC_PTM_CURR_T4M1 0x1257C /* T4 Minus T1 on this PTM Cycle */ +#define IGC_PTM_PREV_T3M2 0x12580 /* T3 Minus T2 on previous PTM Cycle */ +#define IGC_PTM_TDELAY 0x12594 /* PTM PCIe Link Delay */ + +#define IGC_PCIE_DIG_DELAY 0x12550 /* PCIe Digital Delay */ +#define IGC_PCIE_PHY_DELAY 0x12554 /* PCIe PHY Delay */ + +/* Management registers */ +#define IGC_MANC 0x05820 /* Management Control - RW */ + +/* Shadow Ram Write Register - RW */ +#define IGC_SRWR 0x12018 + +/* Wake Up registers */ +#define IGC_WUC 0x05800 /* Wakeup Control - RW */ +#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */ +#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define IGC_WUFC_EXT 0x0580C /* Wakeup Filter Control Register Extended - RW */ + +/* Wake Up packet memory */ +#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) + +/* Energy Efficient Ethernet "EEE" registers */ +#define IGC_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ +#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define IGC_EEE_SU 0x0E34 /* EEE Setup */ + +/* LTR registers */ +#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ +#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */ +#define IGC_LTRMAXV 0x5BB4 /* LTR Maximum Value */ + +/* forward declaration */ +struct igc_hw; +u32 igc_rd32(struct igc_hw *hw, u32 reg); + +/* write operations, indexed using DWORDS */ +#define wr32(reg, val) \ +do { \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ + if (!IGC_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ +} while (0) + +#define rd32(reg) (igc_rd32(hw, reg)) + +#define wrfl() ((void)rd32(IGC_STATUS)) + +#define array_wr32(reg, offset, value) \ + wr32((reg) + ((offset) << 2), (value)) + +#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2))) + +#define IGC_REMOVED(h) unlikely(!(h)) + +#endif diff --git a/devices/igc/igc_regs-6.4-orig.h b/devices/igc/igc_regs-6.4-orig.h new file mode 100644 index 00000000..dba5a575 --- /dev/null +++ b/devices/igc/igc_regs-6.4-orig.h @@ -0,0 +1,321 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_REGS_H_ +#define _IGC_REGS_H_ + +/* General Register Descriptions */ +#define IGC_CTRL 0x00000 /* Device Control - RW */ +#define IGC_STATUS 0x00008 /* Device Status - RO */ +#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define IGC_MDIC 0x00020 /* MDI Control - RW */ +#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define IGC_VET 0x00038 /* VLAN Ether Type - RW */ +#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */ +#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */ + +/* Internal Packet Buffer Size Registers */ +#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ + +/* NVM Register Descriptions */ +#define IGC_EERD 0x12014 /* EEprom mode read - RW */ +#define IGC_EEWR 0x12018 /* EEprom mode write - RW */ + +/* Flow Control Register Descriptions */ +#define IGC_FCAL 0x00028 /* FC Address Low - RW */ +#define IGC_FCAH 0x0002C /* FC Address High - RW */ +#define IGC_FCT 0x00030 /* FC Type - RW */ +#define IGC_FCTTV 0x00170 /* FC Transmit Timer - RW */ +#define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */ +#define IGC_FCRTH 0x02168 /* FC Receive Threshold 
High - RW */ +#define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */ + +/* Semaphore registers */ +#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define IGC_SWSM 0x05B50 /* SW Semaphore */ +#define IGC_FWSM 0x05B54 /* FW Semaphore */ + +/* Function Active and Power State to MNG */ +#define IGC_FACTPS 0x05B30 + +/* Interrupt Register Description */ +#define IGC_EICR 0x01580 /* Ext. Interrupt Cause read - W0 */ +#define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define IGC_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define IGC_EIAM 0x01530 /* Ext. Interrupt Auto Mask - RW */ +#define IGC_ICR 0x01500 /* Intr Cause Read - RC/W1C */ +#define IGC_ICS 0x01504 /* Intr Cause Set - WO */ +#define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */ +#define IGC_IMC 0x0150C /* Intr Mask Clear - WO */ +#define IGC_IAM 0x01510 /* Intr Ack Auto Mask- RW */ +/* Intr Throttle - RW */ +#define IGC_EITR(_n) (0x01680 + (0x4 * (_n))) +/* Interrupt Vector Allocation - RW */ +#define IGC_IVAR0 0x01700 +#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */ + +/* RSS registers */ +#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */ + +/* Filtering Registers */ +#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ +#define IGC_FHFT(_n) (0x09000 + (256 * (_n))) /* Flexible Host Filter */ +#define IGC_FHFT_EXT(_n) (0x09A00 + (256 * (_n))) /* Flexible Host Filter Extended */ +#define IGC_FHFTSL 0x05804 /* Flex Filter indirect table select */ + +/* ETQF register bit definitions */ +#define IGC_ETQF_FILTER_ENABLE BIT(26) +#define IGC_ETQF_QUEUE_ENABLE BIT(31) +#define IGC_ETQF_QUEUE_SHIFT 16 +#define IGC_ETQF_QUEUE_MASK 0x00070000 +#define IGC_ETQF_ETYPE_MASK 0x0000FFFF + +/* FHFT register bit definitions */ +#define IGC_FHFT_LENGTH_MASK GENMASK(7, 0) +#define IGC_FHFT_QUEUE_SHIFT 8 +#define IGC_FHFT_QUEUE_MASK GENMASK(10, 8) +#define IGC_FHFT_PRIO_SHIFT 16 +#define IGC_FHFT_PRIO_MASK GENMASK(18, 16) +#define IGC_FHFT_IMM_INT BIT(24) +#define IGC_FHFT_DROP BIT(25) + +/* FHFTSL register bit definitions */ +#define IGC_FHFTSL_FTSL_SHIFT 0 +#define IGC_FHFTSL_FTSL_MASK GENMASK(1, 0) + +/* Redirection Table - RW Array */ +#define IGC_RETA(_i) (0x05C00 + ((_i) * 4)) +/* RSS Random Key - RW Array */ +#define IGC_RSSRK(_i) (0x05C80 + ((_i) * 4)) + +/* Receive Register Descriptions */ +#define IGC_RCTL 0x00100 /* Rx Control - RW */ +#define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40)) +#define IGC_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define IGC_RDBAL(_n) (0x0C000 + ((_n) * 0x40)) +#define IGC_RDBAH(_n) (0x0C004 + ((_n) * 0x40)) +#define IGC_RDLEN(_n) (0x0C008 + ((_n) * 0x40)) +#define IGC_RDH(_n) (0x0C010 + ((_n) * 0x40)) +#define IGC_RDT(_n) (0x0C018 + ((_n) * 0x40)) +#define IGC_RXDCTL(_n) (0x0C028 + ((_n) * 0x40)) +#define IGC_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) +#define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define IGC_RFCTL 0x05008 /* Receive Filter Control*/ +#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define IGC_RA 0x05400 /* Receive Address - RW Array */ +#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */ +#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08)) +#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08)) +#define IGC_VLANPQF 0x055B0 /* VLAN Priority Queue Filter - RW */ + 
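For reference, the per-queue receive registers above (IGC_SRRCTL, IGC_RDBAL/IGC_RDBAH, IGC_RDLEN, IGC_RDH, IGC_RDT, IGC_RXDCTL, IGC_RQDPC) all use a 0x40-byte stride per queue; that is what lets igc_ptp_enable_rx_timestamp() earlier in this patch simply loop over adapter->num_rx_queues and index IGC_SRRCTL(i). A minimal standalone sketch of that address arithmetic follows; only the IGC_SRRCTL() macro is taken from this header, everything else is illustrative and not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Same stride-based addressing as IGC_SRRCTL(_n) in the header above. */
#define IGC_SRRCTL(n)  (0x0C00Cu + ((n) * 0x40u))

int main(void)
{
        unsigned int i;

        /* The driver walks the RX queues like this when enabling or
         * disabling hardware timestamping; here we only print the
         * resulting register offsets. */
        for (i = 0; i < 4; i++)
                printf("RX queue %u: SRRCTL at 0x%05X\n", i, IGC_SRRCTL(i));

        return 0;
}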
+/* Transmit Register Descriptions */ +#define IGC_TCTL 0x00400 /* Tx Control - RW */ +#define IGC_TIPG 0x00410 /* Tx Inter-packet gap - RW */ +#define IGC_TDBAL(_n) (0x0E000 + ((_n) * 0x40)) +#define IGC_TDBAH(_n) (0x0E004 + ((_n) * 0x40)) +#define IGC_TDLEN(_n) (0x0E008 + ((_n) * 0x40)) +#define IGC_TDH(_n) (0x0E010 + ((_n) * 0x40)) +#define IGC_TDT(_n) (0x0E018 + ((_n) * 0x40)) +#define IGC_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) + +/* MMD Register Descriptions */ +#define IGC_MMDAC 13 /* MMD Access Control */ +#define IGC_MMDAAD 14 /* MMD Access Address/Data */ + +/* Statistics Register Descriptions */ +#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */ +#define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define IGC_COLC 0x04028 /* Collision Count - R/clr */ +#define IGC_RERC 0x0402C /* Receive Error Count - R/clr */ +#define IGC_DC 0x04030 /* Defer Count - R/clr */ +#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded by MAC - R/clr */ +#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define IGC_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define IGC_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define IGC_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define IGC_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define IGC_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define IGC_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define IGC_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define IGC_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define IGC_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define IGC_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define IGC_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define IGC_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define IGC_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define IGC_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define IGC_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define IGC_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define IGC_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define IGC_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define IGC_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define IGC_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define IGC_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define IGC_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define IGC_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define IGC_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define IGC_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define IGC_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define IGC_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define IGC_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define IGC_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define IGC_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define IGC_TPT 0x040D4 /* Total Packets Tx - 
R/clr */ +#define IGC_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define IGC_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define IGC_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define IGC_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define IGC_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define IGC_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define IGC_IAC 0x04100 /* Interrupt Assertion Count */ +#define IGC_RPTHC 0x04104 /* Rx Packets To Host */ +#define IGC_TLPIC 0x04148 /* EEE Tx LPI Count */ +#define IGC_RLPIC 0x0414C /* EEE Rx LPI Count */ +#define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */ +#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define IGC_LENERRS 0x04138 /* Length Errors Count */ + +/* Time sync registers */ +#define IGC_TSICR 0x0B66C /* Time Sync Interrupt Cause */ +#define IGC_TSIM 0x0B674 /* Time Sync Interrupt Mask Register */ +#define IGC_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define IGC_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define IGC_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define IGC_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define IGC_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define IGC_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define IGC_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define IGC_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define IGC_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define IGC_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define IGC_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define IGC_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ + +#define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ + +#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ + +/* Transmit Scheduling Registers */ +#define IGC_TQAVCTRL 0x3570 +#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n)) +#define IGC_GTXOFFSET 0x3310 +#define IGC_BASET_L 0x3314 +#define IGC_BASET_H 0x3318 +#define IGC_QBVCYCLET 0x331C +#define IGC_QBVCYCLET_S 0x3320 + +#define IGC_STQT(_n) (0x3324 + 0x4 * (_n)) +#define IGC_ENDQT(_n) (0x3334 + 0x4 * (_n)) +#define IGC_DTXMXPKTSZ 0x355C + +#define IGC_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) +#define IGC_TQAVHC(_n) (0x300C + ((_n) * 0x40)) + +/* System Time Registers */ +#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */ +#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */ +#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */ 
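The system time and Tx timestamp values exposed through the SYSTIM registers above and the TXSTMP pair that follows are split into 32-bit low/high halves. The driver code earlier in this patch (for example igc_ptp_tx_hwtstamp()) reads the low half first and then merges the high half into a 64-bit value. Below is a small standalone sketch of that composition; the dummy_txstmpl()/dummy_txstmph() helpers are made-up stand-ins for rd32(IGC_TXSTMPL)/rd32(IGC_TXSTMPH), since the real rd32() macro needs the mapped hw_addr.

#include <stdint.h>
#include <stdio.h>

/* Made-up stand-ins for rd32(IGC_TXSTMPL) / rd32(IGC_TXSTMPH). */
static uint32_t dummy_txstmpl(void) { return 0x1DCD6500u; }
static uint32_t dummy_txstmph(void) { return 0x00000002u; }

int main(void)
{
        /* Low half first, then shift the high half in, as the driver does. */
        uint64_t regval = dummy_txstmpl();

        regval |= (uint64_t)dummy_txstmph() << 32;
        printf("composed 64-bit timestamp register value: 0x%016llX\n",
               (unsigned long long)regval);

        return 0;
}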
+ +#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ + +#define IGC_TIMADJ 0x0B60C /* Time Adjustment Offset Register */ + +/* PCIe Registers */ +#define IGC_PTM_CTRL 0x12540 /* PTM Control */ +#define IGC_PTM_STAT 0x12544 /* PTM Status */ +#define IGC_PTM_CYCLE_CTRL 0x1254C /* PTM Cycle Control */ + +/* PTM Time registers */ +#define IGC_PTM_T1_TIM0_L 0x12558 /* T1 on Timer 0 Low */ +#define IGC_PTM_T1_TIM0_H 0x1255C /* T1 on Timer 0 High */ + +#define IGC_PTM_CURR_T2_L 0x1258C /* Current T2 Low */ +#define IGC_PTM_CURR_T2_H 0x12590 /* Current T2 High */ +#define IGC_PTM_PREV_T2_L 0x12584 /* Previous T2 Low */ +#define IGC_PTM_PREV_T2_H 0x12588 /* Previous T2 High */ +#define IGC_PTM_PREV_T4M1 0x12578 /* T4 Minus T1 on previous PTM Cycle */ +#define IGC_PTM_CURR_T4M1 0x1257C /* T4 Minus T1 on this PTM Cycle */ +#define IGC_PTM_PREV_T3M2 0x12580 /* T3 Minus T2 on previous PTM Cycle */ +#define IGC_PTM_TDELAY 0x12594 /* PTM PCIe Link Delay */ + +#define IGC_PCIE_DIG_DELAY 0x12550 /* PCIe Digital Delay */ +#define IGC_PCIE_PHY_DELAY 0x12554 /* PCIe PHY Delay */ + +/* Management registers */ +#define IGC_MANC 0x05820 /* Management Control - RW */ + +/* Shadow Ram Write Register - RW */ +#define IGC_SRWR 0x12018 + +/* Wake Up registers */ +#define IGC_WUC 0x05800 /* Wakeup Control - RW */ +#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */ +#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define IGC_WUFC_EXT 0x0580C /* Wakeup Filter Control Register Extended - RW */ + +/* Wake Up packet memory */ +#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) + +/* Energy Efficient Ethernet "EEE" registers */ +#define IGC_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ +#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define IGC_EEE_SU 0x0E34 /* EEE Setup */ + +/* LTR registers */ +#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ +#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */ +#define IGC_LTRMAXV 0x5BB4 /* LTR Maximum Value */ + +/* forward declaration */ +struct igc_hw; +u32 igc_rd32(struct igc_hw *hw, u32 reg); + +/* write operations, indexed using DWORDS */ +#define wr32(reg, val) \ +do { \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ + if (!IGC_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ +} while (0) + +#define rd32(reg) (igc_rd32(hw, reg)) + +#define wrfl() ((void)rd32(IGC_STATUS)) + +#define array_wr32(reg, offset, value) \ + wr32((reg) + ((offset) << 2), (value)) + +#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2))) + +#define IGC_REMOVED(h) unlikely(!(h)) + +#endif diff --git a/devices/igc/igc_regs-6.6-ethercat.h b/devices/igc/igc_regs-6.6-ethercat.h new file mode 100644 index 00000000..20e17f5f --- /dev/null +++ b/devices/igc/igc_regs-6.6-ethercat.h @@ -0,0 +1,333 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_REGS_H_ +#define _IGC_REGS_H_ + +/* General Register Descriptions */ +#define IGC_CTRL 0x00000 /* Device Control - RW */ +#define IGC_STATUS 0x00008 /* Device Status - RO */ +#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define IGC_MDIC 0x00020 /* MDI Control - RW */ +#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define IGC_VET 0x00038 /* VLAN Ether Type - RW */ +#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */ 
+#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */ + +/* Internal Packet Buffer Size Registers */ +#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ + +/* NVM Register Descriptions */ +#define IGC_EERD 0x12014 /* EEprom mode read - RW */ +#define IGC_EEWR 0x12018 /* EEprom mode write - RW */ + +/* Flow Control Register Descriptions */ +#define IGC_FCAL 0x00028 /* FC Address Low - RW */ +#define IGC_FCAH 0x0002C /* FC Address High - RW */ +#define IGC_FCT 0x00030 /* FC Type - RW */ +#define IGC_FCTTV 0x00170 /* FC Transmit Timer - RW */ +#define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */ +#define IGC_FCRTH 0x02168 /* FC Receive Threshold High - RW */ +#define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */ + +/* Semaphore registers */ +#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define IGC_SWSM 0x05B50 /* SW Semaphore */ +#define IGC_FWSM 0x05B54 /* FW Semaphore */ + +/* Function Active and Power State to MNG */ +#define IGC_FACTPS 0x05B30 + +/* Interrupt Register Description */ +#define IGC_EICR 0x01580 /* Ext. Interrupt Cause read - W0 */ +#define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define IGC_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define IGC_EIAM 0x01530 /* Ext. Interrupt Auto Mask - RW */ +#define IGC_ICR 0x01500 /* Intr Cause Read - RC/W1C */ +#define IGC_ICS 0x01504 /* Intr Cause Set - WO */ +#define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */ +#define IGC_IMC 0x0150C /* Intr Mask Clear - WO */ +#define IGC_IAM 0x01510 /* Intr Ack Auto Mask- RW */ +/* Intr Throttle - RW */ +#define IGC_EITR(_n) (0x01680 + (0x4 * (_n))) +/* Interrupt Vector Allocation - RW */ +#define IGC_IVAR0 0x01700 +#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */ + +/* RSS registers */ +#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */ + +/* Filtering Registers */ +#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ +#define IGC_FHFT(_n) (0x09000 + (256 * (_n))) /* Flexible Host Filter */ +#define IGC_FHFT_EXT(_n) (0x09A00 + (256 * (_n))) /* Flexible Host Filter Extended */ +#define IGC_FHFTSL 0x05804 /* Flex Filter indirect table select */ + +/* ETQF register bit definitions */ +#define IGC_ETQF_FILTER_ENABLE BIT(26) +#define IGC_ETQF_QUEUE_ENABLE BIT(31) +#define IGC_ETQF_QUEUE_SHIFT 16 +#define IGC_ETQF_QUEUE_MASK 0x00070000 +#define IGC_ETQF_ETYPE_MASK 0x0000FFFF + +/* FHFT register bit definitions */ +#define IGC_FHFT_LENGTH_MASK GENMASK(7, 0) +#define IGC_FHFT_QUEUE_SHIFT 8 +#define IGC_FHFT_QUEUE_MASK GENMASK(10, 8) +#define IGC_FHFT_PRIO_SHIFT 16 +#define IGC_FHFT_PRIO_MASK GENMASK(18, 16) +#define IGC_FHFT_IMM_INT BIT(24) +#define IGC_FHFT_DROP BIT(25) + +/* FHFTSL register bit definitions */ +#define IGC_FHFTSL_FTSL_SHIFT 0 +#define IGC_FHFTSL_FTSL_MASK GENMASK(1, 0) + +/* Redirection Table - RW Array */ +#define IGC_RETA(_i) (0x05C00 + ((_i) * 4)) +/* RSS Random Key - RW Array */ +#define IGC_RSSRK(_i) (0x05C80 + ((_i) * 4)) + +/* Receive Register Descriptions */ +#define IGC_RCTL 0x00100 /* Rx Control - RW */ +#define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40)) +#define IGC_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define IGC_RDBAL(_n) (0x0C000 + ((_n) * 0x40)) +#define IGC_RDBAH(_n) (0x0C004 + ((_n) * 0x40)) +#define 
IGC_RDLEN(_n) (0x0C008 + ((_n) * 0x40)) +#define IGC_RDH(_n) (0x0C010 + ((_n) * 0x40)) +#define IGC_RDT(_n) (0x0C018 + ((_n) * 0x40)) +#define IGC_RXDCTL(_n) (0x0C028 + ((_n) * 0x40)) +#define IGC_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) +#define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define IGC_RFCTL 0x05008 /* Receive Filter Control*/ +#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define IGC_RA 0x05400 /* Receive Address - RW Array */ +#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */ +#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08)) +#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08)) +#define IGC_VLANPQF 0x055B0 /* VLAN Priority Queue Filter - RW */ + +/* Transmit Register Descriptions */ +#define IGC_TCTL 0x00400 /* Tx Control - RW */ +#define IGC_TIPG 0x00410 /* Tx Inter-packet gap - RW */ +#define IGC_TDBAL(_n) (0x0E000 + ((_n) * 0x40)) +#define IGC_TDBAH(_n) (0x0E004 + ((_n) * 0x40)) +#define IGC_TDLEN(_n) (0x0E008 + ((_n) * 0x40)) +#define IGC_TDH(_n) (0x0E010 + ((_n) * 0x40)) +#define IGC_TDT(_n) (0x0E018 + ((_n) * 0x40)) +#define IGC_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) + +/* MMD Register Descriptions */ +#define IGC_MMDAC 13 /* MMD Access Control */ +#define IGC_MMDAAD 14 /* MMD Access Address/Data */ + +/* Statistics Register Descriptions */ +#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */ +#define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define IGC_COLC 0x04028 /* Collision Count - R/clr */ +#define IGC_RERC 0x0402C /* Receive Error Count - R/clr */ +#define IGC_DC 0x04030 /* Defer Count - R/clr */ +#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded by MAC - R/clr */ +#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define IGC_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define IGC_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define IGC_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define IGC_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define IGC_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define IGC_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define IGC_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define IGC_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define IGC_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define IGC_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define IGC_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define IGC_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define IGC_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define IGC_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define IGC_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define IGC_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define IGC_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define IGC_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define IGC_RUC 0x040A4 /* Rx Undersize 
Count - R/clr */ +#define IGC_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define IGC_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define IGC_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define IGC_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define IGC_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define IGC_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define IGC_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define IGC_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define IGC_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define IGC_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define IGC_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define IGC_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define IGC_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define IGC_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define IGC_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define IGC_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define IGC_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define IGC_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define IGC_IAC 0x04100 /* Interrupt Assertion Count */ +#define IGC_RPTHC 0x04104 /* Rx Packets To Host */ +#define IGC_TLPIC 0x04148 /* EEE Tx LPI Count */ +#define IGC_RLPIC 0x0414C /* EEE Rx LPI Count */ +#define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */ +#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define IGC_LENERRS 0x04138 /* Length Errors Count */ + +/* Time sync registers */ +#define IGC_TSICR 0x0B66C /* Time Sync Interrupt Cause */ +#define IGC_TSIM 0x0B674 /* Time Sync Interrupt Mask Register */ +#define IGC_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define IGC_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define IGC_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define IGC_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define IGC_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define IGC_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define IGC_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define IGC_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define IGC_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define IGC_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define IGC_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define IGC_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ + +#define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ + +#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ + +/* Transmit Scheduling 
Registers */ +#define IGC_TQAVCTRL 0x3570 +#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n)) +#define IGC_GTXOFFSET 0x3310 +#define IGC_BASET_L 0x3314 +#define IGC_BASET_H 0x3318 +#define IGC_QBVCYCLET 0x331C +#define IGC_QBVCYCLET_S 0x3320 + +#define IGC_STQT(_n) (0x3324 + 0x4 * (_n)) +#define IGC_ENDQT(_n) (0x3334 + 0x4 * (_n)) +#define IGC_DTXMXPKTSZ 0x355C + +#define IGC_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) +#define IGC_TQAVHC(_n) (0x300C + ((_n) * 0x40)) + +/* System Time Registers */ +#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */ +#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */ +#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */ + +/* TX Timestamp Low */ +#define IGC_TXSTMPL_0 0x0B618 +#define IGC_TXSTMPL_1 0x0B698 +#define IGC_TXSTMPL_2 0x0B6B8 +#define IGC_TXSTMPL_3 0x0B6D8 + +/* TX Timestamp High */ +#define IGC_TXSTMPH_0 0x0B61C +#define IGC_TXSTMPH_1 0x0B69C +#define IGC_TXSTMPH_2 0x0B6BC +#define IGC_TXSTMPH_3 0x0B6DC + +#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ + +#define IGC_TIMADJ 0x0B60C /* Time Adjustment Offset Register */ + +/* PCIe Registers */ +#define IGC_PTM_CTRL 0x12540 /* PTM Control */ +#define IGC_PTM_STAT 0x12544 /* PTM Status */ +#define IGC_PTM_CYCLE_CTRL 0x1254C /* PTM Cycle Control */ + +/* PTM Time registers */ +#define IGC_PTM_T1_TIM0_L 0x12558 /* T1 on Timer 0 Low */ +#define IGC_PTM_T1_TIM0_H 0x1255C /* T1 on Timer 0 High */ + +#define IGC_PTM_CURR_T2_L 0x1258C /* Current T2 Low */ +#define IGC_PTM_CURR_T2_H 0x12590 /* Current T2 High */ +#define IGC_PTM_PREV_T2_L 0x12584 /* Previous T2 Low */ +#define IGC_PTM_PREV_T2_H 0x12588 /* Previous T2 High */ +#define IGC_PTM_PREV_T4M1 0x12578 /* T4 Minus T1 on previous PTM Cycle */ +#define IGC_PTM_CURR_T4M1 0x1257C /* T4 Minus T1 on this PTM Cycle */ +#define IGC_PTM_PREV_T3M2 0x12580 /* T3 Minus T2 on previous PTM Cycle */ +#define IGC_PTM_TDELAY 0x12594 /* PTM PCIe Link Delay */ + +#define IGC_PCIE_DIG_DELAY 0x12550 /* PCIe Digital Delay */ +#define IGC_PCIE_PHY_DELAY 0x12554 /* PCIe PHY Delay */ + +/* Management registers */ +#define IGC_MANC 0x05820 /* Management Control - RW */ + +/* Shadow Ram Write Register - RW */ +#define IGC_SRWR 0x12018 + +/* Wake Up registers */ +#define IGC_WUC 0x05800 /* Wakeup Control - RW */ +#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */ +#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define IGC_WUFC_EXT 0x0580C /* Wakeup Filter Control Register Extended - RW */ + +/* Wake Up packet memory */ +#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) + +/* Energy Efficient Ethernet "EEE" registers */ +#define IGC_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ +#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define IGC_EEE_SU 0x0E34 /* EEE Setup */ + +/* LTR registers */ +#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ +#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */ +#define IGC_LTRMAXV 0x5BB4 /* LTR Maximum Value */ + +/* forward declaration */ +struct igc_hw; +u32 igc_rd32(struct igc_hw *hw, u32 reg); + +/* write operations, indexed using DWORDS */ +#define wr32(reg, val) \ +do { \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ + if (!IGC_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ +} while (0) + +#define rd32(reg) (igc_rd32(hw, reg)) + +#define wrfl() 
((void)rd32(IGC_STATUS)) + +#define array_wr32(reg, offset, value) \ + wr32((reg) + ((offset) << 2), (value)) + +#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2))) + +#define IGC_REMOVED(h) unlikely(!(h)) + +#endif diff --git a/devices/igc/igc_regs-6.6-orig.h b/devices/igc/igc_regs-6.6-orig.h new file mode 100644 index 00000000..20e17f5f --- /dev/null +++ b/devices/igc/igc_regs-6.6-orig.h @@ -0,0 +1,333 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Intel Corporation */ + +#ifndef _IGC_REGS_H_ +#define _IGC_REGS_H_ + +/* General Register Descriptions */ +#define IGC_CTRL 0x00000 /* Device Control - RW */ +#define IGC_STATUS 0x00008 /* Device Status - RO */ +#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define IGC_MDIC 0x00020 /* MDI Control - RW */ +#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define IGC_VET 0x00038 /* VLAN Ether Type - RW */ +#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */ +#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */ + +/* Internal Packet Buffer Size Registers */ +#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ + +/* NVM Register Descriptions */ +#define IGC_EERD 0x12014 /* EEprom mode read - RW */ +#define IGC_EEWR 0x12018 /* EEprom mode write - RW */ + +/* Flow Control Register Descriptions */ +#define IGC_FCAL 0x00028 /* FC Address Low - RW */ +#define IGC_FCAH 0x0002C /* FC Address High - RW */ +#define IGC_FCT 0x00030 /* FC Type - RW */ +#define IGC_FCTTV 0x00170 /* FC Transmit Timer - RW */ +#define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */ +#define IGC_FCRTH 0x02168 /* FC Receive Threshold High - RW */ +#define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */ + +/* Semaphore registers */ +#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define IGC_SWSM 0x05B50 /* SW Semaphore */ +#define IGC_FWSM 0x05B54 /* FW Semaphore */ + +/* Function Active and Power State to MNG */ +#define IGC_FACTPS 0x05B30 + +/* Interrupt Register Description */ +#define IGC_EICR 0x01580 /* Ext. Interrupt Cause read - W0 */ +#define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define IGC_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define IGC_EIAM 0x01530 /* Ext. 
Interrupt Auto Mask - RW */ +#define IGC_ICR 0x01500 /* Intr Cause Read - RC/W1C */ +#define IGC_ICS 0x01504 /* Intr Cause Set - WO */ +#define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */ +#define IGC_IMC 0x0150C /* Intr Mask Clear - WO */ +#define IGC_IAM 0x01510 /* Intr Ack Auto Mask- RW */ +/* Intr Throttle - RW */ +#define IGC_EITR(_n) (0x01680 + (0x4 * (_n))) +/* Interrupt Vector Allocation - RW */ +#define IGC_IVAR0 0x01700 +#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */ + +/* RSS registers */ +#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */ + +/* Filtering Registers */ +#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ +#define IGC_FHFT(_n) (0x09000 + (256 * (_n))) /* Flexible Host Filter */ +#define IGC_FHFT_EXT(_n) (0x09A00 + (256 * (_n))) /* Flexible Host Filter Extended */ +#define IGC_FHFTSL 0x05804 /* Flex Filter indirect table select */ + +/* ETQF register bit definitions */ +#define IGC_ETQF_FILTER_ENABLE BIT(26) +#define IGC_ETQF_QUEUE_ENABLE BIT(31) +#define IGC_ETQF_QUEUE_SHIFT 16 +#define IGC_ETQF_QUEUE_MASK 0x00070000 +#define IGC_ETQF_ETYPE_MASK 0x0000FFFF + +/* FHFT register bit definitions */ +#define IGC_FHFT_LENGTH_MASK GENMASK(7, 0) +#define IGC_FHFT_QUEUE_SHIFT 8 +#define IGC_FHFT_QUEUE_MASK GENMASK(10, 8) +#define IGC_FHFT_PRIO_SHIFT 16 +#define IGC_FHFT_PRIO_MASK GENMASK(18, 16) +#define IGC_FHFT_IMM_INT BIT(24) +#define IGC_FHFT_DROP BIT(25) + +/* FHFTSL register bit definitions */ +#define IGC_FHFTSL_FTSL_SHIFT 0 +#define IGC_FHFTSL_FTSL_MASK GENMASK(1, 0) + +/* Redirection Table - RW Array */ +#define IGC_RETA(_i) (0x05C00 + ((_i) * 4)) +/* RSS Random Key - RW Array */ +#define IGC_RSSRK(_i) (0x05C80 + ((_i) * 4)) + +/* Receive Register Descriptions */ +#define IGC_RCTL 0x00100 /* Rx Control - RW */ +#define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40)) +#define IGC_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define IGC_RDBAL(_n) (0x0C000 + ((_n) * 0x40)) +#define IGC_RDBAH(_n) (0x0C004 + ((_n) * 0x40)) +#define IGC_RDLEN(_n) (0x0C008 + ((_n) * 0x40)) +#define IGC_RDH(_n) (0x0C010 + ((_n) * 0x40)) +#define IGC_RDT(_n) (0x0C018 + ((_n) * 0x40)) +#define IGC_RXDCTL(_n) (0x0C028 + ((_n) * 0x40)) +#define IGC_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) +#define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define IGC_RFCTL 0x05008 /* Receive Filter Control*/ +#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define IGC_RA 0x05400 /* Receive Address - RW Array */ +#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */ +#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08)) +#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08)) +#define IGC_VLANPQF 0x055B0 /* VLAN Priority Queue Filter - RW */ + +/* Transmit Register Descriptions */ +#define IGC_TCTL 0x00400 /* Tx Control - RW */ +#define IGC_TIPG 0x00410 /* Tx Inter-packet gap - RW */ +#define IGC_TDBAL(_n) (0x0E000 + ((_n) * 0x40)) +#define IGC_TDBAH(_n) (0x0E004 + ((_n) * 0x40)) +#define IGC_TDLEN(_n) (0x0E008 + ((_n) * 0x40)) +#define IGC_TDH(_n) (0x0E010 + ((_n) * 0x40)) +#define IGC_TDT(_n) (0x0E018 + ((_n) * 0x40)) +#define IGC_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) + +/* MMD Register Descriptions */ +#define IGC_MMDAC 13 /* MMD Access Control */ +#define IGC_MMDAAD 14 /* MMD Access Address/Data */ + +/* Statistics Register Descriptions */ +#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - 
R/clr */ +#define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */ +#define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define IGC_COLC 0x04028 /* Collision Count - R/clr */ +#define IGC_RERC 0x0402C /* Receive Error Count - R/clr */ +#define IGC_DC 0x04030 /* Defer Count - R/clr */ +#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded by MAC - R/clr */ +#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define IGC_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define IGC_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define IGC_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define IGC_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define IGC_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define IGC_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define IGC_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define IGC_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define IGC_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define IGC_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define IGC_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define IGC_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define IGC_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define IGC_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define IGC_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define IGC_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define IGC_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define IGC_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define IGC_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define IGC_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define IGC_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define IGC_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define IGC_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define IGC_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define IGC_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define IGC_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define IGC_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define IGC_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define IGC_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define IGC_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define IGC_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define IGC_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define IGC_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define IGC_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define IGC_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define IGC_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define IGC_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define IGC_IAC 0x04100 /* Interrupt Assertion Count */ +#define IGC_RPTHC 0x04104 /* Rx Packets To Host 
*/ +#define IGC_TLPIC 0x04148 /* EEE Tx LPI Count */ +#define IGC_RLPIC 0x0414C /* EEE Rx LPI Count */ +#define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */ +#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define IGC_LENERRS 0x04138 /* Length Errors Count */ + +/* Time sync registers */ +#define IGC_TSICR 0x0B66C /* Time Sync Interrupt Cause */ +#define IGC_TSIM 0x0B674 /* Time Sync Interrupt Mask Register */ +#define IGC_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define IGC_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define IGC_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define IGC_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define IGC_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define IGC_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define IGC_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define IGC_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define IGC_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define IGC_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define IGC_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define IGC_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define IGC_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define IGC_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define IGC_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ + +#define IGC_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define IGC_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ + +#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ + +/* Transmit Scheduling Registers */ +#define IGC_TQAVCTRL 0x3570 +#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n)) +#define IGC_GTXOFFSET 0x3310 +#define IGC_BASET_L 0x3314 +#define IGC_BASET_H 0x3318 +#define IGC_QBVCYCLET 0x331C +#define IGC_QBVCYCLET_S 0x3320 + +#define IGC_STQT(_n) (0x3324 + 0x4 * (_n)) +#define IGC_ENDQT(_n) (0x3334 + 0x4 * (_n)) +#define IGC_DTXMXPKTSZ 0x355C + +#define IGC_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) +#define IGC_TQAVHC(_n) (0x300C + ((_n) * 0x40)) + +/* System Time Registers */ +#define IGC_SYSTIML 0x0B600 /* System time register Low - RO */ +#define IGC_SYSTIMH 0x0B604 /* System time register High - RO */ +#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */ + +/* TX Timestamp Low */ +#define IGC_TXSTMPL_0 0x0B618 +#define IGC_TXSTMPL_1 0x0B698 +#define IGC_TXSTMPL_2 0x0B6B8 +#define IGC_TXSTMPL_3 0x0B6D8 + +/* TX Timestamp High */ +#define IGC_TXSTMPH_0 0x0B61C +#define IGC_TXSTMPH_1 0x0B69C +#define IGC_TXSTMPH_2 0x0B6BC +#define IGC_TXSTMPH_3 0x0B6DC + +#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ + +#define IGC_TIMADJ 0x0B60C /* Time Adjustment Offset Register */ + +/* PCIe Registers */ +#define IGC_PTM_CTRL 0x12540 /* PTM Control */ +#define IGC_PTM_STAT 0x12544 /* PTM Status */ +#define IGC_PTM_CYCLE_CTRL 0x1254C /* PTM Cycle Control */ + +/* PTM Time registers */ +#define 
IGC_PTM_T1_TIM0_L 0x12558 /* T1 on Timer 0 Low */ +#define IGC_PTM_T1_TIM0_H 0x1255C /* T1 on Timer 0 High */ + +#define IGC_PTM_CURR_T2_L 0x1258C /* Current T2 Low */ +#define IGC_PTM_CURR_T2_H 0x12590 /* Current T2 High */ +#define IGC_PTM_PREV_T2_L 0x12584 /* Previous T2 Low */ +#define IGC_PTM_PREV_T2_H 0x12588 /* Previous T2 High */ +#define IGC_PTM_PREV_T4M1 0x12578 /* T4 Minus T1 on previous PTM Cycle */ +#define IGC_PTM_CURR_T4M1 0x1257C /* T4 Minus T1 on this PTM Cycle */ +#define IGC_PTM_PREV_T3M2 0x12580 /* T3 Minus T2 on previous PTM Cycle */ +#define IGC_PTM_TDELAY 0x12594 /* PTM PCIe Link Delay */ + +#define IGC_PCIE_DIG_DELAY 0x12550 /* PCIe Digital Delay */ +#define IGC_PCIE_PHY_DELAY 0x12554 /* PCIe PHY Delay */ + +/* Management registers */ +#define IGC_MANC 0x05820 /* Management Control - RW */ + +/* Shadow Ram Write Register - RW */ +#define IGC_SRWR 0x12018 + +/* Wake Up registers */ +#define IGC_WUC 0x05800 /* Wakeup Control - RW */ +#define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */ +#define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define IGC_WUFC_EXT 0x0580C /* Wakeup Filter Control Register Extended - RW */ + +/* Wake Up packet memory */ +#define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) + +/* Energy Efficient Ethernet "EEE" registers */ +#define IGC_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ +#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define IGC_EEE_SU 0x0E34 /* EEE Setup */ + +/* LTR registers */ +#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ +#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */ +#define IGC_LTRMAXV 0x5BB4 /* LTR Maximum Value */ + +/* forward declaration */ +struct igc_hw; +u32 igc_rd32(struct igc_hw *hw, u32 reg); + +/* write operations, indexed using DWORDS */ +#define wr32(reg, val) \ +do { \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ + if (!IGC_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ +} while (0) + +#define rd32(reg) (igc_rd32(hw, reg)) + +#define wrfl() ((void)rd32(IGC_STATUS)) + +#define array_wr32(reg, offset, value) \ + wr32((reg) + ((offset) << 2), (value)) + +#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2))) + +#define IGC_REMOVED(h) unlikely(!(h)) + +#endif diff --git a/devices/igc/igc_tsn-6.4-ethercat.c b/devices/igc/igc_tsn-6.4-ethercat.c new file mode 100644 index 00000000..943d18b9 --- /dev/null +++ b/devices/igc/igc_tsn-6.4-ethercat.c @@ -0,0 +1,350 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Intel Corporation */ + +#include "igc-6.4-ethercat.h" +#include "igc_hw-6.4-ethercat.h" +#include "igc_tsn-6.4-ethercat.h" + +static bool is_any_launchtime(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->launchtime_enable) + return true; + } + + return false; +} + +static bool is_cbs_enabled(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->cbs_enable) + return true; + } + + return false; +} + +static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter) +{ + unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED; + + if (adapter->taprio_offload_enable) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_any_launchtime(adapter)) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_cbs_enabled(adapter)) + new_flags |= IGC_FLAG_TSN_QAV_ENABLED; + + 
return new_flags; +} + +void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u16 txoffset; + + if (!is_any_launchtime(adapter)) + return; + + switch (adapter->link_speed) { + case SPEED_10: + txoffset = IGC_TXOFFSET_SPEED_10; + break; + case SPEED_100: + txoffset = IGC_TXOFFSET_SPEED_100; + break; + case SPEED_1000: + txoffset = IGC_TXOFFSET_SPEED_1000; + break; + case SPEED_2500: + txoffset = IGC_TXOFFSET_SPEED_2500; + break; + default: + txoffset = 0; + break; + } + + wr32(IGC_GTXOFFSET, txoffset); +} + +/* Returns the TSN specific registers to their default values after + * the adapter is reset. + */ +static int igc_tsn_disable_offload(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tqavctrl; + int i; + + wr32(IGC_GTXOFFSET, 0); + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT); + + tqavctrl = rd32(IGC_TQAVCTRL); + tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN | + IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS); + + wr32(IGC_TQAVCTRL, tqavctrl); + + for (i = 0; i < adapter->num_tx_queues; i++) { + wr32(IGC_TXQCTL(i), 0); + wr32(IGC_STQT(i), 0); + wr32(IGC_ENDQT(i), NSEC_PER_SEC); + } + + wr32(IGC_QBVCYCLET_S, 0); + wr32(IGC_QBVCYCLET, NSEC_PER_SEC); + + adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED; + + return 0; +} + +static int igc_tsn_enable_offload(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tqavctrl, baset_l, baset_h; + u32 sec, nsec, cycle; + ktime_t base_time, systim; + int i; + + wr32(IGC_TSAUXC, 0); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN); + wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + u32 txqctl = 0; + u16 cbs_value; + u32 tqavcc; + + wr32(IGC_STQT(i), ring->start_time); + wr32(IGC_ENDQT(i), ring->end_time); + + if (adapter->taprio_offload_enable) { + /* If taprio_offload_enable is set we are in "taprio" + * mode and we need to be strict about the + * cycles: only transmit a packet if it can be + * completed during that cycle. + * + * If taprio_offload_enable is NOT true when + * enabling TSN offload, the cycle should have + * no external effects, but is only used internally + * to adapt the base time register after a second + * has passed. + * + * Enabling strict mode in this case would + * unnecessarily prevent the transmission of + * certain packets (i.e. at the boundary of a + * second) and thus interfere with the launchtime + * feature that promises transmission at a + * certain point in time. + */ + txqctl |= IGC_TXQCTL_STRICT_CYCLE | + IGC_TXQCTL_STRICT_END; + } + + if (ring->launchtime_enable) + txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; + + /* Skip configuring CBS for Q2 and Q3 */ + if (i > 1) + goto skip_cbs; + + if (ring->cbs_enable) { + if (i == 0) + txqctl |= IGC_TXQCTL_QAV_SEL_CBS0; + else + txqctl |= IGC_TXQCTL_QAV_SEL_CBS1; + + /* According to i225 datasheet section 7.5.2.7, we + * should set the 'idleSlope' field from TQAVCC + * register following the equation: + * + * value = link-speed 0x7736 * BW * 0.2 + * ---------- * ----------------- (E1) + * 100Mbps 2.5 + * + * Note that 'link-speed' is in Mbps. + * + * 'BW' is the percentage bandwidth out of full + * link speed which can be found with the + * following equation. Note that idleSlope here + * is the parameter from this function + * which is in kbps. 
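+			 * As an illustrative example (numbers chosen for
+			 * clarity, not taken from the datasheet): an
+			 * idleSlope request of 20000 kbps on a 1000 Mbps
+			 * link gives BW = 20000 / (1000 * 1000) = 0.02,
+			 * i.e. 2 percent of the link reserved for this
+			 * queue.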
+ * + * BW = idleSlope + * ----------------- (E2) + * link-speed * 1000 + * + * That said, we can come up with a generic + * equation to calculate the value we should set + * it TQAVCC register by replacing 'BW' in E1 by E2. + * The resulting equation is: + * + * value = link-speed * 0x7736 * idleSlope * 0.2 + * ------------------------------------- (E3) + * 100 * 2.5 * link-speed * 1000 + * + * 'link-speed' is present in both sides of the + * fraction so it is canceled out. The final + * equation is the following: + * + * value = idleSlope * 61036 + * ----------------- (E4) + * 2500000 + * + * NOTE: For i225, given the above, we can see + * that idleslope is represented in + * 40.959433 kbps units by the value at + * the TQAVCC register (2.5Gbps / 61036), + * which reduces the granularity for + * idleslope increments. + * + * In i225 controller, the sendSlope and loCredit + * parameters from CBS are not configurable + * by software so we don't do any + * 'controller configuration' in respect to + * these parameters. + */ + cbs_value = DIV_ROUND_UP_ULL(ring->idleslope + * 61036ULL, 2500000); + + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~IGC_TQAVCC_IDLESLOPE_MASK; + tqavcc |= cbs_value | IGC_TQAVCC_KEEP_CREDITS; + wr32(IGC_TQAVCC(i), tqavcc); + + wr32(IGC_TQAVHC(i), + 0x80000000 + ring->hicredit * 0x7735); + } else { + /* Disable any CBS for the queue */ + txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK); + + /* Set idleSlope to zero. */ + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~(IGC_TQAVCC_IDLESLOPE_MASK | + IGC_TQAVCC_KEEP_CREDITS); + wr32(IGC_TQAVCC(i), tqavcc); + + /* Set hiCredit to zero. */ + wr32(IGC_TQAVHC(i), 0); + } +skip_cbs: + wr32(IGC_TXQCTL(i), txqctl); + } + + tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS; + + tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV; + + adapter->qbv_count++; + + cycle = adapter->cycle_time; + base_time = adapter->base_time; + + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + systim = ktime_set(sec, nsec); + if (ktime_compare(systim, base_time) > 0) { + s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle); + + base_time = ktime_add_ns(base_time, (n + 1) * cycle); + + /* Increase the counter if scheduling into the past while + * Gate Control List (GCL) is running. + */ + if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) && + (adapter->tc_setup_type == TC_SETUP_QDISC_TAPRIO) && + (adapter->qbv_count > 1)) + adapter->qbv_config_change_errors++; + } else { + if (igc_is_device_id_i226(hw)) { + ktime_t adjust_time, expires_time; + + /* According to datasheet section 7.5.2.9.3.3, FutScdDis bit + * has to be configured before the cycle time and base time. + * Tx won't hang if a GCL is already running, + * so in this case we don't need to set FutScdDis. + */ + if (!(rd32(IGC_BASET_H) || rd32(IGC_BASET_L))) + tqavctrl |= IGC_TQAVCTRL_FUTSCDDIS; + + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + systim = ktime_set(sec, nsec); + + adjust_time = adapter->base_time; + expires_time = ktime_sub_ns(adjust_time, systim); + hrtimer_start(&adapter->hrtimer, expires_time, HRTIMER_MODE_REL); + } + } + + wr32(IGC_TQAVCTRL, tqavctrl); + + wr32(IGC_QBVCYCLET_S, cycle); + wr32(IGC_QBVCYCLET, cycle); + + baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l); + wr32(IGC_BASET_H, baset_h); + + /* In i226, Future base time is only supported when FutScdDis bit + * is enabled and only active for re-configuration. 
+ * In this case, initialize the base time with zero to create + * "re-configuration" scenario then only set the desired base time. + */ + if (tqavctrl & IGC_TQAVCTRL_FUTSCDDIS) + wr32(IGC_BASET_L, 0); + wr32(IGC_BASET_L, baset_l); + + return 0; +} + +int igc_tsn_reset(struct igc_adapter *adapter) +{ + unsigned int new_flags; + int err = 0; + + new_flags = igc_tsn_new_flags(adapter); + + if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED)) + return igc_tsn_disable_offload(adapter); + + err = igc_tsn_enable_offload(adapter); + if (err < 0) + return err; + + adapter->flags = new_flags; + + return err; +} + +int igc_tsn_offload_apply(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + /* Per I225/6 HW Design Section 7.5.2.1, transmit mode + * cannot be changed dynamically. Require reset the adapter. + */ + if (netif_running(adapter->netdev) && + (igc_is_device_id_i225(hw) || !adapter->qbv_count)) { + schedule_work(&adapter->reset_task); + return 0; + } + + igc_tsn_reset(adapter); + + return 0; +} diff --git a/devices/igc/igc_tsn-6.4-ethercat.h b/devices/igc/igc_tsn-6.4-ethercat.h new file mode 100644 index 00000000..b53e6af5 --- /dev/null +++ b/devices/igc/igc_tsn-6.4-ethercat.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +#ifndef _IGC_TSN_H_ +#define _IGC_TSN_H_ + +int igc_tsn_offload_apply(struct igc_adapter *adapter); +int igc_tsn_reset(struct igc_adapter *adapter); +void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter); + +#endif /* _IGC_BASE_H */ diff --git a/devices/igc/igc_tsn-6.4-orig.c b/devices/igc/igc_tsn-6.4-orig.c new file mode 100644 index 00000000..a9c08321 --- /dev/null +++ b/devices/igc/igc_tsn-6.4-orig.c @@ -0,0 +1,350 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Intel Corporation */ + +#include "igc.h" +#include "igc_hw.h" +#include "igc_tsn.h" + +static bool is_any_launchtime(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->launchtime_enable) + return true; + } + + return false; +} + +static bool is_cbs_enabled(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->cbs_enable) + return true; + } + + return false; +} + +static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter) +{ + unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED; + + if (adapter->taprio_offload_enable) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_any_launchtime(adapter)) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_cbs_enabled(adapter)) + new_flags |= IGC_FLAG_TSN_QAV_ENABLED; + + return new_flags; +} + +void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u16 txoffset; + + if (!is_any_launchtime(adapter)) + return; + + switch (adapter->link_speed) { + case SPEED_10: + txoffset = IGC_TXOFFSET_SPEED_10; + break; + case SPEED_100: + txoffset = IGC_TXOFFSET_SPEED_100; + break; + case SPEED_1000: + txoffset = IGC_TXOFFSET_SPEED_1000; + break; + case SPEED_2500: + txoffset = IGC_TXOFFSET_SPEED_2500; + break; + default: + txoffset = 0; + break; + } + + wr32(IGC_GTXOFFSET, txoffset); +} + +/* Returns the TSN specific registers to their default values after + * the adapter is reset. 
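+ * Concretely, the loop below clears the per-queue gate start times and
+ * queue controls and programs a one-second cycle (ENDQT = QBVCYCLET =
+ * NSEC_PER_SEC), which leaves every transmit gate open for the whole
+ * cycle.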
+ */ +static int igc_tsn_disable_offload(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tqavctrl; + int i; + + wr32(IGC_GTXOFFSET, 0); + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT); + + tqavctrl = rd32(IGC_TQAVCTRL); + tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN | + IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS); + + wr32(IGC_TQAVCTRL, tqavctrl); + + for (i = 0; i < adapter->num_tx_queues; i++) { + wr32(IGC_TXQCTL(i), 0); + wr32(IGC_STQT(i), 0); + wr32(IGC_ENDQT(i), NSEC_PER_SEC); + } + + wr32(IGC_QBVCYCLET_S, 0); + wr32(IGC_QBVCYCLET, NSEC_PER_SEC); + + adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED; + + return 0; +} + +static int igc_tsn_enable_offload(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tqavctrl, baset_l, baset_h; + u32 sec, nsec, cycle; + ktime_t base_time, systim; + int i; + + wr32(IGC_TSAUXC, 0); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN); + wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + u32 txqctl = 0; + u16 cbs_value; + u32 tqavcc; + + wr32(IGC_STQT(i), ring->start_time); + wr32(IGC_ENDQT(i), ring->end_time); + + if (adapter->taprio_offload_enable) { + /* If taprio_offload_enable is set we are in "taprio" + * mode and we need to be strict about the + * cycles: only transmit a packet if it can be + * completed during that cycle. + * + * If taprio_offload_enable is NOT true when + * enabling TSN offload, the cycle should have + * no external effects, but is only used internally + * to adapt the base time register after a second + * has passed. + * + * Enabling strict mode in this case would + * unnecessarily prevent the transmission of + * certain packets (i.e. at the boundary of a + * second) and thus interfere with the launchtime + * feature that promises transmission at a + * certain point in time. + */ + txqctl |= IGC_TXQCTL_STRICT_CYCLE | + IGC_TXQCTL_STRICT_END; + } + + if (ring->launchtime_enable) + txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; + + /* Skip configuring CBS for Q2 and Q3 */ + if (i > 1) + goto skip_cbs; + + if (ring->cbs_enable) { + if (i == 0) + txqctl |= IGC_TXQCTL_QAV_SEL_CBS0; + else + txqctl |= IGC_TXQCTL_QAV_SEL_CBS1; + + /* According to i225 datasheet section 7.5.2.7, we + * should set the 'idleSlope' field from TQAVCC + * register following the equation: + * + * value = link-speed 0x7736 * BW * 0.2 + * ---------- * ----------------- (E1) + * 100Mbps 2.5 + * + * Note that 'link-speed' is in Mbps. + * + * 'BW' is the percentage bandwidth out of full + * link speed which can be found with the + * following equation. Note that idleSlope here + * is the parameter from this function + * which is in kbps. + * + * BW = idleSlope + * ----------------- (E2) + * link-speed * 1000 + * + * That said, we can come up with a generic + * equation to calculate the value we should set + * it TQAVCC register by replacing 'BW' in E1 by E2. + * The resulting equation is: + * + * value = link-speed * 0x7736 * idleSlope * 0.2 + * ------------------------------------- (E3) + * 100 * 2.5 * link-speed * 1000 + * + * 'link-speed' is present in both sides of the + * fraction so it is canceled out. 
The final + * equation is the following: + * + * value = idleSlope * 61036 + * ----------------- (E4) + * 2500000 + * + * NOTE: For i225, given the above, we can see + * that idleslope is represented in + * 40.959433 kbps units by the value at + * the TQAVCC register (2.5Gbps / 61036), + * which reduces the granularity for + * idleslope increments. + * + * In i225 controller, the sendSlope and loCredit + * parameters from CBS are not configurable + * by software so we don't do any + * 'controller configuration' in respect to + * these parameters. + */ + cbs_value = DIV_ROUND_UP_ULL(ring->idleslope + * 61036ULL, 2500000); + + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~IGC_TQAVCC_IDLESLOPE_MASK; + tqavcc |= cbs_value | IGC_TQAVCC_KEEP_CREDITS; + wr32(IGC_TQAVCC(i), tqavcc); + + wr32(IGC_TQAVHC(i), + 0x80000000 + ring->hicredit * 0x7735); + } else { + /* Disable any CBS for the queue */ + txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK); + + /* Set idleSlope to zero. */ + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~(IGC_TQAVCC_IDLESLOPE_MASK | + IGC_TQAVCC_KEEP_CREDITS); + wr32(IGC_TQAVCC(i), tqavcc); + + /* Set hiCredit to zero. */ + wr32(IGC_TQAVHC(i), 0); + } +skip_cbs: + wr32(IGC_TXQCTL(i), txqctl); + } + + tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS; + + tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV; + + adapter->qbv_count++; + + cycle = adapter->cycle_time; + base_time = adapter->base_time; + + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + systim = ktime_set(sec, nsec); + if (ktime_compare(systim, base_time) > 0) { + s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle); + + base_time = ktime_add_ns(base_time, (n + 1) * cycle); + + /* Increase the counter if scheduling into the past while + * Gate Control List (GCL) is running. + */ + if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) && + (adapter->tc_setup_type == TC_SETUP_QDISC_TAPRIO) && + (adapter->qbv_count > 1)) + adapter->qbv_config_change_errors++; + } else { + if (igc_is_device_id_i226(hw)) { + ktime_t adjust_time, expires_time; + + /* According to datasheet section 7.5.2.9.3.3, FutScdDis bit + * has to be configured before the cycle time and base time. + * Tx won't hang if a GCL is already running, + * so in this case we don't need to set FutScdDis. + */ + if (!(rd32(IGC_BASET_H) || rd32(IGC_BASET_L))) + tqavctrl |= IGC_TQAVCTRL_FUTSCDDIS; + + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + systim = ktime_set(sec, nsec); + + adjust_time = adapter->base_time; + expires_time = ktime_sub_ns(adjust_time, systim); + hrtimer_start(&adapter->hrtimer, expires_time, HRTIMER_MODE_REL); + } + } + + wr32(IGC_TQAVCTRL, tqavctrl); + + wr32(IGC_QBVCYCLET_S, cycle); + wr32(IGC_QBVCYCLET, cycle); + + baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l); + wr32(IGC_BASET_H, baset_h); + + /* In i226, Future base time is only supported when FutScdDis bit + * is enabled and only active for re-configuration. + * In this case, initialize the base time with zero to create + * "re-configuration" scenario then only set the desired base time. 
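+	 * As a result, BASET_L may be written twice below: first with zero
+	 * and then with the low dword of the desired base time.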
+ */ + if (tqavctrl & IGC_TQAVCTRL_FUTSCDDIS) + wr32(IGC_BASET_L, 0); + wr32(IGC_BASET_L, baset_l); + + return 0; +} + +int igc_tsn_reset(struct igc_adapter *adapter) +{ + unsigned int new_flags; + int err = 0; + + new_flags = igc_tsn_new_flags(adapter); + + if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED)) + return igc_tsn_disable_offload(adapter); + + err = igc_tsn_enable_offload(adapter); + if (err < 0) + return err; + + adapter->flags = new_flags; + + return err; +} + +int igc_tsn_offload_apply(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + /* Per I225/6 HW Design Section 7.5.2.1, transmit mode + * cannot be changed dynamically. Require reset the adapter. + */ + if (netif_running(adapter->netdev) && + (igc_is_device_id_i225(hw) || !adapter->qbv_count)) { + schedule_work(&adapter->reset_task); + return 0; + } + + igc_tsn_reset(adapter); + + return 0; +} diff --git a/devices/igc/igc_tsn-6.4-orig.h b/devices/igc/igc_tsn-6.4-orig.h new file mode 100644 index 00000000..b53e6af5 --- /dev/null +++ b/devices/igc/igc_tsn-6.4-orig.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +#ifndef _IGC_TSN_H_ +#define _IGC_TSN_H_ + +int igc_tsn_offload_apply(struct igc_adapter *adapter); +int igc_tsn_reset(struct igc_adapter *adapter); +void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter); + +#endif /* _IGC_BASE_H */ diff --git a/devices/igc/igc_tsn-6.6-ethercat.c b/devices/igc/igc_tsn-6.6-ethercat.c new file mode 100644 index 00000000..5bd4dfca --- /dev/null +++ b/devices/igc/igc_tsn-6.6-ethercat.c @@ -0,0 +1,350 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Intel Corporation */ + +#include "igc-6.6-ethercat.h" +#include "igc_hw-6.6-ethercat.h" +#include "igc_tsn-6.6-ethercat.h" + +static bool is_any_launchtime(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->launchtime_enable) + return true; + } + + return false; +} + +static bool is_cbs_enabled(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->cbs_enable) + return true; + } + + return false; +} + +static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter) +{ + unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED; + + if (adapter->taprio_offload_enable) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_any_launchtime(adapter)) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_cbs_enabled(adapter)) + new_flags |= IGC_FLAG_TSN_QAV_ENABLED; + + return new_flags; +} + +void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u16 txoffset; + + if (!is_any_launchtime(adapter)) + return; + + switch (adapter->link_speed) { + case SPEED_10: + txoffset = IGC_TXOFFSET_SPEED_10; + break; + case SPEED_100: + txoffset = IGC_TXOFFSET_SPEED_100; + break; + case SPEED_1000: + txoffset = IGC_TXOFFSET_SPEED_1000; + break; + case SPEED_2500: + txoffset = IGC_TXOFFSET_SPEED_2500; + break; + default: + txoffset = 0; + break; + } + + wr32(IGC_GTXOFFSET, txoffset); +} + +/* Returns the TSN specific registers to their default values after + * the adapter is reset. 
+ */ +static int igc_tsn_disable_offload(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tqavctrl; + int i; + + wr32(IGC_GTXOFFSET, 0); + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT); + + tqavctrl = rd32(IGC_TQAVCTRL); + tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN | + IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS); + + wr32(IGC_TQAVCTRL, tqavctrl); + + for (i = 0; i < adapter->num_tx_queues; i++) { + wr32(IGC_TXQCTL(i), 0); + wr32(IGC_STQT(i), 0); + wr32(IGC_ENDQT(i), NSEC_PER_SEC); + } + + wr32(IGC_QBVCYCLET_S, 0); + wr32(IGC_QBVCYCLET, NSEC_PER_SEC); + + adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED; + + return 0; +} + +static int igc_tsn_enable_offload(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tqavctrl, baset_l, baset_h; + u32 sec, nsec, cycle; + ktime_t base_time, systim; + int i; + + wr32(IGC_TSAUXC, 0); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN); + wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + u32 txqctl = 0; + u16 cbs_value; + u32 tqavcc; + + wr32(IGC_STQT(i), ring->start_time); + wr32(IGC_ENDQT(i), ring->end_time); + + if (adapter->taprio_offload_enable) { + /* If taprio_offload_enable is set we are in "taprio" + * mode and we need to be strict about the + * cycles: only transmit a packet if it can be + * completed during that cycle. + * + * If taprio_offload_enable is NOT true when + * enabling TSN offload, the cycle should have + * no external effects, but is only used internally + * to adapt the base time register after a second + * has passed. + * + * Enabling strict mode in this case would + * unnecessarily prevent the transmission of + * certain packets (i.e. at the boundary of a + * second) and thus interfere with the launchtime + * feature that promises transmission at a + * certain point in time. + */ + txqctl |= IGC_TXQCTL_STRICT_CYCLE | + IGC_TXQCTL_STRICT_END; + } + + if (ring->launchtime_enable) + txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; + + /* Skip configuring CBS for Q2 and Q3 */ + if (i > 1) + goto skip_cbs; + + if (ring->cbs_enable) { + if (i == 0) + txqctl |= IGC_TXQCTL_QAV_SEL_CBS0; + else + txqctl |= IGC_TXQCTL_QAV_SEL_CBS1; + + /* According to i225 datasheet section 7.5.2.7, we + * should set the 'idleSlope' field from TQAVCC + * register following the equation: + * + * value = link-speed 0x7736 * BW * 0.2 + * ---------- * ----------------- (E1) + * 100Mbps 2.5 + * + * Note that 'link-speed' is in Mbps. + * + * 'BW' is the percentage bandwidth out of full + * link speed which can be found with the + * following equation. Note that idleSlope here + * is the parameter from this function + * which is in kbps. + * + * BW = idleSlope + * ----------------- (E2) + * link-speed * 1000 + * + * That said, we can come up with a generic + * equation to calculate the value we should set + * it TQAVCC register by replacing 'BW' in E1 by E2. + * The resulting equation is: + * + * value = link-speed * 0x7736 * idleSlope * 0.2 + * ------------------------------------- (E3) + * 100 * 2.5 * link-speed * 1000 + * + * 'link-speed' is present in both sides of the + * fraction so it is canceled out. 
The final + * equation is the following: + * + * value = idleSlope * 61036 + * ----------------- (E4) + * 2500000 + * + * NOTE: For i225, given the above, we can see + * that idleslope is represented in + * 40.959433 kbps units by the value at + * the TQAVCC register (2.5Gbps / 61036), + * which reduces the granularity for + * idleslope increments. + * + * In i225 controller, the sendSlope and loCredit + * parameters from CBS are not configurable + * by software so we don't do any + * 'controller configuration' in respect to + * these parameters. + */ + cbs_value = DIV_ROUND_UP_ULL(ring->idleslope + * 61036ULL, 2500000); + + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~IGC_TQAVCC_IDLESLOPE_MASK; + tqavcc |= cbs_value | IGC_TQAVCC_KEEP_CREDITS; + wr32(IGC_TQAVCC(i), tqavcc); + + wr32(IGC_TQAVHC(i), + 0x80000000 + ring->hicredit * 0x7735); + } else { + /* Disable any CBS for the queue */ + txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK); + + /* Set idleSlope to zero. */ + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~(IGC_TQAVCC_IDLESLOPE_MASK | + IGC_TQAVCC_KEEP_CREDITS); + wr32(IGC_TQAVCC(i), tqavcc); + + /* Set hiCredit to zero. */ + wr32(IGC_TQAVHC(i), 0); + } +skip_cbs: + wr32(IGC_TXQCTL(i), txqctl); + } + + tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS; + + tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV; + + adapter->qbv_count++; + + cycle = adapter->cycle_time; + base_time = adapter->base_time; + + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + systim = ktime_set(sec, nsec); + if (ktime_compare(systim, base_time) > 0) { + s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle); + + base_time = ktime_add_ns(base_time, (n + 1) * cycle); + + /* Increase the counter if scheduling into the past while + * Gate Control List (GCL) is running. + */ + if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) && + (adapter->tc_setup_type == TC_SETUP_QDISC_TAPRIO) && + (adapter->qbv_count > 1)) + adapter->qbv_config_change_errors++; + } else { + if (igc_is_device_id_i226(hw)) { + ktime_t adjust_time, expires_time; + + /* According to datasheet section 7.5.2.9.3.3, FutScdDis bit + * has to be configured before the cycle time and base time. + * Tx won't hang if a GCL is already running, + * so in this case we don't need to set FutScdDis. + */ + if (!(rd32(IGC_BASET_H) || rd32(IGC_BASET_L))) + tqavctrl |= IGC_TQAVCTRL_FUTSCDDIS; + + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + systim = ktime_set(sec, nsec); + + adjust_time = adapter->base_time; + expires_time = ktime_sub_ns(adjust_time, systim); + hrtimer_start(&adapter->hrtimer, expires_time, HRTIMER_MODE_REL); + } + } + + wr32(IGC_TQAVCTRL, tqavctrl); + + wr32(IGC_QBVCYCLET_S, cycle); + wr32(IGC_QBVCYCLET, cycle); + + baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l); + wr32(IGC_BASET_H, baset_h); + + /* In i226, Future base time is only supported when FutScdDis bit + * is enabled and only active for re-configuration. + * In this case, initialize the base time with zero to create + * "re-configuration" scenario then only set the desired base time. 
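+	 * (For example, a base time of 1.5 s, i.e. 1500000000 ns, is split
+	 * by div_s64_rem() above into BASET_H = 1 and BASET_L = 500000000.)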
+ */ + if (tqavctrl & IGC_TQAVCTRL_FUTSCDDIS) + wr32(IGC_BASET_L, 0); + wr32(IGC_BASET_L, baset_l); + + return 0; +} + +int igc_tsn_reset(struct igc_adapter *adapter) +{ + unsigned int new_flags; + int err = 0; + + new_flags = igc_tsn_new_flags(adapter); + + if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED)) + return igc_tsn_disable_offload(adapter); + + err = igc_tsn_enable_offload(adapter); + if (err < 0) + return err; + + adapter->flags = new_flags; + + return err; +} + +int igc_tsn_offload_apply(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + /* Per I225/6 HW Design Section 7.5.2.1, transmit mode + * cannot be changed dynamically. Require reset the adapter. + */ + if (netif_running(adapter->netdev) && + (igc_is_device_id_i225(hw) || !adapter->qbv_count)) { + schedule_work(&adapter->reset_task); + return 0; + } + + igc_tsn_reset(adapter); + + return 0; +} diff --git a/devices/igc/igc_tsn-6.6-ethercat.h b/devices/igc/igc_tsn-6.6-ethercat.h new file mode 100644 index 00000000..b53e6af5 --- /dev/null +++ b/devices/igc/igc_tsn-6.6-ethercat.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +#ifndef _IGC_TSN_H_ +#define _IGC_TSN_H_ + +int igc_tsn_offload_apply(struct igc_adapter *adapter); +int igc_tsn_reset(struct igc_adapter *adapter); +void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter); + +#endif /* _IGC_BASE_H */ diff --git a/devices/igc/igc_tsn-6.6-orig.c b/devices/igc/igc_tsn-6.6-orig.c new file mode 100644 index 00000000..a9c08321 --- /dev/null +++ b/devices/igc/igc_tsn-6.6-orig.c @@ -0,0 +1,350 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Intel Corporation */ + +#include "igc.h" +#include "igc_hw.h" +#include "igc_tsn.h" + +static bool is_any_launchtime(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->launchtime_enable) + return true; + } + + return false; +} + +static bool is_cbs_enabled(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->cbs_enable) + return true; + } + + return false; +} + +static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter) +{ + unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED; + + if (adapter->taprio_offload_enable) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_any_launchtime(adapter)) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_cbs_enabled(adapter)) + new_flags |= IGC_FLAG_TSN_QAV_ENABLED; + + return new_flags; +} + +void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u16 txoffset; + + if (!is_any_launchtime(adapter)) + return; + + switch (adapter->link_speed) { + case SPEED_10: + txoffset = IGC_TXOFFSET_SPEED_10; + break; + case SPEED_100: + txoffset = IGC_TXOFFSET_SPEED_100; + break; + case SPEED_1000: + txoffset = IGC_TXOFFSET_SPEED_1000; + break; + case SPEED_2500: + txoffset = IGC_TXOFFSET_SPEED_2500; + break; + default: + txoffset = 0; + break; + } + + wr32(IGC_GTXOFFSET, txoffset); +} + +/* Returns the TSN specific registers to their default values after + * the adapter is reset. 
+ */ +static int igc_tsn_disable_offload(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tqavctrl; + int i; + + wr32(IGC_GTXOFFSET, 0); + wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT); + + tqavctrl = rd32(IGC_TQAVCTRL); + tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN | + IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS); + + wr32(IGC_TQAVCTRL, tqavctrl); + + for (i = 0; i < adapter->num_tx_queues; i++) { + wr32(IGC_TXQCTL(i), 0); + wr32(IGC_STQT(i), 0); + wr32(IGC_ENDQT(i), NSEC_PER_SEC); + } + + wr32(IGC_QBVCYCLET_S, 0); + wr32(IGC_QBVCYCLET, NSEC_PER_SEC); + + adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED; + + return 0; +} + +static int igc_tsn_enable_offload(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 tqavctrl, baset_l, baset_h; + u32 sec, nsec, cycle; + ktime_t base_time, systim; + int i; + + wr32(IGC_TSAUXC, 0); + wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN); + wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + u32 txqctl = 0; + u16 cbs_value; + u32 tqavcc; + + wr32(IGC_STQT(i), ring->start_time); + wr32(IGC_ENDQT(i), ring->end_time); + + if (adapter->taprio_offload_enable) { + /* If taprio_offload_enable is set we are in "taprio" + * mode and we need to be strict about the + * cycles: only transmit a packet if it can be + * completed during that cycle. + * + * If taprio_offload_enable is NOT true when + * enabling TSN offload, the cycle should have + * no external effects, but is only used internally + * to adapt the base time register after a second + * has passed. + * + * Enabling strict mode in this case would + * unnecessarily prevent the transmission of + * certain packets (i.e. at the boundary of a + * second) and thus interfere with the launchtime + * feature that promises transmission at a + * certain point in time. + */ + txqctl |= IGC_TXQCTL_STRICT_CYCLE | + IGC_TXQCTL_STRICT_END; + } + + if (ring->launchtime_enable) + txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; + + /* Skip configuring CBS for Q2 and Q3 */ + if (i > 1) + goto skip_cbs; + + if (ring->cbs_enable) { + if (i == 0) + txqctl |= IGC_TXQCTL_QAV_SEL_CBS0; + else + txqctl |= IGC_TXQCTL_QAV_SEL_CBS1; + + /* According to i225 datasheet section 7.5.2.7, we + * should set the 'idleSlope' field from TQAVCC + * register following the equation: + * + * value = link-speed 0x7736 * BW * 0.2 + * ---------- * ----------------- (E1) + * 100Mbps 2.5 + * + * Note that 'link-speed' is in Mbps. + * + * 'BW' is the percentage bandwidth out of full + * link speed which can be found with the + * following equation. Note that idleSlope here + * is the parameter from this function + * which is in kbps. + * + * BW = idleSlope + * ----------------- (E2) + * link-speed * 1000 + * + * That said, we can come up with a generic + * equation to calculate the value we should set + * it TQAVCC register by replacing 'BW' in E1 by E2. + * The resulting equation is: + * + * value = link-speed * 0x7736 * idleSlope * 0.2 + * ------------------------------------- (E3) + * 100 * 2.5 * link-speed * 1000 + * + * 'link-speed' is present in both sides of the + * fraction so it is canceled out. 
The final + * equation is the following: + * + * value = idleSlope * 61036 + * ----------------- (E4) + * 2500000 + * + * NOTE: For i225, given the above, we can see + * that idleslope is represented in + * 40.959433 kbps units by the value at + * the TQAVCC register (2.5Gbps / 61036), + * which reduces the granularity for + * idleslope increments. + * + * In i225 controller, the sendSlope and loCredit + * parameters from CBS are not configurable + * by software so we don't do any + * 'controller configuration' in respect to + * these parameters. + */ + cbs_value = DIV_ROUND_UP_ULL(ring->idleslope + * 61036ULL, 2500000); + + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~IGC_TQAVCC_IDLESLOPE_MASK; + tqavcc |= cbs_value | IGC_TQAVCC_KEEP_CREDITS; + wr32(IGC_TQAVCC(i), tqavcc); + + wr32(IGC_TQAVHC(i), + 0x80000000 + ring->hicredit * 0x7735); + } else { + /* Disable any CBS for the queue */ + txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK); + + /* Set idleSlope to zero. */ + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~(IGC_TQAVCC_IDLESLOPE_MASK | + IGC_TQAVCC_KEEP_CREDITS); + wr32(IGC_TQAVCC(i), tqavcc); + + /* Set hiCredit to zero. */ + wr32(IGC_TQAVHC(i), 0); + } +skip_cbs: + wr32(IGC_TXQCTL(i), txqctl); + } + + tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS; + + tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV; + + adapter->qbv_count++; + + cycle = adapter->cycle_time; + base_time = adapter->base_time; + + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + + systim = ktime_set(sec, nsec); + if (ktime_compare(systim, base_time) > 0) { + s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle); + + base_time = ktime_add_ns(base_time, (n + 1) * cycle); + + /* Increase the counter if scheduling into the past while + * Gate Control List (GCL) is running. + */ + if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) && + (adapter->tc_setup_type == TC_SETUP_QDISC_TAPRIO) && + (adapter->qbv_count > 1)) + adapter->qbv_config_change_errors++; + } else { + if (igc_is_device_id_i226(hw)) { + ktime_t adjust_time, expires_time; + + /* According to datasheet section 7.5.2.9.3.3, FutScdDis bit + * has to be configured before the cycle time and base time. + * Tx won't hang if a GCL is already running, + * so in this case we don't need to set FutScdDis. + */ + if (!(rd32(IGC_BASET_H) || rd32(IGC_BASET_L))) + tqavctrl |= IGC_TQAVCTRL_FUTSCDDIS; + + nsec = rd32(IGC_SYSTIML); + sec = rd32(IGC_SYSTIMH); + systim = ktime_set(sec, nsec); + + adjust_time = adapter->base_time; + expires_time = ktime_sub_ns(adjust_time, systim); + hrtimer_start(&adapter->hrtimer, expires_time, HRTIMER_MODE_REL); + } + } + + wr32(IGC_TQAVCTRL, tqavctrl); + + wr32(IGC_QBVCYCLET_S, cycle); + wr32(IGC_QBVCYCLET, cycle); + + baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l); + wr32(IGC_BASET_H, baset_h); + + /* In i226, Future base time is only supported when FutScdDis bit + * is enabled and only active for re-configuration. + * In this case, initialize the base time with zero to create + * "re-configuration" scenario then only set the desired base time. 
+ */ + if (tqavctrl & IGC_TQAVCTRL_FUTSCDDIS) + wr32(IGC_BASET_L, 0); + wr32(IGC_BASET_L, baset_l); + + return 0; +} + +int igc_tsn_reset(struct igc_adapter *adapter) +{ + unsigned int new_flags; + int err = 0; + + new_flags = igc_tsn_new_flags(adapter); + + if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED)) + return igc_tsn_disable_offload(adapter); + + err = igc_tsn_enable_offload(adapter); + if (err < 0) + return err; + + adapter->flags = new_flags; + + return err; +} + +int igc_tsn_offload_apply(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + /* Per I225/6 HW Design Section 7.5.2.1, transmit mode + * cannot be changed dynamically. Require reset the adapter. + */ + if (netif_running(adapter->netdev) && + (igc_is_device_id_i225(hw) || !adapter->qbv_count)) { + schedule_work(&adapter->reset_task); + return 0; + } + + igc_tsn_reset(adapter); + + return 0; +} diff --git a/devices/igc/igc_tsn-6.6-orig.h b/devices/igc/igc_tsn-6.6-orig.h new file mode 100644 index 00000000..b53e6af5 --- /dev/null +++ b/devices/igc/igc_tsn-6.6-orig.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Intel Corporation */ + +#ifndef _IGC_TSN_H_ +#define _IGC_TSN_H_ + +int igc_tsn_offload_apply(struct igc_adapter *adapter); +int igc_tsn_reset(struct igc_adapter *adapter); +void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter); + +#endif /* _IGC_BASE_H */ diff --git a/devices/igc/igc_xdp-6.4-ethercat.c b/devices/igc/igc_xdp-6.4-ethercat.c new file mode 100644 index 00000000..b07e9899 --- /dev/null +++ b/devices/igc/igc_xdp-6.4-ethercat.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Intel Corporation. */ + +#include +#include + +#include "igc-6.4-ethercat.h" +#include "igc_xdp-6.4-ethercat.h" + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = adapter->netdev; + bool if_running = netif_running(dev); + struct bpf_prog *old_prog; + + if (dev->mtu > ETH_DATA_LEN) { + /* For now, the driver doesn't support XDP functionality with + * jumbo frames so we return error. + */ + NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported"); + return -EOPNOTSUPP; + } + + if (if_running) + igc_close(dev); + + old_prog = xchg(&adapter->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (prog) + xdp_features_set_redirect_target(dev, true); + else + xdp_features_clear_redirect_target(dev); + + if (if_running) + igc_open(dev); + + return 0; +} + +static int igc_xdp_enable_pool(struct igc_adapter *adapter, + struct xsk_buff_pool *pool, u16 queue_id) +{ + struct net_device *ndev = adapter->netdev; + struct device *dev = &adapter->pdev->dev; + struct igc_ring *rx_ring, *tx_ring; + struct napi_struct *napi; + bool needs_reset; + u32 frame_size; + int err; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + frame_size = xsk_pool_get_rx_frame_size(pool); + if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) { + /* When XDP is enabled, the driver doesn't support frames that + * span over multiple buffers. To avoid that, we check if xsk + * frame size is big enough to fit the max ethernet frame size + * + vlan double tagging. 
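+		 * (With ETH_FRAME_LEN = 1514 and VLAN_HLEN = 4, the pool's
+		 * rx frame size must therefore be at least 1522 bytes.)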
+ */ + return -EOPNOTSUPP; + } + + err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR); + if (err) { + netdev_err(ndev, "Failed to map xsk pool\n"); + return err; + } + + needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter); + + rx_ring = adapter->rx_ring[queue_id]; + tx_ring = adapter->tx_ring[queue_id]; + /* Rx and Tx rings share the same napi context. */ + napi = &rx_ring->q_vector->napi; + + if (needs_reset) { + igc_disable_rx_ring(rx_ring); + igc_disable_tx_ring(tx_ring); + napi_disable(napi); + } + + set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); + set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + + if (needs_reset) { + napi_enable(napi); + igc_enable_rx_ring(rx_ring); + igc_enable_tx_ring(tx_ring); + + err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX); + if (err) { + xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); + return err; + } + } + + return 0; +} + +static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id) +{ + struct igc_ring *rx_ring, *tx_ring; + struct xsk_buff_pool *pool; + struct napi_struct *napi; + bool needs_reset; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + pool = xsk_get_pool_from_qid(adapter->netdev, queue_id); + if (!pool) + return -EINVAL; + + needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter); + + rx_ring = adapter->rx_ring[queue_id]; + tx_ring = adapter->tx_ring[queue_id]; + /* Rx and Tx rings share the same napi context. */ + napi = &rx_ring->q_vector->napi; + + if (needs_reset) { + igc_disable_rx_ring(rx_ring); + igc_disable_tx_ring(tx_ring); + napi_disable(napi); + } + + xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); + clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); + clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + + if (needs_reset) { + napi_enable(napi); + igc_enable_rx_ring(rx_ring); + igc_enable_tx_ring(tx_ring); + } + + return 0; +} + +int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool, + u16 queue_id) +{ + return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) : + igc_xdp_disable_pool(adapter, queue_id); +} diff --git a/devices/igc/igc_xdp-6.4-ethercat.h b/devices/igc/igc_xdp-6.4-ethercat.h new file mode 100644 index 00000000..a74e5487 --- /dev/null +++ b/devices/igc/igc_xdp-6.4-ethercat.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020, Intel Corporation. */ + +#ifndef _IGC_XDP_H_ +#define _IGC_XDP_H_ + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack); +int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool, + u16 queue_id); + +static inline bool igc_xdp_is_enabled(struct igc_adapter *adapter) +{ + return !!adapter->xdp_prog; +} + +#endif /* _IGC_XDP_H_ */ diff --git a/devices/igc/igc_xdp-6.4-orig.c b/devices/igc/igc_xdp-6.4-orig.c new file mode 100644 index 00000000..e27af72a --- /dev/null +++ b/devices/igc/igc_xdp-6.4-orig.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Intel Corporation. */ + +#include +#include + +#include "igc.h" +#include "igc_xdp.h" + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = adapter->netdev; + bool if_running = netif_running(dev); + struct bpf_prog *old_prog; + + if (dev->mtu > ETH_DATA_LEN) { + /* For now, the driver doesn't support XDP functionality with + * jumbo frames so we return error. 
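+		 * (In other words, an XDP program can only be attached while
+		 * dev->mtu is at most ETH_DATA_LEN, i.e. 1500 bytes.)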
+ */ + NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported"); + return -EOPNOTSUPP; + } + + if (if_running) + igc_close(dev); + + old_prog = xchg(&adapter->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (prog) + xdp_features_set_redirect_target(dev, true); + else + xdp_features_clear_redirect_target(dev); + + if (if_running) + igc_open(dev); + + return 0; +} + +static int igc_xdp_enable_pool(struct igc_adapter *adapter, + struct xsk_buff_pool *pool, u16 queue_id) +{ + struct net_device *ndev = adapter->netdev; + struct device *dev = &adapter->pdev->dev; + struct igc_ring *rx_ring, *tx_ring; + struct napi_struct *napi; + bool needs_reset; + u32 frame_size; + int err; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + frame_size = xsk_pool_get_rx_frame_size(pool); + if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) { + /* When XDP is enabled, the driver doesn't support frames that + * span over multiple buffers. To avoid that, we check if xsk + * frame size is big enough to fit the max ethernet frame size + * + vlan double tagging. + */ + return -EOPNOTSUPP; + } + + err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR); + if (err) { + netdev_err(ndev, "Failed to map xsk pool\n"); + return err; + } + + needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter); + + rx_ring = adapter->rx_ring[queue_id]; + tx_ring = adapter->tx_ring[queue_id]; + /* Rx and Tx rings share the same napi context. */ + napi = &rx_ring->q_vector->napi; + + if (needs_reset) { + igc_disable_rx_ring(rx_ring); + igc_disable_tx_ring(tx_ring); + napi_disable(napi); + } + + set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); + set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + + if (needs_reset) { + napi_enable(napi); + igc_enable_rx_ring(rx_ring); + igc_enable_tx_ring(tx_ring); + + err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX); + if (err) { + xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); + return err; + } + } + + return 0; +} + +static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id) +{ + struct igc_ring *rx_ring, *tx_ring; + struct xsk_buff_pool *pool; + struct napi_struct *napi; + bool needs_reset; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + pool = xsk_get_pool_from_qid(adapter->netdev, queue_id); + if (!pool) + return -EINVAL; + + needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter); + + rx_ring = adapter->rx_ring[queue_id]; + tx_ring = adapter->tx_ring[queue_id]; + /* Rx and Tx rings share the same napi context. */ + napi = &rx_ring->q_vector->napi; + + if (needs_reset) { + igc_disable_rx_ring(rx_ring); + igc_disable_tx_ring(tx_ring); + napi_disable(napi); + } + + xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); + clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); + clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + + if (needs_reset) { + napi_enable(napi); + igc_enable_rx_ring(rx_ring); + igc_enable_tx_ring(tx_ring); + } + + return 0; +} + +int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool, + u16 queue_id) +{ + return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) : + igc_xdp_disable_pool(adapter, queue_id); +} diff --git a/devices/igc/igc_xdp-6.4-orig.h b/devices/igc/igc_xdp-6.4-orig.h new file mode 100644 index 00000000..a74e5487 --- /dev/null +++ b/devices/igc/igc_xdp-6.4-orig.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020, Intel Corporation. 
*/ + +#ifndef _IGC_XDP_H_ +#define _IGC_XDP_H_ + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack); +int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool, + u16 queue_id); + +static inline bool igc_xdp_is_enabled(struct igc_adapter *adapter) +{ + return !!adapter->xdp_prog; +} + +#endif /* _IGC_XDP_H_ */ diff --git a/devices/igc/igc_xdp-6.6-ethercat.c b/devices/igc/igc_xdp-6.6-ethercat.c new file mode 100644 index 00000000..6313490b --- /dev/null +++ b/devices/igc/igc_xdp-6.6-ethercat.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Intel Corporation. */ + +#include +#include + +#include "igc-6.6-ethercat.h" +#include "igc_xdp-6.6-ethercat.h" + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = adapter->netdev; + bool if_running = netif_running(dev); + struct bpf_prog *old_prog; + + if (dev->mtu > ETH_DATA_LEN) { + /* For now, the driver doesn't support XDP functionality with + * jumbo frames so we return error. + */ + NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported"); + return -EOPNOTSUPP; + } + + if (if_running) + igc_close(dev); + + old_prog = xchg(&adapter->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (prog) + xdp_features_set_redirect_target(dev, true); + else + xdp_features_clear_redirect_target(dev); + + if (if_running) + igc_open(dev); + + return 0; +} + +static int igc_xdp_enable_pool(struct igc_adapter *adapter, + struct xsk_buff_pool *pool, u16 queue_id) +{ + struct net_device *ndev = adapter->netdev; + struct device *dev = &adapter->pdev->dev; + struct igc_ring *rx_ring, *tx_ring; + struct napi_struct *napi; + bool needs_reset; + u32 frame_size; + int err; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + frame_size = xsk_pool_get_rx_frame_size(pool); + if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) { + /* When XDP is enabled, the driver doesn't support frames that + * span over multiple buffers. To avoid that, we check if xsk + * frame size is big enough to fit the max ethernet frame size + * + vlan double tagging. + */ + return -EOPNOTSUPP; + } + + err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR); + if (err) { + netdev_err(ndev, "Failed to map xsk pool\n"); + return err; + } + + needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter); + + rx_ring = adapter->rx_ring[queue_id]; + tx_ring = adapter->tx_ring[queue_id]; + /* Rx and Tx rings share the same napi context. 
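+	 * When the interface is running with an XDP program attached, both
+	 * rings and this shared NAPI instance are disabled below before the
+	 * AF_XDP zero-copy flag is set and re-enabled afterwards, so nothing
+	 * polls these queues while they are reconfigured.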
*/ + napi = &rx_ring->q_vector->napi; + + if (needs_reset) { + igc_disable_rx_ring(rx_ring); + igc_disable_tx_ring(tx_ring); + napi_disable(napi); + } + + set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); + set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + + if (needs_reset) { + napi_enable(napi); + igc_enable_rx_ring(rx_ring); + igc_enable_tx_ring(tx_ring); + + err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX); + if (err) { + xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); + return err; + } + } + + return 0; +} + +static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id) +{ + struct igc_ring *rx_ring, *tx_ring; + struct xsk_buff_pool *pool; + struct napi_struct *napi; + bool needs_reset; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + pool = xsk_get_pool_from_qid(adapter->netdev, queue_id); + if (!pool) + return -EINVAL; + + needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter); + + rx_ring = adapter->rx_ring[queue_id]; + tx_ring = adapter->tx_ring[queue_id]; + /* Rx and Tx rings share the same napi context. */ + napi = &rx_ring->q_vector->napi; + + if (needs_reset) { + igc_disable_rx_ring(rx_ring); + igc_disable_tx_ring(tx_ring); + napi_disable(napi); + } + + xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); + clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); + clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + + if (needs_reset) { + napi_enable(napi); + igc_enable_rx_ring(rx_ring); + igc_enable_tx_ring(tx_ring); + } + + return 0; +} + +int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool, + u16 queue_id) +{ + return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) : + igc_xdp_disable_pool(adapter, queue_id); +} diff --git a/devices/igc/igc_xdp-6.6-ethercat.h b/devices/igc/igc_xdp-6.6-ethercat.h new file mode 100644 index 00000000..a74e5487 --- /dev/null +++ b/devices/igc/igc_xdp-6.6-ethercat.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020, Intel Corporation. */ + +#ifndef _IGC_XDP_H_ +#define _IGC_XDP_H_ + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack); +int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool, + u16 queue_id); + +static inline bool igc_xdp_is_enabled(struct igc_adapter *adapter) +{ + return !!adapter->xdp_prog; +} + +#endif /* _IGC_XDP_H_ */ diff --git a/devices/igc/igc_xdp-6.6-orig.c b/devices/igc/igc_xdp-6.6-orig.c new file mode 100644 index 00000000..e27af72a --- /dev/null +++ b/devices/igc/igc_xdp-6.6-orig.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Intel Corporation. */ + +#include +#include + +#include "igc.h" +#include "igc_xdp.h" + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = adapter->netdev; + bool if_running = netif_running(dev); + struct bpf_prog *old_prog; + + if (dev->mtu > ETH_DATA_LEN) { + /* For now, the driver doesn't support XDP functionality with + * jumbo frames so we return error. 
+ */ + NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported"); + return -EOPNOTSUPP; + } + + if (if_running) + igc_close(dev); + + old_prog = xchg(&adapter->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (prog) + xdp_features_set_redirect_target(dev, true); + else + xdp_features_clear_redirect_target(dev); + + if (if_running) + igc_open(dev); + + return 0; +} + +static int igc_xdp_enable_pool(struct igc_adapter *adapter, + struct xsk_buff_pool *pool, u16 queue_id) +{ + struct net_device *ndev = adapter->netdev; + struct device *dev = &adapter->pdev->dev; + struct igc_ring *rx_ring, *tx_ring; + struct napi_struct *napi; + bool needs_reset; + u32 frame_size; + int err; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + frame_size = xsk_pool_get_rx_frame_size(pool); + if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) { + /* When XDP is enabled, the driver doesn't support frames that + * span over multiple buffers. To avoid that, we check if xsk + * frame size is big enough to fit the max ethernet frame size + * + vlan double tagging. + */ + return -EOPNOTSUPP; + } + + err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR); + if (err) { + netdev_err(ndev, "Failed to map xsk pool\n"); + return err; + } + + needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter); + + rx_ring = adapter->rx_ring[queue_id]; + tx_ring = adapter->tx_ring[queue_id]; + /* Rx and Tx rings share the same napi context. */ + napi = &rx_ring->q_vector->napi; + + if (needs_reset) { + igc_disable_rx_ring(rx_ring); + igc_disable_tx_ring(tx_ring); + napi_disable(napi); + } + + set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); + set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + + if (needs_reset) { + napi_enable(napi); + igc_enable_rx_ring(rx_ring); + igc_enable_tx_ring(tx_ring); + + err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX); + if (err) { + xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); + return err; + } + } + + return 0; +} + +static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id) +{ + struct igc_ring *rx_ring, *tx_ring; + struct xsk_buff_pool *pool; + struct napi_struct *napi; + bool needs_reset; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + pool = xsk_get_pool_from_qid(adapter->netdev, queue_id); + if (!pool) + return -EINVAL; + + needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter); + + rx_ring = adapter->rx_ring[queue_id]; + tx_ring = adapter->tx_ring[queue_id]; + /* Rx and Tx rings share the same napi context. */ + napi = &rx_ring->q_vector->napi; + + if (needs_reset) { + igc_disable_rx_ring(rx_ring); + igc_disable_tx_ring(tx_ring); + napi_disable(napi); + } + + xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR); + clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags); + clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); + + if (needs_reset) { + napi_enable(napi); + igc_enable_rx_ring(rx_ring); + igc_enable_tx_ring(tx_ring); + } + + return 0; +} + +int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool, + u16 queue_id) +{ + return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) : + igc_xdp_disable_pool(adapter, queue_id); +} diff --git a/devices/igc/igc_xdp-6.6-orig.h b/devices/igc/igc_xdp-6.6-orig.h new file mode 100644 index 00000000..a74e5487 --- /dev/null +++ b/devices/igc/igc_xdp-6.6-orig.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020, Intel Corporation. 
*/ + +#ifndef _IGC_XDP_H_ +#define _IGC_XDP_H_ + +int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack); +int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool, + u16 queue_id); + +static inline bool igc_xdp_is_enabled(struct igc_adapter *adapter) +{ + return !!adapter->xdp_prog; +} + +#endif /* _IGC_XDP_H_ */ diff --git a/devices/igc/update.sh b/devices/igc/update.sh new file mode 100755 index 00000000..e20d2258 --- /dev/null +++ b/devices/igc/update.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +set -e + +if [ $# -ne 3 ]; then + echo "Need 3 arguments: 1) kernel source dir, 2) previous version, 3) version to add" + exit 1 +fi + +KERNELDIR=$1 +PREVER=$2 +KERNELVER=$3 + +IGCDIR=drivers/net/ethernet/intel/igc + +FILES="e1000_82575.c e1000_82575.h e1000_defines.h e1000_hw.h e1000_i210.c e1000_i210.h e1000_mac.c e1000_mac.h e1000_mbx.c e1000_mbx.h e1000_nvm.c e1000_nvm.h e1000_phy.c e1000_phy.h e1000_regs.h igc_ethtool.c igb.h igb_hwmon.c igb_main.c igb_ptp.c" +FILES="igc_base.c igc_diag.c igc_ethtool.c igc_i225.c igc_mac.h igc_nvm.c igc_phy.h igc_tsn.c igc_xdp.h igc_base.h igc_diag.h igc.h igc_i225.h igc_main.c igc_nvm.h igc_ptp.c igc_tsn.h igc_defines.h igc_dump.c igc_hw.h igc_mac.c igc_phy.c igc_regs.h igc_xdp.c" + +set -x + +for f in $FILES; do + echo $f + o=${f/\./-$KERNELVER-orig.} + e=${f/\./-$KERNELVER-ethercat.} + cp -v $KERNELDIR/$IGCDIR/$f $o + chmod 644 $o + cp -v $o $e + op=${f/\./-$PREVER-orig.} + ep=${f/\./-$PREVER-ethercat.} + diff -u $op $ep | patch -p1 $e + git add $o $e +done diff --git a/devices/r8169/Kbuild.in b/devices/r8169/Kbuild.in new file mode 100644 index 00000000..5535c59e --- /dev/null +++ b/devices/r8169/Kbuild.in @@ -0,0 +1,59 @@ +#------------------------------------------------------------------------------ +# +# $Id$ +# +# Copyright (C) 2006-2008 Florian Pose, Ingenieurgemeinschaft IgH +# +# This file is part of the IgH EtherCAT Master. +# +# The IgH EtherCAT Master is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License version 2, as +# published by the Free Software Foundation. +# +# The IgH EtherCAT Master is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with the IgH EtherCAT Master; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# +# --- +# +# The license mentioned above concerns the source code only. Using the +# EtherCAT technology and brand is only permitted in compliance with the +# industrial property and similar rights of Beckhoff Automation GmbH. +# +# --- +# +# vim: syntax=make +# +#------------------------------------------------------------------------------ + + +TOPDIR := $(src)/../.. 
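+# @ENABLE_R8169@, @R8169_IN_SUBDIR@ and @KERNEL_R8169@ below are filled in
+# by configure; the ec_r8169 module is only built from this subdirectory
+# when both flags are substituted with 1. REV falls back to "unknown" if
+# neither a revision file nor git metadata is available.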
+ +REV := $(shell if test -s $(TOPDIR)/revision; then \ + cat $(TOPDIR)/revision; \ + else \ + git -C $(TOPDIR) describe 2>/dev/null || echo "unknown"; \ + fi) + +ifeq (@ENABLE_R8169@,1) +ifeq (@R8169_IN_SUBDIR@,1) + obj-m += ec_r8169.o + + ec_r8169-objs := \ + r8169_firmware-@KERNEL_R8169@-ethercat.o \ + r8169_main-@KERNEL_R8169@-ethercat.o \ + r8169_phy_config-@KERNEL_R8169@-ethercat.o + + CFLAGS_r8169_main-@KERNEL_R8169@-ethercat.o = -DREV=$(REV) +endif +endif + + +KBUILD_EXTRA_SYMBOLS := \ + @abs_top_builddir@/$(LINUX_SYMVERS) \ + @abs_top_builddir@/master/$(LINUX_SYMVERS) diff --git a/devices/r8169/Makefile.am b/devices/r8169/Makefile.am new file mode 100644 index 00000000..a0e4c4ec --- /dev/null +++ b/devices/r8169/Makefile.am @@ -0,0 +1,70 @@ +#------------------------------------------------------------------------------ +# +# Copyright (C) 2006-2021 Florian Pose, Ingenieurgemeinschaft IgH +# +# This file is part of the IgH EtherCAT Master. +# +# The IgH EtherCAT Master is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License version 2, as +# published by the Free Software Foundation. +# +# The IgH EtherCAT Master is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with the IgH EtherCAT Master; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# +# --- +# +# The license mentioned above concerns the source code only. Using the +# EtherCAT technology and brand is only permitted in compliance with the +# industrial property and similar rights of Beckhoff Automation GmbH. +# +#------------------------------------------------------------------------------ + +include $(top_srcdir)/Makefile.kbuild + +EXTRA_DIST = \ + r8169-5.10-ethercat.h \ + r8169-5.10-orig.h \ + r8169-5.14-ethercat.h \ + r8169-5.14-orig.h \ + r8169-5.15-ethercat.h \ + r8169-5.15-orig.h \ + r8169-6.1-ethercat.h \ + r8169-6.1-orig.h \ + r8169_firmware-5.10-ethercat.c \ + r8169_firmware-5.10-ethercat.h \ + r8169_firmware-5.10-orig.c \ + r8169_firmware-5.10-orig.h \ + r8169_firmware-5.14-ethercat.c \ + r8169_firmware-5.14-ethercat.h \ + r8169_firmware-5.14-orig.c \ + r8169_firmware-5.14-orig.h \ + r8169_firmware-5.15-ethercat.c \ + r8169_firmware-5.15-ethercat.h \ + r8169_firmware-5.15-orig.c \ + r8169_firmware-5.15-orig.h \ + r8169_firmware-6.1-ethercat.c \ + r8169_firmware-6.1-ethercat.h \ + r8169_firmware-6.1-orig.c \ + r8169_firmware-6.1-orig.h \ + r8169_main-5.10-ethercat.c \ + r8169_main-5.10-orig.c \ + r8169_main-5.14-ethercat.c \ + r8169_main-5.14-orig.c \ + r8169_main-5.15-ethercat.c \ + r8169_main-5.15-orig.c \ + r8169_main-6.1-ethercat.c \ + r8169_main-6.1-orig.c \ + r8169_phy_config-5.10-ethercat.c \ + r8169_phy_config-5.10-orig.c \ + r8169_phy_config-5.14-ethercat.c \ + r8169_phy_config-5.14-orig.c \ + r8169_phy_config-5.15-ethercat.c \ + r8169_phy_config-5.15-orig.c \ + r8169_phy_config-6.1-ethercat.c \ + r8169_phy_config-6.1-orig.c diff --git a/devices/r8169/r8169-5.10-ethercat.h b/devices/r8169/r8169-5.10-ethercat.h new file mode 100644 index 00000000..7be86ef5 --- /dev/null +++ b/devices/r8169/r8169-5.10-ethercat.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* r8169.h: RealTek 8169/8168/8101 ethernet driver. 
+ * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. + */ + +#include +#include + +enum mac_version { + /* support for ancient RTL_GIGA_MAC_VER_01 has been removed */ + RTL_GIGA_MAC_VER_02, + RTL_GIGA_MAC_VER_03, + RTL_GIGA_MAC_VER_04, + RTL_GIGA_MAC_VER_05, + RTL_GIGA_MAC_VER_06, + RTL_GIGA_MAC_VER_07, + RTL_GIGA_MAC_VER_08, + RTL_GIGA_MAC_VER_09, + RTL_GIGA_MAC_VER_10, + RTL_GIGA_MAC_VER_11, + RTL_GIGA_MAC_VER_12, + RTL_GIGA_MAC_VER_13, + RTL_GIGA_MAC_VER_14, + RTL_GIGA_MAC_VER_16, + RTL_GIGA_MAC_VER_17, + RTL_GIGA_MAC_VER_18, + RTL_GIGA_MAC_VER_19, + RTL_GIGA_MAC_VER_20, + RTL_GIGA_MAC_VER_21, + RTL_GIGA_MAC_VER_22, + RTL_GIGA_MAC_VER_23, + RTL_GIGA_MAC_VER_24, + RTL_GIGA_MAC_VER_25, + RTL_GIGA_MAC_VER_26, + RTL_GIGA_MAC_VER_27, + RTL_GIGA_MAC_VER_28, + RTL_GIGA_MAC_VER_29, + RTL_GIGA_MAC_VER_30, + RTL_GIGA_MAC_VER_31, + RTL_GIGA_MAC_VER_32, + RTL_GIGA_MAC_VER_33, + RTL_GIGA_MAC_VER_34, + RTL_GIGA_MAC_VER_35, + RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_VER_37, + RTL_GIGA_MAC_VER_38, + RTL_GIGA_MAC_VER_39, + RTL_GIGA_MAC_VER_40, + RTL_GIGA_MAC_VER_41, + RTL_GIGA_MAC_VER_42, + RTL_GIGA_MAC_VER_43, + RTL_GIGA_MAC_VER_44, + RTL_GIGA_MAC_VER_45, + RTL_GIGA_MAC_VER_46, + RTL_GIGA_MAC_VER_47, + RTL_GIGA_MAC_VER_48, + RTL_GIGA_MAC_VER_49, + RTL_GIGA_MAC_VER_50, + RTL_GIGA_MAC_VER_51, + RTL_GIGA_MAC_VER_52, + RTL_GIGA_MAC_VER_60, + RTL_GIGA_MAC_VER_61, + RTL_GIGA_MAC_VER_63, + RTL_GIGA_MAC_NONE +}; + +struct rtl8169_private; + +void r8169_apply_firmware(struct rtl8169_private *tp); +u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp); +u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr); +void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, + enum mac_version ver); diff --git a/devices/r8169/r8169-5.10-orig.h b/devices/r8169/r8169-5.10-orig.h new file mode 100644 index 00000000..7be86ef5 --- /dev/null +++ b/devices/r8169/r8169-5.10-orig.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* r8169.h: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +enum mac_version { + /* support for ancient RTL_GIGA_MAC_VER_01 has been removed */ + RTL_GIGA_MAC_VER_02, + RTL_GIGA_MAC_VER_03, + RTL_GIGA_MAC_VER_04, + RTL_GIGA_MAC_VER_05, + RTL_GIGA_MAC_VER_06, + RTL_GIGA_MAC_VER_07, + RTL_GIGA_MAC_VER_08, + RTL_GIGA_MAC_VER_09, + RTL_GIGA_MAC_VER_10, + RTL_GIGA_MAC_VER_11, + RTL_GIGA_MAC_VER_12, + RTL_GIGA_MAC_VER_13, + RTL_GIGA_MAC_VER_14, + RTL_GIGA_MAC_VER_16, + RTL_GIGA_MAC_VER_17, + RTL_GIGA_MAC_VER_18, + RTL_GIGA_MAC_VER_19, + RTL_GIGA_MAC_VER_20, + RTL_GIGA_MAC_VER_21, + RTL_GIGA_MAC_VER_22, + RTL_GIGA_MAC_VER_23, + RTL_GIGA_MAC_VER_24, + RTL_GIGA_MAC_VER_25, + RTL_GIGA_MAC_VER_26, + RTL_GIGA_MAC_VER_27, + RTL_GIGA_MAC_VER_28, + RTL_GIGA_MAC_VER_29, + RTL_GIGA_MAC_VER_30, + RTL_GIGA_MAC_VER_31, + RTL_GIGA_MAC_VER_32, + RTL_GIGA_MAC_VER_33, + RTL_GIGA_MAC_VER_34, + RTL_GIGA_MAC_VER_35, + RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_VER_37, + RTL_GIGA_MAC_VER_38, + RTL_GIGA_MAC_VER_39, + RTL_GIGA_MAC_VER_40, + RTL_GIGA_MAC_VER_41, + RTL_GIGA_MAC_VER_42, + RTL_GIGA_MAC_VER_43, + RTL_GIGA_MAC_VER_44, + RTL_GIGA_MAC_VER_45, + RTL_GIGA_MAC_VER_46, + RTL_GIGA_MAC_VER_47, + RTL_GIGA_MAC_VER_48, + RTL_GIGA_MAC_VER_49, + RTL_GIGA_MAC_VER_50, + RTL_GIGA_MAC_VER_51, + RTL_GIGA_MAC_VER_52, + RTL_GIGA_MAC_VER_60, + RTL_GIGA_MAC_VER_61, + RTL_GIGA_MAC_VER_63, + RTL_GIGA_MAC_NONE +}; + +struct rtl8169_private; + +void r8169_apply_firmware(struct rtl8169_private *tp); +u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp); +u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr); +void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, + enum mac_version ver); diff --git a/devices/r8169/r8169-5.14-ethercat.h b/devices/r8169/r8169-5.14-ethercat.h new file mode 100644 index 00000000..2728df46 --- /dev/null +++ b/devices/r8169/r8169-5.14-ethercat.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* r8169.h: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +enum mac_version { + /* support for ancient RTL_GIGA_MAC_VER_01 has been removed */ + RTL_GIGA_MAC_VER_02, + RTL_GIGA_MAC_VER_03, + RTL_GIGA_MAC_VER_04, + RTL_GIGA_MAC_VER_05, + RTL_GIGA_MAC_VER_06, + RTL_GIGA_MAC_VER_07, + RTL_GIGA_MAC_VER_08, + RTL_GIGA_MAC_VER_09, + RTL_GIGA_MAC_VER_10, + RTL_GIGA_MAC_VER_11, + RTL_GIGA_MAC_VER_12, + RTL_GIGA_MAC_VER_13, + RTL_GIGA_MAC_VER_14, + RTL_GIGA_MAC_VER_16, + RTL_GIGA_MAC_VER_17, + RTL_GIGA_MAC_VER_18, + RTL_GIGA_MAC_VER_19, + RTL_GIGA_MAC_VER_20, + RTL_GIGA_MAC_VER_21, + RTL_GIGA_MAC_VER_22, + RTL_GIGA_MAC_VER_23, + RTL_GIGA_MAC_VER_24, + RTL_GIGA_MAC_VER_25, + RTL_GIGA_MAC_VER_26, + RTL_GIGA_MAC_VER_27, + RTL_GIGA_MAC_VER_28, + RTL_GIGA_MAC_VER_29, + RTL_GIGA_MAC_VER_30, + RTL_GIGA_MAC_VER_31, + RTL_GIGA_MAC_VER_32, + RTL_GIGA_MAC_VER_33, + RTL_GIGA_MAC_VER_34, + RTL_GIGA_MAC_VER_35, + RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_VER_37, + RTL_GIGA_MAC_VER_38, + RTL_GIGA_MAC_VER_39, + RTL_GIGA_MAC_VER_40, + RTL_GIGA_MAC_VER_41, + RTL_GIGA_MAC_VER_42, + RTL_GIGA_MAC_VER_43, + RTL_GIGA_MAC_VER_44, + RTL_GIGA_MAC_VER_45, + RTL_GIGA_MAC_VER_46, + RTL_GIGA_MAC_VER_47, + RTL_GIGA_MAC_VER_48, + RTL_GIGA_MAC_VER_49, + RTL_GIGA_MAC_VER_50, + RTL_GIGA_MAC_VER_51, + RTL_GIGA_MAC_VER_52, + RTL_GIGA_MAC_VER_53, + RTL_GIGA_MAC_VER_60, + RTL_GIGA_MAC_VER_61, + RTL_GIGA_MAC_VER_63, + RTL_GIGA_MAC_NONE +}; + +struct rtl8169_private; + +void r8169_apply_firmware(struct rtl8169_private *tp); +u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp); +u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr); +void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, + enum mac_version ver); diff --git a/devices/r8169/r8169-5.14-orig.h b/devices/r8169/r8169-5.14-orig.h new file mode 100644 index 00000000..2728df46 --- /dev/null +++ b/devices/r8169/r8169-5.14-orig.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* r8169.h: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +enum mac_version { + /* support for ancient RTL_GIGA_MAC_VER_01 has been removed */ + RTL_GIGA_MAC_VER_02, + RTL_GIGA_MAC_VER_03, + RTL_GIGA_MAC_VER_04, + RTL_GIGA_MAC_VER_05, + RTL_GIGA_MAC_VER_06, + RTL_GIGA_MAC_VER_07, + RTL_GIGA_MAC_VER_08, + RTL_GIGA_MAC_VER_09, + RTL_GIGA_MAC_VER_10, + RTL_GIGA_MAC_VER_11, + RTL_GIGA_MAC_VER_12, + RTL_GIGA_MAC_VER_13, + RTL_GIGA_MAC_VER_14, + RTL_GIGA_MAC_VER_16, + RTL_GIGA_MAC_VER_17, + RTL_GIGA_MAC_VER_18, + RTL_GIGA_MAC_VER_19, + RTL_GIGA_MAC_VER_20, + RTL_GIGA_MAC_VER_21, + RTL_GIGA_MAC_VER_22, + RTL_GIGA_MAC_VER_23, + RTL_GIGA_MAC_VER_24, + RTL_GIGA_MAC_VER_25, + RTL_GIGA_MAC_VER_26, + RTL_GIGA_MAC_VER_27, + RTL_GIGA_MAC_VER_28, + RTL_GIGA_MAC_VER_29, + RTL_GIGA_MAC_VER_30, + RTL_GIGA_MAC_VER_31, + RTL_GIGA_MAC_VER_32, + RTL_GIGA_MAC_VER_33, + RTL_GIGA_MAC_VER_34, + RTL_GIGA_MAC_VER_35, + RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_VER_37, + RTL_GIGA_MAC_VER_38, + RTL_GIGA_MAC_VER_39, + RTL_GIGA_MAC_VER_40, + RTL_GIGA_MAC_VER_41, + RTL_GIGA_MAC_VER_42, + RTL_GIGA_MAC_VER_43, + RTL_GIGA_MAC_VER_44, + RTL_GIGA_MAC_VER_45, + RTL_GIGA_MAC_VER_46, + RTL_GIGA_MAC_VER_47, + RTL_GIGA_MAC_VER_48, + RTL_GIGA_MAC_VER_49, + RTL_GIGA_MAC_VER_50, + RTL_GIGA_MAC_VER_51, + RTL_GIGA_MAC_VER_52, + RTL_GIGA_MAC_VER_53, + RTL_GIGA_MAC_VER_60, + RTL_GIGA_MAC_VER_61, + RTL_GIGA_MAC_VER_63, + RTL_GIGA_MAC_NONE +}; + +struct rtl8169_private; + +void r8169_apply_firmware(struct rtl8169_private *tp); +u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp); +u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr); +void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, + enum mac_version ver); diff --git a/devices/r8169/r8169-5.15-ethercat.h b/devices/r8169/r8169-5.15-ethercat.h new file mode 100644 index 00000000..2728df46 --- /dev/null +++ b/devices/r8169/r8169-5.15-ethercat.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* r8169.h: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +enum mac_version { + /* support for ancient RTL_GIGA_MAC_VER_01 has been removed */ + RTL_GIGA_MAC_VER_02, + RTL_GIGA_MAC_VER_03, + RTL_GIGA_MAC_VER_04, + RTL_GIGA_MAC_VER_05, + RTL_GIGA_MAC_VER_06, + RTL_GIGA_MAC_VER_07, + RTL_GIGA_MAC_VER_08, + RTL_GIGA_MAC_VER_09, + RTL_GIGA_MAC_VER_10, + RTL_GIGA_MAC_VER_11, + RTL_GIGA_MAC_VER_12, + RTL_GIGA_MAC_VER_13, + RTL_GIGA_MAC_VER_14, + RTL_GIGA_MAC_VER_16, + RTL_GIGA_MAC_VER_17, + RTL_GIGA_MAC_VER_18, + RTL_GIGA_MAC_VER_19, + RTL_GIGA_MAC_VER_20, + RTL_GIGA_MAC_VER_21, + RTL_GIGA_MAC_VER_22, + RTL_GIGA_MAC_VER_23, + RTL_GIGA_MAC_VER_24, + RTL_GIGA_MAC_VER_25, + RTL_GIGA_MAC_VER_26, + RTL_GIGA_MAC_VER_27, + RTL_GIGA_MAC_VER_28, + RTL_GIGA_MAC_VER_29, + RTL_GIGA_MAC_VER_30, + RTL_GIGA_MAC_VER_31, + RTL_GIGA_MAC_VER_32, + RTL_GIGA_MAC_VER_33, + RTL_GIGA_MAC_VER_34, + RTL_GIGA_MAC_VER_35, + RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_VER_37, + RTL_GIGA_MAC_VER_38, + RTL_GIGA_MAC_VER_39, + RTL_GIGA_MAC_VER_40, + RTL_GIGA_MAC_VER_41, + RTL_GIGA_MAC_VER_42, + RTL_GIGA_MAC_VER_43, + RTL_GIGA_MAC_VER_44, + RTL_GIGA_MAC_VER_45, + RTL_GIGA_MAC_VER_46, + RTL_GIGA_MAC_VER_47, + RTL_GIGA_MAC_VER_48, + RTL_GIGA_MAC_VER_49, + RTL_GIGA_MAC_VER_50, + RTL_GIGA_MAC_VER_51, + RTL_GIGA_MAC_VER_52, + RTL_GIGA_MAC_VER_53, + RTL_GIGA_MAC_VER_60, + RTL_GIGA_MAC_VER_61, + RTL_GIGA_MAC_VER_63, + RTL_GIGA_MAC_NONE +}; + +struct rtl8169_private; + +void r8169_apply_firmware(struct rtl8169_private *tp); +u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp); +u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr); +void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, + enum mac_version ver); diff --git a/devices/r8169/r8169-5.15-orig.h b/devices/r8169/r8169-5.15-orig.h new file mode 100644 index 00000000..2728df46 --- /dev/null +++ b/devices/r8169/r8169-5.15-orig.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* r8169.h: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +enum mac_version { + /* support for ancient RTL_GIGA_MAC_VER_01 has been removed */ + RTL_GIGA_MAC_VER_02, + RTL_GIGA_MAC_VER_03, + RTL_GIGA_MAC_VER_04, + RTL_GIGA_MAC_VER_05, + RTL_GIGA_MAC_VER_06, + RTL_GIGA_MAC_VER_07, + RTL_GIGA_MAC_VER_08, + RTL_GIGA_MAC_VER_09, + RTL_GIGA_MAC_VER_10, + RTL_GIGA_MAC_VER_11, + RTL_GIGA_MAC_VER_12, + RTL_GIGA_MAC_VER_13, + RTL_GIGA_MAC_VER_14, + RTL_GIGA_MAC_VER_16, + RTL_GIGA_MAC_VER_17, + RTL_GIGA_MAC_VER_18, + RTL_GIGA_MAC_VER_19, + RTL_GIGA_MAC_VER_20, + RTL_GIGA_MAC_VER_21, + RTL_GIGA_MAC_VER_22, + RTL_GIGA_MAC_VER_23, + RTL_GIGA_MAC_VER_24, + RTL_GIGA_MAC_VER_25, + RTL_GIGA_MAC_VER_26, + RTL_GIGA_MAC_VER_27, + RTL_GIGA_MAC_VER_28, + RTL_GIGA_MAC_VER_29, + RTL_GIGA_MAC_VER_30, + RTL_GIGA_MAC_VER_31, + RTL_GIGA_MAC_VER_32, + RTL_GIGA_MAC_VER_33, + RTL_GIGA_MAC_VER_34, + RTL_GIGA_MAC_VER_35, + RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_VER_37, + RTL_GIGA_MAC_VER_38, + RTL_GIGA_MAC_VER_39, + RTL_GIGA_MAC_VER_40, + RTL_GIGA_MAC_VER_41, + RTL_GIGA_MAC_VER_42, + RTL_GIGA_MAC_VER_43, + RTL_GIGA_MAC_VER_44, + RTL_GIGA_MAC_VER_45, + RTL_GIGA_MAC_VER_46, + RTL_GIGA_MAC_VER_47, + RTL_GIGA_MAC_VER_48, + RTL_GIGA_MAC_VER_49, + RTL_GIGA_MAC_VER_50, + RTL_GIGA_MAC_VER_51, + RTL_GIGA_MAC_VER_52, + RTL_GIGA_MAC_VER_53, + RTL_GIGA_MAC_VER_60, + RTL_GIGA_MAC_VER_61, + RTL_GIGA_MAC_VER_63, + RTL_GIGA_MAC_NONE +}; + +struct rtl8169_private; + +void r8169_apply_firmware(struct rtl8169_private *tp); +u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp); +u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr); +void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, + enum mac_version ver); diff --git a/devices/r8169/r8169-6.1-ethercat.h b/devices/r8169/r8169-6.1-ethercat.h new file mode 100644 index 00000000..55ef8251 --- /dev/null +++ b/devices/r8169/r8169-6.1-ethercat.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* r8169.h: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +enum mac_version { + /* support for ancient RTL_GIGA_MAC_VER_01 has been removed */ + RTL_GIGA_MAC_VER_02, + RTL_GIGA_MAC_VER_03, + RTL_GIGA_MAC_VER_04, + RTL_GIGA_MAC_VER_05, + RTL_GIGA_MAC_VER_06, + RTL_GIGA_MAC_VER_07, + RTL_GIGA_MAC_VER_08, + RTL_GIGA_MAC_VER_09, + RTL_GIGA_MAC_VER_10, + RTL_GIGA_MAC_VER_11, + /* RTL_GIGA_MAC_VER_12 was handled the same as VER_17 */ + /* RTL_GIGA_MAC_VER_13 was merged with VER_10 */ + RTL_GIGA_MAC_VER_14, + /* RTL_GIGA_MAC_VER_16 was merged with VER_10 */ + RTL_GIGA_MAC_VER_17, + RTL_GIGA_MAC_VER_18, + RTL_GIGA_MAC_VER_19, + RTL_GIGA_MAC_VER_20, + RTL_GIGA_MAC_VER_21, + RTL_GIGA_MAC_VER_22, + RTL_GIGA_MAC_VER_23, + RTL_GIGA_MAC_VER_24, + RTL_GIGA_MAC_VER_25, + RTL_GIGA_MAC_VER_26, + /* support for RTL_GIGA_MAC_VER_27 has been removed */ + RTL_GIGA_MAC_VER_28, + RTL_GIGA_MAC_VER_29, + RTL_GIGA_MAC_VER_30, + RTL_GIGA_MAC_VER_31, + RTL_GIGA_MAC_VER_32, + RTL_GIGA_MAC_VER_33, + RTL_GIGA_MAC_VER_34, + RTL_GIGA_MAC_VER_35, + RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_VER_37, + RTL_GIGA_MAC_VER_38, + RTL_GIGA_MAC_VER_39, + RTL_GIGA_MAC_VER_40, + /* support for RTL_GIGA_MAC_VER_41 has been removed */ + RTL_GIGA_MAC_VER_42, + RTL_GIGA_MAC_VER_43, + RTL_GIGA_MAC_VER_44, + /* support for RTL_GIGA_MAC_VER_45 has been removed */ + RTL_GIGA_MAC_VER_46, + /* support for RTL_GIGA_MAC_VER_47 has been removed */ + RTL_GIGA_MAC_VER_48, + /* support for RTL_GIGA_MAC_VER_49 has been removed */ + /* support for RTL_GIGA_MAC_VER_50 has been removed */ + RTL_GIGA_MAC_VER_51, + RTL_GIGA_MAC_VER_52, + RTL_GIGA_MAC_VER_53, + /* support for RTL_GIGA_MAC_VER_60 has been removed */ + RTL_GIGA_MAC_VER_61, + RTL_GIGA_MAC_VER_63, + RTL_GIGA_MAC_NONE +}; + +struct rtl8169_private; + +void r8169_apply_firmware(struct rtl8169_private *tp); +u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp); +u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr); +void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, + enum mac_version ver); diff --git a/devices/r8169/r8169-6.1-orig.h b/devices/r8169/r8169-6.1-orig.h new file mode 100644 index 00000000..55ef8251 --- /dev/null +++ b/devices/r8169/r8169-6.1-orig.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* r8169.h: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +enum mac_version { + /* support for ancient RTL_GIGA_MAC_VER_01 has been removed */ + RTL_GIGA_MAC_VER_02, + RTL_GIGA_MAC_VER_03, + RTL_GIGA_MAC_VER_04, + RTL_GIGA_MAC_VER_05, + RTL_GIGA_MAC_VER_06, + RTL_GIGA_MAC_VER_07, + RTL_GIGA_MAC_VER_08, + RTL_GIGA_MAC_VER_09, + RTL_GIGA_MAC_VER_10, + RTL_GIGA_MAC_VER_11, + /* RTL_GIGA_MAC_VER_12 was handled the same as VER_17 */ + /* RTL_GIGA_MAC_VER_13 was merged with VER_10 */ + RTL_GIGA_MAC_VER_14, + /* RTL_GIGA_MAC_VER_16 was merged with VER_10 */ + RTL_GIGA_MAC_VER_17, + RTL_GIGA_MAC_VER_18, + RTL_GIGA_MAC_VER_19, + RTL_GIGA_MAC_VER_20, + RTL_GIGA_MAC_VER_21, + RTL_GIGA_MAC_VER_22, + RTL_GIGA_MAC_VER_23, + RTL_GIGA_MAC_VER_24, + RTL_GIGA_MAC_VER_25, + RTL_GIGA_MAC_VER_26, + /* support for RTL_GIGA_MAC_VER_27 has been removed */ + RTL_GIGA_MAC_VER_28, + RTL_GIGA_MAC_VER_29, + RTL_GIGA_MAC_VER_30, + RTL_GIGA_MAC_VER_31, + RTL_GIGA_MAC_VER_32, + RTL_GIGA_MAC_VER_33, + RTL_GIGA_MAC_VER_34, + RTL_GIGA_MAC_VER_35, + RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_VER_37, + RTL_GIGA_MAC_VER_38, + RTL_GIGA_MAC_VER_39, + RTL_GIGA_MAC_VER_40, + /* support for RTL_GIGA_MAC_VER_41 has been removed */ + RTL_GIGA_MAC_VER_42, + RTL_GIGA_MAC_VER_43, + RTL_GIGA_MAC_VER_44, + /* support for RTL_GIGA_MAC_VER_45 has been removed */ + RTL_GIGA_MAC_VER_46, + /* support for RTL_GIGA_MAC_VER_47 has been removed */ + RTL_GIGA_MAC_VER_48, + /* support for RTL_GIGA_MAC_VER_49 has been removed */ + /* support for RTL_GIGA_MAC_VER_50 has been removed */ + RTL_GIGA_MAC_VER_51, + RTL_GIGA_MAC_VER_52, + RTL_GIGA_MAC_VER_53, + /* support for RTL_GIGA_MAC_VER_60 has been removed */ + RTL_GIGA_MAC_VER_61, + RTL_GIGA_MAC_VER_63, + RTL_GIGA_MAC_NONE +}; + +struct rtl8169_private; + +void r8169_apply_firmware(struct rtl8169_private *tp); +u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp); +u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr); +void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, + enum mac_version ver); diff --git a/devices/r8169/r8169_firmware-5.10-ethercat.c b/devices/r8169/r8169_firmware-5.10-ethercat.c new file mode 100644 index 00000000..71e9b6e7 --- /dev/null +++ b/devices/r8169/r8169_firmware-5.10-ethercat.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* r8169_firmware.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +#include "r8169_firmware-5.10-ethercat.h" + +enum rtl_fw_opcode { + PHY_READ = 0x0, + PHY_DATA_OR = 0x1, + PHY_DATA_AND = 0x2, + PHY_BJMPN = 0x3, + PHY_MDIO_CHG = 0x4, + PHY_CLEAR_READCOUNT = 0x7, + PHY_WRITE = 0x8, + PHY_READCOUNT_EQ_SKIP = 0x9, + PHY_COMP_EQ_SKIPN = 0xa, + PHY_COMP_NEQ_SKIPN = 0xb, + PHY_WRITE_PREVIOUS = 0xc, + PHY_SKIPN = 0xd, + PHY_DELAY_MS = 0xe, +}; + +struct fw_info { + u32 magic; + char version[RTL_VER_SIZE]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __packed; + +#define FW_OPCODE_SIZE sizeof_field(struct rtl_fw_phy_action, code[0]) + +static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw) +{ + const struct firmware *fw = rtl_fw->fw; + struct fw_info *fw_info = (struct fw_info *)fw->data; + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + + if (fw->size < FW_OPCODE_SIZE) + return false; + + if (!fw_info->magic) { + size_t i, size, start; + u8 checksum = 0; + + if (fw->size < sizeof(*fw_info)) + return false; + + for (i = 0; i < fw->size; i++) + checksum += fw->data[i]; + if (checksum != 0) + return false; + + start = le32_to_cpu(fw_info->fw_start); + if (start > fw->size) + return false; + + size = le32_to_cpu(fw_info->fw_len); + if (size > (fw->size - start) / FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, fw_info->version, RTL_VER_SIZE); + + pa->code = (__le32 *)(fw->data + start); + pa->size = size; + } else { + if (fw->size % FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, rtl_fw->fw_name, RTL_VER_SIZE); + + pa->code = (__le32 *)fw->data; + pa->size = fw->size / FW_OPCODE_SIZE; + } + + return true; +} + +static bool rtl_fw_data_ok(struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 val = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + + switch (action >> 28) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_MDIO_CHG: + if (val > 1) + goto out; + break; + + case PHY_BJMPN: + if (regno > index) + goto out; + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= pa->size) + goto out; + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= pa->size) + goto out; + break; + + default: + dev_err(rtl_fw->dev, "Invalid action 0x%08x\n", action); + return false; + } + } + + return true; +out: + dev_err(rtl_fw->dev, "Out of range of firmware\n"); + return false; +} + +void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + rtl_fw_write_t fw_write = rtl_fw->phy_write; + rtl_fw_read_t fw_read = rtl_fw->phy_read; + int predata = 0, count = 0; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + enum rtl_fw_opcode opcode = action >> 28; + + if (!action) + break; + + switch (opcode) { + case PHY_READ: + predata = fw_read(tp, regno); + count++; + break; + case PHY_DATA_OR: + predata |= data; + break; + case PHY_DATA_AND: + predata &= data; + break; + case PHY_BJMPN: + index -= (regno + 1); + break; + case PHY_MDIO_CHG: + if (data) { + fw_write = rtl_fw->mac_mcu_write; + fw_read = rtl_fw->mac_mcu_read; + } else { + fw_write = rtl_fw->phy_write; + fw_read = 
rtl_fw->phy_read; + } + + break; + case PHY_CLEAR_READCOUNT: + count = 0; + break; + case PHY_WRITE: + fw_write(tp, regno, data); + break; + case PHY_READCOUNT_EQ_SKIP: + if (count == data) + index++; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + break; + case PHY_WRITE_PREVIOUS: + fw_write(tp, regno, predata); + break; + case PHY_SKIPN: + index += regno; + break; + case PHY_DELAY_MS: + msleep(data); + break; + } + } +} + +void rtl_fw_release_firmware(struct rtl_fw *rtl_fw) +{ + release_firmware(rtl_fw->fw); +} + +int rtl_fw_request_firmware(struct rtl_fw *rtl_fw) +{ + int rc; + + rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev); + if (rc < 0) + goto out; + + if (!rtl_fw_format_ok(rtl_fw) || !rtl_fw_data_ok(rtl_fw)) { + release_firmware(rtl_fw->fw); + rc = -EINVAL; + goto out; + } + + return 0; +out: + dev_err(rtl_fw->dev, "Unable to load firmware %s (%d)\n", + rtl_fw->fw_name, rc); + return rc; +} diff --git a/devices/r8169/r8169_firmware-5.10-ethercat.h b/devices/r8169/r8169_firmware-5.10-ethercat.h new file mode 100644 index 00000000..7dc348ed --- /dev/null +++ b/devices/r8169/r8169_firmware-5.10-ethercat.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* r8169_firmware.h: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. + */ + +#include +#include + +struct rtl8169_private; +typedef void (*rtl_fw_write_t)(struct rtl8169_private *tp, int reg, int val); +typedef int (*rtl_fw_read_t)(struct rtl8169_private *tp, int reg); + +#define RTL_VER_SIZE 32 + +struct rtl_fw { + rtl_fw_write_t phy_write; + rtl_fw_read_t phy_read; + rtl_fw_write_t mac_mcu_write; + rtl_fw_read_t mac_mcu_read; + const struct firmware *fw; + const char *fw_name; + struct device *dev; + + char version[RTL_VER_SIZE]; + + struct rtl_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; +}; + +int rtl_fw_request_firmware(struct rtl_fw *rtl_fw); +void rtl_fw_release_firmware(struct rtl_fw *rtl_fw); +void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw); diff --git a/devices/r8169/r8169_firmware-5.10-orig.c b/devices/r8169/r8169_firmware-5.10-orig.c new file mode 100644 index 00000000..cbc6b846 --- /dev/null +++ b/devices/r8169/r8169_firmware-5.10-orig.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* r8169_firmware.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +#include "r8169_firmware.h" + +enum rtl_fw_opcode { + PHY_READ = 0x0, + PHY_DATA_OR = 0x1, + PHY_DATA_AND = 0x2, + PHY_BJMPN = 0x3, + PHY_MDIO_CHG = 0x4, + PHY_CLEAR_READCOUNT = 0x7, + PHY_WRITE = 0x8, + PHY_READCOUNT_EQ_SKIP = 0x9, + PHY_COMP_EQ_SKIPN = 0xa, + PHY_COMP_NEQ_SKIPN = 0xb, + PHY_WRITE_PREVIOUS = 0xc, + PHY_SKIPN = 0xd, + PHY_DELAY_MS = 0xe, +}; + +struct fw_info { + u32 magic; + char version[RTL_VER_SIZE]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __packed; + +#define FW_OPCODE_SIZE sizeof_field(struct rtl_fw_phy_action, code[0]) + +static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw) +{ + const struct firmware *fw = rtl_fw->fw; + struct fw_info *fw_info = (struct fw_info *)fw->data; + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + + if (fw->size < FW_OPCODE_SIZE) + return false; + + if (!fw_info->magic) { + size_t i, size, start; + u8 checksum = 0; + + if (fw->size < sizeof(*fw_info)) + return false; + + for (i = 0; i < fw->size; i++) + checksum += fw->data[i]; + if (checksum != 0) + return false; + + start = le32_to_cpu(fw_info->fw_start); + if (start > fw->size) + return false; + + size = le32_to_cpu(fw_info->fw_len); + if (size > (fw->size - start) / FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, fw_info->version, RTL_VER_SIZE); + + pa->code = (__le32 *)(fw->data + start); + pa->size = size; + } else { + if (fw->size % FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, rtl_fw->fw_name, RTL_VER_SIZE); + + pa->code = (__le32 *)fw->data; + pa->size = fw->size / FW_OPCODE_SIZE; + } + + return true; +} + +static bool rtl_fw_data_ok(struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 val = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + + switch (action >> 28) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_MDIO_CHG: + if (val > 1) + goto out; + break; + + case PHY_BJMPN: + if (regno > index) + goto out; + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= pa->size) + goto out; + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= pa->size) + goto out; + break; + + default: + dev_err(rtl_fw->dev, "Invalid action 0x%08x\n", action); + return false; + } + } + + return true; +out: + dev_err(rtl_fw->dev, "Out of range of firmware\n"); + return false; +} + +void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + rtl_fw_write_t fw_write = rtl_fw->phy_write; + rtl_fw_read_t fw_read = rtl_fw->phy_read; + int predata = 0, count = 0; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + enum rtl_fw_opcode opcode = action >> 28; + + if (!action) + break; + + switch (opcode) { + case PHY_READ: + predata = fw_read(tp, regno); + count++; + break; + case PHY_DATA_OR: + predata |= data; + break; + case PHY_DATA_AND: + predata &= data; + break; + case PHY_BJMPN: + index -= (regno + 1); + break; + case PHY_MDIO_CHG: + if (data) { + fw_write = rtl_fw->mac_mcu_write; + fw_read = rtl_fw->mac_mcu_read; + } else { + fw_write = rtl_fw->phy_write; + fw_read = rtl_fw->phy_read; + } 
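+			/* Each 32-bit action word carries the opcode in bits
+			 * 31:28, a register/argument number in bits 27:16 and
+			 * a data value in bits 15:0; PHY_MDIO_CHG only switches
+			 * the fw_write/fw_read accessors between the PHY and
+			 * the MAC MCU register space for the remaining actions.
+			 */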
+ + break; + case PHY_CLEAR_READCOUNT: + count = 0; + break; + case PHY_WRITE: + fw_write(tp, regno, data); + break; + case PHY_READCOUNT_EQ_SKIP: + if (count == data) + index++; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + break; + case PHY_WRITE_PREVIOUS: + fw_write(tp, regno, predata); + break; + case PHY_SKIPN: + index += regno; + break; + case PHY_DELAY_MS: + msleep(data); + break; + } + } +} + +void rtl_fw_release_firmware(struct rtl_fw *rtl_fw) +{ + release_firmware(rtl_fw->fw); +} + +int rtl_fw_request_firmware(struct rtl_fw *rtl_fw) +{ + int rc; + + rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev); + if (rc < 0) + goto out; + + if (!rtl_fw_format_ok(rtl_fw) || !rtl_fw_data_ok(rtl_fw)) { + release_firmware(rtl_fw->fw); + rc = -EINVAL; + goto out; + } + + return 0; +out: + dev_err(rtl_fw->dev, "Unable to load firmware %s (%d)\n", + rtl_fw->fw_name, rc); + return rc; +} diff --git a/devices/r8169/r8169_firmware-5.10-orig.h b/devices/r8169/r8169_firmware-5.10-orig.h new file mode 100644 index 00000000..7dc348ed --- /dev/null +++ b/devices/r8169/r8169_firmware-5.10-orig.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* r8169_firmware.h: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. + */ + +#include +#include + +struct rtl8169_private; +typedef void (*rtl_fw_write_t)(struct rtl8169_private *tp, int reg, int val); +typedef int (*rtl_fw_read_t)(struct rtl8169_private *tp, int reg); + +#define RTL_VER_SIZE 32 + +struct rtl_fw { + rtl_fw_write_t phy_write; + rtl_fw_read_t phy_read; + rtl_fw_write_t mac_mcu_write; + rtl_fw_read_t mac_mcu_read; + const struct firmware *fw; + const char *fw_name; + struct device *dev; + + char version[RTL_VER_SIZE]; + + struct rtl_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; +}; + +int rtl_fw_request_firmware(struct rtl_fw *rtl_fw); +void rtl_fw_release_firmware(struct rtl_fw *rtl_fw); +void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw); diff --git a/devices/r8169/r8169_firmware-5.14-ethercat.c b/devices/r8169/r8169_firmware-5.14-ethercat.c new file mode 100644 index 00000000..c956a957 --- /dev/null +++ b/devices/r8169/r8169_firmware-5.14-ethercat.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* r8169_firmware.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +#include "r8169_firmware-5.14-ethercat.h" + +enum rtl_fw_opcode { + PHY_READ = 0x0, + PHY_DATA_OR = 0x1, + PHY_DATA_AND = 0x2, + PHY_BJMPN = 0x3, + PHY_MDIO_CHG = 0x4, + PHY_CLEAR_READCOUNT = 0x7, + PHY_WRITE = 0x8, + PHY_READCOUNT_EQ_SKIP = 0x9, + PHY_COMP_EQ_SKIPN = 0xa, + PHY_COMP_NEQ_SKIPN = 0xb, + PHY_WRITE_PREVIOUS = 0xc, + PHY_SKIPN = 0xd, + PHY_DELAY_MS = 0xe, +}; + +struct fw_info { + u32 magic; + char version[RTL_VER_SIZE]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __packed; + +#define FW_OPCODE_SIZE sizeof_field(struct rtl_fw_phy_action, code[0]) + +static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw) +{ + const struct firmware *fw = rtl_fw->fw; + struct fw_info *fw_info = (struct fw_info *)fw->data; + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + + if (fw->size < FW_OPCODE_SIZE) + return false; + + if (!fw_info->magic) { + size_t i, size, start; + u8 checksum = 0; + + if (fw->size < sizeof(*fw_info)) + return false; + + for (i = 0; i < fw->size; i++) + checksum += fw->data[i]; + if (checksum != 0) + return false; + + start = le32_to_cpu(fw_info->fw_start); + if (start > fw->size) + return false; + + size = le32_to_cpu(fw_info->fw_len); + if (size > (fw->size - start) / FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, fw_info->version, RTL_VER_SIZE); + + pa->code = (__le32 *)(fw->data + start); + pa->size = size; + } else { + if (fw->size % FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, rtl_fw->fw_name, RTL_VER_SIZE); + + pa->code = (__le32 *)fw->data; + pa->size = fw->size / FW_OPCODE_SIZE; + } + + return true; +} + +static bool rtl_fw_data_ok(struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 val = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + + switch (action >> 28) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_MDIO_CHG: + if (val > 1) + goto out; + break; + + case PHY_BJMPN: + if (regno > index) + goto out; + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= pa->size) + goto out; + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= pa->size) + goto out; + break; + + default: + dev_err(rtl_fw->dev, "Invalid action 0x%08x\n", action); + return false; + } + } + + return true; +out: + dev_err(rtl_fw->dev, "Out of range of firmware\n"); + return false; +} + +void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + rtl_fw_write_t fw_write = rtl_fw->phy_write; + rtl_fw_read_t fw_read = rtl_fw->phy_read; + int predata = 0, count = 0; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + enum rtl_fw_opcode opcode = action >> 28; + + if (!action) + break; + + switch (opcode) { + case PHY_READ: + predata = fw_read(tp, regno); + count++; + break; + case PHY_DATA_OR: + predata |= data; + break; + case PHY_DATA_AND: + predata &= data; + break; + case PHY_BJMPN: + index -= (regno + 1); + break; + case PHY_MDIO_CHG: + if (data) { + fw_write = rtl_fw->mac_mcu_write; + fw_read = rtl_fw->mac_mcu_read; + } else { + fw_write = rtl_fw->phy_write; + fw_read = 
rtl_fw->phy_read; + } + + break; + case PHY_CLEAR_READCOUNT: + count = 0; + break; + case PHY_WRITE: + fw_write(tp, regno, data); + break; + case PHY_READCOUNT_EQ_SKIP: + if (count == data) + index++; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + break; + case PHY_WRITE_PREVIOUS: + fw_write(tp, regno, predata); + break; + case PHY_SKIPN: + index += regno; + break; + case PHY_DELAY_MS: + msleep(data); + break; + } + } +} + +void rtl_fw_release_firmware(struct rtl_fw *rtl_fw) +{ + release_firmware(rtl_fw->fw); +} + +int rtl_fw_request_firmware(struct rtl_fw *rtl_fw) +{ + int rc; + + rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev); + if (rc < 0) + goto out; + + if (!rtl_fw_format_ok(rtl_fw) || !rtl_fw_data_ok(rtl_fw)) { + release_firmware(rtl_fw->fw); + rc = -EINVAL; + goto out; + } + + return 0; +out: + dev_err(rtl_fw->dev, "Unable to load firmware %s (%d)\n", + rtl_fw->fw_name, rc); + return rc; +} diff --git a/devices/r8169/r8169_firmware-5.14-ethercat.h b/devices/r8169/r8169_firmware-5.14-ethercat.h new file mode 100644 index 00000000..7dc348ed --- /dev/null +++ b/devices/r8169/r8169_firmware-5.14-ethercat.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* r8169_firmware.h: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. + */ + +#include +#include + +struct rtl8169_private; +typedef void (*rtl_fw_write_t)(struct rtl8169_private *tp, int reg, int val); +typedef int (*rtl_fw_read_t)(struct rtl8169_private *tp, int reg); + +#define RTL_VER_SIZE 32 + +struct rtl_fw { + rtl_fw_write_t phy_write; + rtl_fw_read_t phy_read; + rtl_fw_write_t mac_mcu_write; + rtl_fw_read_t mac_mcu_read; + const struct firmware *fw; + const char *fw_name; + struct device *dev; + + char version[RTL_VER_SIZE]; + + struct rtl_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; +}; + +int rtl_fw_request_firmware(struct rtl_fw *rtl_fw); +void rtl_fw_release_firmware(struct rtl_fw *rtl_fw); +void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw); diff --git a/devices/r8169/r8169_firmware-5.14-orig.c b/devices/r8169/r8169_firmware-5.14-orig.c new file mode 100644 index 00000000..cbc6b846 --- /dev/null +++ b/devices/r8169/r8169_firmware-5.14-orig.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* r8169_firmware.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +#include "r8169_firmware.h" + +enum rtl_fw_opcode { + PHY_READ = 0x0, + PHY_DATA_OR = 0x1, + PHY_DATA_AND = 0x2, + PHY_BJMPN = 0x3, + PHY_MDIO_CHG = 0x4, + PHY_CLEAR_READCOUNT = 0x7, + PHY_WRITE = 0x8, + PHY_READCOUNT_EQ_SKIP = 0x9, + PHY_COMP_EQ_SKIPN = 0xa, + PHY_COMP_NEQ_SKIPN = 0xb, + PHY_WRITE_PREVIOUS = 0xc, + PHY_SKIPN = 0xd, + PHY_DELAY_MS = 0xe, +}; + +struct fw_info { + u32 magic; + char version[RTL_VER_SIZE]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __packed; + +#define FW_OPCODE_SIZE sizeof_field(struct rtl_fw_phy_action, code[0]) + +static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw) +{ + const struct firmware *fw = rtl_fw->fw; + struct fw_info *fw_info = (struct fw_info *)fw->data; + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + + if (fw->size < FW_OPCODE_SIZE) + return false; + + if (!fw_info->magic) { + size_t i, size, start; + u8 checksum = 0; + + if (fw->size < sizeof(*fw_info)) + return false; + + for (i = 0; i < fw->size; i++) + checksum += fw->data[i]; + if (checksum != 0) + return false; + + start = le32_to_cpu(fw_info->fw_start); + if (start > fw->size) + return false; + + size = le32_to_cpu(fw_info->fw_len); + if (size > (fw->size - start) / FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, fw_info->version, RTL_VER_SIZE); + + pa->code = (__le32 *)(fw->data + start); + pa->size = size; + } else { + if (fw->size % FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, rtl_fw->fw_name, RTL_VER_SIZE); + + pa->code = (__le32 *)fw->data; + pa->size = fw->size / FW_OPCODE_SIZE; + } + + return true; +} + +static bool rtl_fw_data_ok(struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 val = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + + switch (action >> 28) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_MDIO_CHG: + if (val > 1) + goto out; + break; + + case PHY_BJMPN: + if (regno > index) + goto out; + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= pa->size) + goto out; + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= pa->size) + goto out; + break; + + default: + dev_err(rtl_fw->dev, "Invalid action 0x%08x\n", action); + return false; + } + } + + return true; +out: + dev_err(rtl_fw->dev, "Out of range of firmware\n"); + return false; +} + +void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + rtl_fw_write_t fw_write = rtl_fw->phy_write; + rtl_fw_read_t fw_read = rtl_fw->phy_read; + int predata = 0, count = 0; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + enum rtl_fw_opcode opcode = action >> 28; + + if (!action) + break; + + switch (opcode) { + case PHY_READ: + predata = fw_read(tp, regno); + count++; + break; + case PHY_DATA_OR: + predata |= data; + break; + case PHY_DATA_AND: + predata &= data; + break; + case PHY_BJMPN: + index -= (regno + 1); + break; + case PHY_MDIO_CHG: + if (data) { + fw_write = rtl_fw->mac_mcu_write; + fw_read = rtl_fw->mac_mcu_read; + } else { + fw_write = rtl_fw->phy_write; + fw_read = rtl_fw->phy_read; + } 
+ + break; + case PHY_CLEAR_READCOUNT: + count = 0; + break; + case PHY_WRITE: + fw_write(tp, regno, data); + break; + case PHY_READCOUNT_EQ_SKIP: + if (count == data) + index++; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + break; + case PHY_WRITE_PREVIOUS: + fw_write(tp, regno, predata); + break; + case PHY_SKIPN: + index += regno; + break; + case PHY_DELAY_MS: + msleep(data); + break; + } + } +} + +void rtl_fw_release_firmware(struct rtl_fw *rtl_fw) +{ + release_firmware(rtl_fw->fw); +} + +int rtl_fw_request_firmware(struct rtl_fw *rtl_fw) +{ + int rc; + + rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev); + if (rc < 0) + goto out; + + if (!rtl_fw_format_ok(rtl_fw) || !rtl_fw_data_ok(rtl_fw)) { + release_firmware(rtl_fw->fw); + rc = -EINVAL; + goto out; + } + + return 0; +out: + dev_err(rtl_fw->dev, "Unable to load firmware %s (%d)\n", + rtl_fw->fw_name, rc); + return rc; +} diff --git a/devices/r8169/r8169_firmware-5.14-orig.h b/devices/r8169/r8169_firmware-5.14-orig.h new file mode 100644 index 00000000..7dc348ed --- /dev/null +++ b/devices/r8169/r8169_firmware-5.14-orig.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* r8169_firmware.h: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. + */ + +#include +#include + +struct rtl8169_private; +typedef void (*rtl_fw_write_t)(struct rtl8169_private *tp, int reg, int val); +typedef int (*rtl_fw_read_t)(struct rtl8169_private *tp, int reg); + +#define RTL_VER_SIZE 32 + +struct rtl_fw { + rtl_fw_write_t phy_write; + rtl_fw_read_t phy_read; + rtl_fw_write_t mac_mcu_write; + rtl_fw_read_t mac_mcu_read; + const struct firmware *fw; + const char *fw_name; + struct device *dev; + + char version[RTL_VER_SIZE]; + + struct rtl_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; +}; + +int rtl_fw_request_firmware(struct rtl_fw *rtl_fw); +void rtl_fw_release_firmware(struct rtl_fw *rtl_fw); +void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw); diff --git a/devices/r8169/r8169_firmware-5.15-ethercat.c b/devices/r8169/r8169_firmware-5.15-ethercat.c new file mode 100644 index 00000000..c2da9ada --- /dev/null +++ b/devices/r8169/r8169_firmware-5.15-ethercat.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* r8169_firmware.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +#include "r8169_firmware-5.15-ethercat.h" + +enum rtl_fw_opcode { + PHY_READ = 0x0, + PHY_DATA_OR = 0x1, + PHY_DATA_AND = 0x2, + PHY_BJMPN = 0x3, + PHY_MDIO_CHG = 0x4, + PHY_CLEAR_READCOUNT = 0x7, + PHY_WRITE = 0x8, + PHY_READCOUNT_EQ_SKIP = 0x9, + PHY_COMP_EQ_SKIPN = 0xa, + PHY_COMP_NEQ_SKIPN = 0xb, + PHY_WRITE_PREVIOUS = 0xc, + PHY_SKIPN = 0xd, + PHY_DELAY_MS = 0xe, +}; + +struct fw_info { + u32 magic; + char version[RTL_VER_SIZE]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __packed; + +#define FW_OPCODE_SIZE sizeof_field(struct rtl_fw_phy_action, code[0]) + +static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw) +{ + const struct firmware *fw = rtl_fw->fw; + struct fw_info *fw_info = (struct fw_info *)fw->data; + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + + if (fw->size < FW_OPCODE_SIZE) + return false; + + if (!fw_info->magic) { + size_t i, size, start; + u8 checksum = 0; + + if (fw->size < sizeof(*fw_info)) + return false; + + for (i = 0; i < fw->size; i++) + checksum += fw->data[i]; + if (checksum != 0) + return false; + + start = le32_to_cpu(fw_info->fw_start); + if (start > fw->size) + return false; + + size = le32_to_cpu(fw_info->fw_len); + if (size > (fw->size - start) / FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, fw_info->version, RTL_VER_SIZE); + + pa->code = (__le32 *)(fw->data + start); + pa->size = size; + } else { + if (fw->size % FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, rtl_fw->fw_name, RTL_VER_SIZE); + + pa->code = (__le32 *)fw->data; + pa->size = fw->size / FW_OPCODE_SIZE; + } + + return true; +} + +static bool rtl_fw_data_ok(struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 val = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + + switch (action >> 28) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_MDIO_CHG: + if (val > 1) + goto out; + break; + + case PHY_BJMPN: + if (regno > index) + goto out; + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= pa->size) + goto out; + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= pa->size) + goto out; + break; + + default: + dev_err(rtl_fw->dev, "Invalid action 0x%08x\n", action); + return false; + } + } + + return true; +out: + dev_err(rtl_fw->dev, "Out of range of firmware\n"); + return false; +} + +void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + rtl_fw_write_t fw_write = rtl_fw->phy_write; + rtl_fw_read_t fw_read = rtl_fw->phy_read; + int predata = 0, count = 0; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + enum rtl_fw_opcode opcode = action >> 28; + + if (!action) + break; + + switch (opcode) { + case PHY_READ: + predata = fw_read(tp, regno); + count++; + break; + case PHY_DATA_OR: + predata |= data; + break; + case PHY_DATA_AND: + predata &= data; + break; + case PHY_BJMPN: + index -= (regno + 1); + break; + case PHY_MDIO_CHG: + if (data) { + fw_write = rtl_fw->mac_mcu_write; + fw_read = rtl_fw->mac_mcu_read; + } else { + fw_write = rtl_fw->phy_write; + fw_read = 
rtl_fw->phy_read; + } + + break; + case PHY_CLEAR_READCOUNT: + count = 0; + break; + case PHY_WRITE: + fw_write(tp, regno, data); + break; + case PHY_READCOUNT_EQ_SKIP: + if (count == data) + index++; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + break; + case PHY_WRITE_PREVIOUS: + fw_write(tp, regno, predata); + break; + case PHY_SKIPN: + index += regno; + break; + case PHY_DELAY_MS: + msleep(data); + break; + } + } +} + +void rtl_fw_release_firmware(struct rtl_fw *rtl_fw) +{ + release_firmware(rtl_fw->fw); +} + +int rtl_fw_request_firmware(struct rtl_fw *rtl_fw) +{ + int rc; + + rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev); + if (rc < 0) + goto out; + + if (!rtl_fw_format_ok(rtl_fw) || !rtl_fw_data_ok(rtl_fw)) { + release_firmware(rtl_fw->fw); + rc = -EINVAL; + goto out; + } + + return 0; +out: + dev_err(rtl_fw->dev, "Unable to load firmware %s (%d)\n", + rtl_fw->fw_name, rc); + return rc; +} diff --git a/devices/r8169/r8169_firmware-5.15-ethercat.h b/devices/r8169/r8169_firmware-5.15-ethercat.h new file mode 100644 index 00000000..7dc348ed --- /dev/null +++ b/devices/r8169/r8169_firmware-5.15-ethercat.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* r8169_firmware.h: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. + */ + +#include +#include + +struct rtl8169_private; +typedef void (*rtl_fw_write_t)(struct rtl8169_private *tp, int reg, int val); +typedef int (*rtl_fw_read_t)(struct rtl8169_private *tp, int reg); + +#define RTL_VER_SIZE 32 + +struct rtl_fw { + rtl_fw_write_t phy_write; + rtl_fw_read_t phy_read; + rtl_fw_write_t mac_mcu_write; + rtl_fw_read_t mac_mcu_read; + const struct firmware *fw; + const char *fw_name; + struct device *dev; + + char version[RTL_VER_SIZE]; + + struct rtl_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; +}; + +int rtl_fw_request_firmware(struct rtl_fw *rtl_fw); +void rtl_fw_release_firmware(struct rtl_fw *rtl_fw); +void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw); diff --git a/devices/r8169/r8169_firmware-5.15-orig.c b/devices/r8169/r8169_firmware-5.15-orig.c new file mode 100644 index 00000000..cbc6b846 --- /dev/null +++ b/devices/r8169/r8169_firmware-5.15-orig.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* r8169_firmware.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +#include "r8169_firmware.h" + +enum rtl_fw_opcode { + PHY_READ = 0x0, + PHY_DATA_OR = 0x1, + PHY_DATA_AND = 0x2, + PHY_BJMPN = 0x3, + PHY_MDIO_CHG = 0x4, + PHY_CLEAR_READCOUNT = 0x7, + PHY_WRITE = 0x8, + PHY_READCOUNT_EQ_SKIP = 0x9, + PHY_COMP_EQ_SKIPN = 0xa, + PHY_COMP_NEQ_SKIPN = 0xb, + PHY_WRITE_PREVIOUS = 0xc, + PHY_SKIPN = 0xd, + PHY_DELAY_MS = 0xe, +}; + +struct fw_info { + u32 magic; + char version[RTL_VER_SIZE]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __packed; + +#define FW_OPCODE_SIZE sizeof_field(struct rtl_fw_phy_action, code[0]) + +static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw) +{ + const struct firmware *fw = rtl_fw->fw; + struct fw_info *fw_info = (struct fw_info *)fw->data; + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + + if (fw->size < FW_OPCODE_SIZE) + return false; + + if (!fw_info->magic) { + size_t i, size, start; + u8 checksum = 0; + + if (fw->size < sizeof(*fw_info)) + return false; + + for (i = 0; i < fw->size; i++) + checksum += fw->data[i]; + if (checksum != 0) + return false; + + start = le32_to_cpu(fw_info->fw_start); + if (start > fw->size) + return false; + + size = le32_to_cpu(fw_info->fw_len); + if (size > (fw->size - start) / FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, fw_info->version, RTL_VER_SIZE); + + pa->code = (__le32 *)(fw->data + start); + pa->size = size; + } else { + if (fw->size % FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, rtl_fw->fw_name, RTL_VER_SIZE); + + pa->code = (__le32 *)fw->data; + pa->size = fw->size / FW_OPCODE_SIZE; + } + + return true; +} + +static bool rtl_fw_data_ok(struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 val = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + + switch (action >> 28) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_MDIO_CHG: + if (val > 1) + goto out; + break; + + case PHY_BJMPN: + if (regno > index) + goto out; + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= pa->size) + goto out; + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= pa->size) + goto out; + break; + + default: + dev_err(rtl_fw->dev, "Invalid action 0x%08x\n", action); + return false; + } + } + + return true; +out: + dev_err(rtl_fw->dev, "Out of range of firmware\n"); + return false; +} + +void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + rtl_fw_write_t fw_write = rtl_fw->phy_write; + rtl_fw_read_t fw_read = rtl_fw->phy_read; + int predata = 0, count = 0; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + enum rtl_fw_opcode opcode = action >> 28; + + if (!action) + break; + + switch (opcode) { + case PHY_READ: + predata = fw_read(tp, regno); + count++; + break; + case PHY_DATA_OR: + predata |= data; + break; + case PHY_DATA_AND: + predata &= data; + break; + case PHY_BJMPN: + index -= (regno + 1); + break; + case PHY_MDIO_CHG: + if (data) { + fw_write = rtl_fw->mac_mcu_write; + fw_read = rtl_fw->mac_mcu_read; + } else { + fw_write = rtl_fw->phy_write; + fw_read = rtl_fw->phy_read; + } 
+ + break; + case PHY_CLEAR_READCOUNT: + count = 0; + break; + case PHY_WRITE: + fw_write(tp, regno, data); + break; + case PHY_READCOUNT_EQ_SKIP: + if (count == data) + index++; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + break; + case PHY_WRITE_PREVIOUS: + fw_write(tp, regno, predata); + break; + case PHY_SKIPN: + index += regno; + break; + case PHY_DELAY_MS: + msleep(data); + break; + } + } +} + +void rtl_fw_release_firmware(struct rtl_fw *rtl_fw) +{ + release_firmware(rtl_fw->fw); +} + +int rtl_fw_request_firmware(struct rtl_fw *rtl_fw) +{ + int rc; + + rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev); + if (rc < 0) + goto out; + + if (!rtl_fw_format_ok(rtl_fw) || !rtl_fw_data_ok(rtl_fw)) { + release_firmware(rtl_fw->fw); + rc = -EINVAL; + goto out; + } + + return 0; +out: + dev_err(rtl_fw->dev, "Unable to load firmware %s (%d)\n", + rtl_fw->fw_name, rc); + return rc; +} diff --git a/devices/r8169/r8169_firmware-5.15-orig.h b/devices/r8169/r8169_firmware-5.15-orig.h new file mode 100644 index 00000000..7dc348ed --- /dev/null +++ b/devices/r8169/r8169_firmware-5.15-orig.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* r8169_firmware.h: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. + */ + +#include +#include + +struct rtl8169_private; +typedef void (*rtl_fw_write_t)(struct rtl8169_private *tp, int reg, int val); +typedef int (*rtl_fw_read_t)(struct rtl8169_private *tp, int reg); + +#define RTL_VER_SIZE 32 + +struct rtl_fw { + rtl_fw_write_t phy_write; + rtl_fw_read_t phy_read; + rtl_fw_write_t mac_mcu_write; + rtl_fw_read_t mac_mcu_read; + const struct firmware *fw; + const char *fw_name; + struct device *dev; + + char version[RTL_VER_SIZE]; + + struct rtl_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; +}; + +int rtl_fw_request_firmware(struct rtl_fw *rtl_fw); +void rtl_fw_release_firmware(struct rtl_fw *rtl_fw); +void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw); diff --git a/devices/r8169/r8169_firmware-6.1-ethercat.c b/devices/r8169/r8169_firmware-6.1-ethercat.c new file mode 100644 index 00000000..435ab67a --- /dev/null +++ b/devices/r8169/r8169_firmware-6.1-ethercat.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* r8169_firmware.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +#include "r8169_firmware-6.1-ethercat.h" + +enum rtl_fw_opcode { + PHY_READ = 0x0, + PHY_DATA_OR = 0x1, + PHY_DATA_AND = 0x2, + PHY_BJMPN = 0x3, + PHY_MDIO_CHG = 0x4, + PHY_CLEAR_READCOUNT = 0x7, + PHY_WRITE = 0x8, + PHY_READCOUNT_EQ_SKIP = 0x9, + PHY_COMP_EQ_SKIPN = 0xa, + PHY_COMP_NEQ_SKIPN = 0xb, + PHY_WRITE_PREVIOUS = 0xc, + PHY_SKIPN = 0xd, + PHY_DELAY_MS = 0xe, +}; + +struct fw_info { + u32 magic; + char version[RTL_VER_SIZE]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __packed; + +#define FW_OPCODE_SIZE sizeof_field(struct rtl_fw_phy_action, code[0]) + +static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw) +{ + const struct firmware *fw = rtl_fw->fw; + struct fw_info *fw_info = (struct fw_info *)fw->data; + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + + if (fw->size < FW_OPCODE_SIZE) + return false; + + if (!fw_info->magic) { + size_t i, size, start; + u8 checksum = 0; + + if (fw->size < sizeof(*fw_info)) + return false; + + for (i = 0; i < fw->size; i++) + checksum += fw->data[i]; + if (checksum != 0) + return false; + + start = le32_to_cpu(fw_info->fw_start); + if (start > fw->size) + return false; + + size = le32_to_cpu(fw_info->fw_len); + if (size > (fw->size - start) / FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, fw_info->version, RTL_VER_SIZE); + + pa->code = (__le32 *)(fw->data + start); + pa->size = size; + } else { + if (fw->size % FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, rtl_fw->fw_name, RTL_VER_SIZE); + + pa->code = (__le32 *)fw->data; + pa->size = fw->size / FW_OPCODE_SIZE; + } + + return true; +} + +static bool rtl_fw_data_ok(struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 val = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + + switch (action >> 28) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_MDIO_CHG: + if (val > 1) + goto out; + break; + + case PHY_BJMPN: + if (regno > index) + goto out; + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= pa->size) + goto out; + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= pa->size) + goto out; + break; + + default: + dev_err(rtl_fw->dev, "Invalid action 0x%08x\n", action); + return false; + } + } + + return true; +out: + dev_err(rtl_fw->dev, "Out of range of firmware\n"); + return false; +} + +void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + rtl_fw_write_t fw_write = rtl_fw->phy_write; + rtl_fw_read_t fw_read = rtl_fw->phy_read; + int predata = 0, count = 0; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + enum rtl_fw_opcode opcode = action >> 28; + + if (!action) + break; + + switch (opcode) { + case PHY_READ: + predata = fw_read(tp, regno); + count++; + break; + case PHY_DATA_OR: + predata |= data; + break; + case PHY_DATA_AND: + predata &= data; + break; + case PHY_BJMPN: + index -= (regno + 1); + break; + case PHY_MDIO_CHG: + if (data) { + fw_write = rtl_fw->mac_mcu_write; + fw_read = rtl_fw->mac_mcu_read; + } else { + fw_write = rtl_fw->phy_write; + fw_read = 
rtl_fw->phy_read; + } + + break; + case PHY_CLEAR_READCOUNT: + count = 0; + break; + case PHY_WRITE: + fw_write(tp, regno, data); + break; + case PHY_READCOUNT_EQ_SKIP: + if (count == data) + index++; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + break; + case PHY_WRITE_PREVIOUS: + fw_write(tp, regno, predata); + break; + case PHY_SKIPN: + index += regno; + break; + case PHY_DELAY_MS: + msleep(data); + break; + } + } +} + +void rtl_fw_release_firmware(struct rtl_fw *rtl_fw) +{ + release_firmware(rtl_fw->fw); +} + +int rtl_fw_request_firmware(struct rtl_fw *rtl_fw) +{ + int rc; + + rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev); + if (rc < 0) + goto out; + + if (!rtl_fw_format_ok(rtl_fw) || !rtl_fw_data_ok(rtl_fw)) { + release_firmware(rtl_fw->fw); + rc = -EINVAL; + goto out; + } + + return 0; +out: + dev_err(rtl_fw->dev, "Unable to load firmware %s (%d)\n", + rtl_fw->fw_name, rc); + return rc; +} diff --git a/devices/r8169/r8169_firmware-6.1-ethercat.h b/devices/r8169/r8169_firmware-6.1-ethercat.h new file mode 100644 index 00000000..7dc348ed --- /dev/null +++ b/devices/r8169/r8169_firmware-6.1-ethercat.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* r8169_firmware.h: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. + */ + +#include +#include + +struct rtl8169_private; +typedef void (*rtl_fw_write_t)(struct rtl8169_private *tp, int reg, int val); +typedef int (*rtl_fw_read_t)(struct rtl8169_private *tp, int reg); + +#define RTL_VER_SIZE 32 + +struct rtl_fw { + rtl_fw_write_t phy_write; + rtl_fw_read_t phy_read; + rtl_fw_write_t mac_mcu_write; + rtl_fw_read_t mac_mcu_read; + const struct firmware *fw; + const char *fw_name; + struct device *dev; + + char version[RTL_VER_SIZE]; + + struct rtl_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; +}; + +int rtl_fw_request_firmware(struct rtl_fw *rtl_fw); +void rtl_fw_release_firmware(struct rtl_fw *rtl_fw); +void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw); diff --git a/devices/r8169/r8169_firmware-6.1-orig.c b/devices/r8169/r8169_firmware-6.1-orig.c new file mode 100644 index 00000000..cbc6b846 --- /dev/null +++ b/devices/r8169/r8169_firmware-6.1-orig.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* r8169_firmware.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +#include "r8169_firmware.h" + +enum rtl_fw_opcode { + PHY_READ = 0x0, + PHY_DATA_OR = 0x1, + PHY_DATA_AND = 0x2, + PHY_BJMPN = 0x3, + PHY_MDIO_CHG = 0x4, + PHY_CLEAR_READCOUNT = 0x7, + PHY_WRITE = 0x8, + PHY_READCOUNT_EQ_SKIP = 0x9, + PHY_COMP_EQ_SKIPN = 0xa, + PHY_COMP_NEQ_SKIPN = 0xb, + PHY_WRITE_PREVIOUS = 0xc, + PHY_SKIPN = 0xd, + PHY_DELAY_MS = 0xe, +}; + +struct fw_info { + u32 magic; + char version[RTL_VER_SIZE]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __packed; + +#define FW_OPCODE_SIZE sizeof_field(struct rtl_fw_phy_action, code[0]) + +static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw) +{ + const struct firmware *fw = rtl_fw->fw; + struct fw_info *fw_info = (struct fw_info *)fw->data; + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + + if (fw->size < FW_OPCODE_SIZE) + return false; + + if (!fw_info->magic) { + size_t i, size, start; + u8 checksum = 0; + + if (fw->size < sizeof(*fw_info)) + return false; + + for (i = 0; i < fw->size; i++) + checksum += fw->data[i]; + if (checksum != 0) + return false; + + start = le32_to_cpu(fw_info->fw_start); + if (start > fw->size) + return false; + + size = le32_to_cpu(fw_info->fw_len); + if (size > (fw->size - start) / FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, fw_info->version, RTL_VER_SIZE); + + pa->code = (__le32 *)(fw->data + start); + pa->size = size; + } else { + if (fw->size % FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, rtl_fw->fw_name, RTL_VER_SIZE); + + pa->code = (__le32 *)fw->data; + pa->size = fw->size / FW_OPCODE_SIZE; + } + + return true; +} + +static bool rtl_fw_data_ok(struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 val = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + + switch (action >> 28) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_MDIO_CHG: + if (val > 1) + goto out; + break; + + case PHY_BJMPN: + if (regno > index) + goto out; + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= pa->size) + goto out; + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= pa->size) + goto out; + break; + + default: + dev_err(rtl_fw->dev, "Invalid action 0x%08x\n", action); + return false; + } + } + + return true; +out: + dev_err(rtl_fw->dev, "Out of range of firmware\n"); + return false; +} + +void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + rtl_fw_write_t fw_write = rtl_fw->phy_write; + rtl_fw_read_t fw_read = rtl_fw->phy_read; + int predata = 0, count = 0; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + enum rtl_fw_opcode opcode = action >> 28; + + if (!action) + break; + + switch (opcode) { + case PHY_READ: + predata = fw_read(tp, regno); + count++; + break; + case PHY_DATA_OR: + predata |= data; + break; + case PHY_DATA_AND: + predata &= data; + break; + case PHY_BJMPN: + index -= (regno + 1); + break; + case PHY_MDIO_CHG: + if (data) { + fw_write = rtl_fw->mac_mcu_write; + fw_read = rtl_fw->mac_mcu_read; + } else { + fw_write = rtl_fw->phy_write; + fw_read = rtl_fw->phy_read; + } 
+ + break; + case PHY_CLEAR_READCOUNT: + count = 0; + break; + case PHY_WRITE: + fw_write(tp, regno, data); + break; + case PHY_READCOUNT_EQ_SKIP: + if (count == data) + index++; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + break; + case PHY_WRITE_PREVIOUS: + fw_write(tp, regno, predata); + break; + case PHY_SKIPN: + index += regno; + break; + case PHY_DELAY_MS: + msleep(data); + break; + } + } +} + +void rtl_fw_release_firmware(struct rtl_fw *rtl_fw) +{ + release_firmware(rtl_fw->fw); +} + +int rtl_fw_request_firmware(struct rtl_fw *rtl_fw) +{ + int rc; + + rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev); + if (rc < 0) + goto out; + + if (!rtl_fw_format_ok(rtl_fw) || !rtl_fw_data_ok(rtl_fw)) { + release_firmware(rtl_fw->fw); + rc = -EINVAL; + goto out; + } + + return 0; +out: + dev_err(rtl_fw->dev, "Unable to load firmware %s (%d)\n", + rtl_fw->fw_name, rc); + return rc; +} diff --git a/devices/r8169/r8169_firmware-6.1-orig.h b/devices/r8169/r8169_firmware-6.1-orig.h new file mode 100644 index 00000000..7dc348ed --- /dev/null +++ b/devices/r8169/r8169_firmware-6.1-orig.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* r8169_firmware.h: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. + */ + +#include +#include + +struct rtl8169_private; +typedef void (*rtl_fw_write_t)(struct rtl8169_private *tp, int reg, int val); +typedef int (*rtl_fw_read_t)(struct rtl8169_private *tp, int reg); + +#define RTL_VER_SIZE 32 + +struct rtl_fw { + rtl_fw_write_t phy_write; + rtl_fw_read_t phy_read; + rtl_fw_write_t mac_mcu_write; + rtl_fw_read_t mac_mcu_read; + const struct firmware *fw; + const char *fw_name; + struct device *dev; + + char version[RTL_VER_SIZE]; + + struct rtl_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; +}; + +int rtl_fw_request_firmware(struct rtl_fw *rtl_fw); +void rtl_fw_release_firmware(struct rtl_fw *rtl_fw); +void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw); diff --git a/devices/r8169/r8169_main-5.10-ethercat.c b/devices/r8169/r8169_main-5.10-ethercat.c new file mode 100644 index 00000000..91595421 --- /dev/null +++ b/devices/r8169/r8169_main-5.10-ethercat.c @@ -0,0 +1,5612 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * r8169.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "r8169-5.10-ethercat.h" +#include "r8169_firmware-5.10-ethercat.h" + +#include "../ecdev.h" + +#define MODULENAME "r8169" + +#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" +#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" +#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" +#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" +#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" +#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" +#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" +#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" +#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" +#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" +#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw" +#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw" +#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" +#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" +#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" +#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw" +#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" +#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw" +#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" +#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" +#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw" +#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw" + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. */ +#define MC_FILTER_LIMIT 32 + +#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ +#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ + +#define R8169_REGS_SIZE 256 +#define R8169_RX_BUF_SIZE (SZ_16K - 1) +#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ +#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */ +#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) +#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + +#define OCP_STD_PHY_BASE 0xa400 + +#define RTL_CFG_NO_GBIT 1 + +/* write/read MMIO register */ +#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) +#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) +#define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg)) +#define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg)) +#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg)) +#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg)) + +#define JUMBO_4K (4 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) + +static const struct { + const char *name; + const char *fw_name; +} rtl_chip_infos[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_02] = {"RTL8169s" }, + [RTL_GIGA_MAC_VER_03] = {"RTL8110s" }, + [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" }, + [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" }, + [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" }, + /* PCI-E devices. 
*/ + [RTL_GIGA_MAC_VER_07] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_08] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" }, + [RTL_GIGA_MAC_VER_10] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_13] = {"RTL8101e/RTL8100e" }, + [RTL_GIGA_MAC_VER_14] = {"RTL8401" }, + [RTL_GIGA_MAC_VER_16] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1}, + [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2}, + [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1}, + [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2}, + [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3}, + [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1}, + [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2}, + [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 }, + [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 }, + [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1}, + [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2}, + [RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" }, + [RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3}, + [RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2}, + [RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 }, + [RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1}, + [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2}, + [RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1}, + [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2}, + [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3}, + [RTL_GIGA_MAC_VER_60] = {"RTL8125A" }, + [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3}, + /* reserve 62 for CFG_METHOD_4 in the vendor driver */ + [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2}, +}; + +static const struct pci_device_id rtl8169_pci_tbl[] = { + { PCI_VDEVICE(REALTEK, 0x2502) }, + { PCI_VDEVICE(REALTEK, 0x2600) }, + { PCI_VDEVICE(REALTEK, 0x8129) }, + { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT }, + { PCI_VDEVICE(REALTEK, 0x8161) }, + { PCI_VDEVICE(REALTEK, 0x8162) }, + { PCI_VDEVICE(REALTEK, 0x8167) }, + { PCI_VDEVICE(REALTEK, 0x8168) }, + { PCI_VDEVICE(NCUBE, 0x8168) }, + { PCI_VDEVICE(REALTEK, 0x8169) }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 }, + { PCI_VDEVICE(DLINK, 0x4300) }, + { PCI_VDEVICE(DLINK, 0x4302) }, + { PCI_VDEVICE(AT, 0xc107) }, + { PCI_VDEVICE(USR, 0x0116) }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 }, + { 0x0001, 0x8168, PCI_ANY_ID, 0x2410 }, + { PCI_VDEVICE(REALTEK, 0x8125) }, + { PCI_VDEVICE(REALTEK, 0x3000) }, + {} +}; + +enum rtl_registers { + MAC0 = 0, /* Ethernet hardware address. 
*/ + MAC4 = 4, + MAR0 = 8, /* Multicast filter. */ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3c, + IntrStatus = 0x3e, + + TxConfig = 0x40, +#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */ +#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */ + + RxConfig = 0x44, +#define RX128_INT_EN (1 << 15) /* 8111c and later */ +#define RX_MULTI_EN (1 << 14) /* 8111c only */ +#define RXCFG_FIFO_SHIFT 13 + /* No threshold before first PCI xfer */ +#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) +#define RX_EARLY_OFF (1 << 11) +#define RXCFG_DMA_SHIFT 8 + /* Unlimited maximum PCI burst. */ +#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) + + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, +#define PME_SIGNAL (1 << 5) /* 8168c and later */ + + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + PHYAR = 0x60, + PHYstatus = 0x6c, + RxMaxSize = 0xda, + CPlusCmd = 0xe0, + IntrMitigate = 0xe2, + +#define RTL_COALESCE_TX_USECS GENMASK(15, 12) +#define RTL_COALESCE_TX_FRAMES GENMASK(11, 8) +#define RTL_COALESCE_RX_USECS GENMASK(7, 4) +#define RTL_COALESCE_RX_FRAMES GENMASK(3, 0) + +#define RTL_COALESCE_T_MAX 0x0fU +#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_T_MAX * 4) + + RxDescAddrLow = 0xe4, + RxDescAddrHigh = 0xe8, + EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ + +#define NoEarlyTx 0x3f /* Max value : no early transmit. */ + + MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */ + +#define TxPacketMax (8064 >> 7) +#define EarlySize 0x27 + + FuncEvent = 0xf0, + FuncEventMask = 0xf4, + FuncPresetState = 0xf8, + IBCR0 = 0xf8, + IBCR2 = 0xf9, + IBIMR0 = 0xfa, + IBISR0 = 0xfb, + FuncForceEvent = 0xfc, +}; + +enum rtl8168_8101_registers { + CSIDR = 0x64, + CSIAR = 0x68, +#define CSIAR_FLAG 0x80000000 +#define CSIAR_WRITE_CMD 0x80000000 +#define CSIAR_BYTE_ENABLE 0x0000f000 +#define CSIAR_ADDR_MASK 0x00000fff + PMCH = 0x6f, + EPHYAR = 0x80, +#define EPHYAR_FLAG 0x80000000 +#define EPHYAR_WRITE_CMD 0x80000000 +#define EPHYAR_REG_MASK 0x1f +#define EPHYAR_REG_SHIFT 16 +#define EPHYAR_DATA_MASK 0xffff + DLLPR = 0xd0, +#define PFM_EN (1 << 6) +#define TX_10M_PS_EN (1 << 7) + DBG_REG = 0xd1, +#define FIX_NAK_1 (1 << 4) +#define FIX_NAK_2 (1 << 3) + TWSI = 0xd2, + MCU = 0xd3, +#define NOW_IS_OOB (1 << 7) +#define TX_EMPTY (1 << 5) +#define RX_EMPTY (1 << 4) +#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY) +#define EN_NDP (1 << 3) +#define EN_OOB_RESET (1 << 2) +#define LINK_LIST_RDY (1 << 1) + EFUSEAR = 0xdc, +#define EFUSEAR_FLAG 0x80000000 +#define EFUSEAR_WRITE_CMD 0x80000000 +#define EFUSEAR_READ_CMD 0x00000000 +#define EFUSEAR_REG_MASK 0x03ff +#define EFUSEAR_REG_SHIFT 8 +#define EFUSEAR_DATA_MASK 0xff + MISC_1 = 0xf2, +#define PFM_D3COLD_EN (1 << 6) +}; + +enum rtl8168_registers { + LED_FREQ = 0x1a, + EEE_LED = 0x1b, + ERIDR = 0x70, + ERIAR = 0x74, +#define ERIAR_FLAG 0x80000000 +#define ERIAR_WRITE_CMD 0x80000000 +#define ERIAR_READ_CMD 0x00000000 +#define ERIAR_ADDR_BYTE_ALIGN 4 +#define ERIAR_TYPE_SHIFT 16 +#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) +#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) +#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_OOB (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_MASK_SHIFT 12 +#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT) +#define 
ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) + EPHY_RXER_NUM = 0x7c, + OCPDR = 0xb0, /* OCP GPHY access */ +#define OCPDR_WRITE_CMD 0x80000000 +#define OCPDR_READ_CMD 0x00000000 +#define OCPDR_REG_MASK 0x7f +#define OCPDR_GPHY_REG_SHIFT 16 +#define OCPDR_DATA_MASK 0xffff + OCPAR = 0xb4, +#define OCPAR_FLAG 0x80000000 +#define OCPAR_GPHY_WRITE_CMD 0x8000f060 +#define OCPAR_GPHY_READ_CMD 0x0000f060 + GPHY_OCP = 0xb8, + RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ + MISC = 0xf0, /* 8168e only. */ +#define TXPLA_RST (1 << 29) +#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */ +#define PWM_EN (1 << 22) +#define RXDV_GATED_EN (1 << 19) +#define EARLY_TALLY_EN (1 << 16) +}; + +enum rtl8125_registers { + IntrMask_8125 = 0x38, + IntrStatus_8125 = 0x3c, + TxPoll_8125 = 0x90, + MAC0_BKP = 0x19e0, + EEE_TXIDLE_TIMER_8125 = 0x6048, +}; + +#define RX_VLAN_INNER_8125 BIT(22) +#define RX_VLAN_OUTER_8125 BIT(23) +#define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125) + +#define RX_FETCH_DFLT_8125 (8 << 27) + +enum rtl_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxOverflow = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* TXPoll register p.5 */ + HPQ = 0x80, /* Poll cmd on the high prio queue */ + NPQ = 0x40, /* Poll cmd on the low prio queue */ + FSWInt = 0x01, /* Forced software interrupt */ + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xc0, + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, +#define RX_CONFIG_ACCEPT_ERR_MASK 0x30 + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +#define RX_CONFIG_ACCEPT_OK_MASK 0x0f +#define RX_CONFIG_ACCEPT_MASK 0x3f + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + /* Config1 register p.24 */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register p. 25 */ + ClkReqEn = (1 << 7), /* Clock Request Enable */ + MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ + PCI_Clock_66MHz = 0x01, + PCI_Clock_33MHz = 0x00, + + /* Config3 register p.25 */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Rdy_to_L23 = (1 << 1), /* L23 Enable */ + Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* 8168 only. 
Reserved in the 8168b */ + + /* Config5 register p.27 */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + Spi_en = (1 << 3), + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + ASPM_en = (1 << 0), /* ASPM enable */ + + /* CPlusCmd p.31 */ + EnableBist = (1 << 15), // 8168 8101 + Mac_dbgo_oe = (1 << 14), // 8168 8101 + EnAnaPLL = (1 << 14), // 8169 + Normal_mode = (1 << 13), // unused + Force_half_dup = (1 << 12), // 8168 8101 + Force_rxflow_en = (1 << 11), // 8168 8101 + Force_txflow_en = (1 << 10), // 8168 8101 + Cxpl_dbg_sel = (1 << 9), // 8168 8101 + ASF = (1 << 8), // 8168 8101 + PktCntrDisable = (1 << 7), // 8168 8101 + Mac_dbgo_sel = 0x001c, // 8168 + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + PCIDAC = (1 << 4), + PCIMulRW = (1 << 3), +#define INTT_MASK GENMASK(1, 0) +#define CPCMD_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK) + + /* rtl8169_PHYstatus */ + TBI_Enable = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* ResetCounterCommand */ + CounterReset = 0x1, + + /* DumpCounterCommand */ + CounterDump = 0x8, + + /* magic enable v2 */ + MagicPacket_v2 = (1 << 16), /* Wake up when receives a Magic Packet */ +}; + +enum rtl_desc_bit { + /* First doubleword. */ + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ +}; + +/* Generic case. */ +enum rtl_tx_desc_bit { + /* First doubleword. */ + TD_LSO = (1 << 27), /* Large Send Offload */ +#define TD_MSS_MAX 0x07ffu /* MSS value */ + + /* Second doubleword. */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ +}; + +/* 8169, 8168b and 810x except 8102e. */ +enum rtl_tx_desc_bit_0 { + /* First doubleword. */ +#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */ + TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */ + TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */ + TD0_IP_CS = (1 << 18), /* Calculate IP checksum */ +}; + +/* 8102e, 8168c and beyond. */ +enum rtl_tx_desc_bit_1 { + /* First doubleword. */ + TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */ + TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */ +#define GTTCPHO_SHIFT 18 +#define GTTCPHO_MAX 0x7f + + /* Second doubleword. 
*/ +#define TCPHO_SHIFT 18 +#define TCPHO_MAX 0x3ff +#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ + TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */ + TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */ + TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */ + TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */ +}; + +enum rtl_rx_desc_bit { + /* Rx private */ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 0/2 */ + +#define RxProtoUDP (PID1) +#define RxProtoTCP (PID0) +#define RxProtoIP (PID1 | PID0) +#define RxProtoMask RxProtoIP + + IPFail = (1 << 16), /* IP checksum failed */ + UDPFail = (1 << 15), /* UDP/IP checksum failed */ + TCPFail = (1 << 14), /* TCP/IP checksum failed */ + RxVlanTag = (1 << 16), /* VLAN tag available */ +}; + +#define RTL_GSO_MAX_SIZE_V1 32000 +#define RTL_GSO_MAX_SEGS_V1 24 +#define RTL_GSO_MAX_SIZE_V2 64000 +#define RTL_GSO_MAX_SEGS_V2 64 + +struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; +}; + +struct rtl8169_tc_offsets { + bool inited; + __le64 tx_errors; + __le32 tx_multi_collision; + __le16 tx_aborted; + __le16 rx_missed; +}; + +enum rtl_flag { + RTL_FLAG_TASK_ENABLED = 0, + RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_MAX +}; + +struct rtl8169_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + +struct rtl8169_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; + struct net_device *dev; + struct phy_device *phydev; + struct napi_struct napi; + enum mac_version mac_version; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. 
*/ + u32 dirty_tx; + struct rtl8169_stats rx_stats; + struct rtl8169_stats tx_stats; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + u16 cp_cmd; + u32 irq_mask; + struct clk *clk; + + struct { + DECLARE_BITMAP(flags, RTL_FLAG_MAX); + struct work_struct work; + } wk; + + unsigned supports_gmii:1; + unsigned aspm_manageable:1; + dma_addr_t counters_phys_addr; + struct rtl8169_counters *counters; + struct rtl8169_tc_offsets tc_offset; + u32 saved_wolopts; + int eee_adv; + + const char *fw_name; + struct rtl_fw *rtl_fw; + + u32 ocp_base; + + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +typedef void (*rtl_generic_fct)(struct rtl8169_private *tp); + +MODULE_AUTHOR("Realtek and the Linux r8169 crew "); +MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver (EtherCAT enabled)"); +MODULE_SOFTDEP("pre: realtek"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE_8168D_1); +MODULE_FIRMWARE(FIRMWARE_8168D_2); +MODULE_FIRMWARE(FIRMWARE_8168E_1); +MODULE_FIRMWARE(FIRMWARE_8168E_2); +MODULE_FIRMWARE(FIRMWARE_8168E_3); +MODULE_FIRMWARE(FIRMWARE_8105E_1); +MODULE_FIRMWARE(FIRMWARE_8168F_1); +MODULE_FIRMWARE(FIRMWARE_8168F_2); +MODULE_FIRMWARE(FIRMWARE_8402_1); +MODULE_FIRMWARE(FIRMWARE_8411_1); +MODULE_FIRMWARE(FIRMWARE_8411_2); +MODULE_FIRMWARE(FIRMWARE_8106E_1); +MODULE_FIRMWARE(FIRMWARE_8106E_2); +MODULE_FIRMWARE(FIRMWARE_8168G_2); +MODULE_FIRMWARE(FIRMWARE_8168G_3); +MODULE_FIRMWARE(FIRMWARE_8168H_1); +MODULE_FIRMWARE(FIRMWARE_8168H_2); +MODULE_FIRMWARE(FIRMWARE_8168FP_3); +MODULE_FIRMWARE(FIRMWARE_8107E_1); +MODULE_FIRMWARE(FIRMWARE_8107E_2); +MODULE_FIRMWARE(FIRMWARE_8125A_3); +MODULE_FIRMWARE(FIRMWARE_8125B_2); + +static inline struct device *tp_to_dev(struct rtl8169_private *tp) +{ + return &tp->pci_dev->dev; +} + +static void rtl_lock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Lock); +} + +static void rtl_unlock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); +} + +static void rtl_pci_commit(struct rtl8169_private *tp) +{ + /* Read an arbitrary register to commit a preceding PCI write */ + RTL_R8(tp, ChipCmd); +} + +static bool rtl_is_8125(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_60; +} + +static bool rtl_is_8168evl_up(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_39 && + tp->mac_version <= RTL_GIGA_MAC_VER_52; +} + +static bool rtl_supports_eee(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_37 && + tp->mac_version != RTL_GIGA_MAC_VER_39; +} + +static void rtl_get_priv_stats(struct rtl8169_stats *stats, + u64 *pkts, u64 *bytes) +{ + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + *pkts = stats->packets; + *bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); +} + +static void rtl_inc_priv_stats(struct rtl8169_stats *stats, + u64 pkts, u64 bytes) +{ + u64_stats_update_begin(&stats->syncp); + stats->packets += pkts; + stats->bytes += bytes; + u64_stats_update_end(&stats->syncp); +} + +static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + 
mac[i] = RTL_R8(tp, reg + i); +} + +struct rtl_cond { + bool (*check)(struct rtl8169_private *); + const char *msg; +}; + +static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c, + unsigned long usecs, int n, bool high) +{ + int i; + + for (i = 0; i < n; i++) { + if (c->check(tp) == high) + return true; + fsleep(usecs); + } + + if (net_ratelimit()) + netdev_err(tp->dev, "%s == %d (loop: %d, delay: %lu).\n", + c->msg, !high, n, usecs); + return false; +} + +static bool rtl_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned long d, int n) +{ + return rtl_loop_wait(tp, c, d, n, true); +} + +static bool rtl_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned long d, int n) +{ + return rtl_loop_wait(tp, c, d, n, false); +} + +#define DECLARE_RTL_COND(name) \ +static bool name ## _check(struct rtl8169_private *); \ + \ +static const struct rtl_cond name = { \ + .check = name ## _check, \ + .msg = #name \ +}; \ + \ +static bool name ## _check(struct rtl8169_private *tp) + +static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg) +{ + if (reg & 0xffff0001) { + if (net_ratelimit()) + netdev_err(tp->dev, "Invalid ocp reg %x!\n", reg); + return true; + } + return false; +} + +DECLARE_RTL_COND(rtl_ocp_gphy_cond) +{ + return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG; +} + +static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(tp, reg)) + return; + + RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); + + rtl_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10); +} + +static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(tp, reg)) + return 0; + + RTL_W32(tp, GPHY_OCP, reg << 15); + + return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ? + (RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT; +} + +static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(tp, reg)) + return; + + RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data); +} + +static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(tp, reg)) + return 0; + + RTL_W32(tp, OCPDR, reg << 15); + + return RTL_R32(tp, OCPDR); +} + +static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask, + u16 set) +{ + u16 data = r8168_mac_ocp_read(tp, reg); + + r8168_mac_ocp_write(tp, reg, (data & ~mask) | set); +} + +static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE; + return; + } + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value); +} + +static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) +{ + if (reg == 0x1f) + return tp->ocp_base == OCP_STD_PHY_BASE ? 
0 : tp->ocp_base >> 4; + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2); +} + +static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value << 4; + return; + } + + r8168_mac_ocp_write(tp, tp->ocp_base + reg, value); +} + +static int mac_mcu_read(struct rtl8169_private *tp, int reg) +{ + return r8168_mac_ocp_read(tp, tp->ocp_base + reg); +} + +DECLARE_RTL_COND(rtl_phyar_cond) +{ + return RTL_R32(tp, PHYAR) & 0x80000000; +} + +static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); + + rtl_loop_wait_low(tp, &rtl_phyar_cond, 25, 20); + /* + * According to hardware specs a 20us delay is required after write + * complete indication, but before sending next command. + */ + udelay(20); +} + +static int r8169_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16); + + value = rtl_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ? + RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT; + + /* + * According to hardware specs a 20us delay is required after read + * complete indication, but before sending next command. + */ + udelay(20); + + return value; +} + +DECLARE_RTL_COND(rtl_ocpar_cond) +{ + return RTL_R32(tp, OCPAR) & OCPAR_FLAG; +} + +static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) +{ + RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); + RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); +} + +static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_1_mdio_access(tp, reg, + OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK)); +} + +static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) +{ + r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); + + mdelay(1); + RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? + RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT; +} + +#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 + +static void r8168dp_2_mdio_start(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_stop(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_2_mdio_start(tp); + + r8169_mdio_write(tp, reg, value); + + r8168dp_2_mdio_stop(tp); +} + +static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + /* Work around issue with chip reporting wrong PHY ID */ + if (reg == MII_PHYSID2) + return 0xc912; + + r8168dp_2_mdio_start(tp); + + value = r8169_mdio_read(tp, reg); + + r8168dp_2_mdio_stop(tp); + + return value; +} + +static void rtl_writephy(struct rtl8169_private *tp, int location, int val) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + r8168dp_1_mdio_write(tp, location, val); + break; + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + r8168dp_2_mdio_write(tp, location, val); + break; + case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_63: + r8168g_mdio_write(tp, location, val); + break; + default: + r8169_mdio_write(tp, location, val); + break; + } +} + +static int rtl_readphy(struct rtl8169_private *tp, int location) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + return r8168dp_1_mdio_read(tp, location); + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_2_mdio_read(tp, location); + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + return r8168g_mdio_read(tp, location); + default: + return r8169_mdio_read(tp, location); + } +} + +DECLARE_RTL_COND(rtl_ephyar_cond) +{ + return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; +} + +static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) +{ + RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + rtl_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100); + + udelay(10); +} + +static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + return rtl_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ? + RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0; +} + +static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type) +{ + /* based on RTL8168FP_OOBMAC_BASE in vendor driver */ + if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB) + *cmd |= 0xf70 << 18; +} + +DECLARE_RTL_COND(rtl_eriar_cond) +{ + return RTL_R32(tp, ERIAR) & ERIAR_FLAG; +} + +static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val, int type) +{ + u32 cmd = ERIAR_WRITE_CMD | type | mask | addr; + + BUG_ON((addr & 3) || (mask == 0)); + RTL_W32(tp, ERIDR, val); + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); + + rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); +} + +static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val) +{ + _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC); +} + +static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type) +{ + u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr; + + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); + + return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? + RTL_R32(tp, ERIDR) : ~0; +} + +static u32 rtl_eri_read(struct rtl8169_private *tp, int addr) +{ + return _rtl_eri_read(tp, addr, ERIAR_EXGMAC); +} + +static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m) +{ + u32 val = rtl_eri_read(tp, addr); + + rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p); +} + +static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p) +{ + rtl_w0w1_eri(tp, addr, p, 0); +} + +static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m) +{ + rtl_w0w1_eri(tp, addr, 0, m); +} + +static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg) +{ + RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff)); + return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? 
+ RTL_R32(tp, OCPDR) : ~0; +} + +static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u16 reg) +{ + return _rtl_eri_read(tp, reg, ERIAR_OOB); +} + +static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + RTL_W32(tp, OCPDR, data); + RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + rtl_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); +} + +static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + _rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, + data, ERIAR_OOB); +} + +static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd) +{ + rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd); + + r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001); +} + +#define OOB_CMD_RESET 0x00 +#define OOB_CMD_DRIVER_START 0x05 +#define OOB_CMD_DRIVER_STOP 0x06 + +static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) +{ + return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10; +} + +DECLARE_RTL_COND(rtl_dp_ocp_read_cond) +{ + u16 reg; + + reg = rtl8168_get_ocp_reg(tp); + + return r8168dp_ocp_read(tp, reg) & 0x00000800; +} + +DECLARE_RTL_COND(rtl_ep_ocp_read_cond) +{ + return r8168ep_ocp_read(tp, 0x124) & 0x00000001; +} + +DECLARE_RTL_COND(rtl_ocp_tx_cond) +{ + return RTL_R8(tp, IBISR0) & 0x20; +} + +static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) +{ + RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01); + rtl_loop_wait_high(tp, &rtl_ocp_tx_cond, 50000, 2000); + RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20); + RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01); +} + +static void rtl8168dp_driver_start(struct rtl8169_private *tp) +{ + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START); + rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10); +} + +static void rtl8168ep_driver_start(struct rtl8169_private *tp) +{ + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); + rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10); +} + +static void rtl8168_driver_start(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl8168dp_driver_start(tp); + break; + case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52: + rtl8168ep_driver_start(tp); + break; + default: + BUG(); + break; + } +} + +static void rtl8168dp_driver_stop(struct rtl8169_private *tp) +{ + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP); + rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10); +} + +static void rtl8168ep_driver_stop(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); + rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10); +} + +static void rtl8168_driver_stop(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl8168dp_driver_stop(tp); + break; + case RTL_GIGA_MAC_VER_49 ... 
RTL_GIGA_MAC_VER_52: + rtl8168ep_driver_stop(tp); + break; + default: + BUG(); + break; + } +} + +static bool r8168dp_check_dash(struct rtl8169_private *tp) +{ + u16 reg = rtl8168_get_ocp_reg(tp); + + return !!(r8168dp_ocp_read(tp, reg) & 0x00008000); +} + +static bool r8168ep_check_dash(struct rtl8169_private *tp) +{ + return r8168ep_ocp_read(tp, 0x128) & 0x00000001; +} + +static bool r8168_check_dash(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_check_dash(tp); + case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52: + return r8168ep_check_dash(tp); + default: + return false; + } +} + +static void rtl_reset_packet_filter(struct rtl8169_private *tp) +{ + rtl_eri_clear_bits(tp, 0xdc, BIT(0)); + rtl_eri_set_bits(tp, 0xdc, BIT(0)); +} + +DECLARE_RTL_COND(rtl_efusear_cond) +{ + return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG; +} + +u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + return rtl_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ? + RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0; +} + +static u32 rtl_get_events(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + return RTL_R32(tp, IntrStatus_8125); + else + return RTL_R16(tp, IntrStatus); +} + +static void rtl_ack_events(struct rtl8169_private *tp, u32 bits) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrStatus_8125, bits); + else + RTL_W16(tp, IntrStatus, bits); +} + +static void rtl_irq_disable(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, 0); + else + RTL_W16(tp, IntrMask, 0); +} + +static void rtl_irq_enable(struct rtl8169_private *tp) +{ + if (tp->ecdev) + return; + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, tp->irq_mask); + else + RTL_W16(tp, IntrMask, tp->irq_mask); +} + +static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) +{ + rtl_irq_disable(tp); + rtl_ack_events(tp, 0xffffffff); + rtl_pci_commit(tp); +} + +static void rtl_link_chg_patch(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + + if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else if (phydev->speed == SPEED_100) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); + } + rtl_reset_packet_filter(tp); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); + } + } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { + if (phydev->speed == SPEED_10) { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a); + } else { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); + } + } +} + +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + 
struct rtl8169_private *tp = netdev_priv(dev); + + wol->supported = WAKE_ANY; + wol->wolopts = tp->saved_wolopts; +} + +static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) +{ + static const struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_PHY, Config3, LinkUp }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake }, + { WAKE_MAGIC, Config3, MagicPacket } + }; + unsigned int i, tmp = ARRAY_SIZE(cfg); + u8 options; + + rtl_unlock_config_regs(tp); + + if (rtl_is_8168evl_up(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2); + else + rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2); + } else if (rtl_is_8125(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0)); + else + r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0); + } + + for (i = 0; i < tmp; i++) { + options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(tp, cfg[i].reg, options); + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + options = RTL_R8(tp, Config1) & ~PMEnable; + if (wolopts) + options |= PMEnable; + RTL_W8(tp, Config1, options); + break; + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: + options = RTL_R8(tp, Config2) & ~PME_SIGNAL; + if (wolopts) + options |= PME_SIGNAL; + RTL_W8(tp, Config2, options); + break; + default: + break; + } + + rtl_lock_config_regs(tp); + + device_set_wakeup_enable(tp_to_dev(tp), wolopts); + tp->dev->wol_enabled = wolopts ? 1 : 0; +} + +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (wol->wolopts & ~WAKE_ANY) + return -EINVAL; + + tp->saved_wolopts = wol->wolopts; + __rtl8169_set_wol(tp, tp->saved_wolopts); + + return 0; +} + +static void rtl8169_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl_fw *rtl_fw = tp->rtl_fw; + + strlcpy(info->driver, MODULENAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + if (rtl_fw) + strlcpy(info->fw_version, rtl_fw->version, + sizeof(info->fw_version)); +} + +static int rtl8169_get_regs_len(struct net_device *dev) +{ + return R8169_REGS_SIZE; +} + +static netdev_features_t rtl8169_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > TD_MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + + if (dev->mtu > ETH_DATA_LEN && + tp->mac_version > RTL_GIGA_MAC_VER_06) + features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO); + + return features; +} + +static void rtl_set_rx_config_features(struct rtl8169_private *tp, + netdev_features_t features) +{ + u32 rx_config = RTL_R32(tp, RxConfig); + + if (features & NETIF_F_RXALL) + rx_config |= RX_CONFIG_ACCEPT_ERR_MASK; + else + rx_config &= ~RX_CONFIG_ACCEPT_ERR_MASK; + + if (rtl_is_8125(tp)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rx_config |= RX_VLAN_8125; + else + rx_config &= ~RX_VLAN_8125; + } + + RTL_W32(tp, RxConfig, rx_config); +} + +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_set_rx_config_features(tp, features); + + if 
(features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + if (!rtl_is_8125(tp)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + } + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_pci_commit(tp); + + return 0; +} + +static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) +{ + return (skb_vlan_tag_present(skb)) ? + TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; +} + +static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); +} + +static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 __iomem *data = tp->mmio_addr; + u32 *dw = p; + int i; + + for (i = 0; i < R8169_REGS_SIZE; i += 4) + memcpy_fromio(dw++, data++, 4); +} + +static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", +}; + +static int rtl8169_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8169_gstrings); + default: + return -EOPNOTSUPP; + } +} + +DECLARE_RTL_COND(rtl_counters_cond) +{ + return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump); +} + +static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd) +{ + dma_addr_t paddr = tp->counters_phys_addr; + u32 cmd; + + RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32); + rtl_pci_commit(tp); + cmd = (u64)paddr & DMA_BIT_MASK(32); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | counter_cmd); + + rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); +} + +static void rtl8169_reset_counters(struct rtl8169_private *tp) +{ + /* + * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the + * tally counters. + */ + if (tp->mac_version >= RTL_GIGA_MAC_VER_19) + rtl8169_do_counters(tp, CounterReset); +} + +static void rtl8169_update_counters(struct rtl8169_private *tp) +{ + u8 val = RTL_R8(tp, ChipCmd); + + /* + * Some chips are unable to dump tally counters when the receiver + * is disabled. If 0xff chip may be in a PCI power-save state. + */ + if (val & CmdRxEnb && val != 0xff) + rtl8169_do_counters(tp, CounterDump); +} + +static void rtl8169_init_counter_offsets(struct rtl8169_private *tp) +{ + struct rtl8169_counters *counters = tp->counters; + + /* + * rtl8169_init_counter_offsets is called from rtl_open. On chip + * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only + * reset by a power cycle, while the counter values collected by the + * driver are reset at every driver unload/load cycle. + * + * To make sure the HW values returned by @get_stats64 match the SW + * values, we collect the initial values at first open(*) and use them + * as offsets to normalize the values returned by @get_stats64. + * + * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one + * for the reason stated in rtl8169_update_counters; CmdRxEnb is only + * set at open time by rtl_hw_start. 
+ */ + + if (tp->tc_offset.inited) + return; + + rtl8169_reset_counters(tp); + rtl8169_update_counters(tp); + + tp->tc_offset.tx_errors = counters->tx_errors; + tp->tc_offset.tx_multi_collision = counters->tx_multi_collision; + tp->tc_offset.tx_aborted = counters->tx_aborted; + tp->tc_offset.rx_missed = counters->rx_missed; + tp->tc_offset.inited = true; +} + +static void rtl8169_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_counters *counters; + + counters = tp->counters; + rtl8169_update_counters(tp); + + data[0] = le64_to_cpu(counters->tx_packets); + data[1] = le64_to_cpu(counters->rx_packets); + data[2] = le64_to_cpu(counters->tx_errors); + data[3] = le32_to_cpu(counters->rx_errors); + data[4] = le16_to_cpu(counters->rx_missed); + data[5] = le16_to_cpu(counters->align_errors); + data[6] = le32_to_cpu(counters->tx_one_collision); + data[7] = le32_to_cpu(counters->tx_multi_collision); + data[8] = le64_to_cpu(counters->rx_unicast); + data[9] = le64_to_cpu(counters->rx_broadcast); + data[10] = le32_to_cpu(counters->rx_multicast); + data[11] = le16_to_cpu(counters->tx_aborted); + data[12] = le16_to_cpu(counters->tx_underun); +} + +static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings)); + break; + } +} + +/* + * Interrupt coalescing + * + * > 1 - the availability of the IntrMitigate (0xe2) register through the + * > 8169, 8168 and 810x line of chipsets + * + * 8169, 8168, and 8136(810x) serial chipsets support it. + * + * > 2 - the Tx timer unit at gigabit speed + * + * The unit of the timer depends on both the speed and the setting of CPlusCmd + * (0xe0) bit 1 and bit 0. 
+ * + * For 8169 + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 320ns 2.56us 40.96us + * 0 1 2.56us 20.48us 327.7us + * 1 0 5.12us 40.96us 655.4us + * 1 1 10.24us 81.92us 1.31ms + * + * For the other + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 5us 2.56us 40.96us + * 0 1 40us 20.48us 327.7us + * 1 0 80us 40.96us 655.4us + * 1 1 160us 81.92us 1.31ms + */ + +/* rx/tx scale factors for all CPlusCmd[0:1] cases */ +struct rtl_coalesce_info { + u32 speed; + u32 scale_nsecs[4]; +}; + +/* produce array with base delay *1, *8, *8*2, *8*2*2 */ +#define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) } + +static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = { + { SPEED_1000, COALESCE_DELAY(320) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, + { 0 }, +}; + +static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = { + { SPEED_1000, COALESCE_DELAY(5000) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, + { 0 }, +}; +#undef COALESCE_DELAY + +/* get rx/tx scale vector corresponding to current speed */ +static const struct rtl_coalesce_info * +rtl_coalesce_info(struct rtl8169_private *tp) +{ + const struct rtl_coalesce_info *ci; + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + ci = rtl_coalesce_info_8169; + else + ci = rtl_coalesce_info_8168_8136; + + /* if speed is unknown assume highest one */ + if (tp->phydev->speed == SPEED_UNKNOWN) + return ci; + + for (; ci->speed; ci++) { + if (tp->phydev->speed == ci->speed) + return ci; + } + + return ERR_PTR(-ELNRNG); +} + +static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct rtl8169_private *tp = netdev_priv(dev); + const struct rtl_coalesce_info *ci; + u32 scale, c_us, c_fr; + u16 intrmit; + + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + + memset(ec, 0, sizeof(*ec)); + + /* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */ + ci = rtl_coalesce_info(tp); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + scale = ci->scale_nsecs[tp->cp_cmd & INTT_MASK]; + + intrmit = RTL_R16(tp, IntrMitigate); + + c_us = FIELD_GET(RTL_COALESCE_TX_USECS, intrmit); + ec->tx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000); + + c_fr = FIELD_GET(RTL_COALESCE_TX_FRAMES, intrmit); + /* ethtool_coalesce states usecs and max_frames must not both be 0 */ + ec->tx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1; + + c_us = FIELD_GET(RTL_COALESCE_RX_USECS, intrmit); + ec->rx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000); + + c_fr = FIELD_GET(RTL_COALESCE_RX_FRAMES, intrmit); + ec->rx_max_coalesced_frames = (c_us || c_fr) ? 
c_fr * 4 : 1; + + return 0; +} + +/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, usec) */ +static int rtl_coalesce_choose_scale(struct rtl8169_private *tp, u32 usec, + u16 *cp01) +{ + const struct rtl_coalesce_info *ci; + u16 i; + + ci = rtl_coalesce_info(tp); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + for (i = 0; i < 4; i++) { + if (usec <= ci->scale_nsecs[i] * RTL_COALESCE_T_MAX / 1000U) { + *cp01 = i; + return ci->scale_nsecs[i]; + } + } + + return -ERANGE; +} + +static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 tx_fr = ec->tx_max_coalesced_frames; + u32 rx_fr = ec->rx_max_coalesced_frames; + u32 coal_usec_max, units; + u16 w = 0, cp01 = 0; + int scale; + + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + + if (rx_fr > RTL_COALESCE_FRAME_MAX || tx_fr > RTL_COALESCE_FRAME_MAX) + return -ERANGE; + + coal_usec_max = max(ec->rx_coalesce_usecs, ec->tx_coalesce_usecs); + scale = rtl_coalesce_choose_scale(tp, coal_usec_max, &cp01); + if (scale < 0) + return scale; + + /* Accept max_frames=1 we returned in rtl_get_coalesce. Accept it + * not only when usecs=0 because of e.g. the following scenario: + * + * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX) + * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1 + * - then user does `ethtool -C eth0 rx-usecs 100` + * + * Since ethtool sends to kernel whole ethtool_coalesce settings, + * if we want to ignore rx_frames then it has to be set to 0. + */ + if (rx_fr == 1) + rx_fr = 0; + if (tx_fr == 1) + tx_fr = 0; + + /* HW requires time limit to be set if frame limit is set */ + if ((tx_fr && !ec->tx_coalesce_usecs) || + (rx_fr && !ec->rx_coalesce_usecs)) + return -EINVAL; + + w |= FIELD_PREP(RTL_COALESCE_TX_FRAMES, DIV_ROUND_UP(tx_fr, 4)); + w |= FIELD_PREP(RTL_COALESCE_RX_FRAMES, DIV_ROUND_UP(rx_fr, 4)); + + units = DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000U, scale); + w |= FIELD_PREP(RTL_COALESCE_TX_USECS, units); + units = DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000U, scale); + w |= FIELD_PREP(RTL_COALESCE_RX_USECS, units); + + RTL_W16(tp, IntrMitigate, w); + + /* Meaning of PktCntrDisable bit changed from RTL8168e-vl */ + if (rtl_is_8168evl_up(tp)) { + if (!rx_fr && !tx_fr) + /* disable packet counter */ + tp->cp_cmd |= PktCntrDisable; + else + tp->cp_cmd &= ~PktCntrDisable; + } + + tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_pci_commit(tp); + + return 0; +} + +static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + + return phy_ethtool_get_eee(tp->phydev, data); +} + +static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + + ret = phy_ethtool_set_eee(tp->phydev, data); + + if (!ret) + tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN, + MDIO_AN_EEE_ADV); + return ret; +} + +static const struct ethtool_ops rtl8169_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .get_drvinfo = rtl8169_get_drvinfo, + .get_regs_len = rtl8169_get_regs_len, + .get_link = ethtool_op_get_link, + .get_coalesce = rtl_get_coalesce, + .set_coalesce = rtl_set_coalesce, + .get_regs = rtl8169_get_regs, + .get_wol = rtl8169_get_wol, + .set_wol = rtl8169_set_wol, + .get_strings = rtl8169_get_strings, + 
.get_sset_count = rtl8169_get_sset_count, + .get_ethtool_stats = rtl8169_get_ethtool_stats, + .get_ts_info = ethtool_op_get_ts_info, + .nway_reset = phy_ethtool_nway_reset, + .get_eee = rtl8169_get_eee, + .set_eee = rtl8169_set_eee, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +static void rtl_enable_eee(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + int adv; + + /* respect EEE advertisement the user may have set */ + if (tp->eee_adv >= 0) + adv = tp->eee_adv; + else + adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); + + if (adv >= 0) + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv); +} + +static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) +{ + /* + * The driver currently handles the 8168Bf and the 8168Be identically + * but they can be identified more specifically through the test below + * if needed: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * + * Same thing for the 8101Eb and the 8101Ec: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + */ + static const struct rtl_mac_info { + u16 mask; + u16 val; + enum mac_version ver; + } mac_info[] = { + /* 8125B family. */ + { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 }, + + /* 8125A family. */ + { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 }, + { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 }, + + /* RTL8117 */ + { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 }, + + /* 8168EP family. */ + { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 }, + { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 }, + { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 }, + + /* 8168H family. */ + { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 }, + { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 }, + + /* 8168G family. */ + { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 }, + { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 }, + { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 }, + { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 }, + + /* 8168F family. */ + { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 }, + { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 }, + { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 }, + + /* 8168E family. */ + { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 }, + { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 }, + { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 }, + + /* 8168D family. */ + { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 }, + { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 }, + + /* 8168DP family. */ + { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 }, + { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 }, + { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 }, + + /* 8168C family. */ + { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 }, + { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 }, + { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 }, + { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 }, + { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 }, + { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 }, + { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 }, + + /* 8168B family. */ + { 0x7cf, 0x380, RTL_GIGA_MAC_VER_12 }, + { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 }, + { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, + + /* 8101 family. 
*/ + { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 }, + { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 }, + { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 }, + { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 }, + { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 }, + { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 }, + { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 }, + { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 }, + { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 }, + /* FIXME: where did these entries come from ? -- FR */ + { 0xfc8, 0x388, RTL_GIGA_MAC_VER_13 }, + { 0xfc8, 0x308, RTL_GIGA_MAC_VER_13 }, + + /* 8110 family. */ + { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 }, + { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 }, + { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 }, + { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 }, + { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 }, + + /* Catch-all */ + { 0x000, 0x000, RTL_GIGA_MAC_NONE } + }; + const struct rtl_mac_info *p = mac_info; + enum mac_version ver; + + while ((xid & p->mask) != p->val) + p++; + ver = p->ver; + + if (ver != RTL_GIGA_MAC_NONE && !gmii) { + if (ver == RTL_GIGA_MAC_VER_42) + ver = RTL_GIGA_MAC_VER_43; + else if (ver == RTL_GIGA_MAC_VER_45) + ver = RTL_GIGA_MAC_VER_47; + else if (ver == RTL_GIGA_MAC_VER_46) + ver = RTL_GIGA_MAC_VER_48; + } + + return ver; +} + +static void rtl_release_firmware(struct rtl8169_private *tp) +{ + if (tp->rtl_fw) { + rtl_fw_release_firmware(tp->rtl_fw); + kfree(tp->rtl_fw); + tp->rtl_fw = NULL; + } +} + +void r8169_apply_firmware(struct rtl8169_private *tp) +{ + int val; + + /* TODO: release firmware if rtl_fw_write_firmware signals failure. */ + if (tp->rtl_fw) { + rtl_fw_write_firmware(tp, tp->rtl_fw); + /* At least one firmware doesn't reset tp->ocp_base. 
*/ + tp->ocp_base = OCP_STD_PHY_BASE; + + /* PHY soft reset may still be in progress */ + phy_read_poll_timeout(tp->phydev, MII_BMCR, val, + !(val & BMCR_RESET), + 50000, 600000, true); + } +} + +static void rtl8168_config_eee_mac(struct rtl8169_private *tp) +{ + /* Adjust EEE LED frequency */ + if (tp->mac_version != RTL_GIGA_MAC_VER_38) + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + + rtl_eri_set_bits(tp, 0x1b0, 0x0003); +} + +static void rtl8125a_config_eee_mac(struct rtl8169_private *tp) +{ + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); + r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1)); +} + +static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp) +{ + RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20); +} + +static void rtl8125b_config_eee_mac(struct rtl8169_private *tp) +{ + rtl8125_set_eee_txidle_timer(tp); + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); +} + +static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr) +{ + const u16 w[] = { + addr[0] | (addr[1] << 8), + addr[2] | (addr[3] << 8), + addr[4] | (addr[5] << 8) + }; + + rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, w[0] | (w[1] << 16)); + rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, w[2]); + rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, w[0] << 16); + rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16)); +} + +u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp) +{ + u16 data1, data2, ioffset; + + r8168_mac_ocp_write(tp, 0xdd02, 0x807d); + data1 = r8168_mac_ocp_read(tp, 0xdd02); + data2 = r8168_mac_ocp_read(tp, 0xdd00); + + ioffset = (data2 >> 1) & 0x7ff8; + ioffset |= data2 & 0x0007; + if (data1 & BIT(7)) + ioffset |= BIT(15); + + return ioffset; +} + +static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) +{ + set_bit(flag, tp->wk.flags); + if (!tp->ecdev) + schedule_work(&tp->wk.work); +} + +static void rtl8169_init_phy(struct rtl8169_private *tp) +{ + r8169_hw_phy_config(tp, tp->phydev, tp->mac_version); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); + /* set undocumented MAC Reg C+CR Offset 0x82h */ + RTL_W8(tp, 0x82, 0x01); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_05 && + tp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE && + tp->pci_dev->subsystem_device == 0xe000) + phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b); + + /* We may have called phy_speed_down before */ + phy_speed_up(tp->phydev); + + if (rtl_supports_eee(tp)) + rtl_enable_eee(tp); + + genphy_soft_reset(tp->phydev); +} + +static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) +{ + rtl_unlock_config_regs(tp); + + RTL_W32(tp, MAC4, addr[4] | addr[5] << 8); + rtl_pci_commit(tp); + + RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); + rtl_pci_commit(tp); + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + rtl_rar_exgmac_set(tp, addr); + + rtl_lock_config_regs(tp); +} + +static int rtl_set_mac_address(struct net_device *dev, void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + ret = eth_mac_addr(dev, p); + if (ret) + return ret; + + rtl_rar_set(tp, dev->dev_addr); + + return 0; +} + +static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: 
+ case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_63: + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | + AcceptBroadcast | AcceptMulticast | AcceptMyPhys); + break; + default: + break; + } +} + +static void rtl_pll_power_down(struct rtl8169_private *tp) +{ + if (r8168_check_dash(tp)) + return; + + if (tp->mac_version == RTL_GIGA_MAC_VER_32 || + tp->mac_version == RTL_GIGA_MAC_VER_33) + rtl_ephy_write(tp, 0x19, 0xff64); + + if (device_may_wakeup(tp_to_dev(tp))) { + phy_speed_down(tp->phydev, false); + rtl_wol_suspend_quirk(tp); + return; + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); + break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_49: + rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); + break; + default: + break; + } +} + +static void rtl_pll_power_up(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_43: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80); + break; + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); + break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_49: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); + rtl_eri_set_bits(tp, 0x1a8, 0xfc000000); + break; + default: + break; + } + + phy_resume(tp->phydev); +} + +static void rtl_init_rxcfg(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: + RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; + case RTL_GIGA_MAC_VER_60 ... 
RTL_GIGA_MAC_VER_63: + RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST); + break; + default: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); + break; + } +} + +static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) +{ + tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; +} + +static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); +} + +static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); +} + +static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); +} + +static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); +} + +static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x24); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); +} + +static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x3f); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); +} + +static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); +} + +static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); +} + +static void rtl_jumbo_config(struct rtl8169_private *tp) +{ + bool jumbo = tp->dev->mtu > ETH_DATA_LEN; + int readrq = 4096; + + rtl_unlock_config_regs(tp); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + if (jumbo) { + readrq = 512; + r8168b_1_hw_jumbo_enable(tp); + } else { + r8168b_1_hw_jumbo_disable(tp); + } + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26: + if (jumbo) { + readrq = 512; + r8168c_hw_jumbo_enable(tp); + } else { + r8168c_hw_jumbo_disable(tp); + } + break; + case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: + if (jumbo) + r8168dp_hw_jumbo_enable(tp); + else + r8168dp_hw_jumbo_disable(tp); + break; + case RTL_GIGA_MAC_VER_31 ... 
RTL_GIGA_MAC_VER_33: + if (jumbo) { + pcie_set_readrq(tp->pci_dev, 512); + r8168e_hw_jumbo_enable(tp); + } else { + r8168e_hw_jumbo_disable(tp); + } + break; + default: + break; + } + rtl_lock_config_regs(tp); + + if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii) + pcie_set_readrq(tp->pci_dev, readrq); + + /* Chip doesn't support pause in jumbo mode */ + linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, + tp->phydev->advertising, !jumbo); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + tp->phydev->advertising, !jumbo); + phy_start_aneg(tp->phydev); +} + +DECLARE_RTL_COND(rtl_chipcmd_cond) +{ + return RTL_R8(tp, ChipCmd) & CmdReset; +} + +static void rtl_hw_reset(struct rtl8169_private *tp) +{ + RTL_W8(tp, ChipCmd, CmdReset); + + rtl_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); +} + +static void rtl_request_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw; + + /* firmware loaded already or no firmware available */ + if (tp->rtl_fw || !tp->fw_name) + return; + + rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); + if (!rtl_fw) + return; + + rtl_fw->phy_write = rtl_writephy; + rtl_fw->phy_read = rtl_readphy; + rtl_fw->mac_mcu_write = mac_mcu_write; + rtl_fw->mac_mcu_read = mac_mcu_read; + rtl_fw->fw_name = tp->fw_name; + rtl_fw->dev = tp_to_dev(tp); + + if (rtl_fw_request_firmware(rtl_fw)) + kfree(rtl_fw); + else + tp->rtl_fw = rtl_fw; +} + +static void rtl_rx_close(struct rtl8169_private *tp) +{ + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK); +} + +DECLARE_RTL_COND(rtl_npq_cond) +{ + return RTL_R8(tp, TxPoll) & NPQ; +} + +DECLARE_RTL_COND(rtl_txcfg_empty_cond) +{ + return RTL_R32(tp, TxConfig) & TXCFG_EMPTY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond) +{ + return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond_2) +{ + /* IntrMitigate has new functionality on RTL8125 */ + return (RTL_R16(tp, IntrMitigate) & 0x0103) == 0x0103; +} + +static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52: + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61: + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + break; + case RTL_GIGA_MAC_VER_63: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42); + break; + default: + break; + } +} + +static void rtl_enable_rxdvgate(struct rtl8169_private *tp) +{ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN); + fsleep(2000); + rtl_wait_txrx_fifo_empty(tp); +} + +static void rtl_set_tx_config_registers(struct rtl8169_private *tp) +{ + u32 val = TX_DMA_BURST << TxDMAShift | + InterFrameGap << TxInterFrameGapShift; + + if (rtl_is_8168evl_up(tp)) + val |= TXCFG_AUTO_FIFO; + + RTL_W32(tp, TxConfig, val); +} + +static void rtl_set_rx_max_size(struct rtl8169_private *tp) +{ + /* Low hurts. Let's disable the filtering. */ + RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1); +} + +static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) +{ + /* + * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh + * register to be written before TxDescAddrLow to work. + * Switching from MMIO to I/O access fixes the issue as well. 
+ */ + RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); + RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); + RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); +} + +static void rtl8169_set_magic_reg(struct rtl8169_private *tp) +{ + u32 val; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + val = 0x000fff00; + else if (tp->mac_version == RTL_GIGA_MAC_VER_06) + val = 0x00ffff00; + else + return; + + if (RTL_R8(tp, Config2) & PCI_Clock_66MHz) + val |= 0xff; + + RTL_W32(tp, 0x7c, val); +} + +static void rtl_set_rx_mode(struct net_device *dev) +{ + u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast; + /* Multicast hash filter */ + u32 mc_filter[2] = { 0xffffffff, 0xffffffff }; + struct rtl8169_private *tp = netdev_priv(dev); + u32 tmp; + + if (dev->flags & IFF_PROMISC) { + rx_mode |= AcceptAllPhys; + } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT || + dev->flags & IFF_ALLMULTI || + tp->mac_version == RTL_GIGA_MAC_VER_35) { + /* accept all multicasts */ + } else if (netdev_mc_empty(dev)) { + rx_mode &= ~AcceptMulticast; + } else { + struct netdev_hw_addr *ha; + + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + u32 bit_nr = eth_hw_addr_crc(ha) >> 26; + mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31); + } + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + tmp = mc_filter[0]; + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(tmp); + } + } + + RTL_W32(tp, MAR0 + 4, mc_filter[1]); + RTL_W32(tp, MAR0 + 0, mc_filter[0]); + + tmp = RTL_R32(tp, RxConfig); + RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_OK_MASK) | rx_mode); +} + +DECLARE_RTL_COND(rtl_csiar_cond) +{ + return RTL_R32(tp, CSIAR) & CSIAR_FLAG; +} + +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIDR, value); + RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE | func << 16); + + rtl_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 | + CSIAR_BYTE_ENABLE); + + return rtl_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? + RTL_R32(tp, CSIDR) : ~0; +} + +static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) +{ + struct pci_dev *pdev = tp->pci_dev; + u32 csi; + + /* According to Realtek the value at config space address 0x070f + * controls the L0s/L1 entrance latency. We try standard ECAM access + * first and if it fails fall back to CSI. 
+ */ + if (pdev->cfg_size > 0x070f && + pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL) + return; + + netdev_notice_once(tp->dev, + "No native access to PCI extended config space, falling back to CSI\n"); + csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; + rtl_csi_write(tp, 0x070c, csi | val << 24); +} + +static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) +{ + rtl_csi_access_enable(tp, 0x27); +} + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +static void __rtl_ephy_init(struct rtl8169_private *tp, + const struct ephy_info *e, int len) +{ + u16 w; + + while (len-- > 0) { + w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits; + rtl_ephy_write(tp, e->offset, w); + e++; + } +} + +#define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a)) + +static void rtl_disable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_enable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp) +{ + /* work around an issue when PCI reset occurs during L2/L3 state */ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23); +} + +static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) +{ + /* Don't enable ASPM in the chip if OS can't control ASPM */ + if (enable && tp->aspm_manageable) { + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); + } else { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + } + + udelay(10); +} + +static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat, + u16 tx_stat, u16 rx_dyn, u16 tx_dyn) +{ + /* Usage of dynamic vs. static FIFO is controlled by bit + * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known. + */ + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn); +} + +static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp, + u8 low, u8 high) +{ + /* FIFO thresholds for pause flow control */ + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high); +} + +static void rtl_hw_start_8168b(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168cp[] = { + { 0x01, 0, 0x0001 }, + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0042 }, + { 0x06, 0x0080, 0x0000 }, + { 0x07, 0, 0x2000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168cp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + /* Magic. 
*/ + RTL_W8(tp, DBG_REG, 0x20); +} + +static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_1[] = { + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0002 }, + { 0x06, 0x0080, 0x0000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); + + rtl_ephy_init(tp, e_info_8168c_1); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_2[] = { + { 0x01, 0, 0x0001 }, + { 0x03, 0x0400, 0x0020 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168c_2); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8168c_2(tp); +} + +static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168d(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168d_4[] = { + { 0x0b, 0x0000, 0x0048 }, + { 0x19, 0x0020, 0x0050 }, + { 0x0c, 0x0100, 0x0020 }, + { 0x10, 0x0004, 0x0000 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168d_4); + + rtl_enable_clock_request(tp); +} + +static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_1[] = { + { 0x00, 0x0200, 0x0100 }, + { 0x00, 0x0000, 0x0004 }, + { 0x06, 0x0002, 0x0001 }, + { 0x06, 0x0000, 0x0030 }, + { 0x07, 0x0000, 0x2000 }, + { 0x00, 0x0000, 0x0020 }, + { 0x03, 0x5800, 0x2000 }, + { 0x03, 0x0000, 0x0001 }, + { 0x01, 0x0800, 0x1000 }, + { 0x07, 0x0000, 0x4000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x19, 0xffff, 0xfe6c }, + { 0x0a, 0x0000, 0x0040 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_1); + + rtl_disable_clock_request(tp); + + /* Reset tx FIFO pointer */ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST); + + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_2[] = { + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0004 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_2); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_eri_set_bits(tp, 0x0d4, 0x1f00); + rtl_eri_set_bits(tp, 0x1d0, BIT(1)); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168f(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + 
rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_set_bits(tp, 0x1d0, BIT(4) | BIT(1)); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl8168_config_eee_mac(tp); +} + +static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x08, 0x0001, 0x0002 }, + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_hw_start_8168f(tp); + + rtl_ephy_init(tp, e_info_8168f_1); + + rtl_eri_set_bits(tp, 0x0d4, 0x1f00); +} + +static void rtl_hw_start_8411(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_hw_start_8168f(tp); + rtl_pcie_state_l2l3_disable(tp); + + rtl_ephy_init(tp, e_info_8168f_1); + + rtl_eri_set_bits(tp, 0x0d4, 0x0c00); +} + +static void rtl_hw_start_8168g(struct rtl8169_private *tp) +{ + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_eri_set_bits(tp, 0x0d4, 0x1f80); + + rtl8168_config_eee_mac(tp); + + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_1[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8000, 0x0000 } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_1); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_2[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x19, 0xffff, 0x7c00 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x0d, 0xffff, 0x1666 }, + { 0x00, 0xffff, 0x10a3 }, + { 0x06, 0xffff, 0xf050 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x4000, 0x0000 }, + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_2); +} + +static void rtl_hw_start_8411_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8411_2[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x37d0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x00, 0x0000, 0x0080 }, + { 0x06, 0x0000, 0x0010 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x0000, 0x4000 }, + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8411_2); + + /* The following Realtek-provided 
magic fixes an issue with the RX unit + * getting confused after the PHY having been powered-down. + */ + r8168_mac_ocp_write(tp, 0xFC28, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2A, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0000); + r8168_mac_ocp_write(tp, 0xFC30, 0x0000); + r8168_mac_ocp_write(tp, 0xFC32, 0x0000); + r8168_mac_ocp_write(tp, 0xFC34, 0x0000); + r8168_mac_ocp_write(tp, 0xFC36, 0x0000); + mdelay(3); + r8168_mac_ocp_write(tp, 0xFC26, 0x0000); + + r8168_mac_ocp_write(tp, 0xF800, 0xE008); + r8168_mac_ocp_write(tp, 0xF802, 0xE00A); + r8168_mac_ocp_write(tp, 0xF804, 0xE00C); + r8168_mac_ocp_write(tp, 0xF806, 0xE00E); + r8168_mac_ocp_write(tp, 0xF808, 0xE027); + r8168_mac_ocp_write(tp, 0xF80A, 0xE04F); + r8168_mac_ocp_write(tp, 0xF80C, 0xE05E); + r8168_mac_ocp_write(tp, 0xF80E, 0xE065); + r8168_mac_ocp_write(tp, 0xF810, 0xC602); + r8168_mac_ocp_write(tp, 0xF812, 0xBE00); + r8168_mac_ocp_write(tp, 0xF814, 0x0000); + r8168_mac_ocp_write(tp, 0xF816, 0xC502); + r8168_mac_ocp_write(tp, 0xF818, 0xBD00); + r8168_mac_ocp_write(tp, 0xF81A, 0x074C); + r8168_mac_ocp_write(tp, 0xF81C, 0xC302); + r8168_mac_ocp_write(tp, 0xF81E, 0xBB00); + r8168_mac_ocp_write(tp, 0xF820, 0x080A); + r8168_mac_ocp_write(tp, 0xF822, 0x6420); + r8168_mac_ocp_write(tp, 0xF824, 0x48C2); + r8168_mac_ocp_write(tp, 0xF826, 0x8C20); + r8168_mac_ocp_write(tp, 0xF828, 0xC516); + r8168_mac_ocp_write(tp, 0xF82A, 0x64A4); + r8168_mac_ocp_write(tp, 0xF82C, 0x49C0); + r8168_mac_ocp_write(tp, 0xF82E, 0xF009); + r8168_mac_ocp_write(tp, 0xF830, 0x74A2); + r8168_mac_ocp_write(tp, 0xF832, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF834, 0x74A0); + r8168_mac_ocp_write(tp, 0xF836, 0xC50E); + r8168_mac_ocp_write(tp, 0xF838, 0x9CA2); + r8168_mac_ocp_write(tp, 0xF83A, 0x1C11); + r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF83E, 0xE006); + r8168_mac_ocp_write(tp, 0xF840, 0x74F8); + r8168_mac_ocp_write(tp, 0xF842, 0x48C4); + r8168_mac_ocp_write(tp, 0xF844, 0x8CF8); + r8168_mac_ocp_write(tp, 0xF846, 0xC404); + r8168_mac_ocp_write(tp, 0xF848, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84A, 0xC403); + r8168_mac_ocp_write(tp, 0xF84C, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2); + r8168_mac_ocp_write(tp, 0xF850, 0x0C0A); + r8168_mac_ocp_write(tp, 0xF852, 0xE434); + r8168_mac_ocp_write(tp, 0xF854, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF856, 0x49D9); + r8168_mac_ocp_write(tp, 0xF858, 0xF01F); + r8168_mac_ocp_write(tp, 0xF85A, 0xC526); + r8168_mac_ocp_write(tp, 0xF85C, 0x64A5); + r8168_mac_ocp_write(tp, 0xF85E, 0x1400); + r8168_mac_ocp_write(tp, 0xF860, 0xF007); + r8168_mac_ocp_write(tp, 0xF862, 0x0C01); + r8168_mac_ocp_write(tp, 0xF864, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF866, 0x1C15); + r8168_mac_ocp_write(tp, 0xF868, 0xC51B); + r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF86C, 0xE013); + r8168_mac_ocp_write(tp, 0xF86E, 0xC519); + r8168_mac_ocp_write(tp, 0xF870, 0x74A0); + r8168_mac_ocp_write(tp, 0xF872, 0x48C4); + r8168_mac_ocp_write(tp, 0xF874, 0x8CA0); + r8168_mac_ocp_write(tp, 0xF876, 0xC516); + r8168_mac_ocp_write(tp, 0xF878, 0x74A4); + r8168_mac_ocp_write(tp, 0xF87A, 0x48C8); + r8168_mac_ocp_write(tp, 0xF87C, 0x48CA); + r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4); + r8168_mac_ocp_write(tp, 0xF880, 0xC512); + r8168_mac_ocp_write(tp, 0xF882, 0x1B00); + r8168_mac_ocp_write(tp, 0xF884, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF886, 0x1B1C); + r8168_mac_ocp_write(tp, 0xF888, 0x483F); + r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2); + r8168_mac_ocp_write(tp, 0xF88C, 
0x1B04); + r8168_mac_ocp_write(tp, 0xF88E, 0xC508); + r8168_mac_ocp_write(tp, 0xF890, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF892, 0xC505); + r8168_mac_ocp_write(tp, 0xF894, 0xBD00); + r8168_mac_ocp_write(tp, 0xF896, 0xC502); + r8168_mac_ocp_write(tp, 0xF898, 0xBD00); + r8168_mac_ocp_write(tp, 0xF89A, 0x0300); + r8168_mac_ocp_write(tp, 0xF89C, 0x051E); + r8168_mac_ocp_write(tp, 0xF89E, 0xE434); + r8168_mac_ocp_write(tp, 0xF8A0, 0xE018); + r8168_mac_ocp_write(tp, 0xF8A2, 0xE092); + r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20); + r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F); + r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4); + r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3); + r8168_mac_ocp_write(tp, 0xF8AE, 0xF007); + r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0); + r8168_mac_ocp_write(tp, 0xF8B2, 0xF103); + r8168_mac_ocp_write(tp, 0xF8B4, 0xC607); + r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8B8, 0xC606); + r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8BC, 0xC602); + r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C); + r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28); + r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C); + r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00); + r8168_mac_ocp_write(tp, 0xF8C8, 0xC707); + r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00); + r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2); + r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1); + r8168_mac_ocp_write(tp, 0xF8D0, 0xC502); + r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA); + r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0); + r8168_mac_ocp_write(tp, 0xF8D8, 0xC502); + r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8DC, 0x0132); + + r8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + r8168_mac_ocp_write(tp, 0xFC2A, 0x0743); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0801); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9); + r8168_mac_ocp_write(tp, 0xFC30, 0x02FD); + r8168_mac_ocp_write(tp, 0xFC32, 0x0C25); + r8168_mac_ocp_write(tp, 0xFC34, 0x00A9); + r8168_mac_ocp_write(tp, 0xFC36, 0x012D); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168h_1[] = { + { 0x1e, 0x0800, 0x0001 }, + { 0x1d, 0x0000, 0x0800 }, + { 0x05, 0xffff, 0x2089 }, + { 0x06, 0xffff, 0x5881 }, + { 0x04, 0xffff, 0x854a }, + { 0x01, 0xffff, 0x068b } + }; + int rg_saw_cnt; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168h_1); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, 0x1f00); + rtl_eri_set_bits(tp, 0xdc, 0x001c); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = 16000000/rg_saw_cnt; + sw_cnt_1ms_ini &= 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, 
sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008); + r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, 0x1f80); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_1[] = { + { 0x00, 0xffff, 0x10ab }, + { 0x06, 0xffff, 0xf030 }, + { 0x08, 0xffff, 0x2006 }, + { 0x0d, 0xffff, 0x1666 }, + { 0x0c, 0x3ff0, 0x0000 } + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_1); + + rtl_hw_start_8168ep(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_2[] = { + { 0x00, 0xffff, 0x10a3 }, + { 0x19, 0xffff, 0xfc00 }, + { 0x1e, 0xffff, 0x20ea } + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_2); + + rtl_hw_start_8168ep(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_3[] = { + { 0x00, 0x0000, 0x0080 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_3); + + rtl_hw_start_8168ep(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8117(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8117[] = { + { 0x19, 0x0040, 0x1100 }, + { 0x59, 0x0040, 0x1100 }, + }; + int rg_saw_cnt; + + rtl8168ep_stop_cmac(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8117); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, 0x1f90); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & 
~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_write(tp, 0xea80, 0x0003); + r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + /* firmware is for MAC only */ + r8169_apply_firmware(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8102e_1[] = { + { 0x01, 0, 0x6e65 }, + { 0x02, 0, 0x091f }, + { 0x03, 0, 0xc2f9 }, + { 0x06, 0, 0xafb5 }, + { 0x07, 0, 0x0e00 }, + { 0x19, 0, 0xec80 }, + { 0x01, 0, 0x2e65 }, + { 0x01, 0, 0x6e65 } + }; + u8 cfg1; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, FIX_NAK_1); + + RTL_W8(tp, Config1, + LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + cfg1 = RTL_R8(tp, Config1); + if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) + RTL_W8(tp, Config1, cfg1 & ~LEDS0); + + rtl_ephy_init(tp, e_info_8102e_1); +} + +static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8102e_2(tp); + + rtl_ephy_write(tp, 0x03, 0xc2f9); +} + +static void rtl_hw_start_8401(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8401[] = { + { 0x01, 0xffff, 0x6fe5 }, + { 0x03, 0xffff, 0x0599 }, + { 0x06, 0xffff, 0xaf25 }, + { 0x07, 0xffff, 0x8e68 }, + }; + + rtl_ephy_init(tp, e_info_8401); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8105e_1[] = { + { 0x07, 0, 0x4000 }, + { 0x19, 0, 0x0200 }, + { 0x19, 0, 0x0020 }, + { 0x1e, 0, 0x2000 }, + { 0x03, 0, 0x0001 }, + { 0x19, 0, 0x0100 }, + { 0x19, 0, 0x0004 }, + { 0x0a, 0, 0x0020 } + }; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + /* Disable Early Tally Counter */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + + rtl_ephy_init(tp, e_info_8105e_1); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) +{ + rtl_hw_start_8105e_1(tp); + rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000); +} + +static void rtl_hw_start_8402(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8402[] = { + { 0x19, 0xffff, 0xff64 }, + { 0x1e, 0, 0x4000 } + }; + + 
rtl_set_def_aspm_entry_latency(tp); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl_ephy_init(tp, e_info_8402); + + rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06); + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_w0w1_eri(tp, 0x0d4, 0x0e00, 0xff00); + + /* disable EEE */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8106(struct rtl8169_private *tp) +{ + rtl_hw_aspm_clkreq_enable(tp, false); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); + + /* disable EEE */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + + rtl_pcie_state_l2l3_disable(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond) +{ + return r8168_mac_ocp_read(tp, 0xe00e) & BIT(13); +} + +static void rtl_hw_start_8125_common(struct rtl8169_private *tp) +{ + rtl_pcie_state_l2l3_disable(tp); + + RTL_W16(tp, 0x382, 0x221b); + RTL_W8(tp, 0x4500, 0); + RTL_W16(tp, 0x4800, 0); + + /* disable UPS */ + r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10); + + r8168_mac_ocp_write(tp, 0xc140, 0xffff); + r8168_mac_ocp_write(tp, 0xc142, 0xffff); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x03a9); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + /* disable new tx descriptor format */ + r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200); + else + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000); + else + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020); + + r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c); + r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033); + r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040); + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030); + r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000); + r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); + r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403); + r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068); + r8168_mac_ocp_modify(tp, 0xc0ac, 0x0080, 0x1f00); + r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f); + + r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001); + udelay(1); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0001, 0x0000); + RTL_W16(tp, 0x1880, RTL_R16(tp, 0x1880) & ~0x0030); + + r8168_mac_ocp_write(tp, 0xe098, 0xc302); + + rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + rtl8125b_config_eee_mac(tp); + else + rtl8125a_config_eee_mac(tp); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + udelay(10); +} + +static void rtl_hw_start_8125a_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125a_1[] = { + { 0x01, 0xffff, 0xa812 }, + { 0x09, 0xffff, 0x520c }, + { 0x04, 0xffff, 0xd000 }, + { 0x0d, 0xffff, 
0xf702 }, + { 0x0a, 0xffff, 0x8653 }, + { 0x06, 0xffff, 0x001e }, + { 0x08, 0xffff, 0x3595 }, + { 0x20, 0xffff, 0x9455 }, + { 0x21, 0xffff, 0x99ff }, + { 0x02, 0xffff, 0x6046 }, + { 0x29, 0xffff, 0xfe00 }, + { 0x23, 0xffff, 0xab62 }, + + { 0x41, 0xffff, 0xa80c }, + { 0x49, 0xffff, 0x520c }, + { 0x44, 0xffff, 0xd000 }, + { 0x4d, 0xffff, 0xf702 }, + { 0x4a, 0xffff, 0x8653 }, + { 0x46, 0xffff, 0x001e }, + { 0x48, 0xffff, 0x3595 }, + { 0x60, 0xffff, 0x9455 }, + { 0x61, 0xffff, 0x99ff }, + { 0x42, 0xffff, 0x6046 }, + { 0x69, 0xffff, 0xfe00 }, + { 0x63, 0xffff, 0xab62 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8125a_1); + + rtl_hw_start_8125_common(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8125a_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125a_2[] = { + { 0x04, 0xffff, 0xd000 }, + { 0x0a, 0xffff, 0x8653 }, + { 0x23, 0xffff, 0xab66 }, + { 0x20, 0xffff, 0x9455 }, + { 0x21, 0xffff, 0x99ff }, + { 0x29, 0xffff, 0xfe04 }, + + { 0x44, 0xffff, 0xd000 }, + { 0x4a, 0xffff, 0x8653 }, + { 0x63, 0xffff, 0xab66 }, + { 0x60, 0xffff, 0x9455 }, + { 0x61, 0xffff, 0x99ff }, + { 0x69, 0xffff, 0xfe04 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8125a_2); + + rtl_hw_start_8125_common(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8125b(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125b[] = { + { 0x0b, 0xffff, 0xa908 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x4b, 0xffff, 0xa908 }, + { 0x5e, 0xffff, 0x20eb }, + { 0x22, 0x0030, 0x0020 }, + { 0x62, 0x0030, 0x0020 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + rtl_hw_aspm_clkreq_enable(tp, false); + + rtl_ephy_init(tp, e_info_8125b); + rtl_hw_start_8125_common(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_config(struct rtl8169_private *tp) +{ + static const rtl_generic_fct hw_configs[] = { + [RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1, + [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3, + [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401, + [RTL_GIGA_MAC_VER_16] = NULL, + [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1, + [RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1, + [RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2, + [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_3, + [RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4, + [RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2, + [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3, + [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4, + [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1, + [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2, + [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2, + [RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402, + [RTL_GIGA_MAC_VER_38] = 
rtl_hw_start_8411, + [RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106, + [RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2, + [RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1, + [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2, + [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3, + [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117, + [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125a_1, + [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2, + [RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b, + }; + + if (hw_configs[tp->mac_version]) + hw_configs[tp->mac_version](tp); +} + +static void rtl_hw_start_8125(struct rtl8169_private *tp) +{ + int i; + + /* disable interrupt coalescing */ + for (i = 0xa00; i < 0xb00; i += 4) + RTL_W32(tp, i, 0); + + rtl_hw_config(tp); +} + +static void rtl_hw_start_8168(struct rtl8169_private *tp) +{ + if (rtl_is_8168evl_up(tp)) + RTL_W8(tp, MaxTxPacketSize, EarlySize); + else + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + rtl_hw_config(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static void rtl_hw_start_8169(struct rtl8169_private *tp) +{ + RTL_W8(tp, EarlyTxThres, NoEarlyTx); + + tp->cp_cmd |= PCIMulRW; + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) + tp->cp_cmd |= EnAnaPLL; + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + rtl8169_set_magic_reg(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static void rtl_hw_start(struct rtl8169_private *tp) +{ + rtl_unlock_config_regs(tp); + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + rtl_hw_start_8169(tp); + else if (rtl_is_8125(tp)) + rtl_hw_start_8125(tp); + else + rtl_hw_start_8168(tp); + + rtl_set_rx_max_size(tp); + rtl_set_rx_tx_desc_registers(tp); + rtl_lock_config_regs(tp); + + rtl_jumbo_config(tp); + + /* Initially a 10 us delay. Turned it into a PCI commit. 
- FR */ + rtl_pci_commit(tp); + + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_init_rxcfg(tp); + rtl_set_tx_config_registers(tp); + rtl_set_rx_config_features(tp, tp->dev->features); + rtl_set_rx_mode(tp->dev); + rtl_irq_enable(tp); +} + +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + dev->mtu = new_mtu; + netdev_update_features(dev); + rtl_jumbo_config(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + rtl8125_set_eee_txidle_timer(tp); + break; + default: + break; + } + + return 0; +} + +static void rtl8169_mark_to_asic(struct RxDesc *desc) +{ + u32 eor = le32_to_cpu(desc->opts1) & RingEnd; + + desc->opts2 = 0; + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + WRITE_ONCE(desc->opts1, cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE)); +} + +static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp, + struct RxDesc *desc) +{ + struct device *d = tp_to_dev(tp); + int node = dev_to_node(d); + dma_addr_t mapping; + struct page *data; + + data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE)); + if (!data) + return NULL; + + mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + netdev_err(tp->dev, "Failed to map RX DMA!\n"); + __free_pages(data, get_order(R8169_RX_BUF_SIZE)); + return NULL; + } + + desc->addr = cpu_to_le64(mapping); + rtl8169_mark_to_asic(desc); + + return data; +} + +static void rtl8169_rx_clear(struct rtl8169_private *tp) +{ + unsigned int i; + + for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) { + dma_unmap_page(tp_to_dev(tp), + le64_to_cpu(tp->RxDescArray[i].addr), + R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + __free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE)); + tp->Rx_databuff[i] = NULL; + tp->RxDescArray[i].addr = 0; + tp->RxDescArray[i].opts1 = 0; + } +} + +static int rtl8169_rx_fill(struct rtl8169_private *tp) +{ + unsigned int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + struct page *data; + + data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); + if (!data) { + rtl8169_rx_clear(tp); + return -ENOMEM; + } + tp->Rx_databuff[i] = data; + } + + /* mark as last descriptor in the ring */ + tp->RxDescArray[NUM_RX_DESC - 1].opts1 |= cpu_to_le32(RingEnd); + + return 0; +} + +static int rtl8169_init_ring(struct rtl8169_private *tp) +{ + rtl8169_init_ring_indexes(tp); + + memset(tp->tx_skb, 0, sizeof(tp->tx_skb)); + memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff)); + + return rtl8169_rx_fill(tp); +} + +static void rtl8169_unmap_tx_skb(struct rtl8169_private *tp, unsigned int entry) +{ + struct ring_info *tx_skb = tp->tx_skb + entry; + struct TxDesc *desc = tp->TxDescArray + entry; + + dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), tx_skb->len, + DMA_TO_DEVICE); + memset(desc, 0, sizeof(*desc)); + memset(tx_skb, 0, sizeof(*tx_skb)); +} + +static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, + unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; i++) { + unsigned int entry = (start + i) % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + unsigned int len = tx_skb->len; + + if (len) { + struct sk_buff *skb = tx_skb->skb; + + rtl8169_unmap_tx_skb(tp, entry); + if (!tp->ecdev && skb) + dev_consume_skb_any(skb); + } + } +} + +static void rtl8169_tx_clear(struct rtl8169_private *tp) +{ + rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); + if (!tp->ecdev) + 
netdev_reset_queue(tp->dev); +} + +static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) +{ + if (!tp->ecdev) + napi_disable(&tp->napi); + + /* Give a racing hard_start_xmit a few cycles to complete. */ + synchronize_net(); + + /* Disable interrupts */ + rtl8169_irq_mask_and_ack(tp); + + rtl_rx_close(tp); + + if (going_down && tp->dev->wol_enabled) + goto no_reset; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000); + break; + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + rtl_enable_rxdvgate(tp); + fsleep(2000); + break; + default: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + fsleep(100); + break; + } + + rtl_hw_reset(tp); +no_reset: + rtl8169_tx_clear(tp); + rtl8169_init_ring_indexes(tp); +} + +static void rtl_reset_work(struct rtl8169_private *tp) +{ + int i; + + if (!tp->ecdev) + netif_stop_queue(tp->dev); + + rtl8169_cleanup(tp, false); + + for (i = 0; i < NUM_RX_DESC; i++) + rtl8169_mark_to_asic(tp->RxDescArray + i); + + if (!tp->ecdev) + napi_enable(&tp->napi); + rtl_hw_start(tp); +} + +static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len, + void *addr, unsigned int entry, bool desc_own) +{ + struct TxDesc *txd = tp->TxDescArray + entry; + struct device *d = tp_to_dev(tp); + dma_addr_t mapping; + u32 opts1; + int ret; + + mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); + ret = dma_mapping_error(d, mapping); + if (unlikely(ret)) { + if (net_ratelimit()) + netdev_err(tp->dev, "Failed to map TX data!\n"); + return ret; + } + + txd->addr = cpu_to_le64(mapping); + txd->opts2 = cpu_to_le32(opts[1]); + + opts1 = opts[0] | len; + if (entry == NUM_TX_DESC - 1) + opts1 |= RingEnd; + if (desc_own) + opts1 |= DescOwn; + txd->opts1 = cpu_to_le32(opts1); + + tp->tx_skb[entry].len = len; + + return 0; +} + +static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, + const u32 *opts, unsigned int entry) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int cur_frag; + + for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { + const skb_frag_t *frag = info->frags + cur_frag; + void *addr = skb_frag_address(frag); + u32 len = skb_frag_size(frag); + + entry = (entry + 1) % NUM_TX_DESC; + + if (unlikely(rtl8169_tx_map(tp, opts, len, addr, entry, true))) + goto err_out; + } + + return 0; + +err_out: + rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); + return -EIO; +} + +static bool rtl_skb_is_udp(struct sk_buff *skb) +{ + int no = skb_network_offset(skb); + struct ipv6hdr *i6h, _i6h; + struct iphdr *ih, _ih; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih); + return ih && ih->protocol == IPPROTO_UDP; + case htons(ETH_P_IPV6): + i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h); + return i6h && i6h->nexthdr == IPPROTO_UDP; + default: + return false; + } +} + +#define RTL_MIN_PATCH_LEN 47 + +/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */ +static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + 
unsigned int padto = 0, len = skb->len; + + if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN && + rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) { + unsigned int trans_data_len = skb_tail_pointer(skb) - + skb_transport_header(skb); + + if (trans_data_len >= offsetof(struct udphdr, len) && + trans_data_len < RTL_MIN_PATCH_LEN) { + u16 dest = ntohs(udp_hdr(skb)->dest); + + /* dest is a standard PTP port */ + if (dest == 319 || dest == 320) + padto = len + RTL_MIN_PATCH_LEN - trans_data_len; + } + + if (trans_data_len < sizeof(struct udphdr)) + padto = max_t(unsigned int, padto, + len + sizeof(struct udphdr) - trans_data_len); + } + + return padto; +} + +static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + unsigned int padto; + + padto = rtl8125_quirk_udp_padto(tp, skb); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_60: + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + padto = max_t(unsigned int, padto, ETH_ZLEN); + default: + break; + } + + return padto; +} + +static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts) +{ + u32 mss = skb_shinfo(skb)->gso_size; + + if (mss) { + opts[0] |= TD_LSO; + opts[0] |= mss << TD0_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + + if (ip->protocol == IPPROTO_TCP) + opts[0] |= TD0_IP_CS | TD0_TCP_CS; + else if (ip->protocol == IPPROTO_UDP) + opts[0] |= TD0_IP_CS | TD0_UDP_CS; + else + WARN_ON_ONCE(1); + } +} + +static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, + struct sk_buff *skb, u32 *opts) +{ + u32 transport_offset = (u32)skb_transport_offset(skb); + struct skb_shared_info *shinfo = skb_shinfo(skb); + u32 mss = shinfo->gso_size; + + if (mss) { + if (shinfo->gso_type & SKB_GSO_TCPV4) { + opts[0] |= TD1_GTSENV4; + } else if (shinfo->gso_type & SKB_GSO_TCPV6) { + if (skb_cow_head(skb, 0)) + return false; + + tcp_v6_gso_csum_prep(skb); + opts[0] |= TD1_GTSENV6; + } else { + WARN_ON_ONCE(1); + } + + opts[0] |= transport_offset << GTTCPHO_SHIFT; + opts[1] |= mss << TD1_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ip_protocol; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + opts[1] |= TD1_IPv4_CS; + ip_protocol = ip_hdr(skb)->protocol; + break; + + case htons(ETH_P_IPV6): + opts[1] |= TD1_IPv6_CS; + ip_protocol = ipv6_hdr(skb)->nexthdr; + break; + + default: + ip_protocol = IPPROTO_RAW; + break; + } + + if (ip_protocol == IPPROTO_TCP) + opts[1] |= TD1_TCP_CS; + else if (ip_protocol == IPPROTO_UDP) + opts[1] |= TD1_UDP_CS; + else + WARN_ON_ONCE(1); + + opts[1] |= transport_offset << TCPHO_SHIFT; + } else { + unsigned int padto = rtl_quirk_packet_padto(tp, skb); + + /* skb_padto would free the skb on error */ + return !__skb_put_padto(skb, padto, false); + } + + return true; +} + +static bool rtl_tx_slots_avail(struct rtl8169_private *tp, + unsigned int nr_frags) +{ + unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx; + + /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ + return slots_avail > nr_frags; +} + +/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */ +static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... 
RTL_GIGA_MAC_VER_17: + return false; + default: + return true; + } +} + +static void rtl8169_doorbell(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W16(tp, TxPoll_8125, BIT(0)); + else + RTL_W8(tp, TxPoll, NPQ); +} + +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned int frags = skb_shinfo(skb)->nr_frags; + struct rtl8169_private *tp = netdev_priv(dev); + unsigned int entry = tp->cur_tx % NUM_TX_DESC; + struct TxDesc *txd_first, *txd_last; + bool stop_queue = 0, door_bell = 0; + u32 opts[2]; + + txd_first = tp->TxDescArray + entry; + + if (unlikely(!rtl_tx_slots_avail(tp, frags))) { + if (net_ratelimit()) + netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); + goto err_stop_0; + } + + if (unlikely(le32_to_cpu(txd_first->opts1) & DescOwn)) + goto err_stop_0; + + opts[1] = rtl8169_tx_vlan_tag(skb); + opts[0] = 0; + + if (!rtl_chip_supports_csum_v2(tp)) + rtl8169_tso_csum_v1(skb, opts); + else if (!rtl8169_tso_csum_v2(tp, skb, opts)) + goto err_dma_0; + + if (unlikely(rtl8169_tx_map(tp, opts, skb_headlen(skb), skb->data, + entry, false))) + goto err_dma_0; + + if (frags) { + if (rtl8169_xmit_frags(tp, skb, opts, entry)) + goto err_dma_1; + entry = (entry + frags) % NUM_TX_DESC; + } + + txd_last = tp->TxDescArray + entry; + txd_last->opts1 |= cpu_to_le32(LastFrag); + tp->tx_skb[entry].skb = skb; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + + door_bell = tp->ecdev || __netdev_sent_queue(dev, skb->len, netdev_xmit_more()); + + txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag); + + /* rtl_tx needs to see descriptor changes before updated tp->cur_tx */ + smp_wmb(); + + tp->cur_tx += frags + 1; + + stop_queue = !tp->ecdev && !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS); + if (unlikely(stop_queue)) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); + netif_stop_queue(dev); + door_bell = true; + } + + if (door_bell) + rtl8169_doorbell(tp); + + if (unlikely(stop_queue)) { + /* Sync with rtl_tx: + * - publish queue status and cur_tx ring index (write barrier) + * - refresh dirty_tx ring index (read barrier). + * May the current thread have a pessimistic view of the ring + * status and forget to wake up queue, a racing rtl_tx thread + * can't. 
+ */ + smp_mb(); + if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) + netif_start_queue(dev); + } + + return NETDEV_TX_OK; + +err_dma_1: + rtl8169_unmap_tx_skb(tp, entry); +err_dma_0: + if (!tp->ecdev) + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + +err_stop_0: + if (!tp->ecdev) + netif_stop_queue(dev); + dev->stats.tx_dropped++; + return NETDEV_TX_BUSY; +} + +static unsigned int rtl_last_frag_len(struct sk_buff *skb) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int nr_frags = info->nr_frags; + + if (!nr_frags) + return UINT_MAX; + + return skb_frag_size(info->frags + nr_frags - 1); +} + +/* Workaround for hw issues with TSO on RTL8168evl */ +static netdev_features_t rtl8168evl_fix_tso(struct sk_buff *skb, + netdev_features_t features) +{ + /* IPv4 header has options field */ + if (vlan_get_protocol(skb) == htons(ETH_P_IP) && + ip_hdrlen(skb) > sizeof(struct iphdr)) + features &= ~NETIF_F_ALL_TSO; + + /* IPv4 TCP header has options field */ + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 && + tcp_hdrlen(skb) > sizeof(struct tcphdr)) + features &= ~NETIF_F_ALL_TSO; + + else if (rtl_last_frag_len(skb) <= 6) + features &= ~NETIF_F_ALL_TSO; + + return features; +} + +static netdev_features_t rtl8169_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + int transport_offset = skb_transport_offset(skb); + struct rtl8169_private *tp = netdev_priv(dev); + + if (skb_is_gso(skb)) { + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + features = rtl8168evl_fix_tso(skb, features); + + if (transport_offset > GTTCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_ALL_TSO; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* work around hw bug on some chip versions */ + if (skb->len < ETH_ZLEN) + features &= ~NETIF_F_CSUM_MASK; + + if (rtl_quirk_packet_padto(tp, skb)) + features &= ~NETIF_F_CSUM_MASK; + + if (transport_offset > TCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_CSUM_MASK; + } + + return vlan_features_check(skb, features); +} + +static void rtl8169_pcierr_interrupt(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + int pci_status_errs; + u16 pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + + pci_status_errs = pci_status_get_and_clear_errors(pdev); + + if (net_ratelimit()) + netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n", + pci_cmd, pci_status_errs); + /* + * The recovery sequence below admits a very elaborated explanation: + * - it seems to work; + * - I did not see what else could be done; + * - it makes iop3xx happy. + * + * Feel free to adjust to your needs. 
+ */ + if (pdev->broken_parity_status) + pci_cmd &= ~PCI_COMMAND_PARITY; + else + pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY; + + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, + int budget) +{ + unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0; + + dirty_tx = tp->dirty_tx; + smp_rmb(); + + for (tx_left = tp->cur_tx - dirty_tx; tx_left > 0; tx_left--) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + struct sk_buff *skb = tp->tx_skb[entry].skb; + u32 status; + + status = le32_to_cpu(tp->TxDescArray[entry].opts1); + if (status & DescOwn) + break; + + rtl8169_unmap_tx_skb(tp, entry); + + if (skb) { + pkts_compl++; + bytes_compl += skb->len; + if (!tp->ecdev) + napi_consume_skb(skb, budget); + } + dirty_tx++; + } + + if (tp->dirty_tx != dirty_tx) { + if (!tp->ecdev) { + netdev_completed_queue(dev, pkts_compl, bytes_compl); + } + + rtl_inc_priv_stats(&tp->tx_stats, pkts_compl, bytes_compl); + + tp->dirty_tx = dirty_tx; + /* Sync with rtl8169_start_xmit: + * - publish dirty_tx ring index (write barrier) + * - refresh cur_tx ring index and queue status (read barrier) + * May the current thread miss the stopped queue condition, + * a racing xmit thread can only have a right view of the + * ring status. + */ + smp_mb(); + if (!tp->ecdev && netif_queue_stopped(dev) && + rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) { + netif_wake_queue(dev); + } + /* + * 8168 hack: TxPoll requests are lost when the Tx packets are + * too close. Let's kick an extra TxPoll request when a burst + * of start_xmit activity is detected (if it is not detected, + * it is slow enough). -- FR + */ + if (tp->cur_tx != dirty_tx) + rtl8169_doorbell(tp); + } +} + +static inline int rtl8169_fragmented_frame(u32 status) +{ + return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); +} + +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) +{ + u32 status = opts1 & RxProtoMask; + + if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || + ((status == RxProtoUDP) && !(opts1 & UDPFail))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); +} + +static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget) +{ + unsigned int cur_rx, rx_left, count; + struct device *d = tp_to_dev(tp); + + cur_rx = tp->cur_rx; + + for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) { + unsigned int pkt_size, entry = cur_rx % NUM_RX_DESC; + struct RxDesc *desc = tp->RxDescArray + entry; + struct sk_buff *skb; + const void *rx_buf; + dma_addr_t addr; + u32 status; + + status = le32_to_cpu(desc->opts1); + if (status & DescOwn) + break; + + /* This barrier is needed to keep us from reading + * any other fields out of the Rx descriptor until + * we know the status of DescOwn + */ + dma_rmb(); + + if (unlikely(status & RxRES)) { + if (net_ratelimit()) + netdev_warn(dev, "Rx ERROR. status = %08x\n", + status); + dev->stats.rx_errors++; + if (status & (RxRWT | RxRUNT)) + dev->stats.rx_length_errors++; + if (status & RxCRC) + dev->stats.rx_crc_errors++; + + if (!(dev->features & NETIF_F_RXALL)) + goto release_descriptor; + else if (status & RxRWT || !(status & (RxRUNT | RxCRC))) + goto release_descriptor; + } + + pkt_size = status & GENMASK(13, 0); + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size -= ETH_FCS_LEN; + + /* The driver does not support incoming fragmented frames. 
+ * They are seen as a symptom of over-mtu sized frames. + */ + if (unlikely(rtl8169_fragmented_frame(status))) { + dev->stats.rx_dropped++; + dev->stats.rx_length_errors++; + goto release_descriptor; + } + + if (!tp->ecdev) { + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (unlikely(!skb)) { + dev->stats.rx_dropped++; + goto release_descriptor; + } + } else { + skb = NULL; + } + + addr = le64_to_cpu(desc->addr); + rx_buf = page_address(tp->Rx_databuff[entry]); + + dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); + prefetch(rx_buf); + if (tp->ecdev) { + ecdev_receive(tp->ecdev, rx_buf, pkt_size); + + // No need to detect link status as + // long as frames are received: Reset watchdog. + tp->ec_watchdog_jiffies = jiffies; + + } else { + skb_copy_to_linear_data(skb, rx_buf, pkt_size); + skb->tail += pkt_size; + skb->len = pkt_size; + } + dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); + + if (!tp->ecdev) { + rtl8169_rx_csum(skb, status); + skb->protocol = eth_type_trans(skb, dev); + + rtl8169_rx_vlan_tag(desc, skb); + + if (skb->pkt_type == PACKET_MULTICAST) + dev->stats.multicast++; + + napi_gro_receive(&tp->napi, skb); + + } + rtl_inc_priv_stats(&tp->rx_stats, 1, pkt_size); +release_descriptor: + rtl8169_mark_to_asic(desc); + } + + count = cur_rx - tp->cur_rx; + tp->cur_rx = cur_rx; + + return count; +} + +static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) +{ + struct rtl8169_private *tp = dev_instance; + u32 status = rtl_get_events(tp); + + if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask)) + return IRQ_NONE; + + if (unlikely(status & SYSErr)) { + rtl8169_pcierr_interrupt(tp->dev); + goto out; + } + + if (status & LinkChg) + phy_mac_interrupt(tp->phydev); + + if (unlikely(status & RxFIFOOver && + tp->mac_version == RTL_GIGA_MAC_VER_11)) { + netif_stop_queue(tp->dev); + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); + } + + rtl_irq_disable(tp); + napi_schedule(&tp->napi); +out: + rtl_ack_events(tp, status); + + return IRQ_HANDLED; +} + +static void rtl_task(struct work_struct *work) +{ + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, wk.work); + + rtnl_lock(); + + if (!netif_running(tp->dev) || + !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) + goto out_unlock; + + if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) { + rtl_reset_work(tp); + netif_wake_queue(tp->dev); + } +out_unlock: + rtnl_unlock(); +} + +static int rtl8169_poll(struct napi_struct *napi, int budget) +{ + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); + struct net_device *dev = tp->dev; + int work_done; + + work_done = rtl_rx(dev, tp, (u32) budget); + + rtl_tx(dev, tp, budget); + + if (work_done < budget && napi_complete_done(napi, work_done)) + rtl_irq_enable(tp); + + return work_done; +} + +static void r8169_phylink_handler(struct net_device *ndev) +{ + struct rtl8169_private *tp = netdev_priv(ndev); + + if (netif_carrier_ok(ndev)) { + rtl_link_chg_patch(tp); + pm_request_resume(&tp->pci_dev->dev); + } else { + pm_runtime_idle(&tp->pci_dev->dev); + } + + if (net_ratelimit()) + phy_print_status(tp->phydev); +} + +static int r8169_phy_connect(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + phy_interface_t phy_mode; + int ret; + + phy_mode = tp->supports_gmii ? 
PHY_INTERFACE_MODE_GMII : + PHY_INTERFACE_MODE_MII; + + ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler, + phy_mode); + if (ret) + return ret; + + if (!tp->supports_gmii) + phy_set_max_speed(phydev, SPEED_100); + + phy_attached_info(phydev); + + return 0; +} + +static void rtl8169_down(struct rtl8169_private *tp) +{ + /* Clear all task flags */ + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); + + phy_stop(tp->phydev); + + rtl8169_update_counters(tp); + + pci_clear_master(tp->pci_dev); + rtl_pci_commit(tp); + + rtl8169_cleanup(tp, true); + + rtl_pll_power_down(tp); +} + +static void rtl8169_up(struct rtl8169_private *tp) +{ + pci_set_master(tp->pci_dev); + rtl_pll_power_up(tp); + rtl8169_init_phy(tp); + if (!tp->ecdev) + napi_enable(&tp->napi); + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_reset_work(tp); + + phy_start(tp->phydev); +} + +static int rtl8169_close(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + pm_runtime_get_sync(&pdev->dev); + + if (!tp->ecdev) + netif_stop_queue(dev); + rtl8169_down(tp); + rtl8169_rx_clear(tp); + + cancel_work_sync(&tp->wk.work); + + if (!tp->ecdev) + free_irq(pci_irq_vector(pdev, 0), tp); + + phy_disconnect(tp->phydev); + + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + tp->RxDescArray = NULL; + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8169_netpoll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp); +} +#endif + +static int rtl_open(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + int retval = -ENOMEM; + + pm_runtime_get_sync(&pdev->dev); + + /* + * Rx and Tx descriptors needs 256 bytes alignment. + * dma_alloc_coherent provides more. 
+ */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto err_pm_runtime_put; + + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_tx_0; + + retval = rtl8169_init_ring(tp); + if (retval < 0) + goto err_free_rx_1; + + rtl_request_firmware(tp); + + if (!tp->ecdev) { + retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt, + IRQF_SHARED, dev->name, tp); + if (retval < 0) + goto err_release_fw_2; + } + + retval = r8169_phy_connect(tp); + if (retval) + goto err_free_irq; + + rtl8169_up(tp); + rtl8169_init_counter_offsets(tp); + if (!tp->ecdev) + netif_start_queue(dev); + else + ecdev_set_link(tp->ecdev, netif_carrier_ok(dev)); + + pm_runtime_put_sync(&pdev->dev); +out: + return retval; + +err_free_irq: + free_irq(pci_irq_vector(pdev, 0), tp); +err_release_fw_2: + rtl_release_firmware(tp); + rtl8169_rx_clear(tp); +err_free_rx_1: + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; +err_free_tx_0: + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; +err_pm_runtime_put: + pm_runtime_put_noidle(&pdev->dev); + goto out; +} + +static void +rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + struct rtl8169_counters *counters = tp->counters; + + pm_runtime_get_noresume(&pdev->dev); + + netdev_stats_to_stats64(stats, &dev->stats); + + rtl_get_priv_stats(&tp->rx_stats, &stats->rx_packets, &stats->rx_bytes); + rtl_get_priv_stats(&tp->tx_stats, &stats->tx_packets, &stats->tx_bytes); + + /* + * Fetch additional counter values missing in stats collected by driver + * from tally counters. + */ + if (pm_runtime_active(&pdev->dev)) + rtl8169_update_counters(tp); + + /* + * Subtract values fetched during initalization. + * See rtl8169_init_counter_offsets for a description why we do that. 
+ */ + stats->tx_errors = le64_to_cpu(counters->tx_errors) - + le64_to_cpu(tp->tc_offset.tx_errors); + stats->collisions = le32_to_cpu(counters->tx_multi_collision) - + le32_to_cpu(tp->tc_offset.tx_multi_collision); + stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) - + le16_to_cpu(tp->tc_offset.tx_aborted); + stats->rx_missed_errors = le16_to_cpu(counters->rx_missed) - + le16_to_cpu(tp->tc_offset.rx_missed); + + pm_runtime_put_noidle(&pdev->dev); +} + +static void rtl8169_net_suspend(struct rtl8169_private *tp) +{ + netif_device_detach(tp->dev); + + if (netif_running(tp->dev)) + rtl8169_down(tp); +} + +#ifdef CONFIG_PM + +static int rtl8169_net_resume(struct rtl8169_private *tp) +{ + rtl_rar_set(tp, tp->dev->dev_addr); + + if (tp->TxDescArray) + rtl8169_up(tp); + + netif_device_attach(tp->dev); + + return 0; +} + +static int __maybe_unused rtl8169_suspend(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + if (tp->ecdev) { + return -EBUSY; + } + + rtnl_lock(); + rtl8169_net_suspend(tp); + if (!device_may_wakeup(tp_to_dev(tp))) + clk_disable_unprepare(tp->clk); + rtnl_unlock(); + + return 0; +} + +static int __maybe_unused rtl8169_resume(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + if (tp->ecdev) { + return -EBUSY; + } + + if (!device_may_wakeup(tp_to_dev(tp))) + clk_prepare_enable(tp->clk); + + /* Reportedly at least Asus X453MA truncates packets otherwise */ + if (tp->mac_version == RTL_GIGA_MAC_VER_37) + rtl_init_rxcfg(tp); + + return rtl8169_net_resume(tp); +} + +static int rtl8169_runtime_suspend(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + if (tp->ecdev) { + return -EBUSY; + } + + if (!tp->TxDescArray) { + netif_device_detach(tp->dev); + return 0; + } + + rtnl_lock(); + __rtl8169_set_wol(tp, WAKE_PHY); + rtl8169_net_suspend(tp); + rtnl_unlock(); + + return 0; +} + +static int rtl8169_runtime_resume(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + __rtl8169_set_wol(tp, tp->saved_wolopts); + + return rtl8169_net_resume(tp); +} + +static int rtl8169_runtime_idle(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev)) + pm_schedule_suspend(device, 10000); + + return -EBUSY; +} + +static const struct dev_pm_ops rtl8169_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume) + SET_RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume, + rtl8169_runtime_idle) +}; + +#endif /* CONFIG_PM */ + +static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) +{ + /* WoL fails with 8168b when the receiver is disabled. 
*/ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + pci_clear_master(tp->pci_dev); + + RTL_W8(tp, ChipCmd, CmdRxEnb); + rtl_pci_commit(tp); + break; + default: + break; + } +} + +static void rtl_shutdown(struct pci_dev *pdev) +{ + struct rtl8169_private *tp = pci_get_drvdata(pdev); + + rtnl_lock(); + rtl8169_net_suspend(tp); + rtnl_unlock(); + + /* Restore original MAC address */ + rtl_rar_set(tp, tp->dev->perm_addr); + + if (system_state == SYSTEM_POWER_OFF) { + if (tp->saved_wolopts) { + rtl_wol_suspend_quirk(tp); + rtl_wol_shutdown_quirk(tp); + } + + pci_wake_from_d3(pdev, true); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +static void rtl_remove_one(struct pci_dev *pdev) +{ + struct rtl8169_private *tp = pci_get_drvdata(pdev); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + if (tp->ecdev) { + ecdev_close(tp->ecdev); + ecdev_withdraw(tp->ecdev); + } else { + unregister_netdev(tp->dev); + } + + if (r8168_check_dash(tp)) + rtl8168_driver_stop(tp); + + rtl_release_firmware(tp); + + /* restore original MAC address */ + rtl_rar_set(tp, tp->dev->perm_addr); +} + +static const struct net_device_ops rtl_netdev_ops = { + .ndo_open = rtl_open, + .ndo_stop = rtl8169_close, + .ndo_get_stats64 = rtl8169_get_stats64, + .ndo_start_xmit = rtl8169_start_xmit, + .ndo_features_check = rtl8169_features_check, + .ndo_tx_timeout = rtl8169_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = rtl8169_change_mtu, + .ndo_fix_features = rtl8169_fix_features, + .ndo_set_features = rtl8169_set_features, + .ndo_set_mac_address = rtl_set_mac_address, + .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_set_rx_mode = rtl_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8169_netpoll, +#endif + +}; + +static void rtl_set_irq_mask(struct rtl8169_private *tp) +{ + tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg; + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver; + else if (tp->mac_version == RTL_GIGA_MAC_VER_11) + /* special workaround needed */ + tp->irq_mask |= RxFIFOOver; + else + tp->irq_mask |= RxOverflow; +} + +static int rtl_alloc_irq(struct rtl8169_private *tp) +{ + unsigned int flags; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + rtl_unlock_config_regs(tp); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); + rtl_lock_config_regs(tp); + fallthrough; + case RTL_GIGA_MAC_VER_07 ... 
RTL_GIGA_MAC_VER_17: + flags = PCI_IRQ_LEGACY; + break; + default: + flags = PCI_IRQ_ALL_TYPES; + break; + } + + return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); +} + +static void rtl_read_mac_address(struct rtl8169_private *tp, + u8 mac_addr[ETH_ALEN]) +{ + /* Get MAC address */ + if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) { + u32 value = rtl_eri_read(tp, 0xe0); + + mac_addr[0] = (value >> 0) & 0xff; + mac_addr[1] = (value >> 8) & 0xff; + mac_addr[2] = (value >> 16) & 0xff; + mac_addr[3] = (value >> 24) & 0xff; + + value = rtl_eri_read(tp, 0xe4); + mac_addr[4] = (value >> 0) & 0xff; + mac_addr[5] = (value >> 8) & 0xff; + } else if (rtl_is_8125(tp)) { + rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP); + } +} + +DECLARE_RTL_COND(rtl_link_list_ready_cond) +{ + return RTL_R8(tp, MCU) & LINK_LIST_RDY; +} + +static void r8168g_wait_ll_share_fifo_ready(struct rtl8169_private *tp) +{ + rtl_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42); +} + +static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + return rtl_readphy(tp, phyreg); +} + +static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr, + int phyreg, u16 val) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + rtl_writephy(tp, phyreg, val); + + return 0; +} + +static int r8169_mdio_register(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + struct mii_bus *new_bus; + int ret; + + new_bus = devm_mdiobus_alloc(&pdev->dev); + if (!new_bus) + return -ENOMEM; + + new_bus->name = "r8169"; + new_bus->priv = tp; + new_bus->parent = &pdev->dev; + new_bus->irq[0] = PHY_IGNORE_INTERRUPT; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x", + pci_domain_nr(pdev->bus), pci_dev_id(pdev)); + + new_bus->read = r8169_mdio_read_reg; + new_bus->write = r8169_mdio_write_reg; + + ret = devm_mdiobus_register(&pdev->dev, new_bus); + if (ret) + return ret; + + tp->phydev = mdiobus_get_phy(new_bus, 0); + if (!tp->phydev) { + return -ENODEV; + } else if (!tp->phydev->drv) { + /* Most chip versions fail with the genphy driver. + * Therefore ensure that the dedicated PHY driver is loaded. 
+ */ + dev_err(&pdev->dev, "no dedicated PHY driver found for PHY ID 0x%08x, maybe realtek.ko needs to be added to initramfs?\n", + tp->phydev->phy_id); + return -EUNATCH; + } + + /* PHY will be woken up in rtl_open() */ + phy_suspend(tp->phydev); + + return 0; +} + +static void rtl_hw_init_8168g(struct rtl8169_private *tp) +{ + rtl_enable_rxdvgate(tp); + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + r8168g_wait_ll_share_fifo_ready(tp); + + r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15)); + r8168g_wait_ll_share_fifo_ready(tp); +} + +static void rtl_hw_init_8125(struct rtl8169_private *tp) +{ + rtl_enable_rxdvgate(tp); + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + r8168g_wait_ll_share_fifo_ready(tp); + + r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0); + r8168_mac_ocp_write(tp, 0xc0a6, 0x0150); + r8168_mac_ocp_write(tp, 0xc01e, 0x5555); + r8168g_wait_ll_share_fifo_ready(tp); +} + +static void rtl_hw_initialize(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52: + rtl8168ep_stop_cmac(tp); + fallthrough; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: + rtl_hw_init_8168g(tp); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63: + rtl_hw_init_8125(tp); + break; + default: + break; + } +} + +static int rtl_jumbo_max(struct rtl8169_private *tp) +{ + /* Non-GBit versions don't support jumbo frames */ + if (!tp->supports_gmii) + return 0; + + switch (tp->mac_version) { + /* RTL8169 */ + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + return JUMBO_7K; + /* RTL8168b */ + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + return JUMBO_4K; + /* RTL8168c */ + case RTL_GIGA_MAC_VER_18 ... 
RTL_GIGA_MAC_VER_24: + return JUMBO_6K; + default: + return JUMBO_9K; + } +} + +static void rtl_disable_clk(void *data) +{ + clk_disable_unprepare(data); +} + +static int rtl_get_ether_clk(struct rtl8169_private *tp) +{ + struct device *d = tp_to_dev(tp); + struct clk *clk; + int rc; + + clk = devm_clk_get(d, "ether_clk"); + if (IS_ERR(clk)) { + rc = PTR_ERR(clk); + if (rc == -ENOENT) + /* clk-core allows NULL (for suspend / resume) */ + rc = 0; + else if (rc != -EPROBE_DEFER) + dev_err(d, "failed to get clk: %d\n", rc); + } else { + tp->clk = clk; + rc = clk_prepare_enable(clk); + if (rc) + dev_err(d, "failed to enable clk: %d\n", rc); + else + rc = devm_add_action_or_reset(d, rtl_disable_clk, clk); + } + + return rc; +} + +static void rtl_init_mac_address(struct rtl8169_private *tp) +{ + struct net_device *dev = tp->dev; + u8 *mac_addr = dev->dev_addr; + int rc; + + rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr); + if (!rc) + goto done; + + rtl_read_mac_address(tp, mac_addr); + if (is_valid_ether_addr(mac_addr)) + goto done; + + rtl_read_mac_from_reg(tp, mac_addr, MAC0); + if (is_valid_ether_addr(mac_addr)) + goto done; + + eth_hw_addr_random(dev); + dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n"); +done: + rtl_rar_set(tp, mac_addr); +} + +static void ec_poll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u16 status = rtl_get_events(tp); + + if ((jiffies - tp->ec_watchdog_jiffies) >= 2 * HZ) { + ecdev_set_link(tp->ecdev, netif_carrier_ok(dev)); + tp->ec_watchdog_jiffies = jiffies; + } + + if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask)) + return; + + rtl_tx(dev, tp, 100); + + rtl_rx(dev, tp, 100); + + if (status & LinkChg) + phy_mac_interrupt(tp->phydev); + + + rtl_ack_events(tp, status); +} + + +static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct rtl8169_private *tp; + int jumbo_max, region, rc; + enum mac_version chipset; + struct net_device *dev; + u16 xid; + + dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp)); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &rtl_netdev_ops; + tp = netdev_priv(dev); + tp->dev = dev; + tp->pci_dev = pdev; + tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1; + tp->eee_adv = -1; + tp->ocp_base = OCP_STD_PHY_BASE; + + /* Get the *optional* external "ether_clk" used on some boards */ + rc = rtl_get_ether_clk(tp); + if (rc) + return rc; + + /* Disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users. + */ + rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | + PCIE_LINK_STATE_L1); + tp->aspm_manageable = !rc; + + /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ + rc = pcim_enable_device(pdev); + if (rc < 0) { + dev_err(&pdev->dev, "enable failure\n"); + return rc; + } + + if (pcim_set_mwi(pdev) < 0) + dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n"); + + /* use first MMIO region */ + region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1; + if (region < 0) { + dev_err(&pdev->dev, "no MMIO resource found\n"); + return -ENODEV; + } + + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { + dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n"); + return -ENODEV; + } + + rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME); + if (rc < 0) { + dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); + return rc; + } + + tp->mmio_addr = pcim_iomap_table(pdev)[region]; + + xid = (RTL_R32(tp, TxConfig) >> 20) & 0xfcf; + + /* Identify chip attached to board */ + chipset = rtl8169_get_mac_version(xid, tp->supports_gmii); + if (chipset == RTL_GIGA_MAC_NONE) { + dev_err(&pdev->dev, "unknown chip XID %03x\n", xid); + return -ENODEV; + } + + tp->mac_version = chipset; + + tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK; + + if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) + dev->features |= NETIF_F_HIGHDMA; + + rtl_init_rxcfg(tp); + + rtl8169_irq_mask_and_ack(tp); + + rtl_hw_initialize(tp); + + rtl_hw_reset(tp); + + rc = rtl_alloc_irq(tp); + if (rc < 0) { + dev_err(&pdev->dev, "Can't allocate interrupt\n"); + return rc; + } + + INIT_WORK(&tp->wk.work, rtl_task); + u64_stats_init(&tp->rx_stats.syncp); + u64_stats_init(&tp->tx_stats.syncp); + + rtl_init_mac_address(tp); + + dev->ethtool_ops = &rtl8169_ethtool_ops; + + netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT); + + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. + */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + /* Disallow toggling */ + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (rtl_chip_supports_csum_v2(tp)) + dev->hw_features |= NETIF_F_IPV6_CSUM; + + dev->features |= dev->hw_features; + + /* There has been a number of reports that using SG/TSO results in + * tx timeouts. However for a lot of people SG/TSO works fine. + * Therefore disable both features by default, but allow users to + * enable them. Use at own risk! 
+ */ + if (rtl_chip_supports_csum_v2(tp)) { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; + dev->gso_max_size = RTL_GSO_MAX_SIZE_V2; + dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2; + } else { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO; + dev->gso_max_size = RTL_GSO_MAX_SIZE_V1; + dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1; + } + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + /* configure chip for default features */ + rtl8169_set_features(dev, dev->features); + + jumbo_max = rtl_jumbo_max(tp); + if (jumbo_max) + dev->max_mtu = jumbo_max; + + rtl_set_irq_mask(tp); + + tp->fw_name = rtl_chip_infos[chipset].fw_name; + + tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), + &tp->counters_phys_addr, + GFP_KERNEL); + if (!tp->counters) + return -ENOMEM; + + pci_set_drvdata(pdev, tp); + + rc = r8169_mdio_register(tp); + if (rc) + return rc; + + tp->ecdev = ecdev_offer(dev, ec_poll, THIS_MODULE); + tp->ec_watchdog_jiffies = jiffies; + + /* chip gets powered up in rtl_open() */ + rtl_pll_power_down(tp); + + if (!tp->ecdev) { + rc = register_netdev(dev); + if (rc) + return rc; + } + + netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n", + rtl_chip_infos[chipset].name, dev->dev_addr, xid, + pci_irq_vector(pdev, 0)); + + if (jumbo_max) + netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n", + jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ? + "ok" : "ko"); + + if (r8168_check_dash(tp)) { + netdev_info(dev, "DASH enabled\n"); + rtl8168_driver_start(tp); + } + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_sync(&pdev->dev); + + if (tp->ecdev) { + rc = ecdev_open(tp->ecdev); + if (rc) { + ecdev_withdraw(tp->ecdev); + return rc; + } + } + + return 0; +} + +static struct pci_driver rtl8169_pci_driver = { + .name = MODULENAME, + .id_table = rtl8169_pci_tbl, + .probe = rtl_init_one, + .remove = rtl_remove_one, + .shutdown = rtl_shutdown, +#ifdef CONFIG_PM + .driver.pm = &rtl8169_pm_ops, +#endif +}; + +module_pci_driver(rtl8169_pci_driver); diff --git a/devices/r8169/r8169_main-5.10-orig.c b/devices/r8169/r8169_main-5.10-orig.c new file mode 100644 index 00000000..5eac3f49 --- /dev/null +++ b/devices/r8169/r8169_main-5.10-orig.c @@ -0,0 +1,5522 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * r8169.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw> + * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com> + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information.
+ */ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/phy.h> +#include <linux/if_vlan.h> +#include <linux/in.h> +#include <linux/io.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/pm_runtime.h> +#include <linux/bitfield.h> +#include <linux/prefetch.h> +#include <linux/ipv6.h> +#include <net/ip6_checksum.h> + +#include "r8169.h" +#include "r8169_firmware.h" + +#define MODULENAME "r8169" + +#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" +#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" +#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" +#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" +#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" +#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" +#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" +#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" +#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" +#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" +#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw" +#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw" +#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" +#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" +#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" +#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw" +#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" +#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw" +#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" +#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" +#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw" +#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw" + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The RTL chips use a 64 element hash table based on the Ethernet CRC. */ +#define MC_FILTER_LIMIT 32 + +#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ +#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ + +#define R8169_REGS_SIZE 256 +#define R8169_RX_BUF_SIZE (SZ_16K - 1) +#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ +#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */ +#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) +#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + +#define OCP_STD_PHY_BASE 0xa400 + +#define RTL_CFG_NO_GBIT 1 + +/* write/read MMIO register */ +#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) +#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) +#define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg)) +#define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg)) +#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg)) +#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg)) + +#define JUMBO_4K (4 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) + +static const struct { + const char *name; + const char *fw_name; +} rtl_chip_infos[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_02] = {"RTL8169s" }, + [RTL_GIGA_MAC_VER_03] = {"RTL8110s" }, + [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" }, + [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" }, + [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" }, + /* PCI-E devices.
*/ + [RTL_GIGA_MAC_VER_07] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_08] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" }, + [RTL_GIGA_MAC_VER_10] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_13] = {"RTL8101e/RTL8100e" }, + [RTL_GIGA_MAC_VER_14] = {"RTL8401" }, + [RTL_GIGA_MAC_VER_16] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1}, + [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2}, + [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1}, + [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2}, + [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3}, + [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1}, + [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2}, + [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 }, + [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 }, + [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1}, + [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2}, + [RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" }, + [RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3}, + [RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2}, + [RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 }, + [RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1}, + [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2}, + [RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1}, + [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2}, + [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3}, + [RTL_GIGA_MAC_VER_60] = {"RTL8125A" }, + [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3}, + /* reserve 62 for CFG_METHOD_4 in the vendor driver */ + [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2}, +}; + +static const struct pci_device_id rtl8169_pci_tbl[] = { + { PCI_VDEVICE(REALTEK, 0x2502) }, + { PCI_VDEVICE(REALTEK, 0x2600) }, + { PCI_VDEVICE(REALTEK, 0x8129) }, + { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT }, + { PCI_VDEVICE(REALTEK, 0x8161) }, + { PCI_VDEVICE(REALTEK, 0x8162) }, + { PCI_VDEVICE(REALTEK, 0x8167) }, + { PCI_VDEVICE(REALTEK, 0x8168) }, + { PCI_VDEVICE(NCUBE, 0x8168) }, + { PCI_VDEVICE(REALTEK, 0x8169) }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 }, + { PCI_VDEVICE(DLINK, 0x4300) }, + { PCI_VDEVICE(DLINK, 0x4302) }, + { PCI_VDEVICE(AT, 0xc107) }, + { PCI_VDEVICE(USR, 0x0116) }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 }, + { 0x0001, 0x8168, PCI_ANY_ID, 0x2410 }, + { PCI_VDEVICE(REALTEK, 0x8125) }, + { PCI_VDEVICE(REALTEK, 0x3000) }, + {} +}; + +MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); + +enum rtl_registers { + MAC0 = 0, /* Ethernet 
hardware address. */ + MAC4 = 4, + MAR0 = 8, /* Multicast filter. */ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3c, + IntrStatus = 0x3e, + + TxConfig = 0x40, +#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */ +#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */ + + RxConfig = 0x44, +#define RX128_INT_EN (1 << 15) /* 8111c and later */ +#define RX_MULTI_EN (1 << 14) /* 8111c only */ +#define RXCFG_FIFO_SHIFT 13 + /* No threshold before first PCI xfer */ +#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) +#define RX_EARLY_OFF (1 << 11) +#define RXCFG_DMA_SHIFT 8 + /* Unlimited maximum PCI burst. */ +#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) + + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, +#define PME_SIGNAL (1 << 5) /* 8168c and later */ + + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + PHYAR = 0x60, + PHYstatus = 0x6c, + RxMaxSize = 0xda, + CPlusCmd = 0xe0, + IntrMitigate = 0xe2, + +#define RTL_COALESCE_TX_USECS GENMASK(15, 12) +#define RTL_COALESCE_TX_FRAMES GENMASK(11, 8) +#define RTL_COALESCE_RX_USECS GENMASK(7, 4) +#define RTL_COALESCE_RX_FRAMES GENMASK(3, 0) + +#define RTL_COALESCE_T_MAX 0x0fU +#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_T_MAX * 4) + + RxDescAddrLow = 0xe4, + RxDescAddrHigh = 0xe8, + EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ + +#define NoEarlyTx 0x3f /* Max value : no early transmit. */ + + MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */ + +#define TxPacketMax (8064 >> 7) +#define EarlySize 0x27 + + FuncEvent = 0xf0, + FuncEventMask = 0xf4, + FuncPresetState = 0xf8, + IBCR0 = 0xf8, + IBCR2 = 0xf9, + IBIMR0 = 0xfa, + IBISR0 = 0xfb, + FuncForceEvent = 0xfc, +}; + +enum rtl8168_8101_registers { + CSIDR = 0x64, + CSIAR = 0x68, +#define CSIAR_FLAG 0x80000000 +#define CSIAR_WRITE_CMD 0x80000000 +#define CSIAR_BYTE_ENABLE 0x0000f000 +#define CSIAR_ADDR_MASK 0x00000fff + PMCH = 0x6f, + EPHYAR = 0x80, +#define EPHYAR_FLAG 0x80000000 +#define EPHYAR_WRITE_CMD 0x80000000 +#define EPHYAR_REG_MASK 0x1f +#define EPHYAR_REG_SHIFT 16 +#define EPHYAR_DATA_MASK 0xffff + DLLPR = 0xd0, +#define PFM_EN (1 << 6) +#define TX_10M_PS_EN (1 << 7) + DBG_REG = 0xd1, +#define FIX_NAK_1 (1 << 4) +#define FIX_NAK_2 (1 << 3) + TWSI = 0xd2, + MCU = 0xd3, +#define NOW_IS_OOB (1 << 7) +#define TX_EMPTY (1 << 5) +#define RX_EMPTY (1 << 4) +#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY) +#define EN_NDP (1 << 3) +#define EN_OOB_RESET (1 << 2) +#define LINK_LIST_RDY (1 << 1) + EFUSEAR = 0xdc, +#define EFUSEAR_FLAG 0x80000000 +#define EFUSEAR_WRITE_CMD 0x80000000 +#define EFUSEAR_READ_CMD 0x00000000 +#define EFUSEAR_REG_MASK 0x03ff +#define EFUSEAR_REG_SHIFT 8 +#define EFUSEAR_DATA_MASK 0xff + MISC_1 = 0xf2, +#define PFM_D3COLD_EN (1 << 6) +}; + +enum rtl8168_registers { + LED_FREQ = 0x1a, + EEE_LED = 0x1b, + ERIDR = 0x70, + ERIAR = 0x74, +#define ERIAR_FLAG 0x80000000 +#define ERIAR_WRITE_CMD 0x80000000 +#define ERIAR_READ_CMD 0x00000000 +#define ERIAR_ADDR_BYTE_ALIGN 4 +#define ERIAR_TYPE_SHIFT 16 +#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) +#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) +#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_OOB (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_MASK_SHIFT 12 +#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT) 
+#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) + EPHY_RXER_NUM = 0x7c, + OCPDR = 0xb0, /* OCP GPHY access */ +#define OCPDR_WRITE_CMD 0x80000000 +#define OCPDR_READ_CMD 0x00000000 +#define OCPDR_REG_MASK 0x7f +#define OCPDR_GPHY_REG_SHIFT 16 +#define OCPDR_DATA_MASK 0xffff + OCPAR = 0xb4, +#define OCPAR_FLAG 0x80000000 +#define OCPAR_GPHY_WRITE_CMD 0x8000f060 +#define OCPAR_GPHY_READ_CMD 0x0000f060 + GPHY_OCP = 0xb8, + RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ + MISC = 0xf0, /* 8168e only. */ +#define TXPLA_RST (1 << 29) +#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */ +#define PWM_EN (1 << 22) +#define RXDV_GATED_EN (1 << 19) +#define EARLY_TALLY_EN (1 << 16) +}; + +enum rtl8125_registers { + IntrMask_8125 = 0x38, + IntrStatus_8125 = 0x3c, + TxPoll_8125 = 0x90, + MAC0_BKP = 0x19e0, + EEE_TXIDLE_TIMER_8125 = 0x6048, +}; + +#define RX_VLAN_INNER_8125 BIT(22) +#define RX_VLAN_OUTER_8125 BIT(23) +#define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125) + +#define RX_FETCH_DFLT_8125 (8 << 27) + +enum rtl_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxOverflow = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* TXPoll register p.5 */ + HPQ = 0x80, /* Poll cmd on the high prio queue */ + NPQ = 0x40, /* Poll cmd on the low prio queue */ + FSWInt = 0x01, /* Forced software interrupt */ + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xc0, + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, +#define RX_CONFIG_ACCEPT_ERR_MASK 0x30 + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +#define RX_CONFIG_ACCEPT_OK_MASK 0x0f +#define RX_CONFIG_ACCEPT_MASK 0x3f + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + /* Config1 register p.24 */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register p. 25 */ + ClkReqEn = (1 << 7), /* Clock Request Enable */ + MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ + PCI_Clock_66MHz = 0x01, + PCI_Clock_33MHz = 0x00, + + /* Config3 register p.25 */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Rdy_to_L23 = (1 << 1), /* L23 Enable */ + Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* 8168 only. 
Reserved in the 8168b */ + + /* Config5 register p.27 */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + Spi_en = (1 << 3), + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + ASPM_en = (1 << 0), /* ASPM enable */ + + /* CPlusCmd p.31 */ + EnableBist = (1 << 15), // 8168 8101 + Mac_dbgo_oe = (1 << 14), // 8168 8101 + EnAnaPLL = (1 << 14), // 8169 + Normal_mode = (1 << 13), // unused + Force_half_dup = (1 << 12), // 8168 8101 + Force_rxflow_en = (1 << 11), // 8168 8101 + Force_txflow_en = (1 << 10), // 8168 8101 + Cxpl_dbg_sel = (1 << 9), // 8168 8101 + ASF = (1 << 8), // 8168 8101 + PktCntrDisable = (1 << 7), // 8168 8101 + Mac_dbgo_sel = 0x001c, // 8168 + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + PCIDAC = (1 << 4), + PCIMulRW = (1 << 3), +#define INTT_MASK GENMASK(1, 0) +#define CPCMD_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK) + + /* rtl8169_PHYstatus */ + TBI_Enable = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* ResetCounterCommand */ + CounterReset = 0x1, + + /* DumpCounterCommand */ + CounterDump = 0x8, + + /* magic enable v2 */ + MagicPacket_v2 = (1 << 16), /* Wake up when receives a Magic Packet */ +}; + +enum rtl_desc_bit { + /* First doubleword. */ + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ +}; + +/* Generic case. */ +enum rtl_tx_desc_bit { + /* First doubleword. */ + TD_LSO = (1 << 27), /* Large Send Offload */ +#define TD_MSS_MAX 0x07ffu /* MSS value */ + + /* Second doubleword. */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ +}; + +/* 8169, 8168b and 810x except 8102e. */ +enum rtl_tx_desc_bit_0 { + /* First doubleword. */ +#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */ + TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */ + TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */ + TD0_IP_CS = (1 << 18), /* Calculate IP checksum */ +}; + +/* 8102e, 8168c and beyond. */ +enum rtl_tx_desc_bit_1 { + /* First doubleword. */ + TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */ + TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */ +#define GTTCPHO_SHIFT 18 +#define GTTCPHO_MAX 0x7f + + /* Second doubleword. 
*/ +#define TCPHO_SHIFT 18 +#define TCPHO_MAX 0x3ff +#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ + TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */ + TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */ + TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */ + TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */ +}; + +enum rtl_rx_desc_bit { + /* Rx private */ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 0/2 */ + +#define RxProtoUDP (PID1) +#define RxProtoTCP (PID0) +#define RxProtoIP (PID1 | PID0) +#define RxProtoMask RxProtoIP + + IPFail = (1 << 16), /* IP checksum failed */ + UDPFail = (1 << 15), /* UDP/IP checksum failed */ + TCPFail = (1 << 14), /* TCP/IP checksum failed */ + RxVlanTag = (1 << 16), /* VLAN tag available */ +}; + +#define RTL_GSO_MAX_SIZE_V1 32000 +#define RTL_GSO_MAX_SEGS_V1 24 +#define RTL_GSO_MAX_SIZE_V2 64000 +#define RTL_GSO_MAX_SEGS_V2 64 + +struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; +}; + +struct rtl8169_tc_offsets { + bool inited; + __le64 tx_errors; + __le32 tx_multi_collision; + __le16 tx_aborted; + __le16 rx_missed; +}; + +enum rtl_flag { + RTL_FLAG_TASK_ENABLED = 0, + RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_MAX +}; + +struct rtl8169_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + +struct rtl8169_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; + struct net_device *dev; + struct phy_device *phydev; + struct napi_struct napi; + enum mac_version mac_version; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. 
*/ + u32 dirty_tx; + struct rtl8169_stats rx_stats; + struct rtl8169_stats tx_stats; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + u16 cp_cmd; + u32 irq_mask; + struct clk *clk; + + struct { + DECLARE_BITMAP(flags, RTL_FLAG_MAX); + struct work_struct work; + } wk; + + unsigned supports_gmii:1; + unsigned aspm_manageable:1; + dma_addr_t counters_phys_addr; + struct rtl8169_counters *counters; + struct rtl8169_tc_offsets tc_offset; + u32 saved_wolopts; + int eee_adv; + + const char *fw_name; + struct rtl_fw *rtl_fw; + + u32 ocp_base; +}; + +typedef void (*rtl_generic_fct)(struct rtl8169_private *tp); + +MODULE_AUTHOR("Realtek and the Linux r8169 crew "); +MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); +MODULE_SOFTDEP("pre: realtek"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE_8168D_1); +MODULE_FIRMWARE(FIRMWARE_8168D_2); +MODULE_FIRMWARE(FIRMWARE_8168E_1); +MODULE_FIRMWARE(FIRMWARE_8168E_2); +MODULE_FIRMWARE(FIRMWARE_8168E_3); +MODULE_FIRMWARE(FIRMWARE_8105E_1); +MODULE_FIRMWARE(FIRMWARE_8168F_1); +MODULE_FIRMWARE(FIRMWARE_8168F_2); +MODULE_FIRMWARE(FIRMWARE_8402_1); +MODULE_FIRMWARE(FIRMWARE_8411_1); +MODULE_FIRMWARE(FIRMWARE_8411_2); +MODULE_FIRMWARE(FIRMWARE_8106E_1); +MODULE_FIRMWARE(FIRMWARE_8106E_2); +MODULE_FIRMWARE(FIRMWARE_8168G_2); +MODULE_FIRMWARE(FIRMWARE_8168G_3); +MODULE_FIRMWARE(FIRMWARE_8168H_1); +MODULE_FIRMWARE(FIRMWARE_8168H_2); +MODULE_FIRMWARE(FIRMWARE_8168FP_3); +MODULE_FIRMWARE(FIRMWARE_8107E_1); +MODULE_FIRMWARE(FIRMWARE_8107E_2); +MODULE_FIRMWARE(FIRMWARE_8125A_3); +MODULE_FIRMWARE(FIRMWARE_8125B_2); + +static inline struct device *tp_to_dev(struct rtl8169_private *tp) +{ + return &tp->pci_dev->dev; +} + +static void rtl_lock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Lock); +} + +static void rtl_unlock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); +} + +static void rtl_pci_commit(struct rtl8169_private *tp) +{ + /* Read an arbitrary register to commit a preceding PCI write */ + RTL_R8(tp, ChipCmd); +} + +static bool rtl_is_8125(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_60; +} + +static bool rtl_is_8168evl_up(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_39 && + tp->mac_version <= RTL_GIGA_MAC_VER_52; +} + +static bool rtl_supports_eee(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_37 && + tp->mac_version != RTL_GIGA_MAC_VER_39; +} + +static void rtl_get_priv_stats(struct rtl8169_stats *stats, + u64 *pkts, u64 *bytes) +{ + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + *pkts = stats->packets; + *bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); +} + +static void rtl_inc_priv_stats(struct rtl8169_stats *stats, + u64 pkts, u64 bytes) +{ + u64_stats_update_begin(&stats->syncp); + stats->packets += pkts; + stats->bytes += bytes; + u64_stats_update_end(&stats->syncp); +} + +static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + mac[i] = RTL_R8(tp, reg + i); +} + +struct rtl_cond { + bool (*check)(struct 
rtl8169_private *); + const char *msg; +}; + +static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c, + unsigned long usecs, int n, bool high) +{ + int i; + + for (i = 0; i < n; i++) { + if (c->check(tp) == high) + return true; + fsleep(usecs); + } + + if (net_ratelimit()) + netdev_err(tp->dev, "%s == %d (loop: %d, delay: %lu).\n", + c->msg, !high, n, usecs); + return false; +} + +static bool rtl_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned long d, int n) +{ + return rtl_loop_wait(tp, c, d, n, true); +} + +static bool rtl_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned long d, int n) +{ + return rtl_loop_wait(tp, c, d, n, false); +} + +#define DECLARE_RTL_COND(name) \ +static bool name ## _check(struct rtl8169_private *); \ + \ +static const struct rtl_cond name = { \ + .check = name ## _check, \ + .msg = #name \ +}; \ + \ +static bool name ## _check(struct rtl8169_private *tp) + +static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg) +{ + if (reg & 0xffff0001) { + if (net_ratelimit()) + netdev_err(tp->dev, "Invalid ocp reg %x!\n", reg); + return true; + } + return false; +} + +DECLARE_RTL_COND(rtl_ocp_gphy_cond) +{ + return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG; +} + +static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(tp, reg)) + return; + + RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); + + rtl_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10); +} + +static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(tp, reg)) + return 0; + + RTL_W32(tp, GPHY_OCP, reg << 15); + + return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ? + (RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT; +} + +static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(tp, reg)) + return; + + RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data); +} + +static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(tp, reg)) + return 0; + + RTL_W32(tp, OCPDR, reg << 15); + + return RTL_R32(tp, OCPDR); +} + +static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask, + u16 set) +{ + u16 data = r8168_mac_ocp_read(tp, reg); + + r8168_mac_ocp_write(tp, reg, (data & ~mask) | set); +} + +static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE; + return; + } + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value); +} + +static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) +{ + if (reg == 0x1f) + return tp->ocp_base == OCP_STD_PHY_BASE ? 
0 : tp->ocp_base >> 4; + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2); +} + +static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value << 4; + return; + } + + r8168_mac_ocp_write(tp, tp->ocp_base + reg, value); +} + +static int mac_mcu_read(struct rtl8169_private *tp, int reg) +{ + return r8168_mac_ocp_read(tp, tp->ocp_base + reg); +} + +DECLARE_RTL_COND(rtl_phyar_cond) +{ + return RTL_R32(tp, PHYAR) & 0x80000000; +} + +static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); + + rtl_loop_wait_low(tp, &rtl_phyar_cond, 25, 20); + /* + * According to hardware specs a 20us delay is required after write + * complete indication, but before sending next command. + */ + udelay(20); +} + +static int r8169_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16); + + value = rtl_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ? + RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT; + + /* + * According to hardware specs a 20us delay is required after read + * complete indication, but before sending next command. + */ + udelay(20); + + return value; +} + +DECLARE_RTL_COND(rtl_ocpar_cond) +{ + return RTL_R32(tp, OCPAR) & OCPAR_FLAG; +} + +static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) +{ + RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); + RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); +} + +static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_1_mdio_access(tp, reg, + OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK)); +} + +static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) +{ + r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); + + mdelay(1); + RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? + RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT; +} + +#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 + +static void r8168dp_2_mdio_start(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_stop(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_2_mdio_start(tp); + + r8169_mdio_write(tp, reg, value); + + r8168dp_2_mdio_stop(tp); +} + +static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + /* Work around issue with chip reporting wrong PHY ID */ + if (reg == MII_PHYSID2) + return 0xc912; + + r8168dp_2_mdio_start(tp); + + value = r8169_mdio_read(tp, reg); + + r8168dp_2_mdio_stop(tp); + + return value; +} + +static void rtl_writephy(struct rtl8169_private *tp, int location, int val) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + r8168dp_1_mdio_write(tp, location, val); + break; + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + r8168dp_2_mdio_write(tp, location, val); + break; + case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_63: + r8168g_mdio_write(tp, location, val); + break; + default: + r8169_mdio_write(tp, location, val); + break; + } +} + +static int rtl_readphy(struct rtl8169_private *tp, int location) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + return r8168dp_1_mdio_read(tp, location); + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_2_mdio_read(tp, location); + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + return r8168g_mdio_read(tp, location); + default: + return r8169_mdio_read(tp, location); + } +} + +DECLARE_RTL_COND(rtl_ephyar_cond) +{ + return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; +} + +static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) +{ + RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + rtl_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100); + + udelay(10); +} + +static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + return rtl_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ? + RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0; +} + +static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type) +{ + /* based on RTL8168FP_OOBMAC_BASE in vendor driver */ + if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB) + *cmd |= 0xf70 << 18; +} + +DECLARE_RTL_COND(rtl_eriar_cond) +{ + return RTL_R32(tp, ERIAR) & ERIAR_FLAG; +} + +static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val, int type) +{ + u32 cmd = ERIAR_WRITE_CMD | type | mask | addr; + + BUG_ON((addr & 3) || (mask == 0)); + RTL_W32(tp, ERIDR, val); + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); + + rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); +} + +static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val) +{ + _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC); +} + +static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type) +{ + u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr; + + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); + + return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? + RTL_R32(tp, ERIDR) : ~0; +} + +static u32 rtl_eri_read(struct rtl8169_private *tp, int addr) +{ + return _rtl_eri_read(tp, addr, ERIAR_EXGMAC); +} + +static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m) +{ + u32 val = rtl_eri_read(tp, addr); + + rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p); +} + +static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p) +{ + rtl_w0w1_eri(tp, addr, p, 0); +} + +static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m) +{ + rtl_w0w1_eri(tp, addr, 0, m); +} + +static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg) +{ + RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff)); + return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? 
+ RTL_R32(tp, OCPDR) : ~0; +} + +static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u16 reg) +{ + return _rtl_eri_read(tp, reg, ERIAR_OOB); +} + +static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + RTL_W32(tp, OCPDR, data); + RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + rtl_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); +} + +static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + _rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, + data, ERIAR_OOB); +} + +static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd) +{ + rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd); + + r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001); +} + +#define OOB_CMD_RESET 0x00 +#define OOB_CMD_DRIVER_START 0x05 +#define OOB_CMD_DRIVER_STOP 0x06 + +static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) +{ + return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10; +} + +DECLARE_RTL_COND(rtl_dp_ocp_read_cond) +{ + u16 reg; + + reg = rtl8168_get_ocp_reg(tp); + + return r8168dp_ocp_read(tp, reg) & 0x00000800; +} + +DECLARE_RTL_COND(rtl_ep_ocp_read_cond) +{ + return r8168ep_ocp_read(tp, 0x124) & 0x00000001; +} + +DECLARE_RTL_COND(rtl_ocp_tx_cond) +{ + return RTL_R8(tp, IBISR0) & 0x20; +} + +static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) +{ + RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01); + rtl_loop_wait_high(tp, &rtl_ocp_tx_cond, 50000, 2000); + RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20); + RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01); +} + +static void rtl8168dp_driver_start(struct rtl8169_private *tp) +{ + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START); + rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10); +} + +static void rtl8168ep_driver_start(struct rtl8169_private *tp) +{ + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); + rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10); +} + +static void rtl8168_driver_start(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl8168dp_driver_start(tp); + break; + case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52: + rtl8168ep_driver_start(tp); + break; + default: + BUG(); + break; + } +} + +static void rtl8168dp_driver_stop(struct rtl8169_private *tp) +{ + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP); + rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10); +} + +static void rtl8168ep_driver_stop(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); + rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10); +} + +static void rtl8168_driver_stop(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl8168dp_driver_stop(tp); + break; + case RTL_GIGA_MAC_VER_49 ... 
RTL_GIGA_MAC_VER_52: + rtl8168ep_driver_stop(tp); + break; + default: + BUG(); + break; + } +} + +static bool r8168dp_check_dash(struct rtl8169_private *tp) +{ + u16 reg = rtl8168_get_ocp_reg(tp); + + return !!(r8168dp_ocp_read(tp, reg) & 0x00008000); +} + +static bool r8168ep_check_dash(struct rtl8169_private *tp) +{ + return r8168ep_ocp_read(tp, 0x128) & 0x00000001; +} + +static bool r8168_check_dash(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_check_dash(tp); + case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52: + return r8168ep_check_dash(tp); + default: + return false; + } +} + +static void rtl_reset_packet_filter(struct rtl8169_private *tp) +{ + rtl_eri_clear_bits(tp, 0xdc, BIT(0)); + rtl_eri_set_bits(tp, 0xdc, BIT(0)); +} + +DECLARE_RTL_COND(rtl_efusear_cond) +{ + return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG; +} + +u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + return rtl_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ? + RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0; +} + +static u32 rtl_get_events(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + return RTL_R32(tp, IntrStatus_8125); + else + return RTL_R16(tp, IntrStatus); +} + +static void rtl_ack_events(struct rtl8169_private *tp, u32 bits) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrStatus_8125, bits); + else + RTL_W16(tp, IntrStatus, bits); +} + +static void rtl_irq_disable(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, 0); + else + RTL_W16(tp, IntrMask, 0); +} + +static void rtl_irq_enable(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, tp->irq_mask); + else + RTL_W16(tp, IntrMask, tp->irq_mask); +} + +static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) +{ + rtl_irq_disable(tp); + rtl_ack_events(tp, 0xffffffff); + rtl_pci_commit(tp); +} + +static void rtl_link_chg_patch(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + + if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else if (phydev->speed == SPEED_100) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); + } + rtl_reset_packet_filter(tp); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); + } + } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { + if (phydev->speed == SPEED_10) { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a); + } else { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); + } + } +} + +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = 
netdev_priv(dev); + + wol->supported = WAKE_ANY; + wol->wolopts = tp->saved_wolopts; +} + +static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) +{ + static const struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_PHY, Config3, LinkUp }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake }, + { WAKE_MAGIC, Config3, MagicPacket } + }; + unsigned int i, tmp = ARRAY_SIZE(cfg); + u8 options; + + rtl_unlock_config_regs(tp); + + if (rtl_is_8168evl_up(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2); + else + rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2); + } else if (rtl_is_8125(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0)); + else + r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0); + } + + for (i = 0; i < tmp; i++) { + options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(tp, cfg[i].reg, options); + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + options = RTL_R8(tp, Config1) & ~PMEnable; + if (wolopts) + options |= PMEnable; + RTL_W8(tp, Config1, options); + break; + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: + options = RTL_R8(tp, Config2) & ~PME_SIGNAL; + if (wolopts) + options |= PME_SIGNAL; + RTL_W8(tp, Config2, options); + break; + default: + break; + } + + rtl_lock_config_regs(tp); + + device_set_wakeup_enable(tp_to_dev(tp), wolopts); + tp->dev->wol_enabled = wolopts ? 1 : 0; +} + +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (wol->wolopts & ~WAKE_ANY) + return -EINVAL; + + tp->saved_wolopts = wol->wolopts; + __rtl8169_set_wol(tp, tp->saved_wolopts); + + return 0; +} + +static void rtl8169_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl_fw *rtl_fw = tp->rtl_fw; + + strlcpy(info->driver, MODULENAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + if (rtl_fw) + strlcpy(info->fw_version, rtl_fw->version, + sizeof(info->fw_version)); +} + +static int rtl8169_get_regs_len(struct net_device *dev) +{ + return R8169_REGS_SIZE; +} + +static netdev_features_t rtl8169_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > TD_MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + + if (dev->mtu > ETH_DATA_LEN && + tp->mac_version > RTL_GIGA_MAC_VER_06) + features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO); + + return features; +} + +static void rtl_set_rx_config_features(struct rtl8169_private *tp, + netdev_features_t features) +{ + u32 rx_config = RTL_R32(tp, RxConfig); + + if (features & NETIF_F_RXALL) + rx_config |= RX_CONFIG_ACCEPT_ERR_MASK; + else + rx_config &= ~RX_CONFIG_ACCEPT_ERR_MASK; + + if (rtl_is_8125(tp)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rx_config |= RX_VLAN_8125; + else + rx_config &= ~RX_VLAN_8125; + } + + RTL_W32(tp, RxConfig, rx_config); +} + +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_set_rx_config_features(tp, features); + + if (features & NETIF_F_RXCSUM) + 
tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + if (!rtl_is_8125(tp)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + } + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_pci_commit(tp); + + return 0; +} + +static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) +{ + return (skb_vlan_tag_present(skb)) ? + TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; +} + +static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); +} + +static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 __iomem *data = tp->mmio_addr; + u32 *dw = p; + int i; + + for (i = 0; i < R8169_REGS_SIZE; i += 4) + memcpy_fromio(dw++, data++, 4); +} + +static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", +}; + +static int rtl8169_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8169_gstrings); + default: + return -EOPNOTSUPP; + } +} + +DECLARE_RTL_COND(rtl_counters_cond) +{ + return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump); +} + +static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd) +{ + dma_addr_t paddr = tp->counters_phys_addr; + u32 cmd; + + RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32); + rtl_pci_commit(tp); + cmd = (u64)paddr & DMA_BIT_MASK(32); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | counter_cmd); + + rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); +} + +static void rtl8169_reset_counters(struct rtl8169_private *tp) +{ + /* + * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the + * tally counters. + */ + if (tp->mac_version >= RTL_GIGA_MAC_VER_19) + rtl8169_do_counters(tp, CounterReset); +} + +static void rtl8169_update_counters(struct rtl8169_private *tp) +{ + u8 val = RTL_R8(tp, ChipCmd); + + /* + * Some chips are unable to dump tally counters when the receiver + * is disabled. If 0xff chip may be in a PCI power-save state. + */ + if (val & CmdRxEnb && val != 0xff) + rtl8169_do_counters(tp, CounterDump); +} + +static void rtl8169_init_counter_offsets(struct rtl8169_private *tp) +{ + struct rtl8169_counters *counters = tp->counters; + + /* + * rtl8169_init_counter_offsets is called from rtl_open. On chip + * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only + * reset by a power cycle, while the counter values collected by the + * driver are reset at every driver unload/load cycle. + * + * To make sure the HW values returned by @get_stats64 match the SW + * values, we collect the initial values at first open(*) and use them + * as offsets to normalize the values returned by @get_stats64. + * + * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one + * for the reason stated in rtl8169_update_counters; CmdRxEnb is only + * set at open time by rtl_hw_start. 
+ */ + + if (tp->tc_offset.inited) + return; + + rtl8169_reset_counters(tp); + rtl8169_update_counters(tp); + + tp->tc_offset.tx_errors = counters->tx_errors; + tp->tc_offset.tx_multi_collision = counters->tx_multi_collision; + tp->tc_offset.tx_aborted = counters->tx_aborted; + tp->tc_offset.rx_missed = counters->rx_missed; + tp->tc_offset.inited = true; +} + +static void rtl8169_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_counters *counters; + + counters = tp->counters; + rtl8169_update_counters(tp); + + data[0] = le64_to_cpu(counters->tx_packets); + data[1] = le64_to_cpu(counters->rx_packets); + data[2] = le64_to_cpu(counters->tx_errors); + data[3] = le32_to_cpu(counters->rx_errors); + data[4] = le16_to_cpu(counters->rx_missed); + data[5] = le16_to_cpu(counters->align_errors); + data[6] = le32_to_cpu(counters->tx_one_collision); + data[7] = le32_to_cpu(counters->tx_multi_collision); + data[8] = le64_to_cpu(counters->rx_unicast); + data[9] = le64_to_cpu(counters->rx_broadcast); + data[10] = le32_to_cpu(counters->rx_multicast); + data[11] = le16_to_cpu(counters->tx_aborted); + data[12] = le16_to_cpu(counters->tx_underun); +} + +static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings)); + break; + } +} + +/* + * Interrupt coalescing + * + * > 1 - the availability of the IntrMitigate (0xe2) register through the + * > 8169, 8168 and 810x line of chipsets + * + * 8169, 8168, and 8136(810x) serial chipsets support it. + * + * > 2 - the Tx timer unit at gigabit speed + * + * The unit of the timer depends on both the speed and the setting of CPlusCmd + * (0xe0) bit 1 and bit 0. 
+ * + * For 8169 + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 320ns 2.56us 40.96us + * 0 1 2.56us 20.48us 327.7us + * 1 0 5.12us 40.96us 655.4us + * 1 1 10.24us 81.92us 1.31ms + * + * For the other + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 5us 2.56us 40.96us + * 0 1 40us 20.48us 327.7us + * 1 0 80us 40.96us 655.4us + * 1 1 160us 81.92us 1.31ms + */ + +/* rx/tx scale factors for all CPlusCmd[0:1] cases */ +struct rtl_coalesce_info { + u32 speed; + u32 scale_nsecs[4]; +}; + +/* produce array with base delay *1, *8, *8*2, *8*2*2 */ +#define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) } + +static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = { + { SPEED_1000, COALESCE_DELAY(320) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, + { 0 }, +}; + +static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = { + { SPEED_1000, COALESCE_DELAY(5000) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, + { 0 }, +}; +#undef COALESCE_DELAY + +/* get rx/tx scale vector corresponding to current speed */ +static const struct rtl_coalesce_info * +rtl_coalesce_info(struct rtl8169_private *tp) +{ + const struct rtl_coalesce_info *ci; + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + ci = rtl_coalesce_info_8169; + else + ci = rtl_coalesce_info_8168_8136; + + /* if speed is unknown assume highest one */ + if (tp->phydev->speed == SPEED_UNKNOWN) + return ci; + + for (; ci->speed; ci++) { + if (tp->phydev->speed == ci->speed) + return ci; + } + + return ERR_PTR(-ELNRNG); +} + +static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct rtl8169_private *tp = netdev_priv(dev); + const struct rtl_coalesce_info *ci; + u32 scale, c_us, c_fr; + u16 intrmit; + + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + + memset(ec, 0, sizeof(*ec)); + + /* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */ + ci = rtl_coalesce_info(tp); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + scale = ci->scale_nsecs[tp->cp_cmd & INTT_MASK]; + + intrmit = RTL_R16(tp, IntrMitigate); + + c_us = FIELD_GET(RTL_COALESCE_TX_USECS, intrmit); + ec->tx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000); + + c_fr = FIELD_GET(RTL_COALESCE_TX_FRAMES, intrmit); + /* ethtool_coalesce states usecs and max_frames must not both be 0 */ + ec->tx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1; + + c_us = FIELD_GET(RTL_COALESCE_RX_USECS, intrmit); + ec->rx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000); + + c_fr = FIELD_GET(RTL_COALESCE_RX_FRAMES, intrmit); + ec->rx_max_coalesced_frames = (c_us || c_fr) ? 
c_fr * 4 : 1; + + return 0; +} + +/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, usec) */ +static int rtl_coalesce_choose_scale(struct rtl8169_private *tp, u32 usec, + u16 *cp01) +{ + const struct rtl_coalesce_info *ci; + u16 i; + + ci = rtl_coalesce_info(tp); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + for (i = 0; i < 4; i++) { + if (usec <= ci->scale_nsecs[i] * RTL_COALESCE_T_MAX / 1000U) { + *cp01 = i; + return ci->scale_nsecs[i]; + } + } + + return -ERANGE; +} + +static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 tx_fr = ec->tx_max_coalesced_frames; + u32 rx_fr = ec->rx_max_coalesced_frames; + u32 coal_usec_max, units; + u16 w = 0, cp01 = 0; + int scale; + + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + + if (rx_fr > RTL_COALESCE_FRAME_MAX || tx_fr > RTL_COALESCE_FRAME_MAX) + return -ERANGE; + + coal_usec_max = max(ec->rx_coalesce_usecs, ec->tx_coalesce_usecs); + scale = rtl_coalesce_choose_scale(tp, coal_usec_max, &cp01); + if (scale < 0) + return scale; + + /* Accept max_frames=1 we returned in rtl_get_coalesce. Accept it + * not only when usecs=0 because of e.g. the following scenario: + * + * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX) + * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1 + * - then user does `ethtool -C eth0 rx-usecs 100` + * + * Since ethtool sends to kernel whole ethtool_coalesce settings, + * if we want to ignore rx_frames then it has to be set to 0. + */ + if (rx_fr == 1) + rx_fr = 0; + if (tx_fr == 1) + tx_fr = 0; + + /* HW requires time limit to be set if frame limit is set */ + if ((tx_fr && !ec->tx_coalesce_usecs) || + (rx_fr && !ec->rx_coalesce_usecs)) + return -EINVAL; + + w |= FIELD_PREP(RTL_COALESCE_TX_FRAMES, DIV_ROUND_UP(tx_fr, 4)); + w |= FIELD_PREP(RTL_COALESCE_RX_FRAMES, DIV_ROUND_UP(rx_fr, 4)); + + units = DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000U, scale); + w |= FIELD_PREP(RTL_COALESCE_TX_USECS, units); + units = DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000U, scale); + w |= FIELD_PREP(RTL_COALESCE_RX_USECS, units); + + RTL_W16(tp, IntrMitigate, w); + + /* Meaning of PktCntrDisable bit changed from RTL8168e-vl */ + if (rtl_is_8168evl_up(tp)) { + if (!rx_fr && !tx_fr) + /* disable packet counter */ + tp->cp_cmd |= PktCntrDisable; + else + tp->cp_cmd &= ~PktCntrDisable; + } + + tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_pci_commit(tp); + + return 0; +} + +static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + + return phy_ethtool_get_eee(tp->phydev, data); +} + +static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + + ret = phy_ethtool_set_eee(tp->phydev, data); + + if (!ret) + tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN, + MDIO_AN_EEE_ADV); + return ret; +} + +static const struct ethtool_ops rtl8169_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .get_drvinfo = rtl8169_get_drvinfo, + .get_regs_len = rtl8169_get_regs_len, + .get_link = ethtool_op_get_link, + .get_coalesce = rtl_get_coalesce, + .set_coalesce = rtl_set_coalesce, + .get_regs = rtl8169_get_regs, + .get_wol = rtl8169_get_wol, + .set_wol = rtl8169_set_wol, + .get_strings = rtl8169_get_strings, + 
.get_sset_count = rtl8169_get_sset_count, + .get_ethtool_stats = rtl8169_get_ethtool_stats, + .get_ts_info = ethtool_op_get_ts_info, + .nway_reset = phy_ethtool_nway_reset, + .get_eee = rtl8169_get_eee, + .set_eee = rtl8169_set_eee, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +static void rtl_enable_eee(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + int adv; + + /* respect EEE advertisement the user may have set */ + if (tp->eee_adv >= 0) + adv = tp->eee_adv; + else + adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); + + if (adv >= 0) + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv); +} + +static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) +{ + /* + * The driver currently handles the 8168Bf and the 8168Be identically + * but they can be identified more specifically through the test below + * if needed: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * + * Same thing for the 8101Eb and the 8101Ec: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + */ + static const struct rtl_mac_info { + u16 mask; + u16 val; + enum mac_version ver; + } mac_info[] = { + /* 8125B family. */ + { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 }, + + /* 8125A family. */ + { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 }, + { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 }, + + /* RTL8117 */ + { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 }, + + /* 8168EP family. */ + { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 }, + { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 }, + { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 }, + + /* 8168H family. */ + { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 }, + { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 }, + + /* 8168G family. */ + { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 }, + { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 }, + { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 }, + { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 }, + + /* 8168F family. */ + { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 }, + { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 }, + { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 }, + + /* 8168E family. */ + { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 }, + { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 }, + { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 }, + + /* 8168D family. */ + { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 }, + { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 }, + + /* 8168DP family. */ + { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 }, + { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 }, + { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 }, + + /* 8168C family. */ + { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 }, + { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 }, + { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 }, + { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 }, + { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 }, + { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 }, + { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 }, + + /* 8168B family. */ + { 0x7cf, 0x380, RTL_GIGA_MAC_VER_12 }, + { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 }, + { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, + + /* 8101 family. 
*/ + { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 }, + { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 }, + { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 }, + { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 }, + { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 }, + { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 }, + { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 }, + { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 }, + { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 }, + /* FIXME: where did these entries come from ? -- FR */ + { 0xfc8, 0x388, RTL_GIGA_MAC_VER_13 }, + { 0xfc8, 0x308, RTL_GIGA_MAC_VER_13 }, + + /* 8110 family. */ + { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 }, + { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 }, + { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 }, + { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 }, + { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 }, + + /* Catch-all */ + { 0x000, 0x000, RTL_GIGA_MAC_NONE } + }; + const struct rtl_mac_info *p = mac_info; + enum mac_version ver; + + while ((xid & p->mask) != p->val) + p++; + ver = p->ver; + + if (ver != RTL_GIGA_MAC_NONE && !gmii) { + if (ver == RTL_GIGA_MAC_VER_42) + ver = RTL_GIGA_MAC_VER_43; + else if (ver == RTL_GIGA_MAC_VER_45) + ver = RTL_GIGA_MAC_VER_47; + else if (ver == RTL_GIGA_MAC_VER_46) + ver = RTL_GIGA_MAC_VER_48; + } + + return ver; +} + +static void rtl_release_firmware(struct rtl8169_private *tp) +{ + if (tp->rtl_fw) { + rtl_fw_release_firmware(tp->rtl_fw); + kfree(tp->rtl_fw); + tp->rtl_fw = NULL; + } +} + +void r8169_apply_firmware(struct rtl8169_private *tp) +{ + int val; + + /* TODO: release firmware if rtl_fw_write_firmware signals failure. */ + if (tp->rtl_fw) { + rtl_fw_write_firmware(tp, tp->rtl_fw); + /* At least one firmware doesn't reset tp->ocp_base. 
*/ + tp->ocp_base = OCP_STD_PHY_BASE; + + /* PHY soft reset may still be in progress */ + phy_read_poll_timeout(tp->phydev, MII_BMCR, val, + !(val & BMCR_RESET), + 50000, 600000, true); + } +} + +static void rtl8168_config_eee_mac(struct rtl8169_private *tp) +{ + /* Adjust EEE LED frequency */ + if (tp->mac_version != RTL_GIGA_MAC_VER_38) + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + + rtl_eri_set_bits(tp, 0x1b0, 0x0003); +} + +static void rtl8125a_config_eee_mac(struct rtl8169_private *tp) +{ + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); + r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1)); +} + +static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp) +{ + RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20); +} + +static void rtl8125b_config_eee_mac(struct rtl8169_private *tp) +{ + rtl8125_set_eee_txidle_timer(tp); + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); +} + +static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr) +{ + const u16 w[] = { + addr[0] | (addr[1] << 8), + addr[2] | (addr[3] << 8), + addr[4] | (addr[5] << 8) + }; + + rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, w[0] | (w[1] << 16)); + rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, w[2]); + rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, w[0] << 16); + rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16)); +} + +u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp) +{ + u16 data1, data2, ioffset; + + r8168_mac_ocp_write(tp, 0xdd02, 0x807d); + data1 = r8168_mac_ocp_read(tp, 0xdd02); + data2 = r8168_mac_ocp_read(tp, 0xdd00); + + ioffset = (data2 >> 1) & 0x7ff8; + ioffset |= data2 & 0x0007; + if (data1 & BIT(7)) + ioffset |= BIT(15); + + return ioffset; +} + +static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) +{ + set_bit(flag, tp->wk.flags); + schedule_work(&tp->wk.work); +} + +static void rtl8169_init_phy(struct rtl8169_private *tp) +{ + r8169_hw_phy_config(tp, tp->phydev, tp->mac_version); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); + /* set undocumented MAC Reg C+CR Offset 0x82h */ + RTL_W8(tp, 0x82, 0x01); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_05 && + tp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE && + tp->pci_dev->subsystem_device == 0xe000) + phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b); + + /* We may have called phy_speed_down before */ + phy_speed_up(tp->phydev); + + if (rtl_supports_eee(tp)) + rtl_enable_eee(tp); + + genphy_soft_reset(tp->phydev); +} + +static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) +{ + rtl_unlock_config_regs(tp); + + RTL_W32(tp, MAC4, addr[4] | addr[5] << 8); + rtl_pci_commit(tp); + + RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); + rtl_pci_commit(tp); + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + rtl_rar_exgmac_set(tp, addr); + + rtl_lock_config_regs(tp); +} + +static int rtl_set_mac_address(struct net_device *dev, void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + ret = eth_mac_addr(dev, p); + if (ret) + return ret; + + rtl_rar_set(tp, dev->dev_addr); + + return 0; +} + +static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + case 
RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_63: + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | + AcceptBroadcast | AcceptMulticast | AcceptMyPhys); + break; + default: + break; + } +} + +static void rtl_pll_power_down(struct rtl8169_private *tp) +{ + if (r8168_check_dash(tp)) + return; + + if (tp->mac_version == RTL_GIGA_MAC_VER_32 || + tp->mac_version == RTL_GIGA_MAC_VER_33) + rtl_ephy_write(tp, 0x19, 0xff64); + + if (device_may_wakeup(tp_to_dev(tp))) { + phy_speed_down(tp->phydev, false); + rtl_wol_suspend_quirk(tp); + return; + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); + break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_49: + rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000); + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); + break; + default: + break; + } +} + +static void rtl_pll_power_up(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_43: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80); + break; + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); + break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_49: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); + rtl_eri_set_bits(tp, 0x1a8, 0xfc000000); + break; + default: + break; + } + + phy_resume(tp->phydev); +} + +static void rtl_init_rxcfg(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: + RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; + case RTL_GIGA_MAC_VER_60 ... 
RTL_GIGA_MAC_VER_63: + RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST); + break; + default: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); + break; + } +} + +static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) +{ + tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; +} + +static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); +} + +static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); +} + +static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); +} + +static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); +} + +static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x24); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); +} + +static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x3f); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); +} + +static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); +} + +static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); +} + +static void rtl_jumbo_config(struct rtl8169_private *tp) +{ + bool jumbo = tp->dev->mtu > ETH_DATA_LEN; + int readrq = 4096; + + rtl_unlock_config_regs(tp); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + if (jumbo) { + readrq = 512; + r8168b_1_hw_jumbo_enable(tp); + } else { + r8168b_1_hw_jumbo_disable(tp); + } + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26: + if (jumbo) { + readrq = 512; + r8168c_hw_jumbo_enable(tp); + } else { + r8168c_hw_jumbo_disable(tp); + } + break; + case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: + if (jumbo) + r8168dp_hw_jumbo_enable(tp); + else + r8168dp_hw_jumbo_disable(tp); + break; + case RTL_GIGA_MAC_VER_31 ... 
RTL_GIGA_MAC_VER_33: + if (jumbo) { + pcie_set_readrq(tp->pci_dev, 512); + r8168e_hw_jumbo_enable(tp); + } else { + r8168e_hw_jumbo_disable(tp); + } + break; + default: + break; + } + rtl_lock_config_regs(tp); + + if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii) + pcie_set_readrq(tp->pci_dev, readrq); + + /* Chip doesn't support pause in jumbo mode */ + linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, + tp->phydev->advertising, !jumbo); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + tp->phydev->advertising, !jumbo); + phy_start_aneg(tp->phydev); +} + +DECLARE_RTL_COND(rtl_chipcmd_cond) +{ + return RTL_R8(tp, ChipCmd) & CmdReset; +} + +static void rtl_hw_reset(struct rtl8169_private *tp) +{ + RTL_W8(tp, ChipCmd, CmdReset); + + rtl_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); +} + +static void rtl_request_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw; + + /* firmware loaded already or no firmware available */ + if (tp->rtl_fw || !tp->fw_name) + return; + + rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); + if (!rtl_fw) + return; + + rtl_fw->phy_write = rtl_writephy; + rtl_fw->phy_read = rtl_readphy; + rtl_fw->mac_mcu_write = mac_mcu_write; + rtl_fw->mac_mcu_read = mac_mcu_read; + rtl_fw->fw_name = tp->fw_name; + rtl_fw->dev = tp_to_dev(tp); + + if (rtl_fw_request_firmware(rtl_fw)) + kfree(rtl_fw); + else + tp->rtl_fw = rtl_fw; +} + +static void rtl_rx_close(struct rtl8169_private *tp) +{ + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK); +} + +DECLARE_RTL_COND(rtl_npq_cond) +{ + return RTL_R8(tp, TxPoll) & NPQ; +} + +DECLARE_RTL_COND(rtl_txcfg_empty_cond) +{ + return RTL_R32(tp, TxConfig) & TXCFG_EMPTY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond) +{ + return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond_2) +{ + /* IntrMitigate has new functionality on RTL8125 */ + return (RTL_R16(tp, IntrMitigate) & 0x0103) == 0x0103; +} + +static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52: + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61: + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + break; + case RTL_GIGA_MAC_VER_63: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42); + break; + default: + break; + } +} + +static void rtl_enable_rxdvgate(struct rtl8169_private *tp) +{ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN); + fsleep(2000); + rtl_wait_txrx_fifo_empty(tp); +} + +static void rtl_set_tx_config_registers(struct rtl8169_private *tp) +{ + u32 val = TX_DMA_BURST << TxDMAShift | + InterFrameGap << TxInterFrameGapShift; + + if (rtl_is_8168evl_up(tp)) + val |= TXCFG_AUTO_FIFO; + + RTL_W32(tp, TxConfig, val); +} + +static void rtl_set_rx_max_size(struct rtl8169_private *tp) +{ + /* Low hurts. Let's disable the filtering. */ + RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1); +} + +static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) +{ + /* + * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh + * register to be written before TxDescAddrLow to work. + * Switching from MMIO to I/O access fixes the issue as well. 
+ */ + RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); + RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); + RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); +} + +static void rtl8169_set_magic_reg(struct rtl8169_private *tp) +{ + u32 val; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + val = 0x000fff00; + else if (tp->mac_version == RTL_GIGA_MAC_VER_06) + val = 0x00ffff00; + else + return; + + if (RTL_R8(tp, Config2) & PCI_Clock_66MHz) + val |= 0xff; + + RTL_W32(tp, 0x7c, val); +} + +static void rtl_set_rx_mode(struct net_device *dev) +{ + u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast; + /* Multicast hash filter */ + u32 mc_filter[2] = { 0xffffffff, 0xffffffff }; + struct rtl8169_private *tp = netdev_priv(dev); + u32 tmp; + + if (dev->flags & IFF_PROMISC) { + rx_mode |= AcceptAllPhys; + } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT || + dev->flags & IFF_ALLMULTI || + tp->mac_version == RTL_GIGA_MAC_VER_35) { + /* accept all multicasts */ + } else if (netdev_mc_empty(dev)) { + rx_mode &= ~AcceptMulticast; + } else { + struct netdev_hw_addr *ha; + + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + u32 bit_nr = eth_hw_addr_crc(ha) >> 26; + mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31); + } + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + tmp = mc_filter[0]; + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(tmp); + } + } + + RTL_W32(tp, MAR0 + 4, mc_filter[1]); + RTL_W32(tp, MAR0 + 0, mc_filter[0]); + + tmp = RTL_R32(tp, RxConfig); + RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_OK_MASK) | rx_mode); +} + +DECLARE_RTL_COND(rtl_csiar_cond) +{ + return RTL_R32(tp, CSIAR) & CSIAR_FLAG; +} + +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIDR, value); + RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE | func << 16); + + rtl_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 | + CSIAR_BYTE_ENABLE); + + return rtl_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? + RTL_R32(tp, CSIDR) : ~0; +} + +static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) +{ + struct pci_dev *pdev = tp->pci_dev; + u32 csi; + + /* According to Realtek the value at config space address 0x070f + * controls the L0s/L1 entrance latency. We try standard ECAM access + * first and if it fails fall back to CSI. 
+ */ + if (pdev->cfg_size > 0x070f && + pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL) + return; + + netdev_notice_once(tp->dev, + "No native access to PCI extended config space, falling back to CSI\n"); + csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; + rtl_csi_write(tp, 0x070c, csi | val << 24); +} + +static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) +{ + rtl_csi_access_enable(tp, 0x27); +} + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +static void __rtl_ephy_init(struct rtl8169_private *tp, + const struct ephy_info *e, int len) +{ + u16 w; + + while (len-- > 0) { + w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits; + rtl_ephy_write(tp, e->offset, w); + e++; + } +} + +#define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a)) + +static void rtl_disable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_enable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp) +{ + /* work around an issue when PCI reset occurs during L2/L3 state */ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23); +} + +static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) +{ + /* Don't enable ASPM in the chip if OS can't control ASPM */ + if (enable && tp->aspm_manageable) { + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); + } else { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + } + + udelay(10); +} + +static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat, + u16 tx_stat, u16 rx_dyn, u16 tx_dyn) +{ + /* Usage of dynamic vs. static FIFO is controlled by bit + * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known. + */ + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn); +} + +static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp, + u8 low, u8 high) +{ + /* FIFO thresholds for pause flow control */ + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high); +} + +static void rtl_hw_start_8168b(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168cp[] = { + { 0x01, 0, 0x0001 }, + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0042 }, + { 0x06, 0x0080, 0x0000 }, + { 0x07, 0, 0x2000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168cp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + /* Magic. 
*/ + RTL_W8(tp, DBG_REG, 0x20); +} + +static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_1[] = { + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0002 }, + { 0x06, 0x0080, 0x0000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); + + rtl_ephy_init(tp, e_info_8168c_1); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_2[] = { + { 0x01, 0, 0x0001 }, + { 0x03, 0x0400, 0x0020 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168c_2); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8168c_2(tp); +} + +static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168d(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168d_4[] = { + { 0x0b, 0x0000, 0x0048 }, + { 0x19, 0x0020, 0x0050 }, + { 0x0c, 0x0100, 0x0020 }, + { 0x10, 0x0004, 0x0000 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168d_4); + + rtl_enable_clock_request(tp); +} + +static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_1[] = { + { 0x00, 0x0200, 0x0100 }, + { 0x00, 0x0000, 0x0004 }, + { 0x06, 0x0002, 0x0001 }, + { 0x06, 0x0000, 0x0030 }, + { 0x07, 0x0000, 0x2000 }, + { 0x00, 0x0000, 0x0020 }, + { 0x03, 0x5800, 0x2000 }, + { 0x03, 0x0000, 0x0001 }, + { 0x01, 0x0800, 0x1000 }, + { 0x07, 0x0000, 0x4000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x19, 0xffff, 0xfe6c }, + { 0x0a, 0x0000, 0x0040 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_1); + + rtl_disable_clock_request(tp); + + /* Reset tx FIFO pointer */ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST); + + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_2[] = { + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0004 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_2); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_eri_set_bits(tp, 0x0d4, 0x1f00); + rtl_eri_set_bits(tp, 0x1d0, BIT(1)); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168f(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + 
rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_set_bits(tp, 0x1d0, BIT(4) | BIT(1)); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl8168_config_eee_mac(tp); +} + +static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x08, 0x0001, 0x0002 }, + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_hw_start_8168f(tp); + + rtl_ephy_init(tp, e_info_8168f_1); + + rtl_eri_set_bits(tp, 0x0d4, 0x1f00); +} + +static void rtl_hw_start_8411(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_hw_start_8168f(tp); + rtl_pcie_state_l2l3_disable(tp); + + rtl_ephy_init(tp, e_info_8168f_1); + + rtl_eri_set_bits(tp, 0x0d4, 0x0c00); +} + +static void rtl_hw_start_8168g(struct rtl8169_private *tp) +{ + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_eri_set_bits(tp, 0x0d4, 0x1f80); + + rtl8168_config_eee_mac(tp); + + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_1[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8000, 0x0000 } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_1); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_2[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x19, 0xffff, 0x7c00 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x0d, 0xffff, 0x1666 }, + { 0x00, 0xffff, 0x10a3 }, + { 0x06, 0xffff, 0xf050 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x4000, 0x0000 }, + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_2); +} + +static void rtl_hw_start_8411_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8411_2[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x37d0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x00, 0x0000, 0x0080 }, + { 0x06, 0x0000, 0x0010 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x0000, 0x4000 }, + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8411_2); + + /* The following Realtek-provided 
magic fixes an issue with the RX unit + * getting confused after the PHY having been powered-down. + */ + r8168_mac_ocp_write(tp, 0xFC28, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2A, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0000); + r8168_mac_ocp_write(tp, 0xFC30, 0x0000); + r8168_mac_ocp_write(tp, 0xFC32, 0x0000); + r8168_mac_ocp_write(tp, 0xFC34, 0x0000); + r8168_mac_ocp_write(tp, 0xFC36, 0x0000); + mdelay(3); + r8168_mac_ocp_write(tp, 0xFC26, 0x0000); + + r8168_mac_ocp_write(tp, 0xF800, 0xE008); + r8168_mac_ocp_write(tp, 0xF802, 0xE00A); + r8168_mac_ocp_write(tp, 0xF804, 0xE00C); + r8168_mac_ocp_write(tp, 0xF806, 0xE00E); + r8168_mac_ocp_write(tp, 0xF808, 0xE027); + r8168_mac_ocp_write(tp, 0xF80A, 0xE04F); + r8168_mac_ocp_write(tp, 0xF80C, 0xE05E); + r8168_mac_ocp_write(tp, 0xF80E, 0xE065); + r8168_mac_ocp_write(tp, 0xF810, 0xC602); + r8168_mac_ocp_write(tp, 0xF812, 0xBE00); + r8168_mac_ocp_write(tp, 0xF814, 0x0000); + r8168_mac_ocp_write(tp, 0xF816, 0xC502); + r8168_mac_ocp_write(tp, 0xF818, 0xBD00); + r8168_mac_ocp_write(tp, 0xF81A, 0x074C); + r8168_mac_ocp_write(tp, 0xF81C, 0xC302); + r8168_mac_ocp_write(tp, 0xF81E, 0xBB00); + r8168_mac_ocp_write(tp, 0xF820, 0x080A); + r8168_mac_ocp_write(tp, 0xF822, 0x6420); + r8168_mac_ocp_write(tp, 0xF824, 0x48C2); + r8168_mac_ocp_write(tp, 0xF826, 0x8C20); + r8168_mac_ocp_write(tp, 0xF828, 0xC516); + r8168_mac_ocp_write(tp, 0xF82A, 0x64A4); + r8168_mac_ocp_write(tp, 0xF82C, 0x49C0); + r8168_mac_ocp_write(tp, 0xF82E, 0xF009); + r8168_mac_ocp_write(tp, 0xF830, 0x74A2); + r8168_mac_ocp_write(tp, 0xF832, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF834, 0x74A0); + r8168_mac_ocp_write(tp, 0xF836, 0xC50E); + r8168_mac_ocp_write(tp, 0xF838, 0x9CA2); + r8168_mac_ocp_write(tp, 0xF83A, 0x1C11); + r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF83E, 0xE006); + r8168_mac_ocp_write(tp, 0xF840, 0x74F8); + r8168_mac_ocp_write(tp, 0xF842, 0x48C4); + r8168_mac_ocp_write(tp, 0xF844, 0x8CF8); + r8168_mac_ocp_write(tp, 0xF846, 0xC404); + r8168_mac_ocp_write(tp, 0xF848, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84A, 0xC403); + r8168_mac_ocp_write(tp, 0xF84C, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2); + r8168_mac_ocp_write(tp, 0xF850, 0x0C0A); + r8168_mac_ocp_write(tp, 0xF852, 0xE434); + r8168_mac_ocp_write(tp, 0xF854, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF856, 0x49D9); + r8168_mac_ocp_write(tp, 0xF858, 0xF01F); + r8168_mac_ocp_write(tp, 0xF85A, 0xC526); + r8168_mac_ocp_write(tp, 0xF85C, 0x64A5); + r8168_mac_ocp_write(tp, 0xF85E, 0x1400); + r8168_mac_ocp_write(tp, 0xF860, 0xF007); + r8168_mac_ocp_write(tp, 0xF862, 0x0C01); + r8168_mac_ocp_write(tp, 0xF864, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF866, 0x1C15); + r8168_mac_ocp_write(tp, 0xF868, 0xC51B); + r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF86C, 0xE013); + r8168_mac_ocp_write(tp, 0xF86E, 0xC519); + r8168_mac_ocp_write(tp, 0xF870, 0x74A0); + r8168_mac_ocp_write(tp, 0xF872, 0x48C4); + r8168_mac_ocp_write(tp, 0xF874, 0x8CA0); + r8168_mac_ocp_write(tp, 0xF876, 0xC516); + r8168_mac_ocp_write(tp, 0xF878, 0x74A4); + r8168_mac_ocp_write(tp, 0xF87A, 0x48C8); + r8168_mac_ocp_write(tp, 0xF87C, 0x48CA); + r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4); + r8168_mac_ocp_write(tp, 0xF880, 0xC512); + r8168_mac_ocp_write(tp, 0xF882, 0x1B00); + r8168_mac_ocp_write(tp, 0xF884, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF886, 0x1B1C); + r8168_mac_ocp_write(tp, 0xF888, 0x483F); + r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2); + r8168_mac_ocp_write(tp, 0xF88C, 
0x1B04); + r8168_mac_ocp_write(tp, 0xF88E, 0xC508); + r8168_mac_ocp_write(tp, 0xF890, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF892, 0xC505); + r8168_mac_ocp_write(tp, 0xF894, 0xBD00); + r8168_mac_ocp_write(tp, 0xF896, 0xC502); + r8168_mac_ocp_write(tp, 0xF898, 0xBD00); + r8168_mac_ocp_write(tp, 0xF89A, 0x0300); + r8168_mac_ocp_write(tp, 0xF89C, 0x051E); + r8168_mac_ocp_write(tp, 0xF89E, 0xE434); + r8168_mac_ocp_write(tp, 0xF8A0, 0xE018); + r8168_mac_ocp_write(tp, 0xF8A2, 0xE092); + r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20); + r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F); + r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4); + r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3); + r8168_mac_ocp_write(tp, 0xF8AE, 0xF007); + r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0); + r8168_mac_ocp_write(tp, 0xF8B2, 0xF103); + r8168_mac_ocp_write(tp, 0xF8B4, 0xC607); + r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8B8, 0xC606); + r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8BC, 0xC602); + r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C); + r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28); + r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C); + r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00); + r8168_mac_ocp_write(tp, 0xF8C8, 0xC707); + r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00); + r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2); + r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1); + r8168_mac_ocp_write(tp, 0xF8D0, 0xC502); + r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA); + r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0); + r8168_mac_ocp_write(tp, 0xF8D8, 0xC502); + r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8DC, 0x0132); + + r8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + r8168_mac_ocp_write(tp, 0xFC2A, 0x0743); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0801); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9); + r8168_mac_ocp_write(tp, 0xFC30, 0x02FD); + r8168_mac_ocp_write(tp, 0xFC32, 0x0C25); + r8168_mac_ocp_write(tp, 0xFC34, 0x00A9); + r8168_mac_ocp_write(tp, 0xFC36, 0x012D); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168h_1[] = { + { 0x1e, 0x0800, 0x0001 }, + { 0x1d, 0x0000, 0x0800 }, + { 0x05, 0xffff, 0x2089 }, + { 0x06, 0xffff, 0x5881 }, + { 0x04, 0xffff, 0x854a }, + { 0x01, 0xffff, 0x068b } + }; + int rg_saw_cnt; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168h_1); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, 0x1f00); + rtl_eri_set_bits(tp, 0xdc, 0x001c); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = 16000000/rg_saw_cnt; + sw_cnt_1ms_ini &= 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, 
sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008); + r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, 0x1f80); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_1[] = { + { 0x00, 0xffff, 0x10ab }, + { 0x06, 0xffff, 0xf030 }, + { 0x08, 0xffff, 0x2006 }, + { 0x0d, 0xffff, 0x1666 }, + { 0x0c, 0x3ff0, 0x0000 } + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_1); + + rtl_hw_start_8168ep(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_2[] = { + { 0x00, 0xffff, 0x10a3 }, + { 0x19, 0xffff, 0xfc00 }, + { 0x1e, 0xffff, 0x20ea } + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_2); + + rtl_hw_start_8168ep(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_3[] = { + { 0x00, 0x0000, 0x0080 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_3); + + rtl_hw_start_8168ep(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8117(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8117[] = { + { 0x19, 0x0040, 0x1100 }, + { 0x59, 0x0040, 0x1100 }, + }; + int rg_saw_cnt; + + rtl8168ep_stop_cmac(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8117); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, 0x1f90); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & 
~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_write(tp, 0xea80, 0x0003); + r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + /* firmware is for MAC only */ + r8169_apply_firmware(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8102e_1[] = { + { 0x01, 0, 0x6e65 }, + { 0x02, 0, 0x091f }, + { 0x03, 0, 0xc2f9 }, + { 0x06, 0, 0xafb5 }, + { 0x07, 0, 0x0e00 }, + { 0x19, 0, 0xec80 }, + { 0x01, 0, 0x2e65 }, + { 0x01, 0, 0x6e65 } + }; + u8 cfg1; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, FIX_NAK_1); + + RTL_W8(tp, Config1, + LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + cfg1 = RTL_R8(tp, Config1); + if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) + RTL_W8(tp, Config1, cfg1 & ~LEDS0); + + rtl_ephy_init(tp, e_info_8102e_1); +} + +static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8102e_2(tp); + + rtl_ephy_write(tp, 0x03, 0xc2f9); +} + +static void rtl_hw_start_8401(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8401[] = { + { 0x01, 0xffff, 0x6fe5 }, + { 0x03, 0xffff, 0x0599 }, + { 0x06, 0xffff, 0xaf25 }, + { 0x07, 0xffff, 0x8e68 }, + }; + + rtl_ephy_init(tp, e_info_8401); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8105e_1[] = { + { 0x07, 0, 0x4000 }, + { 0x19, 0, 0x0200 }, + { 0x19, 0, 0x0020 }, + { 0x1e, 0, 0x2000 }, + { 0x03, 0, 0x0001 }, + { 0x19, 0, 0x0100 }, + { 0x19, 0, 0x0004 }, + { 0x0a, 0, 0x0020 } + }; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + /* Disable Early Tally Counter */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + + rtl_ephy_init(tp, e_info_8105e_1); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) +{ + rtl_hw_start_8105e_1(tp); + rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000); +} + +static void rtl_hw_start_8402(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8402[] = { + { 0x19, 0xffff, 0xff64 }, + { 0x1e, 0, 0x4000 } + }; + + 
rtl_set_def_aspm_entry_latency(tp); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl_ephy_init(tp, e_info_8402); + + rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06); + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_w0w1_eri(tp, 0x0d4, 0x0e00, 0xff00); + + /* disable EEE */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8106(struct rtl8169_private *tp) +{ + rtl_hw_aspm_clkreq_enable(tp, false); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); + + /* disable EEE */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + + rtl_pcie_state_l2l3_disable(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond) +{ + return r8168_mac_ocp_read(tp, 0xe00e) & BIT(13); +} + +static void rtl_hw_start_8125_common(struct rtl8169_private *tp) +{ + rtl_pcie_state_l2l3_disable(tp); + + RTL_W16(tp, 0x382, 0x221b); + RTL_W8(tp, 0x4500, 0); + RTL_W16(tp, 0x4800, 0); + + /* disable UPS */ + r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10); + + r8168_mac_ocp_write(tp, 0xc140, 0xffff); + r8168_mac_ocp_write(tp, 0xc142, 0xffff); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x03a9); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + /* disable new tx descriptor format */ + r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200); + else + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000); + else + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020); + + r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c); + r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033); + r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040); + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030); + r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000); + r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); + r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403); + r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068); + r8168_mac_ocp_modify(tp, 0xc0ac, 0x0080, 0x1f00); + r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f); + + r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001); + udelay(1); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0001, 0x0000); + RTL_W16(tp, 0x1880, RTL_R16(tp, 0x1880) & ~0x0030); + + r8168_mac_ocp_write(tp, 0xe098, 0xc302); + + rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + rtl8125b_config_eee_mac(tp); + else + rtl8125a_config_eee_mac(tp); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + udelay(10); +} + +static void rtl_hw_start_8125a_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125a_1[] = { + { 0x01, 0xffff, 0xa812 }, + { 0x09, 0xffff, 0x520c }, + { 0x04, 0xffff, 0xd000 }, + { 0x0d, 0xffff, 
0xf702 }, + { 0x0a, 0xffff, 0x8653 }, + { 0x06, 0xffff, 0x001e }, + { 0x08, 0xffff, 0x3595 }, + { 0x20, 0xffff, 0x9455 }, + { 0x21, 0xffff, 0x99ff }, + { 0x02, 0xffff, 0x6046 }, + { 0x29, 0xffff, 0xfe00 }, + { 0x23, 0xffff, 0xab62 }, + + { 0x41, 0xffff, 0xa80c }, + { 0x49, 0xffff, 0x520c }, + { 0x44, 0xffff, 0xd000 }, + { 0x4d, 0xffff, 0xf702 }, + { 0x4a, 0xffff, 0x8653 }, + { 0x46, 0xffff, 0x001e }, + { 0x48, 0xffff, 0x3595 }, + { 0x60, 0xffff, 0x9455 }, + { 0x61, 0xffff, 0x99ff }, + { 0x42, 0xffff, 0x6046 }, + { 0x69, 0xffff, 0xfe00 }, + { 0x63, 0xffff, 0xab62 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8125a_1); + + rtl_hw_start_8125_common(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8125a_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125a_2[] = { + { 0x04, 0xffff, 0xd000 }, + { 0x0a, 0xffff, 0x8653 }, + { 0x23, 0xffff, 0xab66 }, + { 0x20, 0xffff, 0x9455 }, + { 0x21, 0xffff, 0x99ff }, + { 0x29, 0xffff, 0xfe04 }, + + { 0x44, 0xffff, 0xd000 }, + { 0x4a, 0xffff, 0x8653 }, + { 0x63, 0xffff, 0xab66 }, + { 0x60, 0xffff, 0x9455 }, + { 0x61, 0xffff, 0x99ff }, + { 0x69, 0xffff, 0xfe04 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8125a_2); + + rtl_hw_start_8125_common(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8125b(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125b[] = { + { 0x0b, 0xffff, 0xa908 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x4b, 0xffff, 0xa908 }, + { 0x5e, 0xffff, 0x20eb }, + { 0x22, 0x0030, 0x0020 }, + { 0x62, 0x0030, 0x0020 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + rtl_hw_aspm_clkreq_enable(tp, false); + + rtl_ephy_init(tp, e_info_8125b); + rtl_hw_start_8125_common(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_config(struct rtl8169_private *tp) +{ + static const rtl_generic_fct hw_configs[] = { + [RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1, + [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3, + [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401, + [RTL_GIGA_MAC_VER_16] = NULL, + [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1, + [RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1, + [RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2, + [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_3, + [RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4, + [RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2, + [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3, + [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4, + [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1, + [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2, + [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2, + [RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402, + [RTL_GIGA_MAC_VER_38] = 
rtl_hw_start_8411, + [RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106, + [RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2, + [RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1, + [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2, + [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3, + [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117, + [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125a_1, + [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2, + [RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b, + }; + + if (hw_configs[tp->mac_version]) + hw_configs[tp->mac_version](tp); +} + +static void rtl_hw_start_8125(struct rtl8169_private *tp) +{ + int i; + + /* disable interrupt coalescing */ + for (i = 0xa00; i < 0xb00; i += 4) + RTL_W32(tp, i, 0); + + rtl_hw_config(tp); +} + +static void rtl_hw_start_8168(struct rtl8169_private *tp) +{ + if (rtl_is_8168evl_up(tp)) + RTL_W8(tp, MaxTxPacketSize, EarlySize); + else + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + rtl_hw_config(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static void rtl_hw_start_8169(struct rtl8169_private *tp) +{ + RTL_W8(tp, EarlyTxThres, NoEarlyTx); + + tp->cp_cmd |= PCIMulRW; + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) + tp->cp_cmd |= EnAnaPLL; + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + rtl8169_set_magic_reg(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static void rtl_hw_start(struct rtl8169_private *tp) +{ + rtl_unlock_config_regs(tp); + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + rtl_hw_start_8169(tp); + else if (rtl_is_8125(tp)) + rtl_hw_start_8125(tp); + else + rtl_hw_start_8168(tp); + + rtl_set_rx_max_size(tp); + rtl_set_rx_tx_desc_registers(tp); + rtl_lock_config_regs(tp); + + rtl_jumbo_config(tp); + + /* Initially a 10 us delay. Turned it into a PCI commit. 
- FR */ + rtl_pci_commit(tp); + + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_init_rxcfg(tp); + rtl_set_tx_config_registers(tp); + rtl_set_rx_config_features(tp, tp->dev->features); + rtl_set_rx_mode(tp->dev); + rtl_irq_enable(tp); +} + +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + dev->mtu = new_mtu; + netdev_update_features(dev); + rtl_jumbo_config(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + rtl8125_set_eee_txidle_timer(tp); + break; + default: + break; + } + + return 0; +} + +static void rtl8169_mark_to_asic(struct RxDesc *desc) +{ + u32 eor = le32_to_cpu(desc->opts1) & RingEnd; + + desc->opts2 = 0; + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + WRITE_ONCE(desc->opts1, cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE)); +} + +static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp, + struct RxDesc *desc) +{ + struct device *d = tp_to_dev(tp); + int node = dev_to_node(d); + dma_addr_t mapping; + struct page *data; + + data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE)); + if (!data) + return NULL; + + mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + netdev_err(tp->dev, "Failed to map RX DMA!\n"); + __free_pages(data, get_order(R8169_RX_BUF_SIZE)); + return NULL; + } + + desc->addr = cpu_to_le64(mapping); + rtl8169_mark_to_asic(desc); + + return data; +} + +static void rtl8169_rx_clear(struct rtl8169_private *tp) +{ + unsigned int i; + + for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) { + dma_unmap_page(tp_to_dev(tp), + le64_to_cpu(tp->RxDescArray[i].addr), + R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + __free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE)); + tp->Rx_databuff[i] = NULL; + tp->RxDescArray[i].addr = 0; + tp->RxDescArray[i].opts1 = 0; + } +} + +static int rtl8169_rx_fill(struct rtl8169_private *tp) +{ + unsigned int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + struct page *data; + + data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); + if (!data) { + rtl8169_rx_clear(tp); + return -ENOMEM; + } + tp->Rx_databuff[i] = data; + } + + /* mark as last descriptor in the ring */ + tp->RxDescArray[NUM_RX_DESC - 1].opts1 |= cpu_to_le32(RingEnd); + + return 0; +} + +static int rtl8169_init_ring(struct rtl8169_private *tp) +{ + rtl8169_init_ring_indexes(tp); + + memset(tp->tx_skb, 0, sizeof(tp->tx_skb)); + memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff)); + + return rtl8169_rx_fill(tp); +} + +static void rtl8169_unmap_tx_skb(struct rtl8169_private *tp, unsigned int entry) +{ + struct ring_info *tx_skb = tp->tx_skb + entry; + struct TxDesc *desc = tp->TxDescArray + entry; + + dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), tx_skb->len, + DMA_TO_DEVICE); + memset(desc, 0, sizeof(*desc)); + memset(tx_skb, 0, sizeof(*tx_skb)); +} + +static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, + unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; i++) { + unsigned int entry = (start + i) % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + unsigned int len = tx_skb->len; + + if (len) { + struct sk_buff *skb = tx_skb->skb; + + rtl8169_unmap_tx_skb(tp, entry); + if (skb) + dev_consume_skb_any(skb); + } + } +} + +static void rtl8169_tx_clear(struct rtl8169_private *tp) +{ + rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); + netdev_reset_queue(tp->dev); +} + 
+static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) +{ + napi_disable(&tp->napi); + + /* Give a racing hard_start_xmit a few cycles to complete. */ + synchronize_net(); + + /* Disable interrupts */ + rtl8169_irq_mask_and_ack(tp); + + rtl_rx_close(tp); + + if (going_down && tp->dev->wol_enabled) + goto no_reset; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000); + break; + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + rtl_enable_rxdvgate(tp); + fsleep(2000); + break; + default: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + fsleep(100); + break; + } + + rtl_hw_reset(tp); +no_reset: + rtl8169_tx_clear(tp); + rtl8169_init_ring_indexes(tp); +} + +static void rtl_reset_work(struct rtl8169_private *tp) +{ + int i; + + netif_stop_queue(tp->dev); + + rtl8169_cleanup(tp, false); + + for (i = 0; i < NUM_RX_DESC; i++) + rtl8169_mark_to_asic(tp->RxDescArray + i); + + napi_enable(&tp->napi); + rtl_hw_start(tp); +} + +static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len, + void *addr, unsigned int entry, bool desc_own) +{ + struct TxDesc *txd = tp->TxDescArray + entry; + struct device *d = tp_to_dev(tp); + dma_addr_t mapping; + u32 opts1; + int ret; + + mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); + ret = dma_mapping_error(d, mapping); + if (unlikely(ret)) { + if (net_ratelimit()) + netdev_err(tp->dev, "Failed to map TX data!\n"); + return ret; + } + + txd->addr = cpu_to_le64(mapping); + txd->opts2 = cpu_to_le32(opts[1]); + + opts1 = opts[0] | len; + if (entry == NUM_TX_DESC - 1) + opts1 |= RingEnd; + if (desc_own) + opts1 |= DescOwn; + txd->opts1 = cpu_to_le32(opts1); + + tp->tx_skb[entry].len = len; + + return 0; +} + +static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, + const u32 *opts, unsigned int entry) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int cur_frag; + + for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { + const skb_frag_t *frag = info->frags + cur_frag; + void *addr = skb_frag_address(frag); + u32 len = skb_frag_size(frag); + + entry = (entry + 1) % NUM_TX_DESC; + + if (unlikely(rtl8169_tx_map(tp, opts, len, addr, entry, true))) + goto err_out; + } + + return 0; + +err_out: + rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); + return -EIO; +} + +static bool rtl_skb_is_udp(struct sk_buff *skb) +{ + int no = skb_network_offset(skb); + struct ipv6hdr *i6h, _i6h; + struct iphdr *ih, _ih; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih); + return ih && ih->protocol == IPPROTO_UDP; + case htons(ETH_P_IPV6): + i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h); + return i6h && i6h->nexthdr == IPPROTO_UDP; + default: + return false; + } +} + +#define RTL_MIN_PATCH_LEN 47 + +/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */ +static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + unsigned int padto = 0, len = skb->len; + + if (rtl_is_8125(tp) && len < 128 + 
RTL_MIN_PATCH_LEN && + rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) { + unsigned int trans_data_len = skb_tail_pointer(skb) - + skb_transport_header(skb); + + if (trans_data_len >= offsetof(struct udphdr, len) && + trans_data_len < RTL_MIN_PATCH_LEN) { + u16 dest = ntohs(udp_hdr(skb)->dest); + + /* dest is a standard PTP port */ + if (dest == 319 || dest == 320) + padto = len + RTL_MIN_PATCH_LEN - trans_data_len; + } + + if (trans_data_len < sizeof(struct udphdr)) + padto = max_t(unsigned int, padto, + len + sizeof(struct udphdr) - trans_data_len); + } + + return padto; +} + +static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + unsigned int padto; + + padto = rtl8125_quirk_udp_padto(tp, skb); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_60: + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + padto = max_t(unsigned int, padto, ETH_ZLEN); + default: + break; + } + + return padto; +} + +static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts) +{ + u32 mss = skb_shinfo(skb)->gso_size; + + if (mss) { + opts[0] |= TD_LSO; + opts[0] |= mss << TD0_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + + if (ip->protocol == IPPROTO_TCP) + opts[0] |= TD0_IP_CS | TD0_TCP_CS; + else if (ip->protocol == IPPROTO_UDP) + opts[0] |= TD0_IP_CS | TD0_UDP_CS; + else + WARN_ON_ONCE(1); + } +} + +static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, + struct sk_buff *skb, u32 *opts) +{ + u32 transport_offset = (u32)skb_transport_offset(skb); + struct skb_shared_info *shinfo = skb_shinfo(skb); + u32 mss = shinfo->gso_size; + + if (mss) { + if (shinfo->gso_type & SKB_GSO_TCPV4) { + opts[0] |= TD1_GTSENV4; + } else if (shinfo->gso_type & SKB_GSO_TCPV6) { + if (skb_cow_head(skb, 0)) + return false; + + tcp_v6_gso_csum_prep(skb); + opts[0] |= TD1_GTSENV6; + } else { + WARN_ON_ONCE(1); + } + + opts[0] |= transport_offset << GTTCPHO_SHIFT; + opts[1] |= mss << TD1_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ip_protocol; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + opts[1] |= TD1_IPv4_CS; + ip_protocol = ip_hdr(skb)->protocol; + break; + + case htons(ETH_P_IPV6): + opts[1] |= TD1_IPv6_CS; + ip_protocol = ipv6_hdr(skb)->nexthdr; + break; + + default: + ip_protocol = IPPROTO_RAW; + break; + } + + if (ip_protocol == IPPROTO_TCP) + opts[1] |= TD1_TCP_CS; + else if (ip_protocol == IPPROTO_UDP) + opts[1] |= TD1_UDP_CS; + else + WARN_ON_ONCE(1); + + opts[1] |= transport_offset << TCPHO_SHIFT; + } else { + unsigned int padto = rtl_quirk_packet_padto(tp, skb); + + /* skb_padto would free the skb on error */ + return !__skb_put_padto(skb, padto, false); + } + + return true; +} + +static bool rtl_tx_slots_avail(struct rtl8169_private *tp, + unsigned int nr_frags) +{ + unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx; + + /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ + return slots_avail > nr_frags; +} + +/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */ +static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... 
RTL_GIGA_MAC_VER_17: + return false; + default: + return true; + } +} + +static void rtl8169_doorbell(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W16(tp, TxPoll_8125, BIT(0)); + else + RTL_W8(tp, TxPoll, NPQ); +} + +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned int frags = skb_shinfo(skb)->nr_frags; + struct rtl8169_private *tp = netdev_priv(dev); + unsigned int entry = tp->cur_tx % NUM_TX_DESC; + struct TxDesc *txd_first, *txd_last; + bool stop_queue, door_bell; + u32 opts[2]; + + txd_first = tp->TxDescArray + entry; + + if (unlikely(!rtl_tx_slots_avail(tp, frags))) { + if (net_ratelimit()) + netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); + goto err_stop_0; + } + + if (unlikely(le32_to_cpu(txd_first->opts1) & DescOwn)) + goto err_stop_0; + + opts[1] = rtl8169_tx_vlan_tag(skb); + opts[0] = 0; + + if (!rtl_chip_supports_csum_v2(tp)) + rtl8169_tso_csum_v1(skb, opts); + else if (!rtl8169_tso_csum_v2(tp, skb, opts)) + goto err_dma_0; + + if (unlikely(rtl8169_tx_map(tp, opts, skb_headlen(skb), skb->data, + entry, false))) + goto err_dma_0; + + if (frags) { + if (rtl8169_xmit_frags(tp, skb, opts, entry)) + goto err_dma_1; + entry = (entry + frags) % NUM_TX_DESC; + } + + txd_last = tp->TxDescArray + entry; + txd_last->opts1 |= cpu_to_le32(LastFrag); + tp->tx_skb[entry].skb = skb; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + + door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more()); + + txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag); + + /* rtl_tx needs to see descriptor changes before updated tp->cur_tx */ + smp_wmb(); + + tp->cur_tx += frags + 1; + + stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS); + if (unlikely(stop_queue)) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); + netif_stop_queue(dev); + door_bell = true; + } + + if (door_bell) + rtl8169_doorbell(tp); + + if (unlikely(stop_queue)) { + /* Sync with rtl_tx: + * - publish queue status and cur_tx ring index (write barrier) + * - refresh dirty_tx ring index (read barrier). + * May the current thread have a pessimistic view of the ring + * status and forget to wake up queue, a racing rtl_tx thread + * can't. 
+ */ + smp_mb(); + if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) + netif_start_queue(dev); + } + + return NETDEV_TX_OK; + +err_dma_1: + rtl8169_unmap_tx_skb(tp, entry); +err_dma_0: + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + +err_stop_0: + netif_stop_queue(dev); + dev->stats.tx_dropped++; + return NETDEV_TX_BUSY; +} + +static unsigned int rtl_last_frag_len(struct sk_buff *skb) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int nr_frags = info->nr_frags; + + if (!nr_frags) + return UINT_MAX; + + return skb_frag_size(info->frags + nr_frags - 1); +} + +/* Workaround for hw issues with TSO on RTL8168evl */ +static netdev_features_t rtl8168evl_fix_tso(struct sk_buff *skb, + netdev_features_t features) +{ + /* IPv4 header has options field */ + if (vlan_get_protocol(skb) == htons(ETH_P_IP) && + ip_hdrlen(skb) > sizeof(struct iphdr)) + features &= ~NETIF_F_ALL_TSO; + + /* IPv4 TCP header has options field */ + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 && + tcp_hdrlen(skb) > sizeof(struct tcphdr)) + features &= ~NETIF_F_ALL_TSO; + + else if (rtl_last_frag_len(skb) <= 6) + features &= ~NETIF_F_ALL_TSO; + + return features; +} + +static netdev_features_t rtl8169_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + int transport_offset = skb_transport_offset(skb); + struct rtl8169_private *tp = netdev_priv(dev); + + if (skb_is_gso(skb)) { + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + features = rtl8168evl_fix_tso(skb, features); + + if (transport_offset > GTTCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_ALL_TSO; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* work around hw bug on some chip versions */ + if (skb->len < ETH_ZLEN) + features &= ~NETIF_F_CSUM_MASK; + + if (rtl_quirk_packet_padto(tp, skb)) + features &= ~NETIF_F_CSUM_MASK; + + if (transport_offset > TCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_CSUM_MASK; + } + + return vlan_features_check(skb, features); +} + +static void rtl8169_pcierr_interrupt(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + int pci_status_errs; + u16 pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + + pci_status_errs = pci_status_get_and_clear_errors(pdev); + + if (net_ratelimit()) + netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n", + pci_cmd, pci_status_errs); + /* + * The recovery sequence below admits a very elaborated explanation: + * - it seems to work; + * - I did not see what else could be done; + * - it makes iop3xx happy. + * + * Feel free to adjust to your needs. 
+ */ + if (pdev->broken_parity_status) + pci_cmd &= ~PCI_COMMAND_PARITY; + else + pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY; + + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, + int budget) +{ + unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0; + + dirty_tx = tp->dirty_tx; + smp_rmb(); + + for (tx_left = tp->cur_tx - dirty_tx; tx_left > 0; tx_left--) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + struct sk_buff *skb = tp->tx_skb[entry].skb; + u32 status; + + status = le32_to_cpu(tp->TxDescArray[entry].opts1); + if (status & DescOwn) + break; + + rtl8169_unmap_tx_skb(tp, entry); + + if (skb) { + pkts_compl++; + bytes_compl += skb->len; + napi_consume_skb(skb, budget); + } + dirty_tx++; + } + + if (tp->dirty_tx != dirty_tx) { + netdev_completed_queue(dev, pkts_compl, bytes_compl); + + rtl_inc_priv_stats(&tp->tx_stats, pkts_compl, bytes_compl); + + tp->dirty_tx = dirty_tx; + /* Sync with rtl8169_start_xmit: + * - publish dirty_tx ring index (write barrier) + * - refresh cur_tx ring index and queue status (read barrier) + * May the current thread miss the stopped queue condition, + * a racing xmit thread can only have a right view of the + * ring status. + */ + smp_mb(); + if (netif_queue_stopped(dev) && + rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) { + netif_wake_queue(dev); + } + /* + * 8168 hack: TxPoll requests are lost when the Tx packets are + * too close. Let's kick an extra TxPoll request when a burst + * of start_xmit activity is detected (if it is not detected, + * it is slow enough). -- FR + */ + if (tp->cur_tx != dirty_tx) + rtl8169_doorbell(tp); + } +} + +static inline int rtl8169_fragmented_frame(u32 status) +{ + return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); +} + +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) +{ + u32 status = opts1 & RxProtoMask; + + if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || + ((status == RxProtoUDP) && !(opts1 & UDPFail))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); +} + +static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget) +{ + unsigned int cur_rx, rx_left, count; + struct device *d = tp_to_dev(tp); + + cur_rx = tp->cur_rx; + + for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) { + unsigned int pkt_size, entry = cur_rx % NUM_RX_DESC; + struct RxDesc *desc = tp->RxDescArray + entry; + struct sk_buff *skb; + const void *rx_buf; + dma_addr_t addr; + u32 status; + + status = le32_to_cpu(desc->opts1); + if (status & DescOwn) + break; + + /* This barrier is needed to keep us from reading + * any other fields out of the Rx descriptor until + * we know the status of DescOwn + */ + dma_rmb(); + + if (unlikely(status & RxRES)) { + if (net_ratelimit()) + netdev_warn(dev, "Rx ERROR. status = %08x\n", + status); + dev->stats.rx_errors++; + if (status & (RxRWT | RxRUNT)) + dev->stats.rx_length_errors++; + if (status & RxCRC) + dev->stats.rx_crc_errors++; + + if (!(dev->features & NETIF_F_RXALL)) + goto release_descriptor; + else if (status & RxRWT || !(status & (RxRUNT | RxCRC))) + goto release_descriptor; + } + + pkt_size = status & GENMASK(13, 0); + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size -= ETH_FCS_LEN; + + /* The driver does not support incoming fragmented frames. + * They are seen as a symptom of over-mtu sized frames. 
+ */ + if (unlikely(rtl8169_fragmented_frame(status))) { + dev->stats.rx_dropped++; + dev->stats.rx_length_errors++; + goto release_descriptor; + } + + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (unlikely(!skb)) { + dev->stats.rx_dropped++; + goto release_descriptor; + } + + addr = le64_to_cpu(desc->addr); + rx_buf = page_address(tp->Rx_databuff[entry]); + + dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); + prefetch(rx_buf); + skb_copy_to_linear_data(skb, rx_buf, pkt_size); + skb->tail += pkt_size; + skb->len = pkt_size; + dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); + + rtl8169_rx_csum(skb, status); + skb->protocol = eth_type_trans(skb, dev); + + rtl8169_rx_vlan_tag(desc, skb); + + if (skb->pkt_type == PACKET_MULTICAST) + dev->stats.multicast++; + + napi_gro_receive(&tp->napi, skb); + + rtl_inc_priv_stats(&tp->rx_stats, 1, pkt_size); +release_descriptor: + rtl8169_mark_to_asic(desc); + } + + count = cur_rx - tp->cur_rx; + tp->cur_rx = cur_rx; + + return count; +} + +static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) +{ + struct rtl8169_private *tp = dev_instance; + u32 status = rtl_get_events(tp); + + if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask)) + return IRQ_NONE; + + if (unlikely(status & SYSErr)) { + rtl8169_pcierr_interrupt(tp->dev); + goto out; + } + + if (status & LinkChg) + phy_mac_interrupt(tp->phydev); + + if (unlikely(status & RxFIFOOver && + tp->mac_version == RTL_GIGA_MAC_VER_11)) { + netif_stop_queue(tp->dev); + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); + } + + rtl_irq_disable(tp); + napi_schedule(&tp->napi); +out: + rtl_ack_events(tp, status); + + return IRQ_HANDLED; +} + +static void rtl_task(struct work_struct *work) +{ + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, wk.work); + + rtnl_lock(); + + if (!netif_running(tp->dev) || + !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) + goto out_unlock; + + if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) { + rtl_reset_work(tp); + netif_wake_queue(tp->dev); + } +out_unlock: + rtnl_unlock(); +} + +static int rtl8169_poll(struct napi_struct *napi, int budget) +{ + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); + struct net_device *dev = tp->dev; + int work_done; + + work_done = rtl_rx(dev, tp, (u32) budget); + + rtl_tx(dev, tp, budget); + + if (work_done < budget && napi_complete_done(napi, work_done)) + rtl_irq_enable(tp); + + return work_done; +} + +static void r8169_phylink_handler(struct net_device *ndev) +{ + struct rtl8169_private *tp = netdev_priv(ndev); + + if (netif_carrier_ok(ndev)) { + rtl_link_chg_patch(tp); + pm_request_resume(&tp->pci_dev->dev); + } else { + pm_runtime_idle(&tp->pci_dev->dev); + } + + if (net_ratelimit()) + phy_print_status(tp->phydev); +} + +static int r8169_phy_connect(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + phy_interface_t phy_mode; + int ret; + + phy_mode = tp->supports_gmii ? 
PHY_INTERFACE_MODE_GMII : + PHY_INTERFACE_MODE_MII; + + ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler, + phy_mode); + if (ret) + return ret; + + if (!tp->supports_gmii) + phy_set_max_speed(phydev, SPEED_100); + + phy_attached_info(phydev); + + return 0; +} + +static void rtl8169_down(struct rtl8169_private *tp) +{ + /* Clear all task flags */ + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); + + phy_stop(tp->phydev); + + rtl8169_update_counters(tp); + + pci_clear_master(tp->pci_dev); + rtl_pci_commit(tp); + + rtl8169_cleanup(tp, true); + + rtl_pll_power_down(tp); +} + +static void rtl8169_up(struct rtl8169_private *tp) +{ + pci_set_master(tp->pci_dev); + rtl_pll_power_up(tp); + rtl8169_init_phy(tp); + napi_enable(&tp->napi); + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_reset_work(tp); + + phy_start(tp->phydev); +} + +static int rtl8169_close(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + pm_runtime_get_sync(&pdev->dev); + + netif_stop_queue(dev); + rtl8169_down(tp); + rtl8169_rx_clear(tp); + + cancel_work_sync(&tp->wk.work); + + free_irq(pci_irq_vector(pdev, 0), tp); + + phy_disconnect(tp->phydev); + + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + tp->RxDescArray = NULL; + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8169_netpoll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp); +} +#endif + +static int rtl_open(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + int retval = -ENOMEM; + + pm_runtime_get_sync(&pdev->dev); + + /* + * Rx and Tx descriptors needs 256 bytes alignment. + * dma_alloc_coherent provides more. 
+ */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto err_pm_runtime_put; + + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_tx_0; + + retval = rtl8169_init_ring(tp); + if (retval < 0) + goto err_free_rx_1; + + rtl_request_firmware(tp); + + retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt, + IRQF_SHARED, dev->name, tp); + if (retval < 0) + goto err_release_fw_2; + + retval = r8169_phy_connect(tp); + if (retval) + goto err_free_irq; + + rtl8169_up(tp); + rtl8169_init_counter_offsets(tp); + netif_start_queue(dev); + + pm_runtime_put_sync(&pdev->dev); +out: + return retval; + +err_free_irq: + free_irq(pci_irq_vector(pdev, 0), tp); +err_release_fw_2: + rtl_release_firmware(tp); + rtl8169_rx_clear(tp); +err_free_rx_1: + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; +err_free_tx_0: + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; +err_pm_runtime_put: + pm_runtime_put_noidle(&pdev->dev); + goto out; +} + +static void +rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + struct rtl8169_counters *counters = tp->counters; + + pm_runtime_get_noresume(&pdev->dev); + + netdev_stats_to_stats64(stats, &dev->stats); + + rtl_get_priv_stats(&tp->rx_stats, &stats->rx_packets, &stats->rx_bytes); + rtl_get_priv_stats(&tp->tx_stats, &stats->tx_packets, &stats->tx_bytes); + + /* + * Fetch additional counter values missing in stats collected by driver + * from tally counters. + */ + if (pm_runtime_active(&pdev->dev)) + rtl8169_update_counters(tp); + + /* + * Subtract values fetched during initalization. + * See rtl8169_init_counter_offsets for a description why we do that. 
+ */ + stats->tx_errors = le64_to_cpu(counters->tx_errors) - + le64_to_cpu(tp->tc_offset.tx_errors); + stats->collisions = le32_to_cpu(counters->tx_multi_collision) - + le32_to_cpu(tp->tc_offset.tx_multi_collision); + stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) - + le16_to_cpu(tp->tc_offset.tx_aborted); + stats->rx_missed_errors = le16_to_cpu(counters->rx_missed) - + le16_to_cpu(tp->tc_offset.rx_missed); + + pm_runtime_put_noidle(&pdev->dev); +} + +static void rtl8169_net_suspend(struct rtl8169_private *tp) +{ + netif_device_detach(tp->dev); + + if (netif_running(tp->dev)) + rtl8169_down(tp); +} + +#ifdef CONFIG_PM + +static int rtl8169_net_resume(struct rtl8169_private *tp) +{ + rtl_rar_set(tp, tp->dev->dev_addr); + + if (tp->TxDescArray) + rtl8169_up(tp); + + netif_device_attach(tp->dev); + + return 0; +} + +static int __maybe_unused rtl8169_suspend(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + rtnl_lock(); + rtl8169_net_suspend(tp); + if (!device_may_wakeup(tp_to_dev(tp))) + clk_disable_unprepare(tp->clk); + rtnl_unlock(); + + return 0; +} + +static int __maybe_unused rtl8169_resume(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + if (!device_may_wakeup(tp_to_dev(tp))) + clk_prepare_enable(tp->clk); + + /* Reportedly at least Asus X453MA truncates packets otherwise */ + if (tp->mac_version == RTL_GIGA_MAC_VER_37) + rtl_init_rxcfg(tp); + + return rtl8169_net_resume(tp); +} + +static int rtl8169_runtime_suspend(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + if (!tp->TxDescArray) { + netif_device_detach(tp->dev); + return 0; + } + + rtnl_lock(); + __rtl8169_set_wol(tp, WAKE_PHY); + rtl8169_net_suspend(tp); + rtnl_unlock(); + + return 0; +} + +static int rtl8169_runtime_resume(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + __rtl8169_set_wol(tp, tp->saved_wolopts); + + return rtl8169_net_resume(tp); +} + +static int rtl8169_runtime_idle(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev)) + pm_schedule_suspend(device, 10000); + + return -EBUSY; +} + +static const struct dev_pm_ops rtl8169_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume) + SET_RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume, + rtl8169_runtime_idle) +}; + +#endif /* CONFIG_PM */ + +static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) +{ + /* WoL fails with 8168b when the receiver is disabled. 
*/ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + pci_clear_master(tp->pci_dev); + + RTL_W8(tp, ChipCmd, CmdRxEnb); + rtl_pci_commit(tp); + break; + default: + break; + } +} + +static void rtl_shutdown(struct pci_dev *pdev) +{ + struct rtl8169_private *tp = pci_get_drvdata(pdev); + + rtnl_lock(); + rtl8169_net_suspend(tp); + rtnl_unlock(); + + /* Restore original MAC address */ + rtl_rar_set(tp, tp->dev->perm_addr); + + if (system_state == SYSTEM_POWER_OFF) { + if (tp->saved_wolopts) { + rtl_wol_suspend_quirk(tp); + rtl_wol_shutdown_quirk(tp); + } + + pci_wake_from_d3(pdev, true); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +static void rtl_remove_one(struct pci_dev *pdev) +{ + struct rtl8169_private *tp = pci_get_drvdata(pdev); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + unregister_netdev(tp->dev); + + if (r8168_check_dash(tp)) + rtl8168_driver_stop(tp); + + rtl_release_firmware(tp); + + /* restore original MAC address */ + rtl_rar_set(tp, tp->dev->perm_addr); +} + +static const struct net_device_ops rtl_netdev_ops = { + .ndo_open = rtl_open, + .ndo_stop = rtl8169_close, + .ndo_get_stats64 = rtl8169_get_stats64, + .ndo_start_xmit = rtl8169_start_xmit, + .ndo_features_check = rtl8169_features_check, + .ndo_tx_timeout = rtl8169_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = rtl8169_change_mtu, + .ndo_fix_features = rtl8169_fix_features, + .ndo_set_features = rtl8169_set_features, + .ndo_set_mac_address = rtl_set_mac_address, + .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_set_rx_mode = rtl_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8169_netpoll, +#endif + +}; + +static void rtl_set_irq_mask(struct rtl8169_private *tp) +{ + tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg; + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver; + else if (tp->mac_version == RTL_GIGA_MAC_VER_11) + /* special workaround needed */ + tp->irq_mask |= RxFIFOOver; + else + tp->irq_mask |= RxOverflow; +} + +static int rtl_alloc_irq(struct rtl8169_private *tp) +{ + unsigned int flags; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + rtl_unlock_config_regs(tp); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); + rtl_lock_config_regs(tp); + fallthrough; + case RTL_GIGA_MAC_VER_07 ... 
RTL_GIGA_MAC_VER_17: + flags = PCI_IRQ_LEGACY; + break; + default: + flags = PCI_IRQ_ALL_TYPES; + break; + } + + return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); +} + +static void rtl_read_mac_address(struct rtl8169_private *tp, + u8 mac_addr[ETH_ALEN]) +{ + /* Get MAC address */ + if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) { + u32 value = rtl_eri_read(tp, 0xe0); + + mac_addr[0] = (value >> 0) & 0xff; + mac_addr[1] = (value >> 8) & 0xff; + mac_addr[2] = (value >> 16) & 0xff; + mac_addr[3] = (value >> 24) & 0xff; + + value = rtl_eri_read(tp, 0xe4); + mac_addr[4] = (value >> 0) & 0xff; + mac_addr[5] = (value >> 8) & 0xff; + } else if (rtl_is_8125(tp)) { + rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP); + } +} + +DECLARE_RTL_COND(rtl_link_list_ready_cond) +{ + return RTL_R8(tp, MCU) & LINK_LIST_RDY; +} + +static void r8168g_wait_ll_share_fifo_ready(struct rtl8169_private *tp) +{ + rtl_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42); +} + +static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + return rtl_readphy(tp, phyreg); +} + +static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr, + int phyreg, u16 val) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + rtl_writephy(tp, phyreg, val); + + return 0; +} + +static int r8169_mdio_register(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + struct mii_bus *new_bus; + int ret; + + new_bus = devm_mdiobus_alloc(&pdev->dev); + if (!new_bus) + return -ENOMEM; + + new_bus->name = "r8169"; + new_bus->priv = tp; + new_bus->parent = &pdev->dev; + new_bus->irq[0] = PHY_IGNORE_INTERRUPT; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x", + pci_domain_nr(pdev->bus), pci_dev_id(pdev)); + + new_bus->read = r8169_mdio_read_reg; + new_bus->write = r8169_mdio_write_reg; + + ret = devm_mdiobus_register(&pdev->dev, new_bus); + if (ret) + return ret; + + tp->phydev = mdiobus_get_phy(new_bus, 0); + if (!tp->phydev) { + return -ENODEV; + } else if (!tp->phydev->drv) { + /* Most chip versions fail with the genphy driver. + * Therefore ensure that the dedicated PHY driver is loaded. 
+ */ + dev_err(&pdev->dev, "no dedicated PHY driver found for PHY ID 0x%08x, maybe realtek.ko needs to be added to initramfs?\n", + tp->phydev->phy_id); + return -EUNATCH; + } + + /* PHY will be woken up in rtl_open() */ + phy_suspend(tp->phydev); + + return 0; +} + +static void rtl_hw_init_8168g(struct rtl8169_private *tp) +{ + rtl_enable_rxdvgate(tp); + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + r8168g_wait_ll_share_fifo_ready(tp); + + r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15)); + r8168g_wait_ll_share_fifo_ready(tp); +} + +static void rtl_hw_init_8125(struct rtl8169_private *tp) +{ + rtl_enable_rxdvgate(tp); + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + r8168g_wait_ll_share_fifo_ready(tp); + + r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0); + r8168_mac_ocp_write(tp, 0xc0a6, 0x0150); + r8168_mac_ocp_write(tp, 0xc01e, 0x5555); + r8168g_wait_ll_share_fifo_ready(tp); +} + +static void rtl_hw_initialize(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52: + rtl8168ep_stop_cmac(tp); + fallthrough; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: + rtl_hw_init_8168g(tp); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63: + rtl_hw_init_8125(tp); + break; + default: + break; + } +} + +static int rtl_jumbo_max(struct rtl8169_private *tp) +{ + /* Non-GBit versions don't support jumbo frames */ + if (!tp->supports_gmii) + return 0; + + switch (tp->mac_version) { + /* RTL8169 */ + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + return JUMBO_7K; + /* RTL8168b */ + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + return JUMBO_4K; + /* RTL8168c */ + case RTL_GIGA_MAC_VER_18 ... 
RTL_GIGA_MAC_VER_24: + return JUMBO_6K; + default: + return JUMBO_9K; + } +} + +static void rtl_disable_clk(void *data) +{ + clk_disable_unprepare(data); +} + +static int rtl_get_ether_clk(struct rtl8169_private *tp) +{ + struct device *d = tp_to_dev(tp); + struct clk *clk; + int rc; + + clk = devm_clk_get(d, "ether_clk"); + if (IS_ERR(clk)) { + rc = PTR_ERR(clk); + if (rc == -ENOENT) + /* clk-core allows NULL (for suspend / resume) */ + rc = 0; + else if (rc != -EPROBE_DEFER) + dev_err(d, "failed to get clk: %d\n", rc); + } else { + tp->clk = clk; + rc = clk_prepare_enable(clk); + if (rc) + dev_err(d, "failed to enable clk: %d\n", rc); + else + rc = devm_add_action_or_reset(d, rtl_disable_clk, clk); + } + + return rc; +} + +static void rtl_init_mac_address(struct rtl8169_private *tp) +{ + struct net_device *dev = tp->dev; + u8 *mac_addr = dev->dev_addr; + int rc; + + rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr); + if (!rc) + goto done; + + rtl_read_mac_address(tp, mac_addr); + if (is_valid_ether_addr(mac_addr)) + goto done; + + rtl_read_mac_from_reg(tp, mac_addr, MAC0); + if (is_valid_ether_addr(mac_addr)) + goto done; + + eth_hw_addr_random(dev); + dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n"); +done: + rtl_rar_set(tp, mac_addr); +} + +static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct rtl8169_private *tp; + int jumbo_max, region, rc; + enum mac_version chipset; + struct net_device *dev; + u16 xid; + + dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp)); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &rtl_netdev_ops; + tp = netdev_priv(dev); + tp->dev = dev; + tp->pci_dev = pdev; + tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1; + tp->eee_adv = -1; + tp->ocp_base = OCP_STD_PHY_BASE; + + /* Get the *optional* external "ether_clk" used on some boards */ + rc = rtl_get_ether_clk(tp); + if (rc) + return rc; + + /* Disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users. + */ + rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | + PCIE_LINK_STATE_L1); + tp->aspm_manageable = !rc; + + /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ + rc = pcim_enable_device(pdev); + if (rc < 0) { + dev_err(&pdev->dev, "enable failure\n"); + return rc; + } + + if (pcim_set_mwi(pdev) < 0) + dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n"); + + /* use first MMIO region */ + region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1; + if (region < 0) { + dev_err(&pdev->dev, "no MMIO resource found\n"); + return -ENODEV; + } + + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { + dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n"); + return -ENODEV; + } + + rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME); + if (rc < 0) { + dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); + return rc; + } + + tp->mmio_addr = pcim_iomap_table(pdev)[region]; + + xid = (RTL_R32(tp, TxConfig) >> 20) & 0xfcf; + + /* Identify chip attached to board */ + chipset = rtl8169_get_mac_version(xid, tp->supports_gmii); + if (chipset == RTL_GIGA_MAC_NONE) { + dev_err(&pdev->dev, "unknown chip XID %03x\n", xid); + return -ENODEV; + } + + tp->mac_version = chipset; + + tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK; + + if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) + dev->features |= NETIF_F_HIGHDMA; + + rtl_init_rxcfg(tp); + + rtl8169_irq_mask_and_ack(tp); + + rtl_hw_initialize(tp); + + rtl_hw_reset(tp); + + rc = rtl_alloc_irq(tp); + if (rc < 0) { + dev_err(&pdev->dev, "Can't allocate interrupt\n"); + return rc; + } + + INIT_WORK(&tp->wk.work, rtl_task); + u64_stats_init(&tp->rx_stats.syncp); + u64_stats_init(&tp->tx_stats.syncp); + + rtl_init_mac_address(tp); + + dev->ethtool_ops = &rtl8169_ethtool_ops; + + netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT); + + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. + */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + /* Disallow toggling */ + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (rtl_chip_supports_csum_v2(tp)) + dev->hw_features |= NETIF_F_IPV6_CSUM; + + dev->features |= dev->hw_features; + + /* There has been a number of reports that using SG/TSO results in + * tx timeouts. However for a lot of people SG/TSO works fine. + * Therefore disable both features by default, but allow users to + * enable them. Use at own risk! 
+ */ + if (rtl_chip_supports_csum_v2(tp)) { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; + dev->gso_max_size = RTL_GSO_MAX_SIZE_V2; + dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2; + } else { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO; + dev->gso_max_size = RTL_GSO_MAX_SIZE_V1; + dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1; + } + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + /* configure chip for default features */ + rtl8169_set_features(dev, dev->features); + + jumbo_max = rtl_jumbo_max(tp); + if (jumbo_max) + dev->max_mtu = jumbo_max; + + rtl_set_irq_mask(tp); + + tp->fw_name = rtl_chip_infos[chipset].fw_name; + + tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), + &tp->counters_phys_addr, + GFP_KERNEL); + if (!tp->counters) + return -ENOMEM; + + pci_set_drvdata(pdev, tp); + + rc = r8169_mdio_register(tp); + if (rc) + return rc; + + /* chip gets powered up in rtl_open() */ + rtl_pll_power_down(tp); + + rc = register_netdev(dev); + if (rc) + return rc; + + netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n", + rtl_chip_infos[chipset].name, dev->dev_addr, xid, + pci_irq_vector(pdev, 0)); + + if (jumbo_max) + netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n", + jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ? + "ok" : "ko"); + + if (r8168_check_dash(tp)) { + netdev_info(dev, "DASH enabled\n"); + rtl8168_driver_start(tp); + } + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +static struct pci_driver rtl8169_pci_driver = { + .name = MODULENAME, + .id_table = rtl8169_pci_tbl, + .probe = rtl_init_one, + .remove = rtl_remove_one, + .shutdown = rtl_shutdown, +#ifdef CONFIG_PM + .driver.pm = &rtl8169_pm_ops, +#endif +}; + +module_pci_driver(rtl8169_pci_driver); diff --git a/devices/r8169/r8169_main-5.14-ethercat.c b/devices/r8169/r8169_main-5.14-ethercat.c new file mode 100644 index 00000000..58ed3d95 --- /dev/null +++ b/devices/r8169/r8169_main-5.14-ethercat.c @@ -0,0 +1,5582 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * r8169.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "r8169-5.14-ethercat.h" +#include "r8169_firmware-5.14-ethercat.h" + + +#include "../ecdev.h" + + +#ifdef CONFIG_SUSE_KERNEL +#include +#else +# ifndef SUSE_VERSION +# define SUSE_VERSION 0 +# endif +# ifndef SUSE_PATCHLEVEL +# define SUSE_PATCHLEVEL 0 +# endif +#endif + + + +#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" +#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" +#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" +#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" +#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" +#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" +#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" +#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" +#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" +#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" +#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw" +#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw" +#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" +#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" +#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" +#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw" +#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" +#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw" +#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" +#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" +#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw" +#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw" + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. */ +#define MC_FILTER_LIMIT 32 + +#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ +#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ + +#define R8169_REGS_SIZE 256 +#define R8169_RX_BUF_SIZE (SZ_16K - 1) +#define NUM_TX_DESC 256 /* Number of Tx descriptor registers */ +#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */ +#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) +#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + +#define OCP_STD_PHY_BASE 0xa400 + +#define RTL_CFG_NO_GBIT 1 + +/* write/read MMIO register */ +#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) +#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) +#define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg)) +#define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg)) +#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg)) +#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg)) + +#define JUMBO_4K (4 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) + +static const struct { + const char *name; + const char *fw_name; +} rtl_chip_infos[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_02] = {"RTL8169s" }, + [RTL_GIGA_MAC_VER_03] = {"RTL8110s" }, + [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" }, + [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" }, + [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" }, + /* PCI-E devices. 
*/ + [RTL_GIGA_MAC_VER_07] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_08] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" }, + [RTL_GIGA_MAC_VER_10] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_13] = {"RTL8101e/RTL8100e" }, + [RTL_GIGA_MAC_VER_14] = {"RTL8401" }, + [RTL_GIGA_MAC_VER_16] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1}, + [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2}, + [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1}, + [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2}, + [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3}, + [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1}, + [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2}, + [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 }, + [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 }, + [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1}, + [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2}, + [RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" }, + [RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3}, + [RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2}, + [RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 }, + [RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1}, + [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2}, + [RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1}, + [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2}, + [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3}, + [RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", }, + [RTL_GIGA_MAC_VER_60] = {"RTL8125A" }, + [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3}, + /* reserve 62 for CFG_METHOD_4 in the vendor driver */ + [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2}, +}; + +static const struct pci_device_id rtl8169_pci_tbl[] = { + { PCI_VDEVICE(REALTEK, 0x2502) }, + { PCI_VDEVICE(REALTEK, 0x2600) }, + { PCI_VDEVICE(REALTEK, 0x8129) }, + { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT }, + { PCI_VDEVICE(REALTEK, 0x8161) }, + { PCI_VDEVICE(REALTEK, 0x8162) }, + { PCI_VDEVICE(REALTEK, 0x8167) }, + { PCI_VDEVICE(REALTEK, 0x8168) }, + { PCI_VDEVICE(NCUBE, 0x8168) }, + { PCI_VDEVICE(REALTEK, 0x8169) }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 }, + { PCI_VDEVICE(DLINK, 0x4300) }, + { PCI_VDEVICE(DLINK, 0x4302) }, + { PCI_VDEVICE(AT, 0xc107) }, + { PCI_VDEVICE(USR, 0x0116) }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 }, + { 0x0001, 0x8168, PCI_ANY_ID, 0x2410 }, + { PCI_VDEVICE(REALTEK, 0x8125) }, + { PCI_VDEVICE(REALTEK, 0x3000) }, + {} +}; + +enum rtl_registers { + MAC0 = 0, /* 
Ethernet hardware address. */ + MAC4 = 4, + MAR0 = 8, /* Multicast filter. */ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3c, + IntrStatus = 0x3e, + + TxConfig = 0x40, +#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */ +#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */ + + RxConfig = 0x44, +#define RX128_INT_EN (1 << 15) /* 8111c and later */ +#define RX_MULTI_EN (1 << 14) /* 8111c only */ +#define RXCFG_FIFO_SHIFT 13 + /* No threshold before first PCI xfer */ +#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) +#define RX_EARLY_OFF (1 << 11) +#define RXCFG_DMA_SHIFT 8 + /* Unlimited maximum PCI burst. */ +#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) + + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, +#define PME_SIGNAL (1 << 5) /* 8168c and later */ + + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + PHYAR = 0x60, + PHYstatus = 0x6c, + RxMaxSize = 0xda, + CPlusCmd = 0xe0, + IntrMitigate = 0xe2, + +#define RTL_COALESCE_TX_USECS GENMASK(15, 12) +#define RTL_COALESCE_TX_FRAMES GENMASK(11, 8) +#define RTL_COALESCE_RX_USECS GENMASK(7, 4) +#define RTL_COALESCE_RX_FRAMES GENMASK(3, 0) + +#define RTL_COALESCE_T_MAX 0x0fU +#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_T_MAX * 4) + + RxDescAddrLow = 0xe4, + RxDescAddrHigh = 0xe8, + EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ + +#define NoEarlyTx 0x3f /* Max value : no early transmit. */ + + MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */ + +#define TxPacketMax (8064 >> 7) +#define EarlySize 0x27 + + FuncEvent = 0xf0, + FuncEventMask = 0xf4, + FuncPresetState = 0xf8, + IBCR0 = 0xf8, + IBCR2 = 0xf9, + IBIMR0 = 0xfa, + IBISR0 = 0xfb, + FuncForceEvent = 0xfc, +}; + +enum rtl8168_8101_registers { + CSIDR = 0x64, + CSIAR = 0x68, +#define CSIAR_FLAG 0x80000000 +#define CSIAR_WRITE_CMD 0x80000000 +#define CSIAR_BYTE_ENABLE 0x0000f000 +#define CSIAR_ADDR_MASK 0x00000fff + PMCH = 0x6f, +#define D3COLD_NO_PLL_DOWN BIT(7) +#define D3HOT_NO_PLL_DOWN BIT(6) +#define D3_NO_PLL_DOWN (BIT(7) | BIT(6)) + EPHYAR = 0x80, +#define EPHYAR_FLAG 0x80000000 +#define EPHYAR_WRITE_CMD 0x80000000 +#define EPHYAR_REG_MASK 0x1f +#define EPHYAR_REG_SHIFT 16 +#define EPHYAR_DATA_MASK 0xffff + DLLPR = 0xd0, +#define PFM_EN (1 << 6) +#define TX_10M_PS_EN (1 << 7) + DBG_REG = 0xd1, +#define FIX_NAK_1 (1 << 4) +#define FIX_NAK_2 (1 << 3) + TWSI = 0xd2, + MCU = 0xd3, +#define NOW_IS_OOB (1 << 7) +#define TX_EMPTY (1 << 5) +#define RX_EMPTY (1 << 4) +#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY) +#define EN_NDP (1 << 3) +#define EN_OOB_RESET (1 << 2) +#define LINK_LIST_RDY (1 << 1) + EFUSEAR = 0xdc, +#define EFUSEAR_FLAG 0x80000000 +#define EFUSEAR_WRITE_CMD 0x80000000 +#define EFUSEAR_READ_CMD 0x00000000 +#define EFUSEAR_REG_MASK 0x03ff +#define EFUSEAR_REG_SHIFT 8 +#define EFUSEAR_DATA_MASK 0xff + MISC_1 = 0xf2, +#define PFM_D3COLD_EN (1 << 6) +}; + +enum rtl8168_registers { + LED_FREQ = 0x1a, + EEE_LED = 0x1b, + ERIDR = 0x70, + ERIAR = 0x74, +#define ERIAR_FLAG 0x80000000 +#define ERIAR_WRITE_CMD 0x80000000 +#define ERIAR_READ_CMD 0x00000000 +#define ERIAR_ADDR_BYTE_ALIGN 4 +#define ERIAR_TYPE_SHIFT 16 +#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) +#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) +#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_OOB (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_MASK_SHIFT 12 +#define ERIAR_MASK_0001 (0x1 << 
ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) + EPHY_RXER_NUM = 0x7c, + OCPDR = 0xb0, /* OCP GPHY access */ +#define OCPDR_WRITE_CMD 0x80000000 +#define OCPDR_READ_CMD 0x00000000 +#define OCPDR_REG_MASK 0x7f +#define OCPDR_GPHY_REG_SHIFT 16 +#define OCPDR_DATA_MASK 0xffff + OCPAR = 0xb4, +#define OCPAR_FLAG 0x80000000 +#define OCPAR_GPHY_WRITE_CMD 0x8000f060 +#define OCPAR_GPHY_READ_CMD 0x0000f060 + GPHY_OCP = 0xb8, + RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ + MISC = 0xf0, /* 8168e only. */ +#define TXPLA_RST (1 << 29) +#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */ +#define PWM_EN (1 << 22) +#define RXDV_GATED_EN (1 << 19) +#define EARLY_TALLY_EN (1 << 16) +}; + +enum rtl8125_registers { + IntrMask_8125 = 0x38, + IntrStatus_8125 = 0x3c, + TxPoll_8125 = 0x90, + MAC0_BKP = 0x19e0, + EEE_TXIDLE_TIMER_8125 = 0x6048, +}; + +#define RX_VLAN_INNER_8125 BIT(22) +#define RX_VLAN_OUTER_8125 BIT(23) +#define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125) + +#define RX_FETCH_DFLT_8125 (8 << 27) + +enum rtl_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxOverflow = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* TXPoll register p.5 */ + HPQ = 0x80, /* Poll cmd on the high prio queue */ + NPQ = 0x40, /* Poll cmd on the low prio queue */ + FSWInt = 0x01, /* Forced software interrupt */ + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xc0, + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, +#define RX_CONFIG_ACCEPT_ERR_MASK 0x30 + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +#define RX_CONFIG_ACCEPT_OK_MASK 0x0f +#define RX_CONFIG_ACCEPT_MASK 0x3f + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + /* Config1 register p.24 */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register p. 25 */ + ClkReqEn = (1 << 7), /* Clock Request Enable */ + MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ + PCI_Clock_66MHz = 0x01, + PCI_Clock_33MHz = 0x00, + + /* Config3 register p.25 */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Rdy_to_L23 = (1 << 1), /* L23 Enable */ + Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* 8168 only. 
Reserved in the 8168b */ + + /* Config5 register p.27 */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + Spi_en = (1 << 3), + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + ASPM_en = (1 << 0), /* ASPM enable */ + + /* CPlusCmd p.31 */ + EnableBist = (1 << 15), // 8168 8101 + Mac_dbgo_oe = (1 << 14), // 8168 8101 + EnAnaPLL = (1 << 14), // 8169 + Normal_mode = (1 << 13), // unused + Force_half_dup = (1 << 12), // 8168 8101 + Force_rxflow_en = (1 << 11), // 8168 8101 + Force_txflow_en = (1 << 10), // 8168 8101 + Cxpl_dbg_sel = (1 << 9), // 8168 8101 + ASF = (1 << 8), // 8168 8101 + PktCntrDisable = (1 << 7), // 8168 8101 + Mac_dbgo_sel = 0x001c, // 8168 + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + PCIDAC = (1 << 4), + PCIMulRW = (1 << 3), +#define INTT_MASK GENMASK(1, 0) +#define CPCMD_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK) + + /* rtl8169_PHYstatus */ + TBI_Enable = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* ResetCounterCommand */ + CounterReset = 0x1, + + /* DumpCounterCommand */ + CounterDump = 0x8, + + /* magic enable v2 */ + MagicPacket_v2 = (1 << 16), /* Wake up when receives a Magic Packet */ +}; + +enum rtl_desc_bit { + /* First doubleword. */ + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ +}; + +/* Generic case. */ +enum rtl_tx_desc_bit { + /* First doubleword. */ + TD_LSO = (1 << 27), /* Large Send Offload */ +#define TD_MSS_MAX 0x07ffu /* MSS value */ + + /* Second doubleword. */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ +}; + +/* 8169, 8168b and 810x except 8102e. */ +enum rtl_tx_desc_bit_0 { + /* First doubleword. */ +#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */ + TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */ + TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */ + TD0_IP_CS = (1 << 18), /* Calculate IP checksum */ +}; + +/* 8102e, 8168c and beyond. */ +enum rtl_tx_desc_bit_1 { + /* First doubleword. */ + TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */ + TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */ +#define GTTCPHO_SHIFT 18 +#define GTTCPHO_MAX 0x7f + + /* Second doubleword. 
*/ +#define TCPHO_SHIFT 18 +#define TCPHO_MAX 0x3ff +#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ + TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */ + TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */ + TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */ + TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */ +}; + +enum rtl_rx_desc_bit { + /* Rx private */ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 0/2 */ + +#define RxProtoUDP (PID1) +#define RxProtoTCP (PID0) +#define RxProtoIP (PID1 | PID0) +#define RxProtoMask RxProtoIP + + IPFail = (1 << 16), /* IP checksum failed */ + UDPFail = (1 << 15), /* UDP/IP checksum failed */ + TCPFail = (1 << 14), /* TCP/IP checksum failed */ + +#define RxCSFailMask (IPFail | UDPFail | TCPFail) + + RxVlanTag = (1 << 16), /* VLAN tag available */ +}; + +#define RTL_GSO_MAX_SIZE_V1 32000 +#define RTL_GSO_MAX_SEGS_V1 24 +#define RTL_GSO_MAX_SIZE_V2 64000 +#define RTL_GSO_MAX_SEGS_V2 64 + +struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; +}; + +struct rtl8169_tc_offsets { + bool inited; + __le64 tx_errors; + __le32 tx_multi_collision; + __le16 tx_aborted; + __le16 rx_missed; +}; + +enum rtl_flag { + RTL_FLAG_TASK_ENABLED = 0, + RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_MAX +}; + +enum rtl_dash_type { + RTL_DASH_NONE, + RTL_DASH_DP, + RTL_DASH_EP, +}; + +struct rtl8169_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; + struct net_device *dev; + struct phy_device *phydev; + struct napi_struct napi; + enum mac_version mac_version; + enum rtl_dash_type dash_type; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. 
*/ + u32 dirty_tx; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + u16 cp_cmd; + u32 irq_mask; + struct clk *clk; + + struct { + DECLARE_BITMAP(flags, RTL_FLAG_MAX); + struct work_struct work; + } wk; + + unsigned supports_gmii:1; + unsigned aspm_manageable:1; + dma_addr_t counters_phys_addr; + struct rtl8169_counters *counters; + struct rtl8169_tc_offsets tc_offset; + u32 saved_wolopts; + int eee_adv; + + const char *fw_name; + struct rtl_fw *rtl_fw; + + u32 ocp_base; + + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +typedef void (*rtl_generic_fct)(struct rtl8169_private *tp); + +MODULE_AUTHOR("Realtek and the Linux r8169 crew "); +MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver (EtherCAT enabled)"); +MODULE_SOFTDEP("pre: realtek"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE_8168D_1); +MODULE_FIRMWARE(FIRMWARE_8168D_2); +MODULE_FIRMWARE(FIRMWARE_8168E_1); +MODULE_FIRMWARE(FIRMWARE_8168E_2); +MODULE_FIRMWARE(FIRMWARE_8168E_3); +MODULE_FIRMWARE(FIRMWARE_8105E_1); +MODULE_FIRMWARE(FIRMWARE_8168F_1); +MODULE_FIRMWARE(FIRMWARE_8168F_2); +MODULE_FIRMWARE(FIRMWARE_8402_1); +MODULE_FIRMWARE(FIRMWARE_8411_1); +MODULE_FIRMWARE(FIRMWARE_8411_2); +MODULE_FIRMWARE(FIRMWARE_8106E_1); +MODULE_FIRMWARE(FIRMWARE_8106E_2); +MODULE_FIRMWARE(FIRMWARE_8168G_2); +MODULE_FIRMWARE(FIRMWARE_8168G_3); +MODULE_FIRMWARE(FIRMWARE_8168H_1); +MODULE_FIRMWARE(FIRMWARE_8168H_2); +MODULE_FIRMWARE(FIRMWARE_8168FP_3); +MODULE_FIRMWARE(FIRMWARE_8107E_1); +MODULE_FIRMWARE(FIRMWARE_8107E_2); +MODULE_FIRMWARE(FIRMWARE_8125A_3); +MODULE_FIRMWARE(FIRMWARE_8125B_2); + +static inline struct device *tp_to_dev(struct rtl8169_private *tp) +{ + return &tp->pci_dev->dev; +} + +static void rtl_lock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Lock); +} + +static void rtl_unlock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); +} + +static void rtl_pci_commit(struct rtl8169_private *tp) +{ + /* Read an arbitrary register to commit a preceding PCI write */ + RTL_R8(tp, ChipCmd); +} + +static bool rtl_is_8125(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_60; +} + +static bool rtl_is_8168evl_up(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_39 && + tp->mac_version <= RTL_GIGA_MAC_VER_53; +} + +static bool rtl_supports_eee(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_37 && + tp->mac_version != RTL_GIGA_MAC_VER_39; +} + +static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + mac[i] = RTL_R8(tp, reg + i); +} + +struct rtl_cond { + bool (*check)(struct rtl8169_private *); + const char *msg; +}; + +static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c, + unsigned long usecs, int n, bool high) +{ + int i; + + for (i = 0; i < n; i++) { + if (c->check(tp) == high) + return true; + fsleep(usecs); + } + + if (net_ratelimit()) + netdev_err(tp->dev, "%s == %d (loop: %d, delay: %lu).\n", + c->msg, !high, n, usecs); + return false; +} + +static bool rtl_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + 
unsigned long d, int n) +{ + return rtl_loop_wait(tp, c, d, n, true); +} + +static bool rtl_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned long d, int n) +{ + return rtl_loop_wait(tp, c, d, n, false); +} + +#define DECLARE_RTL_COND(name) \ +static bool name ## _check(struct rtl8169_private *); \ + \ +static const struct rtl_cond name = { \ + .check = name ## _check, \ + .msg = #name \ +}; \ + \ +static bool name ## _check(struct rtl8169_private *tp) + +static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type) +{ + /* based on RTL8168FP_OOBMAC_BASE in vendor driver */ + if (type == ERIAR_OOB && + (tp->mac_version == RTL_GIGA_MAC_VER_52 || + tp->mac_version == RTL_GIGA_MAC_VER_53)) + *cmd |= 0xf70 << 18; +} + +DECLARE_RTL_COND(rtl_eriar_cond) +{ + return RTL_R32(tp, ERIAR) & ERIAR_FLAG; +} + +static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val, int type) +{ + u32 cmd = ERIAR_WRITE_CMD | type | mask | addr; + + if (WARN(addr & 3 || !mask, "addr: 0x%x, mask: 0x%08x\n", addr, mask)) + return; + + RTL_W32(tp, ERIDR, val); + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); + + rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); +} + +static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val) +{ + _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC); +} + +static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type) +{ + u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr; + + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); + + return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? + RTL_R32(tp, ERIDR) : ~0; +} + +static u32 rtl_eri_read(struct rtl8169_private *tp, int addr) +{ + return _rtl_eri_read(tp, addr, ERIAR_EXGMAC); +} + +static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m) +{ + u32 val = rtl_eri_read(tp, addr); + + rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p); +} + +static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p) +{ + rtl_w0w1_eri(tp, addr, p, 0); +} + +static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m) +{ + rtl_w0w1_eri(tp, addr, 0, m); +} + +static bool rtl_ocp_reg_failure(u32 reg) +{ + return WARN_ONCE(reg & 0xffff0001, "Invalid ocp reg %x!\n", reg); +} + +DECLARE_RTL_COND(rtl_ocp_gphy_cond) +{ + return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG; +} + +static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(reg)) + return; + + RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); + + rtl_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10); +} + +static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(reg)) + return 0; + + RTL_W32(tp, GPHY_OCP, reg << 15); + + return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ? 
+ (RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT; +} + +static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(reg)) + return; + + RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data); +} + +static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(reg)) + return 0; + + RTL_W32(tp, OCPDR, reg << 15); + + return RTL_R32(tp, OCPDR); +} + +static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask, + u16 set) +{ + u16 data = r8168_mac_ocp_read(tp, reg); + + r8168_mac_ocp_write(tp, reg, (data & ~mask) | set); +} + +/* Work around a hw issue with RTL8168g PHY, the quirk disables + * PHY MCU interrupts before PHY power-down. + */ +static void rtl8168g_phy_suspend_quirk(struct rtl8169_private *tp, int value) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_49: + if (value & BMCR_RESET || !(value & BMCR_PDOWN)) + rtl_eri_set_bits(tp, 0x1a8, 0xfc000000); + else + rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000); + break; + default: + break; + } +}; + +static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE; + return; + } + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + if (tp->ocp_base == OCP_STD_PHY_BASE && reg == MII_BMCR) + rtl8168g_phy_suspend_quirk(tp, value); + + r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value); +} + +static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) +{ + if (reg == 0x1f) + return tp->ocp_base == OCP_STD_PHY_BASE ? 0 : tp->ocp_base >> 4; + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2); +} + +static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value << 4; + return; + } + + r8168_mac_ocp_write(tp, tp->ocp_base + reg, value); +} + +static int mac_mcu_read(struct rtl8169_private *tp, int reg) +{ + return r8168_mac_ocp_read(tp, tp->ocp_base + reg); +} + +DECLARE_RTL_COND(rtl_phyar_cond) +{ + return RTL_R32(tp, PHYAR) & 0x80000000; +} + +static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); + + rtl_loop_wait_low(tp, &rtl_phyar_cond, 25, 20); + /* + * According to hardware specs a 20us delay is required after write + * complete indication, but before sending next command. + */ + udelay(20); +} + +static int r8169_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16); + + value = rtl_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ? + RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT; + + /* + * According to hardware specs a 20us delay is required after read + * complete indication, but before sending next command. 
+ */ + udelay(20); + + return value; +} + +DECLARE_RTL_COND(rtl_ocpar_cond) +{ + return RTL_R32(tp, OCPAR) & OCPAR_FLAG; +} + +static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) +{ + RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); + RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); +} + +static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_1_mdio_access(tp, reg, + OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK)); +} + +static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) +{ + r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); + + mdelay(1); + RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? + RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT; +} + +#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 + +static void r8168dp_2_mdio_start(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_stop(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_2_mdio_start(tp); + + r8169_mdio_write(tp, reg, value); + + r8168dp_2_mdio_stop(tp); +} + +static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + /* Work around issue with chip reporting wrong PHY ID */ + if (reg == MII_PHYSID2) + return 0xc912; + + r8168dp_2_mdio_start(tp); + + value = r8169_mdio_read(tp, reg); + + r8168dp_2_mdio_stop(tp); + + return value; +} + +static void rtl_writephy(struct rtl8169_private *tp, int location, int val) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + r8168dp_1_mdio_write(tp, location, val); + break; + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + r8168dp_2_mdio_write(tp, location, val); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + r8168g_mdio_write(tp, location, val); + break; + default: + r8169_mdio_write(tp, location, val); + break; + } +} + +static int rtl_readphy(struct rtl8169_private *tp, int location) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + return r8168dp_1_mdio_read(tp, location); + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_2_mdio_read(tp, location); + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + return r8168g_mdio_read(tp, location); + default: + return r8169_mdio_read(tp, location); + } +} + +DECLARE_RTL_COND(rtl_ephyar_cond) +{ + return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; +} + +static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) +{ + RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + rtl_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100); + + udelay(10); +} + +static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + return rtl_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ? + RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0; +} + +static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg) +{ + RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff)); + return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? 
+ RTL_R32(tp, OCPDR) : ~0; +} + +static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u16 reg) +{ + return _rtl_eri_read(tp, reg, ERIAR_OOB); +} + +static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + RTL_W32(tp, OCPDR, data); + RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + rtl_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); +} + +static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + _rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, + data, ERIAR_OOB); +} + +static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd) +{ + rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd); + + r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001); +} + +#define OOB_CMD_RESET 0x00 +#define OOB_CMD_DRIVER_START 0x05 +#define OOB_CMD_DRIVER_STOP 0x06 + +static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) +{ + return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10; +} + +DECLARE_RTL_COND(rtl_dp_ocp_read_cond) +{ + u16 reg; + + reg = rtl8168_get_ocp_reg(tp); + + return r8168dp_ocp_read(tp, reg) & 0x00000800; +} + +DECLARE_RTL_COND(rtl_ep_ocp_read_cond) +{ + return r8168ep_ocp_read(tp, 0x124) & 0x00000001; +} + +DECLARE_RTL_COND(rtl_ocp_tx_cond) +{ + return RTL_R8(tp, IBISR0) & 0x20; +} + +static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) +{ + RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01); + rtl_loop_wait_high(tp, &rtl_ocp_tx_cond, 50000, 2000); + RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20); + RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01); +} + +static void rtl8168dp_driver_start(struct rtl8169_private *tp) +{ + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START); + rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10); +} + +static void rtl8168ep_driver_start(struct rtl8169_private *tp) +{ + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); + rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10); +} + +static void rtl8168_driver_start(struct rtl8169_private *tp) +{ + if (tp->dash_type == RTL_DASH_DP) + rtl8168dp_driver_start(tp); + else + rtl8168ep_driver_start(tp); +} + +static void rtl8168dp_driver_stop(struct rtl8169_private *tp) +{ + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP); + rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10); +} + +static void rtl8168ep_driver_stop(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); + rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10); +} + +static void rtl8168_driver_stop(struct rtl8169_private *tp) +{ + if (tp->dash_type == RTL_DASH_DP) + rtl8168dp_driver_stop(tp); + else + rtl8168ep_driver_stop(tp); +} + +static bool r8168dp_check_dash(struct rtl8169_private *tp) +{ + u16 reg = rtl8168_get_ocp_reg(tp); + + return r8168dp_ocp_read(tp, reg) & BIT(15); +} + +static bool r8168ep_check_dash(struct rtl8169_private *tp) +{ + return r8168ep_ocp_read(tp, 0x128) & BIT(0); +} + +static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE; + case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_53: + return r8168ep_check_dash(tp) ? 
RTL_DASH_EP : RTL_DASH_NONE; + default: + return RTL_DASH_NONE; + } +} + +static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: + if (enable) + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN); + else + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | D3_NO_PLL_DOWN); + break; + default: + break; + } +} + +static void rtl_reset_packet_filter(struct rtl8169_private *tp) +{ + rtl_eri_clear_bits(tp, 0xdc, BIT(0)); + rtl_eri_set_bits(tp, 0xdc, BIT(0)); +} + +DECLARE_RTL_COND(rtl_efusear_cond) +{ + return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG; +} + +u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + return rtl_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ? + RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0; +} + +static u32 rtl_get_events(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + return RTL_R32(tp, IntrStatus_8125); + else + return RTL_R16(tp, IntrStatus); +} + +static void rtl_ack_events(struct rtl8169_private *tp, u32 bits) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrStatus_8125, bits); + else + RTL_W16(tp, IntrStatus, bits); +} + +static void rtl_irq_disable(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, 0); + else + RTL_W16(tp, IntrMask, 0); +} + +static void rtl_irq_enable(struct rtl8169_private *tp) +{ + if (tp->ecdev) + return; + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, tp->irq_mask); + else + RTL_W16(tp, IntrMask, tp->irq_mask); +} + +static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) +{ + rtl_irq_disable(tp); + rtl_ack_events(tp, 0xffffffff); + rtl_pci_commit(tp); +} + +static void rtl_link_chg_patch(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + + if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else if (phydev->speed == SPEED_100) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); + } + rtl_reset_packet_filter(tp); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); + } + } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { + if (phydev->speed == SPEED_10) { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a); + } else { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); + } + } +} + +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + wol->supported = WAKE_ANY; + wol->wolopts = tp->saved_wolopts; +} + +static void 
__rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) +{ + static const struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_PHY, Config3, LinkUp }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake }, + { WAKE_MAGIC, Config3, MagicPacket } + }; + unsigned int i, tmp = ARRAY_SIZE(cfg); + u8 options; + + rtl_unlock_config_regs(tp); + + if (rtl_is_8168evl_up(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2); + else + rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2); + } else if (rtl_is_8125(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0)); + else + r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0); + } + + for (i = 0; i < tmp; i++) { + options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(tp, cfg[i].reg, options); + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + options = RTL_R8(tp, Config1) & ~PMEnable; + if (wolopts) + options |= PMEnable; + RTL_W8(tp, Config1, options); + break; + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: + options = RTL_R8(tp, Config2) & ~PME_SIGNAL; + if (wolopts) + options |= PME_SIGNAL; + RTL_W8(tp, Config2, options); + break; + default: + break; + } + + rtl_lock_config_regs(tp); + + device_set_wakeup_enable(tp_to_dev(tp), wolopts); + rtl_set_d3_pll_down(tp, !wolopts); + tp->dev->wol_enabled = wolopts ? 1 : 0; +} + +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (wol->wolopts & ~WAKE_ANY) + return -EINVAL; + + tp->saved_wolopts = wol->wolopts; + __rtl8169_set_wol(tp, tp->saved_wolopts); + + return 0; +} + +static void rtl8169_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl_fw *rtl_fw = tp->rtl_fw; + + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + if (rtl_fw) + strlcpy(info->fw_version, rtl_fw->version, + sizeof(info->fw_version)); +} + +static int rtl8169_get_regs_len(struct net_device *dev) +{ + return R8169_REGS_SIZE; +} + +static netdev_features_t rtl8169_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > TD_MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + + if (dev->mtu > ETH_DATA_LEN && + tp->mac_version > RTL_GIGA_MAC_VER_06) + features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO); + + return features; +} + +static void rtl_set_rx_config_features(struct rtl8169_private *tp, + netdev_features_t features) +{ + u32 rx_config = RTL_R32(tp, RxConfig); + + if (features & NETIF_F_RXALL) + rx_config |= RX_CONFIG_ACCEPT_ERR_MASK; + else + rx_config &= ~RX_CONFIG_ACCEPT_ERR_MASK; + + if (rtl_is_8125(tp)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rx_config |= RX_VLAN_8125; + else + rx_config &= ~RX_VLAN_8125; + } + + RTL_W32(tp, RxConfig, rx_config); +} + +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_set_rx_config_features(tp, features); + + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + if 
(!rtl_is_8125(tp)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + } + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_pci_commit(tp); + + return 0; +} + +static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) +{ + return (skb_vlan_tag_present(skb)) ? + TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; +} + +static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); +} + +static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 __iomem *data = tp->mmio_addr; + u32 *dw = p; + int i; + + for (i = 0; i < R8169_REGS_SIZE; i += 4) + memcpy_fromio(dw++, data++, 4); +} + +static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", +}; + +static int rtl8169_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8169_gstrings); + default: + return -EOPNOTSUPP; + } +} + +DECLARE_RTL_COND(rtl_counters_cond) +{ + return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump); +} + +static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd) +{ + u32 cmd = lower_32_bits(tp->counters_phys_addr); + + RTL_W32(tp, CounterAddrHigh, upper_32_bits(tp->counters_phys_addr)); + rtl_pci_commit(tp); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | counter_cmd); + + rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); +} + +static void rtl8169_update_counters(struct rtl8169_private *tp) +{ + u8 val = RTL_R8(tp, ChipCmd); + + /* + * Some chips are unable to dump tally counters when the receiver + * is disabled. If 0xff chip may be in a PCI power-save state. + */ + if (val & CmdRxEnb && val != 0xff) + rtl8169_do_counters(tp, CounterDump); +} + +static void rtl8169_init_counter_offsets(struct rtl8169_private *tp) +{ + struct rtl8169_counters *counters = tp->counters; + + /* + * rtl8169_init_counter_offsets is called from rtl_open. On chip + * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only + * reset by a power cycle, while the counter values collected by the + * driver are reset at every driver unload/load cycle. + * + * To make sure the HW values returned by @get_stats64 match the SW + * values, we collect the initial values at first open(*) and use them + * as offsets to normalize the values returned by @get_stats64. + * + * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one + * for the reason stated in rtl8169_update_counters; CmdRxEnb is only + * set at open time by rtl_hw_start. 
+ */ + + if (tp->tc_offset.inited) + return; + + if (tp->mac_version >= RTL_GIGA_MAC_VER_19) { + rtl8169_do_counters(tp, CounterReset); + } else { + rtl8169_update_counters(tp); + tp->tc_offset.tx_errors = counters->tx_errors; + tp->tc_offset.tx_multi_collision = counters->tx_multi_collision; + tp->tc_offset.tx_aborted = counters->tx_aborted; + tp->tc_offset.rx_missed = counters->rx_missed; + } + + tp->tc_offset.inited = true; +} + +static void rtl8169_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_counters *counters; + + counters = tp->counters; + rtl8169_update_counters(tp); + + data[0] = le64_to_cpu(counters->tx_packets); + data[1] = le64_to_cpu(counters->rx_packets); + data[2] = le64_to_cpu(counters->tx_errors); + data[3] = le32_to_cpu(counters->rx_errors); + data[4] = le16_to_cpu(counters->rx_missed); + data[5] = le16_to_cpu(counters->align_errors); + data[6] = le32_to_cpu(counters->tx_one_collision); + data[7] = le32_to_cpu(counters->tx_multi_collision); + data[8] = le64_to_cpu(counters->rx_unicast); + data[9] = le64_to_cpu(counters->rx_broadcast); + data[10] = le32_to_cpu(counters->rx_multicast); + data[11] = le16_to_cpu(counters->tx_aborted); + data[12] = le16_to_cpu(counters->tx_underun); +} + +static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings)); + break; + } +} + +/* + * Interrupt coalescing + * + * > 1 - the availability of the IntrMitigate (0xe2) register through the + * > 8169, 8168 and 810x line of chipsets + * + * 8169, 8168, and 8136(810x) serial chipsets support it. + * + * > 2 - the Tx timer unit at gigabit speed + * + * The unit of the timer depends on both the speed and the setting of CPlusCmd + * (0xe0) bit 1 and bit 0. 
+ * + * For 8169 + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 320ns 2.56us 40.96us + * 0 1 2.56us 20.48us 327.7us + * 1 0 5.12us 40.96us 655.4us + * 1 1 10.24us 81.92us 1.31ms + * + * For the other + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 5us 2.56us 40.96us + * 0 1 40us 20.48us 327.7us + * 1 0 80us 40.96us 655.4us + * 1 1 160us 81.92us 1.31ms + */ + +/* rx/tx scale factors for all CPlusCmd[0:1] cases */ +struct rtl_coalesce_info { + u32 speed; + u32 scale_nsecs[4]; +}; + +/* produce array with base delay *1, *8, *8*2, *8*2*2 */ +#define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) } + +static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = { + { SPEED_1000, COALESCE_DELAY(320) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, + { 0 }, +}; + +static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = { + { SPEED_1000, COALESCE_DELAY(5000) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, + { 0 }, +}; +#undef COALESCE_DELAY + +/* get rx/tx scale vector corresponding to current speed */ +static const struct rtl_coalesce_info * +rtl_coalesce_info(struct rtl8169_private *tp) +{ + const struct rtl_coalesce_info *ci; + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + ci = rtl_coalesce_info_8169; + else + ci = rtl_coalesce_info_8168_8136; + + /* if speed is unknown assume highest one */ + if (tp->phydev->speed == SPEED_UNKNOWN) + return ci; + + for (; ci->speed; ci++) { + if (tp->phydev->speed == ci->speed) + return ci; + } + + return ERR_PTR(-ELNRNG); +} + +static int rtl_get_coalesce(struct net_device *dev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 4 + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ack) + +#else + struct ethtool_coalesce *ec) +#endif +{ + struct rtl8169_private *tp = netdev_priv(dev); + const struct rtl_coalesce_info *ci; + u32 scale, c_us, c_fr; + u16 intrmit; + + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + + memset(ec, 0, sizeof(*ec)); + + /* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */ + ci = rtl_coalesce_info(tp); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + scale = ci->scale_nsecs[tp->cp_cmd & INTT_MASK]; + + intrmit = RTL_R16(tp, IntrMitigate); + + c_us = FIELD_GET(RTL_COALESCE_TX_USECS, intrmit); + ec->tx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000); + + c_fr = FIELD_GET(RTL_COALESCE_TX_FRAMES, intrmit); + /* ethtool_coalesce states usecs and max_frames must not both be 0 */ + ec->tx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1; + + c_us = FIELD_GET(RTL_COALESCE_RX_USECS, intrmit); + ec->rx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000); + + c_fr = FIELD_GET(RTL_COALESCE_RX_FRAMES, intrmit); + ec->rx_max_coalesced_frames = (c_us || c_fr) ? 
c_fr * 4 : 1; + + return 0; +} + +/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, usec) */ +static int rtl_coalesce_choose_scale(struct rtl8169_private *tp, u32 usec, + u16 *cp01) +{ + const struct rtl_coalesce_info *ci; + u16 i; + + ci = rtl_coalesce_info(tp); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + for (i = 0; i < 4; i++) { + if (usec <= ci->scale_nsecs[i] * RTL_COALESCE_T_MAX / 1000U) { + *cp01 = i; + return ci->scale_nsecs[i]; + } + } + + return -ERANGE; +} + +static int rtl_set_coalesce(struct net_device *dev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL >= 4 + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ack) + +#else + struct ethtool_coalesce *ec) +#endif +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 tx_fr = ec->tx_max_coalesced_frames; + u32 rx_fr = ec->rx_max_coalesced_frames; + u32 coal_usec_max, units; + u16 w = 0, cp01 = 0; + int scale; + + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + + if (rx_fr > RTL_COALESCE_FRAME_MAX || tx_fr > RTL_COALESCE_FRAME_MAX) + return -ERANGE; + + coal_usec_max = max(ec->rx_coalesce_usecs, ec->tx_coalesce_usecs); + scale = rtl_coalesce_choose_scale(tp, coal_usec_max, &cp01); + if (scale < 0) + return scale; + + /* Accept max_frames=1 we returned in rtl_get_coalesce. Accept it + * not only when usecs=0 because of e.g. the following scenario: + * + * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX) + * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1 + * - then user does `ethtool -C eth0 rx-usecs 100` + * + * Since ethtool sends to kernel whole ethtool_coalesce settings, + * if we want to ignore rx_frames then it has to be set to 0. + */ + if (rx_fr == 1) + rx_fr = 0; + if (tx_fr == 1) + tx_fr = 0; + + /* HW requires time limit to be set if frame limit is set */ + if ((tx_fr && !ec->tx_coalesce_usecs) || + (rx_fr && !ec->rx_coalesce_usecs)) + return -EINVAL; + + w |= FIELD_PREP(RTL_COALESCE_TX_FRAMES, DIV_ROUND_UP(tx_fr, 4)); + w |= FIELD_PREP(RTL_COALESCE_RX_FRAMES, DIV_ROUND_UP(rx_fr, 4)); + + units = DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000U, scale); + w |= FIELD_PREP(RTL_COALESCE_TX_USECS, units); + units = DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000U, scale); + w |= FIELD_PREP(RTL_COALESCE_RX_USECS, units); + + RTL_W16(tp, IntrMitigate, w); + + /* Meaning of PktCntrDisable bit changed from RTL8168e-vl */ + if (rtl_is_8168evl_up(tp)) { + if (!rx_fr && !tx_fr) + /* disable packet counter */ + tp->cp_cmd |= PktCntrDisable; + else + tp->cp_cmd &= ~PktCntrDisable; + } + + tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_pci_commit(tp); + + return 0; +} + +static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + + return phy_ethtool_get_eee(tp->phydev, data); +} + +static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + + ret = phy_ethtool_set_eee(tp->phydev, data); + + if (!ret) + tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN, + MDIO_AN_EEE_ADV); + return ret; +} + +static void rtl8169_get_ringparam(struct net_device *dev, +#if SUSE_VERSION == 15 && SUSE_PATCHLEVEL == 5 + struct ethtool_ringparam *data, + struct kernel_ethtool_ringparam *kring, + struct netlink_ext_ack *ack) +#else + struct ethtool_ringparam *data) +#endif +{ + data->rx_max_pending = 
NUM_RX_DESC; + data->rx_pending = NUM_RX_DESC; + data->tx_max_pending = NUM_TX_DESC; + data->tx_pending = NUM_TX_DESC; +} + +static void rtl8169_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + bool tx_pause, rx_pause; + + phy_get_pause(tp->phydev, &tx_pause, &rx_pause); + + data->autoneg = tp->phydev->autoneg; + data->tx_pause = tx_pause ? 1 : 0; + data->rx_pause = rx_pause ? 1 : 0; +} + +static int rtl8169_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > ETH_DATA_LEN) + return -EOPNOTSUPP; + + phy_set_asym_pause(tp->phydev, data->rx_pause, data->tx_pause); + + return 0; +} + +static const struct ethtool_ops rtl8169_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .get_drvinfo = rtl8169_get_drvinfo, + .get_regs_len = rtl8169_get_regs_len, + .get_link = ethtool_op_get_link, + .get_coalesce = rtl_get_coalesce, + .set_coalesce = rtl_set_coalesce, + .get_regs = rtl8169_get_regs, + .get_wol = rtl8169_get_wol, + .set_wol = rtl8169_set_wol, + .get_strings = rtl8169_get_strings, + .get_sset_count = rtl8169_get_sset_count, + .get_ethtool_stats = rtl8169_get_ethtool_stats, + .get_ts_info = ethtool_op_get_ts_info, + .nway_reset = phy_ethtool_nway_reset, + .get_eee = rtl8169_get_eee, + .set_eee = rtl8169_set_eee, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_ringparam = rtl8169_get_ringparam, + .get_pauseparam = rtl8169_get_pauseparam, + .set_pauseparam = rtl8169_set_pauseparam, +}; + +static void rtl_enable_eee(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + int adv; + + /* respect EEE advertisement the user may have set */ + if (tp->eee_adv >= 0) + adv = tp->eee_adv; + else + adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); + + if (adv >= 0) + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv); +} + +static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) +{ + /* + * The driver currently handles the 8168Bf and the 8168Be identically + * but they can be identified more specifically through the test below + * if needed: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * + * Same thing for the 8101Eb and the 8101Ec: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + */ + static const struct rtl_mac_info { + u16 mask; + u16 val; + enum mac_version ver; + } mac_info[] = { + /* 8125B family. */ + { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 }, + + /* 8125A family. */ + { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 }, + { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 }, + + /* RTL8117 */ + { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 }, + { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 }, + + /* 8168EP family. */ + { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 }, + { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 }, + { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 }, + + /* 8168H family. */ + { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 }, + { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 }, + + /* 8168G family. */ + { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 }, + { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 }, + { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 }, + { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 }, + + /* 8168F family. */ + { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 }, + { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 }, + { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 }, + + /* 8168E family. 
*/ + { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 }, + { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 }, + { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 }, + + /* 8168D family. */ + { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 }, + { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 }, + + /* 8168DP family. */ + /* It seems this early RTL8168dp version never made it to + * the wild. Let's see whether somebody complains, if not + * we'll remove support for this chip version completely. + * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 }, + */ + { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 }, + { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 }, + + /* 8168C family. */ + { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 }, + { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 }, + { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 }, + { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 }, + { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 }, + { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 }, + { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 }, + + /* 8168B family. */ + { 0x7cf, 0x380, RTL_GIGA_MAC_VER_12 }, + { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 }, + { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, + + /* 8101 family. */ + { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 }, + { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 }, + { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 }, + { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 }, + { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 }, + { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 }, + { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 }, + { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 }, + { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 }, + /* FIXME: where did these entries come from ? -- FR + * Not even r8101 vendor driver knows these id's, + * so let's disable detection for now. -- HK + * { 0xfc8, 0x388, RTL_GIGA_MAC_VER_13 }, + * { 0xfc8, 0x308, RTL_GIGA_MAC_VER_13 }, + */ + + /* 8110 family. */ + { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 }, + { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 }, + { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 }, + { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 }, + { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 }, + + /* Catch-all */ + { 0x000, 0x000, RTL_GIGA_MAC_NONE } + }; + const struct rtl_mac_info *p = mac_info; + enum mac_version ver; + + while ((xid & p->mask) != p->val) + p++; + ver = p->ver; + + if (ver != RTL_GIGA_MAC_NONE && !gmii) { + if (ver == RTL_GIGA_MAC_VER_42) + ver = RTL_GIGA_MAC_VER_43; + else if (ver == RTL_GIGA_MAC_VER_45) + ver = RTL_GIGA_MAC_VER_47; + else if (ver == RTL_GIGA_MAC_VER_46) + ver = RTL_GIGA_MAC_VER_48; + } + + return ver; +} + +static void rtl_release_firmware(struct rtl8169_private *tp) +{ + if (tp->rtl_fw) { + rtl_fw_release_firmware(tp->rtl_fw); + kfree(tp->rtl_fw); + tp->rtl_fw = NULL; + } +} + +void r8169_apply_firmware(struct rtl8169_private *tp) +{ + int val; + + /* TODO: release firmware if rtl_fw_write_firmware signals failure. */ + if (tp->rtl_fw) { + rtl_fw_write_firmware(tp, tp->rtl_fw); + /* At least one firmware doesn't reset tp->ocp_base. 
*/ + tp->ocp_base = OCP_STD_PHY_BASE; + + /* PHY soft reset may still be in progress */ + phy_read_poll_timeout(tp->phydev, MII_BMCR, val, + !(val & BMCR_RESET), + 50000, 600000, true); + } +} + +static void rtl8168_config_eee_mac(struct rtl8169_private *tp) +{ + /* Adjust EEE LED frequency */ + if (tp->mac_version != RTL_GIGA_MAC_VER_38) + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + + rtl_eri_set_bits(tp, 0x1b0, 0x0003); +} + +static void rtl8125a_config_eee_mac(struct rtl8169_private *tp) +{ + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); + r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1)); +} + +static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp) +{ + RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20); +} + +static void rtl8125b_config_eee_mac(struct rtl8169_private *tp) +{ + rtl8125_set_eee_txidle_timer(tp); + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); +} + +static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr) +{ + rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr)); + rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, get_unaligned_le16(addr + 4)); + rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, get_unaligned_le16(addr) << 16); + rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, get_unaligned_le32(addr + 2)); +} + +u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp) +{ + u16 data1, data2, ioffset; + + r8168_mac_ocp_write(tp, 0xdd02, 0x807d); + data1 = r8168_mac_ocp_read(tp, 0xdd02); + data2 = r8168_mac_ocp_read(tp, 0xdd00); + + ioffset = (data2 >> 1) & 0x7ff8; + ioffset |= data2 & 0x0007; + if (data1 & BIT(7)) + ioffset |= BIT(15); + + return ioffset; +} + +static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) +{ + set_bit(flag, tp->wk.flags); + if (!tp->ecdev) + schedule_work(&tp->wk.work); +} + +static void rtl8169_init_phy(struct rtl8169_private *tp) +{ + r8169_hw_phy_config(tp, tp->phydev, tp->mac_version); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); + /* set undocumented MAC Reg C+CR Offset 0x82h */ + RTL_W8(tp, 0x82, 0x01); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_05 && + tp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE && + tp->pci_dev->subsystem_device == 0xe000) + phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b); + + /* We may have called phy_speed_down before */ + phy_speed_up(tp->phydev); + + if (rtl_supports_eee(tp)) + rtl_enable_eee(tp); + + genphy_soft_reset(tp->phydev); +} + +static void rtl_rar_set(struct rtl8169_private *tp, const u8 *addr) +{ + rtl_unlock_config_regs(tp); + + RTL_W32(tp, MAC4, get_unaligned_le16(addr + 4)); + rtl_pci_commit(tp); + + RTL_W32(tp, MAC0, get_unaligned_le32(addr)); + rtl_pci_commit(tp); + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + rtl_rar_exgmac_set(tp, addr); + + rtl_lock_config_regs(tp); +} + +static int rtl_set_mac_address(struct net_device *dev, void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + ret = eth_mac_addr(dev, p); + if (ret) + return ret; + + rtl_rar_set(tp, dev->dev_addr); + + return 0; +} + +static void rtl_wol_enable_rx(struct rtl8169_private *tp) +{ + if (tp->mac_version >= RTL_GIGA_MAC_VER_25) + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | + AcceptBroadcast | AcceptMulticast | AcceptMyPhys); +} + +static void rtl_prepare_power_down(struct rtl8169_private *tp) +{ + if (tp->dash_type != RTL_DASH_NONE) + return; + + if (tp->mac_version 
== RTL_GIGA_MAC_VER_32 || + tp->mac_version == RTL_GIGA_MAC_VER_33) + rtl_ephy_write(tp, 0x19, 0xff64); + + if (device_may_wakeup(tp_to_dev(tp))) { + phy_speed_down(tp->phydev, false); + rtl_wol_enable_rx(tp); + } +} + +static void rtl_init_rxcfg(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: + RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63: + RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST); + break; + default: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); + break; + } +} + +static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) +{ + tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; +} + +static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); +} + +static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); +} + +static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); +} + +static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); +} + +static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x24); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); +} + +static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x3f); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); +} + +static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); +} + +static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); +} + +static void rtl_jumbo_config(struct rtl8169_private *tp) +{ + bool jumbo = tp->dev->mtu > ETH_DATA_LEN; + int readrq = 4096; + + rtl_unlock_config_regs(tp); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + if (jumbo) { + readrq = 512; + r8168b_1_hw_jumbo_enable(tp); + } else { + r8168b_1_hw_jumbo_disable(tp); + } + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26: + if (jumbo) { + readrq = 512; + r8168c_hw_jumbo_enable(tp); + } else { + r8168c_hw_jumbo_disable(tp); + } + break; + case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: + if (jumbo) + r8168dp_hw_jumbo_enable(tp); + else + r8168dp_hw_jumbo_disable(tp); + break; + case RTL_GIGA_MAC_VER_31 ... 
RTL_GIGA_MAC_VER_33: + if (jumbo) + r8168e_hw_jumbo_enable(tp); + else + r8168e_hw_jumbo_disable(tp); + break; + default: + break; + } + rtl_lock_config_regs(tp); + + if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii) + pcie_set_readrq(tp->pci_dev, readrq); + + /* Chip doesn't support pause in jumbo mode */ + if (jumbo) { + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + tp->phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + tp->phydev->advertising); + phy_start_aneg(tp->phydev); + } +} + +DECLARE_RTL_COND(rtl_chipcmd_cond) +{ + return RTL_R8(tp, ChipCmd) & CmdReset; +} + +static void rtl_hw_reset(struct rtl8169_private *tp) +{ + RTL_W8(tp, ChipCmd, CmdReset); + + rtl_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); +} + +static void rtl_request_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw; + + /* firmware loaded already or no firmware available */ + if (tp->rtl_fw || !tp->fw_name) + return; + + rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); + if (!rtl_fw) + return; + + rtl_fw->phy_write = rtl_writephy; + rtl_fw->phy_read = rtl_readphy; + rtl_fw->mac_mcu_write = mac_mcu_write; + rtl_fw->mac_mcu_read = mac_mcu_read; + rtl_fw->fw_name = tp->fw_name; + rtl_fw->dev = tp_to_dev(tp); + + if (rtl_fw_request_firmware(rtl_fw)) + kfree(rtl_fw); + else + tp->rtl_fw = rtl_fw; +} + +static void rtl_rx_close(struct rtl8169_private *tp) +{ + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK); +} + +DECLARE_RTL_COND(rtl_npq_cond) +{ + return RTL_R8(tp, TxPoll) & NPQ; +} + +DECLARE_RTL_COND(rtl_txcfg_empty_cond) +{ + return RTL_R32(tp, TxConfig) & TXCFG_EMPTY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond) +{ + return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond_2) +{ + /* IntrMitigate has new functionality on RTL8125 */ + return (RTL_R16(tp, IntrMitigate) & 0x0103) == 0x0103; +} + +static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61: + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + break; + case RTL_GIGA_MAC_VER_63: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42); + break; + default: + break; + } +} + +static void rtl_enable_rxdvgate(struct rtl8169_private *tp) +{ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN); + fsleep(2000); + rtl_wait_txrx_fifo_empty(tp); +} + +static void rtl_set_tx_config_registers(struct rtl8169_private *tp) +{ + u32 val = TX_DMA_BURST << TxDMAShift | + InterFrameGap << TxInterFrameGapShift; + + if (rtl_is_8168evl_up(tp)) + val |= TXCFG_AUTO_FIFO; + + RTL_W32(tp, TxConfig, val); +} + +static void rtl_set_rx_max_size(struct rtl8169_private *tp) +{ + /* Low hurts. Let's disable the filtering. */ + RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1); +} + +static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) +{ + /* + * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh + * register to be written before TxDescAddrLow to work. + * Switching from MMIO to I/O access fixes the issue as well. 
+ */ + RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); + RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); + RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); +} + +static void rtl8169_set_magic_reg(struct rtl8169_private *tp) +{ + u32 val; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + val = 0x000fff00; + else if (tp->mac_version == RTL_GIGA_MAC_VER_06) + val = 0x00ffff00; + else + return; + + if (RTL_R8(tp, Config2) & PCI_Clock_66MHz) + val |= 0xff; + + RTL_W32(tp, 0x7c, val); +} + +static void rtl_set_rx_mode(struct net_device *dev) +{ + u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast; + /* Multicast hash filter */ + u32 mc_filter[2] = { 0xffffffff, 0xffffffff }; + struct rtl8169_private *tp = netdev_priv(dev); + u32 tmp; + + if (dev->flags & IFF_PROMISC) { + rx_mode |= AcceptAllPhys; + } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT || + dev->flags & IFF_ALLMULTI || + tp->mac_version == RTL_GIGA_MAC_VER_35) { + /* accept all multicasts */ + } else if (netdev_mc_empty(dev)) { + rx_mode &= ~AcceptMulticast; + } else { + struct netdev_hw_addr *ha; + + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + u32 bit_nr = eth_hw_addr_crc(ha) >> 26; + mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31); + } + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + tmp = mc_filter[0]; + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(tmp); + } + } + + RTL_W32(tp, MAR0 + 4, mc_filter[1]); + RTL_W32(tp, MAR0 + 0, mc_filter[0]); + + tmp = RTL_R32(tp, RxConfig); + RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_OK_MASK) | rx_mode); +} + +DECLARE_RTL_COND(rtl_csiar_cond) +{ + return RTL_R32(tp, CSIAR) & CSIAR_FLAG; +} + +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIDR, value); + RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE | func << 16); + + rtl_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 | + CSIAR_BYTE_ENABLE); + + return rtl_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? + RTL_R32(tp, CSIDR) : ~0; +} + +static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) +{ + struct pci_dev *pdev = tp->pci_dev; + u32 csi; + + /* According to Realtek the value at config space address 0x070f + * controls the L0s/L1 entrance latency. We try standard ECAM access + * first and if it fails fall back to CSI. 
+ */ + if (pdev->cfg_size > 0x070f && + pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL) + return; + + netdev_notice_once(tp->dev, + "No native access to PCI extended config space, falling back to CSI\n"); + csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; + rtl_csi_write(tp, 0x070c, csi | val << 24); +} + +static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) +{ + rtl_csi_access_enable(tp, 0x27); +} + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +static void __rtl_ephy_init(struct rtl8169_private *tp, + const struct ephy_info *e, int len) +{ + u16 w; + + while (len-- > 0) { + w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits; + rtl_ephy_write(tp, e->offset, w); + e++; + } +} + +#define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a)) + +static void rtl_disable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_enable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp) +{ + /* work around an issue when PCI reset occurs during L2/L3 state */ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23); +} + +static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) +{ + /* Don't enable ASPM in the chip if OS can't control ASPM */ + if (enable && tp->aspm_manageable) { + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); + } else { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + } + + udelay(10); +} + +static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat, + u16 tx_stat, u16 rx_dyn, u16 tx_dyn) +{ + /* Usage of dynamic vs. static FIFO is controlled by bit + * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known. + */ + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn); +} + +static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp, + u8 low, u8 high) +{ + /* FIFO thresholds for pause flow control */ + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high); +} + +static void rtl_hw_start_8168b(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168cp[] = { + { 0x01, 0, 0x0001 }, + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0042 }, + { 0x06, 0x0080, 0x0000 }, + { 0x07, 0, 0x2000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168cp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + /* Magic. 
*/ + RTL_W8(tp, DBG_REG, 0x20); +} + +static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_1[] = { + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0002 }, + { 0x06, 0x0080, 0x0000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); + + rtl_ephy_init(tp, e_info_8168c_1); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_2[] = { + { 0x01, 0, 0x0001 }, + { 0x03, 0x0400, 0x0020 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168c_2); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168d(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168d_4[] = { + { 0x0b, 0x0000, 0x0048 }, + { 0x19, 0x0020, 0x0050 }, + { 0x0c, 0x0100, 0x0020 }, + { 0x10, 0x0004, 0x0000 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168d_4); + + rtl_enable_clock_request(tp); +} + +static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_1[] = { + { 0x00, 0x0200, 0x0100 }, + { 0x00, 0x0000, 0x0004 }, + { 0x06, 0x0002, 0x0001 }, + { 0x06, 0x0000, 0x0030 }, + { 0x07, 0x0000, 0x2000 }, + { 0x00, 0x0000, 0x0020 }, + { 0x03, 0x5800, 0x2000 }, + { 0x03, 0x0000, 0x0001 }, + { 0x01, 0x0800, 0x1000 }, + { 0x07, 0x0000, 0x4000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x19, 0xffff, 0xfe6c }, + { 0x0a, 0x0000, 0x0040 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_1); + + rtl_disable_clock_request(tp); + + /* Reset tx FIFO pointer */ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST); + + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_2[] = { + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0004 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_2); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_eri_set_bits(tp, 0x0d4, 0x1f00); + rtl_eri_set_bits(tp, 0x1d0, BIT(1)); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168f(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_set_bits(tp, 0x1d0, BIT(4) | BIT(1)); + 
rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl8168_config_eee_mac(tp); +} + +static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x08, 0x0001, 0x0002 }, + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_hw_start_8168f(tp); + + rtl_ephy_init(tp, e_info_8168f_1); + + rtl_eri_set_bits(tp, 0x0d4, 0x1f00); +} + +static void rtl_hw_start_8411(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_hw_start_8168f(tp); + rtl_pcie_state_l2l3_disable(tp); + + rtl_ephy_init(tp, e_info_8168f_1); + + rtl_eri_set_bits(tp, 0x0d4, 0x0c00); +} + +static void rtl_hw_start_8168g(struct rtl8169_private *tp) +{ + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_eri_set_bits(tp, 0x0d4, 0x1f80); + + rtl8168_config_eee_mac(tp); + + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_1[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8000, 0x0000 } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_1); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_2[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x19, 0xffff, 0x7c00 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x0d, 0xffff, 0x1666 }, + { 0x00, 0xffff, 0x10a3 }, + { 0x06, 0xffff, 0xf050 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x4000, 0x0000 }, + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_2); +} + +static void rtl_hw_start_8411_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8411_2[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x37d0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x00, 0x0000, 0x0080 }, + { 0x06, 0x0000, 0x0010 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x0000, 0x4000 }, + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8411_2); + + /* The following Realtek-provided magic fixes an issue with the RX unit + * getting confused after the PHY having been powered-down. 
+ */ + r8168_mac_ocp_write(tp, 0xFC28, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2A, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0000); + r8168_mac_ocp_write(tp, 0xFC30, 0x0000); + r8168_mac_ocp_write(tp, 0xFC32, 0x0000); + r8168_mac_ocp_write(tp, 0xFC34, 0x0000); + r8168_mac_ocp_write(tp, 0xFC36, 0x0000); + mdelay(3); + r8168_mac_ocp_write(tp, 0xFC26, 0x0000); + + r8168_mac_ocp_write(tp, 0xF800, 0xE008); + r8168_mac_ocp_write(tp, 0xF802, 0xE00A); + r8168_mac_ocp_write(tp, 0xF804, 0xE00C); + r8168_mac_ocp_write(tp, 0xF806, 0xE00E); + r8168_mac_ocp_write(tp, 0xF808, 0xE027); + r8168_mac_ocp_write(tp, 0xF80A, 0xE04F); + r8168_mac_ocp_write(tp, 0xF80C, 0xE05E); + r8168_mac_ocp_write(tp, 0xF80E, 0xE065); + r8168_mac_ocp_write(tp, 0xF810, 0xC602); + r8168_mac_ocp_write(tp, 0xF812, 0xBE00); + r8168_mac_ocp_write(tp, 0xF814, 0x0000); + r8168_mac_ocp_write(tp, 0xF816, 0xC502); + r8168_mac_ocp_write(tp, 0xF818, 0xBD00); + r8168_mac_ocp_write(tp, 0xF81A, 0x074C); + r8168_mac_ocp_write(tp, 0xF81C, 0xC302); + r8168_mac_ocp_write(tp, 0xF81E, 0xBB00); + r8168_mac_ocp_write(tp, 0xF820, 0x080A); + r8168_mac_ocp_write(tp, 0xF822, 0x6420); + r8168_mac_ocp_write(tp, 0xF824, 0x48C2); + r8168_mac_ocp_write(tp, 0xF826, 0x8C20); + r8168_mac_ocp_write(tp, 0xF828, 0xC516); + r8168_mac_ocp_write(tp, 0xF82A, 0x64A4); + r8168_mac_ocp_write(tp, 0xF82C, 0x49C0); + r8168_mac_ocp_write(tp, 0xF82E, 0xF009); + r8168_mac_ocp_write(tp, 0xF830, 0x74A2); + r8168_mac_ocp_write(tp, 0xF832, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF834, 0x74A0); + r8168_mac_ocp_write(tp, 0xF836, 0xC50E); + r8168_mac_ocp_write(tp, 0xF838, 0x9CA2); + r8168_mac_ocp_write(tp, 0xF83A, 0x1C11); + r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF83E, 0xE006); + r8168_mac_ocp_write(tp, 0xF840, 0x74F8); + r8168_mac_ocp_write(tp, 0xF842, 0x48C4); + r8168_mac_ocp_write(tp, 0xF844, 0x8CF8); + r8168_mac_ocp_write(tp, 0xF846, 0xC404); + r8168_mac_ocp_write(tp, 0xF848, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84A, 0xC403); + r8168_mac_ocp_write(tp, 0xF84C, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2); + r8168_mac_ocp_write(tp, 0xF850, 0x0C0A); + r8168_mac_ocp_write(tp, 0xF852, 0xE434); + r8168_mac_ocp_write(tp, 0xF854, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF856, 0x49D9); + r8168_mac_ocp_write(tp, 0xF858, 0xF01F); + r8168_mac_ocp_write(tp, 0xF85A, 0xC526); + r8168_mac_ocp_write(tp, 0xF85C, 0x64A5); + r8168_mac_ocp_write(tp, 0xF85E, 0x1400); + r8168_mac_ocp_write(tp, 0xF860, 0xF007); + r8168_mac_ocp_write(tp, 0xF862, 0x0C01); + r8168_mac_ocp_write(tp, 0xF864, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF866, 0x1C15); + r8168_mac_ocp_write(tp, 0xF868, 0xC51B); + r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF86C, 0xE013); + r8168_mac_ocp_write(tp, 0xF86E, 0xC519); + r8168_mac_ocp_write(tp, 0xF870, 0x74A0); + r8168_mac_ocp_write(tp, 0xF872, 0x48C4); + r8168_mac_ocp_write(tp, 0xF874, 0x8CA0); + r8168_mac_ocp_write(tp, 0xF876, 0xC516); + r8168_mac_ocp_write(tp, 0xF878, 0x74A4); + r8168_mac_ocp_write(tp, 0xF87A, 0x48C8); + r8168_mac_ocp_write(tp, 0xF87C, 0x48CA); + r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4); + r8168_mac_ocp_write(tp, 0xF880, 0xC512); + r8168_mac_ocp_write(tp, 0xF882, 0x1B00); + r8168_mac_ocp_write(tp, 0xF884, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF886, 0x1B1C); + r8168_mac_ocp_write(tp, 0xF888, 0x483F); + r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2); + r8168_mac_ocp_write(tp, 0xF88C, 0x1B04); + r8168_mac_ocp_write(tp, 0xF88E, 0xC508); + r8168_mac_ocp_write(tp, 0xF890, 0x9BA0); + 
r8168_mac_ocp_write(tp, 0xF892, 0xC505); + r8168_mac_ocp_write(tp, 0xF894, 0xBD00); + r8168_mac_ocp_write(tp, 0xF896, 0xC502); + r8168_mac_ocp_write(tp, 0xF898, 0xBD00); + r8168_mac_ocp_write(tp, 0xF89A, 0x0300); + r8168_mac_ocp_write(tp, 0xF89C, 0x051E); + r8168_mac_ocp_write(tp, 0xF89E, 0xE434); + r8168_mac_ocp_write(tp, 0xF8A0, 0xE018); + r8168_mac_ocp_write(tp, 0xF8A2, 0xE092); + r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20); + r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F); + r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4); + r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3); + r8168_mac_ocp_write(tp, 0xF8AE, 0xF007); + r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0); + r8168_mac_ocp_write(tp, 0xF8B2, 0xF103); + r8168_mac_ocp_write(tp, 0xF8B4, 0xC607); + r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8B8, 0xC606); + r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8BC, 0xC602); + r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C); + r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28); + r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C); + r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00); + r8168_mac_ocp_write(tp, 0xF8C8, 0xC707); + r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00); + r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2); + r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1); + r8168_mac_ocp_write(tp, 0xF8D0, 0xC502); + r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA); + r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0); + r8168_mac_ocp_write(tp, 0xF8D8, 0xC502); + r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8DC, 0x0132); + + r8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + r8168_mac_ocp_write(tp, 0xFC2A, 0x0743); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0801); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9); + r8168_mac_ocp_write(tp, 0xFC30, 0x02FD); + r8168_mac_ocp_write(tp, 0xFC32, 0x0C25); + r8168_mac_ocp_write(tp, 0xFC34, 0x00A9); + r8168_mac_ocp_write(tp, 0xFC36, 0x012D); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168h_1[] = { + { 0x1e, 0x0800, 0x0001 }, + { 0x1d, 0x0000, 0x0800 }, + { 0x05, 0xffff, 0x2089 }, + { 0x06, 0xffff, 0x5881 }, + { 0x04, 0xffff, 0x854a }, + { 0x01, 0xffff, 0x068b } + }; + int rg_saw_cnt; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168h_1); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, 0x1f00); + rtl_eri_set_bits(tp, 0xdc, 0x001c); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = 16000000/rg_saw_cnt; + sw_cnt_1ms_ini &= 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_modify(tp, 
0xe052, 0x6000, 0x8008); + r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, 0x1f80); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_1[] = { + { 0x00, 0xffff, 0x10ab }, + { 0x06, 0xffff, 0xf030 }, + { 0x08, 0xffff, 0x2006 }, + { 0x0d, 0xffff, 0x1666 }, + { 0x0c, 0x3ff0, 0x0000 } + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_1); + + rtl_hw_start_8168ep(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_2[] = { + { 0x00, 0xffff, 0x10a3 }, + { 0x19, 0xffff, 0xfc00 }, + { 0x1e, 0xffff, 0x20ea } + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_2); + + rtl_hw_start_8168ep(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_3[] = { + { 0x00, 0x0000, 0x0080 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_3); + + rtl_hw_start_8168ep(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8117(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8117[] = { + { 0x19, 0x0040, 0x1100 }, + { 0x59, 0x0040, 0x1100 }, + }; + int rg_saw_cnt; + + rtl8168ep_stop_cmac(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8117); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, 0x1f90); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 
0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_write(tp, 0xea80, 0x0003); + r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + /* firmware is for MAC only */ + r8169_apply_firmware(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8102e_1[] = { + { 0x01, 0, 0x6e65 }, + { 0x02, 0, 0x091f }, + { 0x03, 0, 0xc2f9 }, + { 0x06, 0, 0xafb5 }, + { 0x07, 0, 0x0e00 }, + { 0x19, 0, 0xec80 }, + { 0x01, 0, 0x2e65 }, + { 0x01, 0, 0x6e65 } + }; + u8 cfg1; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, FIX_NAK_1); + + RTL_W8(tp, Config1, + LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + cfg1 = RTL_R8(tp, Config1); + if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) + RTL_W8(tp, Config1, cfg1 & ~LEDS0); + + rtl_ephy_init(tp, e_info_8102e_1); +} + +static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8102e_2(tp); + + rtl_ephy_write(tp, 0x03, 0xc2f9); +} + +static void rtl_hw_start_8401(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8401[] = { + { 0x01, 0xffff, 0x6fe5 }, + { 0x03, 0xffff, 0x0599 }, + { 0x06, 0xffff, 0xaf25 }, + { 0x07, 0xffff, 0x8e68 }, + }; + + rtl_ephy_init(tp, e_info_8401); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8105e_1[] = { + { 0x07, 0, 0x4000 }, + { 0x19, 0, 0x0200 }, + { 0x19, 0, 0x0020 }, + { 0x1e, 0, 0x2000 }, + { 0x03, 0, 0x0001 }, + { 0x19, 0, 0x0100 }, + { 0x19, 0, 0x0004 }, + { 0x0a, 0, 0x0020 } + }; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + /* Disable Early Tally Counter */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + + rtl_ephy_init(tp, e_info_8105e_1); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) +{ + rtl_hw_start_8105e_1(tp); + rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000); +} + +static void rtl_hw_start_8402(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8402[] = { + { 0x19, 0xffff, 0xff64 }, + { 0x1e, 0, 0x4000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, 
RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl_ephy_init(tp, e_info_8402); + + rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06); + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_w0w1_eri(tp, 0x0d4, 0x0e00, 0xff00); + + /* disable EEE */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + + rtl_pcie_state_l2l3_disable(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8106(struct rtl8169_private *tp) +{ + rtl_hw_aspm_clkreq_enable(tp, false); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + + /* The default value is 0x13. Change it to 0x2f */ + rtl_csi_access_enable(tp, 0x2f); + + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); + + /* disable EEE */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + + rtl_pcie_state_l2l3_disable(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond) +{ + return r8168_mac_ocp_read(tp, 0xe00e) & BIT(13); +} + +static void rtl_hw_start_8125_common(struct rtl8169_private *tp) +{ + rtl_pcie_state_l2l3_disable(tp); + + RTL_W16(tp, 0x382, 0x221b); + RTL_W8(tp, 0x4500, 0); + RTL_W16(tp, 0x4800, 0); + + /* disable UPS */ + r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10); + + r8168_mac_ocp_write(tp, 0xc140, 0xffff); + r8168_mac_ocp_write(tp, 0xc142, 0xffff); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x03a9); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + /* disable new tx descriptor format */ + r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200); + else + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000); + else + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020); + + r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c); + r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033); + r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040); + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030); + r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000); + r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); + r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403); + r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068); + r8168_mac_ocp_modify(tp, 0xc0ac, 0x0080, 0x1f00); + r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f); + + r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001); + udelay(1); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0001, 0x0000); + RTL_W16(tp, 0x1880, RTL_R16(tp, 0x1880) & ~0x0030); + + r8168_mac_ocp_write(tp, 0xe098, 0xc302); + + rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + rtl8125b_config_eee_mac(tp); + else + rtl8125a_config_eee_mac(tp); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + udelay(10); +} + +static void rtl_hw_start_8125a_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125a_1[] = { + { 0x01, 0xffff, 0xa812 }, + { 0x09, 0xffff, 0x520c }, + { 0x04, 0xffff, 0xd000 }, + { 0x0d, 
0xffff, 0xf702 }, + { 0x0a, 0xffff, 0x8653 }, + { 0x06, 0xffff, 0x001e }, + { 0x08, 0xffff, 0x3595 }, + { 0x20, 0xffff, 0x9455 }, + { 0x21, 0xffff, 0x99ff }, + { 0x02, 0xffff, 0x6046 }, + { 0x29, 0xffff, 0xfe00 }, + { 0x23, 0xffff, 0xab62 }, + + { 0x41, 0xffff, 0xa80c }, + { 0x49, 0xffff, 0x520c }, + { 0x44, 0xffff, 0xd000 }, + { 0x4d, 0xffff, 0xf702 }, + { 0x4a, 0xffff, 0x8653 }, + { 0x46, 0xffff, 0x001e }, + { 0x48, 0xffff, 0x3595 }, + { 0x60, 0xffff, 0x9455 }, + { 0x61, 0xffff, 0x99ff }, + { 0x42, 0xffff, 0x6046 }, + { 0x69, 0xffff, 0xfe00 }, + { 0x63, 0xffff, 0xab62 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8125a_1); + + rtl_hw_start_8125_common(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8125a_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125a_2[] = { + { 0x04, 0xffff, 0xd000 }, + { 0x0a, 0xffff, 0x8653 }, + { 0x23, 0xffff, 0xab66 }, + { 0x20, 0xffff, 0x9455 }, + { 0x21, 0xffff, 0x99ff }, + { 0x29, 0xffff, 0xfe04 }, + + { 0x44, 0xffff, 0xd000 }, + { 0x4a, 0xffff, 0x8653 }, + { 0x63, 0xffff, 0xab66 }, + { 0x60, 0xffff, 0x9455 }, + { 0x61, 0xffff, 0x99ff }, + { 0x69, 0xffff, 0xfe04 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8125a_2); + + rtl_hw_start_8125_common(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8125b(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125b[] = { + { 0x0b, 0xffff, 0xa908 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x4b, 0xffff, 0xa908 }, + { 0x5e, 0xffff, 0x20eb }, + { 0x22, 0x0030, 0x0020 }, + { 0x62, 0x0030, 0x0020 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + rtl_hw_aspm_clkreq_enable(tp, false); + + rtl_ephy_init(tp, e_info_8125b); + rtl_hw_start_8125_common(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_config(struct rtl8169_private *tp) +{ + static const rtl_generic_fct hw_configs[] = { + [RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1, + [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3, + [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401, + [RTL_GIGA_MAC_VER_16] = NULL, + [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1, + [RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1, + [RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2, + [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_2, + [RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4, + [RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2, + [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3, + [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4, + [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1, + [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2, + [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2, + [RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402, + [RTL_GIGA_MAC_VER_38] = 
rtl_hw_start_8411, + [RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106, + [RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2, + [RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1, + [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2, + [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3, + [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117, + [RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117, + [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125a_1, + [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2, + [RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b, + }; + + if (hw_configs[tp->mac_version]) + hw_configs[tp->mac_version](tp); +} + +static void rtl_hw_start_8125(struct rtl8169_private *tp) +{ + int i; + + /* disable interrupt coalescing */ + for (i = 0xa00; i < 0xb00; i += 4) + RTL_W32(tp, i, 0); + + rtl_hw_config(tp); +} + +static void rtl_hw_start_8168(struct rtl8169_private *tp) +{ + if (rtl_is_8168evl_up(tp)) + RTL_W8(tp, MaxTxPacketSize, EarlySize); + else + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + rtl_hw_config(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static void rtl_hw_start_8169(struct rtl8169_private *tp) +{ + RTL_W8(tp, EarlyTxThres, NoEarlyTx); + + tp->cp_cmd |= PCIMulRW; + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) + tp->cp_cmd |= EnAnaPLL; + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + rtl8169_set_magic_reg(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static void rtl_hw_start(struct rtl8169_private *tp) +{ + rtl_unlock_config_regs(tp); + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + rtl_hw_start_8169(tp); + else if (rtl_is_8125(tp)) + rtl_hw_start_8125(tp); + else + rtl_hw_start_8168(tp); + + rtl_set_rx_max_size(tp); + rtl_set_rx_tx_desc_registers(tp); + rtl_lock_config_regs(tp); + + rtl_jumbo_config(tp); + + /* Initially a 10 us delay. Turned it into a PCI commit. 
- FR */ + rtl_pci_commit(tp); + + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_init_rxcfg(tp); + rtl_set_tx_config_registers(tp); + rtl_set_rx_config_features(tp, tp->dev->features); + rtl_set_rx_mode(tp->dev); + rtl_irq_enable(tp); +} + +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + dev->mtu = new_mtu; + netdev_update_features(dev); + rtl_jumbo_config(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + rtl8125_set_eee_txidle_timer(tp); + break; + default: + break; + } + + return 0; +} + +static void rtl8169_mark_to_asic(struct RxDesc *desc) +{ + u32 eor = le32_to_cpu(desc->opts1) & RingEnd; + + desc->opts2 = 0; + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + WRITE_ONCE(desc->opts1, cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE)); +} + +static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp, + struct RxDesc *desc) +{ + struct device *d = tp_to_dev(tp); + int node = dev_to_node(d); + dma_addr_t mapping; + struct page *data; + + data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE)); + if (!data) + return NULL; + + mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + netdev_err(tp->dev, "Failed to map RX DMA!\n"); + __free_pages(data, get_order(R8169_RX_BUF_SIZE)); + return NULL; + } + + desc->addr = cpu_to_le64(mapping); + rtl8169_mark_to_asic(desc); + + return data; +} + +static void rtl8169_rx_clear(struct rtl8169_private *tp) +{ + int i; + + for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) { + dma_unmap_page(tp_to_dev(tp), + le64_to_cpu(tp->RxDescArray[i].addr), + R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + __free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE)); + tp->Rx_databuff[i] = NULL; + tp->RxDescArray[i].addr = 0; + tp->RxDescArray[i].opts1 = 0; + } +} + +static int rtl8169_rx_fill(struct rtl8169_private *tp) +{ + int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + struct page *data; + + data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); + if (!data) { + rtl8169_rx_clear(tp); + return -ENOMEM; + } + tp->Rx_databuff[i] = data; + } + + /* mark as last descriptor in the ring */ + tp->RxDescArray[NUM_RX_DESC - 1].opts1 |= cpu_to_le32(RingEnd); + + return 0; +} + +static int rtl8169_init_ring(struct rtl8169_private *tp) +{ + rtl8169_init_ring_indexes(tp); + + memset(tp->tx_skb, 0, sizeof(tp->tx_skb)); + memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff)); + + return rtl8169_rx_fill(tp); +} + +static void rtl8169_unmap_tx_skb(struct rtl8169_private *tp, unsigned int entry) +{ + struct ring_info *tx_skb = tp->tx_skb + entry; + struct TxDesc *desc = tp->TxDescArray + entry; + + dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), tx_skb->len, + DMA_TO_DEVICE); + memset(desc, 0, sizeof(*desc)); + memset(tx_skb, 0, sizeof(*tx_skb)); +} + +static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, + unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; i++) { + unsigned int entry = (start + i) % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + unsigned int len = tx_skb->len; + + if (len) { + struct sk_buff *skb = tx_skb->skb; + + rtl8169_unmap_tx_skb(tp, entry); + if (!tp->ecdev && skb) + dev_consume_skb_any(skb); + } + } +} + +static void rtl8169_tx_clear(struct rtl8169_private *tp) +{ + rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); + if (!tp->ecdev) + 
netdev_reset_queue(tp->dev); +} + +static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) +{ + if (!tp->ecdev) + napi_disable(&tp->napi); + + /* Give a racing hard_start_xmit a few cycles to complete. */ + synchronize_net(); + + /* Disable interrupts */ + rtl8169_irq_mask_and_ack(tp); + + rtl_rx_close(tp); + + if (going_down && tp->dev->wol_enabled) + goto no_reset; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000); + break; + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + rtl_enable_rxdvgate(tp); + fsleep(2000); + break; + default: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + fsleep(100); + break; + } + + rtl_hw_reset(tp); +no_reset: + rtl8169_tx_clear(tp); + rtl8169_init_ring_indexes(tp); +} + +static void rtl_reset_work(struct rtl8169_private *tp) +{ + int i; + + if (!tp->ecdev) + netif_stop_queue(tp->dev); + + rtl8169_cleanup(tp, false); + + for (i = 0; i < NUM_RX_DESC; i++) + rtl8169_mark_to_asic(tp->RxDescArray + i); + + if (!tp->ecdev) + napi_enable(&tp->napi); + rtl_hw_start(tp); +} + +static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len, + void *addr, unsigned int entry, bool desc_own) +{ + struct TxDesc *txd = tp->TxDescArray + entry; + struct device *d = tp_to_dev(tp); + dma_addr_t mapping; + u32 opts1; + int ret; + + mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); + ret = dma_mapping_error(d, mapping); + if (unlikely(ret)) { + if (net_ratelimit()) + netdev_err(tp->dev, "Failed to map TX data!\n"); + return ret; + } + + txd->addr = cpu_to_le64(mapping); + txd->opts2 = cpu_to_le32(opts[1]); + + opts1 = opts[0] | len; + if (entry == NUM_TX_DESC - 1) + opts1 |= RingEnd; + if (desc_own) + opts1 |= DescOwn; + txd->opts1 = cpu_to_le32(opts1); + + tp->tx_skb[entry].len = len; + + return 0; +} + +static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, + const u32 *opts, unsigned int entry) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int cur_frag; + + for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { + const skb_frag_t *frag = info->frags + cur_frag; + void *addr = skb_frag_address(frag); + u32 len = skb_frag_size(frag); + + entry = (entry + 1) % NUM_TX_DESC; + + if (unlikely(rtl8169_tx_map(tp, opts, len, addr, entry, true))) + goto err_out; + } + + return 0; + +err_out: + rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); + return -EIO; +} + +static bool rtl_skb_is_udp(struct sk_buff *skb) +{ + int no = skb_network_offset(skb); + struct ipv6hdr *i6h, _i6h; + struct iphdr *ih, _ih; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih); + return ih && ih->protocol == IPPROTO_UDP; + case htons(ETH_P_IPV6): + i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h); + return i6h && i6h->nexthdr == IPPROTO_UDP; + default: + return false; + } +} + +#define RTL_MIN_PATCH_LEN 47 + +/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */ +static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + 
unsigned int padto = 0, len = skb->len; + + if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN && + rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) { + unsigned int trans_data_len = skb_tail_pointer(skb) - + skb_transport_header(skb); + + if (trans_data_len >= offsetof(struct udphdr, len) && + trans_data_len < RTL_MIN_PATCH_LEN) { + u16 dest = ntohs(udp_hdr(skb)->dest); + + /* dest is a standard PTP port */ + if (dest == 319 || dest == 320) + padto = len + RTL_MIN_PATCH_LEN - trans_data_len; + } + + if (trans_data_len < sizeof(struct udphdr)) + padto = max_t(unsigned int, padto, + len + sizeof(struct udphdr) - trans_data_len); + } + + return padto; +} + +static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + unsigned int padto; + + padto = rtl8125_quirk_udp_padto(tp, skb); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_60: + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + padto = max_t(unsigned int, padto, ETH_ZLEN); + break; + default: + break; + } + + return padto; +} + +static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts) +{ + u32 mss = skb_shinfo(skb)->gso_size; + + if (mss) { + opts[0] |= TD_LSO; + opts[0] |= mss << TD0_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + + if (ip->protocol == IPPROTO_TCP) + opts[0] |= TD0_IP_CS | TD0_TCP_CS; + else if (ip->protocol == IPPROTO_UDP) + opts[0] |= TD0_IP_CS | TD0_UDP_CS; + else + WARN_ON_ONCE(1); + } +} + +static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, + struct sk_buff *skb, u32 *opts) +{ + u32 transport_offset = (u32)skb_transport_offset(skb); + struct skb_shared_info *shinfo = skb_shinfo(skb); + u32 mss = shinfo->gso_size; + + if (mss) { + if (shinfo->gso_type & SKB_GSO_TCPV4) { + opts[0] |= TD1_GTSENV4; + } else if (shinfo->gso_type & SKB_GSO_TCPV6) { + if (skb_cow_head(skb, 0)) + return false; + + tcp_v6_gso_csum_prep(skb); + opts[0] |= TD1_GTSENV6; + } else { + WARN_ON_ONCE(1); + } + + opts[0] |= transport_offset << GTTCPHO_SHIFT; + opts[1] |= mss << TD1_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ip_protocol; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + opts[1] |= TD1_IPv4_CS; + ip_protocol = ip_hdr(skb)->protocol; + break; + + case htons(ETH_P_IPV6): + opts[1] |= TD1_IPv6_CS; + ip_protocol = ipv6_hdr(skb)->nexthdr; + break; + + default: + ip_protocol = IPPROTO_RAW; + break; + } + + if (ip_protocol == IPPROTO_TCP) + opts[1] |= TD1_TCP_CS; + else if (ip_protocol == IPPROTO_UDP) + opts[1] |= TD1_UDP_CS; + else + WARN_ON_ONCE(1); + + opts[1] |= transport_offset << TCPHO_SHIFT; + } else { + unsigned int padto = rtl_quirk_packet_padto(tp, skb); + + /* skb_padto would free the skb on error */ + return !__skb_put_padto(skb, padto, false); + } + + return true; +} + +static bool rtl_tx_slots_avail(struct rtl8169_private *tp) +{ + unsigned int slots_avail = READ_ONCE(tp->dirty_tx) + NUM_TX_DESC + - READ_ONCE(tp->cur_tx); + + /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ + return slots_avail > MAX_SKB_FRAGS; +} + +/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */ +static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... 
RTL_GIGA_MAC_VER_17: + return false; + default: + return true; + } +} + +static void rtl8169_doorbell(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W16(tp, TxPoll_8125, BIT(0)); + else + RTL_W8(tp, TxPoll, NPQ); +} + +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned int frags = skb_shinfo(skb)->nr_frags; + struct rtl8169_private *tp = netdev_priv(dev); + unsigned int entry = tp->cur_tx % NUM_TX_DESC; + struct TxDesc *txd_first, *txd_last; + bool stop_queue = 0, door_bell = 0; + u32 opts[2]; + + if (unlikely(!rtl_tx_slots_avail(tp))) { + if (net_ratelimit()) + netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); + goto err_stop_0; + } + + opts[1] = rtl8169_tx_vlan_tag(skb); + opts[0] = 0; + + if (!rtl_chip_supports_csum_v2(tp)) + rtl8169_tso_csum_v1(skb, opts); + else if (!rtl8169_tso_csum_v2(tp, skb, opts)) + goto err_dma_0; + + if (unlikely(rtl8169_tx_map(tp, opts, skb_headlen(skb), skb->data, + entry, false))) + goto err_dma_0; + + txd_first = tp->TxDescArray + entry; + + if (frags) { + if (rtl8169_xmit_frags(tp, skb, opts, entry)) + goto err_dma_1; + entry = (entry + frags) % NUM_TX_DESC; + } + + txd_last = tp->TxDescArray + entry; + txd_last->opts1 |= cpu_to_le32(LastFrag); + tp->tx_skb[entry].skb = skb; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + + door_bell = tp->ecdev || __netdev_sent_queue(dev, skb->len, netdev_xmit_more()); + + txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag); + + /* rtl_tx needs to see descriptor changes before updated tp->cur_tx */ + smp_wmb(); + + WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1); + + stop_queue = !tp->ecdev && !rtl_tx_slots_avail(tp); + if (unlikely(stop_queue)) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); + netif_stop_queue(dev); + /* Sync with rtl_tx: + * - publish queue status and cur_tx ring index (write barrier) + * - refresh dirty_tx ring index (read barrier). + * May the current thread have a pessimistic view of the ring + * status and forget to wake up queue, a racing rtl_tx thread + * can't. 
+ */ + smp_mb__after_atomic(); + if (rtl_tx_slots_avail(tp)) + netif_start_queue(dev); + door_bell = true; + } + + if (door_bell) + rtl8169_doorbell(tp); + + return NETDEV_TX_OK; + +err_dma_1: + rtl8169_unmap_tx_skb(tp, entry); +err_dma_0: + if (!tp->ecdev) + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + +err_stop_0: + if (!tp->ecdev) + netif_stop_queue(dev); + dev->stats.tx_dropped++; + return NETDEV_TX_BUSY; +} + +static unsigned int rtl_last_frag_len(struct sk_buff *skb) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int nr_frags = info->nr_frags; + + if (!nr_frags) + return UINT_MAX; + + return skb_frag_size(info->frags + nr_frags - 1); +} + +/* Workaround for hw issues with TSO on RTL8168evl */ +static netdev_features_t rtl8168evl_fix_tso(struct sk_buff *skb, + netdev_features_t features) +{ + /* IPv4 header has options field */ + if (vlan_get_protocol(skb) == htons(ETH_P_IP) && + ip_hdrlen(skb) > sizeof(struct iphdr)) + features &= ~NETIF_F_ALL_TSO; + + /* IPv4 TCP header has options field */ + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 && + tcp_hdrlen(skb) > sizeof(struct tcphdr)) + features &= ~NETIF_F_ALL_TSO; + + else if (rtl_last_frag_len(skb) <= 6) + features &= ~NETIF_F_ALL_TSO; + + return features; +} + +static netdev_features_t rtl8169_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + int transport_offset = skb_transport_offset(skb); + struct rtl8169_private *tp = netdev_priv(dev); + + if (skb_is_gso(skb)) { + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + features = rtl8168evl_fix_tso(skb, features); + + if (transport_offset > GTTCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_ALL_TSO; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* work around hw bug on some chip versions */ + if (skb->len < ETH_ZLEN) + features &= ~NETIF_F_CSUM_MASK; + + if (rtl_quirk_packet_padto(tp, skb)) + features &= ~NETIF_F_CSUM_MASK; + + if (transport_offset > TCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_CSUM_MASK; + } + + return vlan_features_check(skb, features); +} + +static void rtl8169_pcierr_interrupt(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + int pci_status_errs; + u16 pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + + pci_status_errs = pci_status_get_and_clear_errors(pdev); + + if (net_ratelimit()) + netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n", + pci_cmd, pci_status_errs); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, + int budget) +{ + unsigned int dirty_tx, bytes_compl = 0, pkts_compl = 0; + struct sk_buff *skb; + + dirty_tx = tp->dirty_tx; + + while (READ_ONCE(tp->cur_tx) != dirty_tx) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + u32 status; + + status = le32_to_cpu(tp->TxDescArray[entry].opts1); + if (status & DescOwn) + break; + + skb = tp->tx_skb[entry].skb; + rtl8169_unmap_tx_skb(tp, entry); + + if (skb) { + pkts_compl++; + bytes_compl += skb->len; + if (!tp->ecdev) + napi_consume_skb(skb, budget); + } + dirty_tx++; + } + + if (tp->dirty_tx != dirty_tx) { + if (!tp->ecdev) { + netdev_completed_queue(dev, pkts_compl, bytes_compl); + dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl); + } + + /* Sync with rtl8169_start_xmit: + * - publish dirty_tx ring index (write barrier) + * - refresh cur_tx ring index and queue status (read 
barrier) + * May the current thread miss the stopped queue condition, + * a racing xmit thread can only have a right view of the + * ring status. + */ + smp_store_mb(tp->dirty_tx, dirty_tx); + if (!tp->ecdev && netif_queue_stopped(dev) && rtl_tx_slots_avail(tp)) + netif_wake_queue(dev); + /* + * 8168 hack: TxPoll requests are lost when the Tx packets are + * too close. Let's kick an extra TxPoll request when a burst + * of start_xmit activity is detected (if it is not detected, + * it is slow enough). -- FR + * If skb is NULL then we come here again once a tx irq is + * triggered after the last fragment is marked transmitted. + */ + if (tp->cur_tx != dirty_tx && skb) + rtl8169_doorbell(tp); + } +} + +static inline int rtl8169_fragmented_frame(u32 status) +{ + return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); +} + +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) +{ + u32 status = opts1 & (RxProtoMask | RxCSFailMask); + + if (status == RxProtoTCP || status == RxProtoUDP) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); +} + +static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget) +{ + struct device *d = tp_to_dev(tp); + int count; + + for (count = 0; count < budget; count++, tp->cur_rx++) { + unsigned int pkt_size, entry = tp->cur_rx % NUM_RX_DESC; + struct RxDesc *desc = tp->RxDescArray + entry; + struct sk_buff *skb; + const void *rx_buf; + dma_addr_t addr; + u32 status; + + status = le32_to_cpu(desc->opts1); + if (status & DescOwn) + break; + + /* This barrier is needed to keep us from reading + * any other fields out of the Rx descriptor until + * we know the status of DescOwn + */ + dma_rmb(); + + if (unlikely(status & RxRES)) { + if (net_ratelimit()) + netdev_warn(dev, "Rx ERROR. status = %08x\n", + status); + dev->stats.rx_errors++; + if (status & (RxRWT | RxRUNT)) + dev->stats.rx_length_errors++; + if (status & RxCRC) + dev->stats.rx_crc_errors++; + + if (!(dev->features & NETIF_F_RXALL)) + goto release_descriptor; + else if (status & RxRWT || !(status & (RxRUNT | RxCRC))) + goto release_descriptor; + } + + pkt_size = status & GENMASK(13, 0); + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size -= ETH_FCS_LEN; + + /* The driver does not support incoming fragmented frames. + * They are seen as a symptom of over-mtu sized frames. + */ + if (unlikely(rtl8169_fragmented_frame(status))) { + dev->stats.rx_dropped++; + dev->stats.rx_length_errors++; + goto release_descriptor; + } + + if (!tp->ecdev) { + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (unlikely(!skb)) { + dev->stats.rx_dropped++; + goto release_descriptor; + } + } else { + skb = NULL; + } + + addr = le64_to_cpu(desc->addr); + rx_buf = page_address(tp->Rx_databuff[entry]); + + dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); + prefetch(rx_buf); + if (tp->ecdev) { + ecdev_receive(tp->ecdev, rx_buf, pkt_size); + + // No need to detect link status as + // long as frames are received: Reset watchdog. 
+ tp->ec_watchdog_jiffies = jiffies; + + } else { + skb_copy_to_linear_data(skb, rx_buf, pkt_size); + skb->tail += pkt_size; + skb->len = pkt_size; + } + dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); + + if (!tp->ecdev) { + rtl8169_rx_csum(skb, status); + skb->protocol = eth_type_trans(skb, dev); + + rtl8169_rx_vlan_tag(desc, skb); + + if (skb->pkt_type == PACKET_MULTICAST) + dev->stats.multicast++; + + napi_gro_receive(&tp->napi, skb); + dev_sw_netstats_rx_add(dev, pkt_size); + } +release_descriptor: + rtl8169_mark_to_asic(desc); + } + + return count; +} + +static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) +{ + struct rtl8169_private *tp = dev_instance; + u32 status = rtl_get_events(tp); + + if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask)) + return IRQ_NONE; + + if (unlikely(status & SYSErr)) { + rtl8169_pcierr_interrupt(tp->dev); + goto out; + } + + if (status & LinkChg) + phy_mac_interrupt(tp->phydev); + + if (unlikely(status & RxFIFOOver && + tp->mac_version == RTL_GIGA_MAC_VER_11)) { + netif_stop_queue(tp->dev); + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); + } + + if (napi_schedule_prep(&tp->napi)) { + rtl_irq_disable(tp); + __napi_schedule(&tp->napi); + } +out: + rtl_ack_events(tp, status); + + return IRQ_HANDLED; +} + +static void rtl_task(struct work_struct *work) +{ + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, wk.work); + + rtnl_lock(); + + if (!netif_running(tp->dev) || + !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) + goto out_unlock; + + if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) { + rtl_reset_work(tp); + netif_wake_queue(tp->dev); + } +out_unlock: + rtnl_unlock(); +} + +static int rtl8169_poll(struct napi_struct *napi, int budget) +{ + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); + struct net_device *dev = tp->dev; + int work_done; + + rtl_tx(dev, tp, budget); + + work_done = rtl_rx(dev, tp, budget); + + if (work_done < budget && napi_complete_done(napi, work_done)) + rtl_irq_enable(tp); + + return work_done; +} + +static void r8169_phylink_handler(struct net_device *ndev) +{ + struct rtl8169_private *tp = netdev_priv(ndev); + + if (netif_carrier_ok(ndev)) { + rtl_link_chg_patch(tp); + pm_request_resume(&tp->pci_dev->dev); + } else { + pm_runtime_idle(&tp->pci_dev->dev); + } + + if (net_ratelimit()) + phy_print_status(tp->phydev); +} + +static int r8169_phy_connect(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + phy_interface_t phy_mode; + int ret; + + phy_mode = tp->supports_gmii ? 
PHY_INTERFACE_MODE_GMII : + PHY_INTERFACE_MODE_MII; + + ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler, + phy_mode); + if (ret) + return ret; + + if (!tp->supports_gmii) + phy_set_max_speed(phydev, SPEED_100); + + phy_attached_info(phydev); + + return 0; +} + +static void rtl8169_down(struct rtl8169_private *tp) +{ + /* Clear all task flags */ + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); + + phy_stop(tp->phydev); + + rtl8169_update_counters(tp); + + pci_clear_master(tp->pci_dev); + rtl_pci_commit(tp); + + rtl8169_cleanup(tp, true); + + rtl_prepare_power_down(tp); +} + +static void rtl8169_up(struct rtl8169_private *tp) +{ + pci_set_master(tp->pci_dev); + phy_init_hw(tp->phydev); + phy_resume(tp->phydev); + rtl8169_init_phy(tp); + if (!tp->ecdev) + napi_enable(&tp->napi); + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_reset_work(tp); + + phy_start(tp->phydev); +} + +static int rtl8169_close(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + pm_runtime_get_sync(&pdev->dev); + + if (!tp->ecdev) + netif_stop_queue(dev); + rtl8169_down(tp); + rtl8169_rx_clear(tp); + + cancel_work_sync(&tp->wk.work); + + if (!tp->ecdev) + free_irq(pci_irq_vector(pdev, 0), tp); + + phy_disconnect(tp->phydev); + + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + tp->RxDescArray = NULL; + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8169_netpoll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp); +} +#endif + +static int rtl_open(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + unsigned long irqflags; + int retval = -ENOMEM; + + pm_runtime_get_sync(&pdev->dev); + + /* + * Rx and Tx descriptors needs 256 bytes alignment. + * dma_alloc_coherent provides more. + */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto out; + + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_tx_0; + + retval = rtl8169_init_ring(tp); + if (retval < 0) + goto err_free_rx_1; + + rtl_request_firmware(tp); + + irqflags = pci_dev_msi_enabled(pdev) ? 
IRQF_NO_THREAD : IRQF_SHARED;
+	if (!tp->ecdev) {
+		retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
+				irqflags, dev->name, tp);
+		if (retval < 0)
+			goto err_release_fw_2;
+	}
+
+	retval = r8169_phy_connect(tp);
+	if (retval)
+		goto err_free_irq;
+
+	rtl8169_up(tp);
+	rtl8169_init_counter_offsets(tp);
+	if (!tp->ecdev)
+		netif_start_queue(dev);
+	else
+		ecdev_set_link(tp->ecdev, netif_carrier_ok(dev));
+
+out:
+	pm_runtime_put_sync(&pdev->dev);
+
+	return retval;
+
+err_free_irq:
+	/* the IRQ is only requested when the device is not claimed as EtherCAT device */
+	if (!tp->ecdev)
+		free_irq(pci_irq_vector(pdev, 0), tp);
+err_release_fw_2:
+	rtl_release_firmware(tp);
+	rtl8169_rx_clear(tp);
+err_free_rx_1:
+	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+			tp->RxPhyAddr);
+	tp->RxDescArray = NULL;
+err_free_tx_0:
+	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+			tp->TxPhyAddr);
+	tp->TxDescArray = NULL;
+	goto out;
+}
+
+static void
+rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+	struct pci_dev *pdev = tp->pci_dev;
+	struct rtl8169_counters *counters = tp->counters;
+
+	pm_runtime_get_noresume(&pdev->dev);
+
+	netdev_stats_to_stats64(stats, &dev->stats);
+	dev_fetch_sw_netstats(stats, dev->tstats);
+
+	/*
+	 * Fetch additional counter values missing in stats collected by driver
+	 * from tally counters.
+	 */
+	if (pm_runtime_active(&pdev->dev))
+		rtl8169_update_counters(tp);
+
+	/*
+	 * Subtract values fetched during initialization.
+	 * See rtl8169_init_counter_offsets for a description why we do that.
+	 */
+	stats->tx_errors = le64_to_cpu(counters->tx_errors) -
+		le64_to_cpu(tp->tc_offset.tx_errors);
+	stats->collisions = le32_to_cpu(counters->tx_multi_collision) -
+		le32_to_cpu(tp->tc_offset.tx_multi_collision);
+	stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
+		le16_to_cpu(tp->tc_offset.tx_aborted);
+	stats->rx_missed_errors = le16_to_cpu(counters->rx_missed) -
+		le16_to_cpu(tp->tc_offset.rx_missed);
+
+	pm_runtime_put_noidle(&pdev->dev);
+}
+
+static void rtl8169_net_suspend(struct rtl8169_private *tp)
+{
+	netif_device_detach(tp->dev);
+
+	if (netif_running(tp->dev))
+		rtl8169_down(tp);
+}
+
+#ifdef CONFIG_PM
+
+static int rtl8169_runtime_resume(struct device *dev)
+{
+	struct rtl8169_private *tp = dev_get_drvdata(dev);
+
+	rtl_rar_set(tp, tp->dev->dev_addr);
+	__rtl8169_set_wol(tp, tp->saved_wolopts);
+
+	if (tp->TxDescArray)
+		rtl8169_up(tp);
+
+	netif_device_attach(tp->dev);
+
+	return 0;
+}
+
+static int __maybe_unused rtl8169_suspend(struct device *device)
+{
+	struct rtl8169_private *tp = dev_get_drvdata(device);
+	if (tp->ecdev) {
+		return -EBUSY;
+	}
+
+	rtnl_lock();
+	rtl8169_net_suspend(tp);
+	if (!device_may_wakeup(tp_to_dev(tp)))
+		clk_disable_unprepare(tp->clk);
+	rtnl_unlock();
+
+	return 0;
+}
+
+static int __maybe_unused rtl8169_resume(struct device *device)
+{
+	struct rtl8169_private *tp = dev_get_drvdata(device);
+	if (tp->ecdev) {
+		return -EBUSY;
+	}
+
+	if (!device_may_wakeup(tp_to_dev(tp)))
+		clk_prepare_enable(tp->clk);
+
+	/* Reportedly at least Asus X453MA truncates packets otherwise */
+	if (tp->mac_version == RTL_GIGA_MAC_VER_37)
+		rtl_init_rxcfg(tp);
+
+	return rtl8169_runtime_resume(device);
+}
+
+static int rtl8169_runtime_suspend(struct device *device)
+{
+	struct rtl8169_private *tp = dev_get_drvdata(device);
+	if (tp->ecdev) {
+		return -EBUSY;
+	}
+
+	if (!tp->TxDescArray) {
+		netif_device_detach(tp->dev);
+		return 0;
+	}
+
+	rtnl_lock();
+	__rtl8169_set_wol(tp, WAKE_PHY);
+	rtl8169_net_suspend(tp);
+ rtnl_unlock(); + + return 0; +} + +static int rtl8169_runtime_idle(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev)) + pm_schedule_suspend(device, 10000); + + return -EBUSY; +} + +static const struct dev_pm_ops rtl8169_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume) + SET_RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume, + rtl8169_runtime_idle) +}; + +#endif /* CONFIG_PM */ + +static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) +{ + /* WoL fails with 8168b when the receiver is disabled. */ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + pci_clear_master(tp->pci_dev); + + RTL_W8(tp, ChipCmd, CmdRxEnb); + rtl_pci_commit(tp); + break; + default: + break; + } +} + +static void rtl_shutdown(struct pci_dev *pdev) +{ + struct rtl8169_private *tp = pci_get_drvdata(pdev); + + rtnl_lock(); + rtl8169_net_suspend(tp); + rtnl_unlock(); + + /* Restore original MAC address */ + rtl_rar_set(tp, tp->dev->perm_addr); + + if (system_state == SYSTEM_POWER_OFF) { + if (tp->saved_wolopts) + rtl_wol_shutdown_quirk(tp); + + pci_wake_from_d3(pdev, tp->saved_wolopts); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +static void rtl_remove_one(struct pci_dev *pdev) +{ + struct rtl8169_private *tp = pci_get_drvdata(pdev); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + if (tp->ecdev) { + ecdev_close(tp->ecdev); + ecdev_withdraw(tp->ecdev); + } else { + unregister_netdev(tp->dev); + } + + if (tp->dash_type != RTL_DASH_NONE) + rtl8168_driver_stop(tp); + + rtl_release_firmware(tp); + + /* restore original MAC address */ + rtl_rar_set(tp, tp->dev->perm_addr); +} + +static const struct net_device_ops rtl_netdev_ops = { + .ndo_open = rtl_open, + .ndo_stop = rtl8169_close, + .ndo_get_stats64 = rtl8169_get_stats64, + .ndo_start_xmit = rtl8169_start_xmit, + .ndo_features_check = rtl8169_features_check, + .ndo_tx_timeout = rtl8169_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = rtl8169_change_mtu, + .ndo_fix_features = rtl8169_fix_features, + .ndo_set_features = rtl8169_set_features, + .ndo_set_mac_address = rtl_set_mac_address, + .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_set_rx_mode = rtl_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8169_netpoll, +#endif + +}; + +static void rtl_set_irq_mask(struct rtl8169_private *tp) +{ + tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg; + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver; + else if (tp->mac_version == RTL_GIGA_MAC_VER_11) + /* special workaround needed */ + tp->irq_mask |= RxFIFOOver; + else + tp->irq_mask |= RxOverflow; +} + +static int rtl_alloc_irq(struct rtl8169_private *tp) +{ + unsigned int flags; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + rtl_unlock_config_regs(tp); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); + rtl_lock_config_regs(tp); + fallthrough; + case RTL_GIGA_MAC_VER_07 ... 
RTL_GIGA_MAC_VER_17: + flags = PCI_IRQ_LEGACY; + break; + default: + flags = PCI_IRQ_ALL_TYPES; + break; + } + + return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); +} + +static void rtl_read_mac_address(struct rtl8169_private *tp, + u8 mac_addr[ETH_ALEN]) +{ + /* Get MAC address */ + if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) { + u32 value; + + value = rtl_eri_read(tp, 0xe0); + put_unaligned_le32(value, mac_addr); + value = rtl_eri_read(tp, 0xe4); + put_unaligned_le16(value, mac_addr + 4); + } else if (rtl_is_8125(tp)) { + rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP); + } +} + +DECLARE_RTL_COND(rtl_link_list_ready_cond) +{ + return RTL_R8(tp, MCU) & LINK_LIST_RDY; +} + +static void r8168g_wait_ll_share_fifo_ready(struct rtl8169_private *tp) +{ + rtl_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42); +} + +static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + return rtl_readphy(tp, phyreg); +} + +static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr, + int phyreg, u16 val) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + rtl_writephy(tp, phyreg, val); + + return 0; +} + +static int r8169_mdio_register(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + struct mii_bus *new_bus; + int ret; + + new_bus = devm_mdiobus_alloc(&pdev->dev); + if (!new_bus) + return -ENOMEM; + + new_bus->name = "r8169"; + new_bus->priv = tp; + new_bus->parent = &pdev->dev; + new_bus->irq[0] = PHY_MAC_INTERRUPT; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x", + pci_domain_nr(pdev->bus), pci_dev_id(pdev)); + + new_bus->read = r8169_mdio_read_reg; + new_bus->write = r8169_mdio_write_reg; + + ret = devm_mdiobus_register(&pdev->dev, new_bus); + if (ret) + return ret; + + tp->phydev = mdiobus_get_phy(new_bus, 0); + if (!tp->phydev) { + return -ENODEV; + } else if (!tp->phydev->drv) { + /* Most chip versions fail with the genphy driver. + * Therefore ensure that the dedicated PHY driver is loaded. + */ + dev_err(&pdev->dev, "no dedicated PHY driver found for PHY ID 0x%08x, maybe realtek.ko needs to be added to initramfs?\n", + tp->phydev->phy_id); + return -EUNATCH; + } + + tp->phydev->mac_managed_pm = 1; + + phy_support_asym_pause(tp->phydev); + + /* PHY will be woken up in rtl_open() */ + phy_suspend(tp->phydev); + + return 0; +} + +static void rtl_hw_init_8168g(struct rtl8169_private *tp) +{ + rtl_enable_rxdvgate(tp); + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + r8168g_wait_ll_share_fifo_ready(tp); + + r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15)); + r8168g_wait_ll_share_fifo_ready(tp); +} + +static void rtl_hw_init_8125(struct rtl8169_private *tp) +{ + rtl_enable_rxdvgate(tp); + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + r8168g_wait_ll_share_fifo_ready(tp); + + r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0); + r8168_mac_ocp_write(tp, 0xc0a6, 0x0150); + r8168_mac_ocp_write(tp, 0xc01e, 0x5555); + r8168g_wait_ll_share_fifo_ready(tp); +} + +static void rtl_hw_initialize(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_49 ... 
RTL_GIGA_MAC_VER_53: + rtl8168ep_stop_cmac(tp); + fallthrough; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: + rtl_hw_init_8168g(tp); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63: + rtl_hw_init_8125(tp); + break; + default: + break; + } +} + +static int rtl_jumbo_max(struct rtl8169_private *tp) +{ + /* Non-GBit versions don't support jumbo frames */ + if (!tp->supports_gmii) + return 0; + + switch (tp->mac_version) { + /* RTL8169 */ + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + return JUMBO_7K; + /* RTL8168b */ + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + return JUMBO_4K; + /* RTL8168c */ + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: + return JUMBO_6K; + default: + return JUMBO_9K; + } +} + +static void rtl_disable_clk(void *data) +{ + clk_disable_unprepare(data); +} + +static int rtl_get_ether_clk(struct rtl8169_private *tp) +{ + struct device *d = tp_to_dev(tp); + struct clk *clk; + int rc; + + clk = devm_clk_get(d, "ether_clk"); + if (IS_ERR(clk)) { + rc = PTR_ERR(clk); + if (rc == -ENOENT) + /* clk-core allows NULL (for suspend / resume) */ + rc = 0; + else + dev_err_probe(d, rc, "failed to get clk\n"); + } else { + tp->clk = clk; + rc = clk_prepare_enable(clk); + if (rc) + dev_err(d, "failed to enable clk: %d\n", rc); + else + rc = devm_add_action_or_reset(d, rtl_disable_clk, clk); + } + + return rc; +} + +static void rtl_init_mac_address(struct rtl8169_private *tp) +{ + struct net_device *dev = tp->dev; + u8 *mac_addr = dev->dev_addr; + int rc; + + rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr); + if (!rc) + goto done; + + rtl_read_mac_address(tp, mac_addr); + if (is_valid_ether_addr(mac_addr)) + goto done; + + rtl_read_mac_from_reg(tp, mac_addr, MAC0); + if (is_valid_ether_addr(mac_addr)) + goto done; + + eth_hw_addr_random(dev); + dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n"); +done: + rtl_rar_set(tp, mac_addr); +} + +static void ec_poll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u16 status = rtl_get_events(tp); + + if (jiffies - tp->ec_watchdog_jiffies >= 2 * HZ) { + ecdev_set_link(tp->ecdev, netif_carrier_ok(dev)); + tp->ec_watchdog_jiffies = jiffies; + } + + if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask)) + return; + + rtl_tx(dev, tp, 100); + + rtl_rx(dev, tp, 100); + + if (status & LinkChg) + phy_mac_interrupt(tp->phydev); + + rtl_ack_events(tp, status); +} + + +static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct rtl8169_private *tp; + int jumbo_max, region, rc; + enum mac_version chipset; + struct net_device *dev; + u16 xid; + + dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp)); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &rtl_netdev_ops; + tp = netdev_priv(dev); + tp->dev = dev; + tp->pci_dev = pdev; + tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1; + tp->eee_adv = -1; + tp->ocp_base = OCP_STD_PHY_BASE; + + dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev, + struct pcpu_sw_netstats); + if (!dev->tstats) + return -ENOMEM; + + /* Get the *optional* external "ether_clk" used on some boards */ + rc = rtl_get_ether_clk(tp); + if (rc) + return rc; + + /* Disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users. 
+ */ + rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | + PCIE_LINK_STATE_L1); + tp->aspm_manageable = !rc; + + /* enable device (incl. PCI PM wakeup and hotplug setup) */ + rc = pcim_enable_device(pdev); + if (rc < 0) { + dev_err(&pdev->dev, "enable failure\n"); + return rc; + } + + if (pcim_set_mwi(pdev) < 0) + dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n"); + + /* use first MMIO region */ + region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1; + if (region < 0) { + dev_err(&pdev->dev, "no MMIO resource found\n"); + return -ENODEV; + } + + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { + dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n"); + return -ENODEV; + } + + rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME); + if (rc < 0) { + dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); + return rc; + } + + tp->mmio_addr = pcim_iomap_table(pdev)[region]; + + xid = (RTL_R32(tp, TxConfig) >> 20) & 0xfcf; + + /* Identify chip attached to board */ + chipset = rtl8169_get_mac_version(xid, tp->supports_gmii); + if (chipset == RTL_GIGA_MAC_NONE) { + dev_err(&pdev->dev, "unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n", xid); + return -ENODEV; + } + + tp->mac_version = chipset; + + tp->dash_type = rtl_check_dash(tp); + + tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK; + + if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) + dev->features |= NETIF_F_HIGHDMA; + + rtl_init_rxcfg(tp); + + rtl8169_irq_mask_and_ack(tp); + + rtl_hw_initialize(tp); + + rtl_hw_reset(tp); + + rc = rtl_alloc_irq(tp); + if (rc < 0) { + dev_err(&pdev->dev, "Can't allocate interrupt\n"); + return rc; + } + + INIT_WORK(&tp->wk.work, rtl_task); + + rtl_init_mac_address(tp); + + dev->ethtool_ops = &rtl8169_ethtool_ops; + + netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT); + + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. + */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + /* Disallow toggling */ + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (rtl_chip_supports_csum_v2(tp)) + dev->hw_features |= NETIF_F_IPV6_CSUM; + + dev->features |= dev->hw_features; + + /* There has been a number of reports that using SG/TSO results in + * tx timeouts. However for a lot of people SG/TSO works fine. + * Therefore disable both features by default, but allow users to + * enable them. Use at own risk! 
+ */ + if (rtl_chip_supports_csum_v2(tp)) { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; + dev->gso_max_size = RTL_GSO_MAX_SIZE_V2; + dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2; + } else { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO; + dev->gso_max_size = RTL_GSO_MAX_SIZE_V1; + dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1; + } + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + /* configure chip for default features */ + rtl8169_set_features(dev, dev->features); + + rtl_set_d3_pll_down(tp, true); + + jumbo_max = rtl_jumbo_max(tp); + if (jumbo_max) + dev->max_mtu = jumbo_max; + + rtl_set_irq_mask(tp); + + tp->fw_name = rtl_chip_infos[chipset].fw_name; + + tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), + &tp->counters_phys_addr, + GFP_KERNEL); + if (!tp->counters) + return -ENOMEM; + + pci_set_drvdata(pdev, tp); + + rc = r8169_mdio_register(tp); + if (rc) + return rc; + + tp->ecdev = ecdev_offer(dev, ec_poll, THIS_MODULE); + tp->ec_watchdog_jiffies = jiffies; + + if (!tp->ecdev) { + rc = register_netdev(dev); + if (rc) + return rc; + } + + netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n", + rtl_chip_infos[chipset].name, dev->dev_addr, xid, + pci_irq_vector(pdev, 0)); + + if (jumbo_max) + netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n", + jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ? + "ok" : "ko"); + + if (tp->dash_type != RTL_DASH_NONE) { + netdev_info(dev, "DASH enabled\n"); + rtl8168_driver_start(tp); + } + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_sync(&pdev->dev); + + if (tp->ecdev) { + rc = ecdev_open(tp->ecdev); + if (rc) { + ecdev_withdraw(tp->ecdev); + return rc; + } + } + + return 0; +} + +static struct pci_driver rtl8169_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = rtl8169_pci_tbl, + .probe = rtl_init_one, + .remove = rtl_remove_one, + .shutdown = rtl_shutdown, + .driver.pm = pm_ptr(&rtl8169_pm_ops), +}; + +module_pci_driver(rtl8169_pci_driver); diff --git a/devices/r8169/r8169_main-5.14-orig.c b/devices/r8169/r8169_main-5.14-orig.c new file mode 100644 index 00000000..6a566676 --- /dev/null +++ b/devices/r8169/r8169_main-5.14-orig.c @@ -0,0 +1,5456 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * r8169.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "r8169.h" +#include "r8169_firmware.h" + +#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" +#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" +#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" +#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" +#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" +#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" +#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" +#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" +#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" +#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" +#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw" +#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw" +#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" +#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" +#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" +#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw" +#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" +#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw" +#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" +#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" +#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw" +#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw" + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. */ +#define MC_FILTER_LIMIT 32 + +#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ +#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ + +#define R8169_REGS_SIZE 256 +#define R8169_RX_BUF_SIZE (SZ_16K - 1) +#define NUM_TX_DESC 256 /* Number of Tx descriptor registers */ +#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */ +#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) +#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + +#define OCP_STD_PHY_BASE 0xa400 + +#define RTL_CFG_NO_GBIT 1 + +/* write/read MMIO register */ +#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) +#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) +#define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg)) +#define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg)) +#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg)) +#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg)) + +#define JUMBO_4K (4 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) + +static const struct { + const char *name; + const char *fw_name; +} rtl_chip_infos[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_02] = {"RTL8169s" }, + [RTL_GIGA_MAC_VER_03] = {"RTL8110s" }, + [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" }, + [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" }, + [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" }, + /* PCI-E devices. 
*/ + [RTL_GIGA_MAC_VER_07] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_08] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" }, + [RTL_GIGA_MAC_VER_10] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_13] = {"RTL8101e/RTL8100e" }, + [RTL_GIGA_MAC_VER_14] = {"RTL8401" }, + [RTL_GIGA_MAC_VER_16] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1}, + [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2}, + [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1}, + [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2}, + [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3}, + [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1}, + [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2}, + [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 }, + [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 }, + [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1}, + [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2}, + [RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" }, + [RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3}, + [RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2}, + [RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 }, + [RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1}, + [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2}, + [RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1}, + [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2}, + [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3}, + [RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", }, + [RTL_GIGA_MAC_VER_60] = {"RTL8125A" }, + [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3}, + /* reserve 62 for CFG_METHOD_4 in the vendor driver */ + [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2}, +}; + +static const struct pci_device_id rtl8169_pci_tbl[] = { + { PCI_VDEVICE(REALTEK, 0x2502) }, + { PCI_VDEVICE(REALTEK, 0x2600) }, + { PCI_VDEVICE(REALTEK, 0x8129) }, + { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT }, + { PCI_VDEVICE(REALTEK, 0x8161) }, + { PCI_VDEVICE(REALTEK, 0x8162) }, + { PCI_VDEVICE(REALTEK, 0x8167) }, + { PCI_VDEVICE(REALTEK, 0x8168) }, + { PCI_VDEVICE(NCUBE, 0x8168) }, + { PCI_VDEVICE(REALTEK, 0x8169) }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 }, + { PCI_VDEVICE(DLINK, 0x4300) }, + { PCI_VDEVICE(DLINK, 0x4302) }, + { PCI_VDEVICE(AT, 0xc107) }, + { PCI_VDEVICE(USR, 0x0116) }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 }, + { 0x0001, 0x8168, PCI_ANY_ID, 0x2410 }, + { PCI_VDEVICE(REALTEK, 0x8125) }, + { PCI_VDEVICE(REALTEK, 0x3000) }, + {} +}; + +MODULE_DEVICE_TABLE(pci, 
rtl8169_pci_tbl); + +enum rtl_registers { + MAC0 = 0, /* Ethernet hardware address. */ + MAC4 = 4, + MAR0 = 8, /* Multicast filter. */ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3c, + IntrStatus = 0x3e, + + TxConfig = 0x40, +#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */ +#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */ + + RxConfig = 0x44, +#define RX128_INT_EN (1 << 15) /* 8111c and later */ +#define RX_MULTI_EN (1 << 14) /* 8111c only */ +#define RXCFG_FIFO_SHIFT 13 + /* No threshold before first PCI xfer */ +#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) +#define RX_EARLY_OFF (1 << 11) +#define RXCFG_DMA_SHIFT 8 + /* Unlimited maximum PCI burst. */ +#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) + + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, +#define PME_SIGNAL (1 << 5) /* 8168c and later */ + + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + PHYAR = 0x60, + PHYstatus = 0x6c, + RxMaxSize = 0xda, + CPlusCmd = 0xe0, + IntrMitigate = 0xe2, + +#define RTL_COALESCE_TX_USECS GENMASK(15, 12) +#define RTL_COALESCE_TX_FRAMES GENMASK(11, 8) +#define RTL_COALESCE_RX_USECS GENMASK(7, 4) +#define RTL_COALESCE_RX_FRAMES GENMASK(3, 0) + +#define RTL_COALESCE_T_MAX 0x0fU +#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_T_MAX * 4) + + RxDescAddrLow = 0xe4, + RxDescAddrHigh = 0xe8, + EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ + +#define NoEarlyTx 0x3f /* Max value : no early transmit. */ + + MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */ + +#define TxPacketMax (8064 >> 7) +#define EarlySize 0x27 + + FuncEvent = 0xf0, + FuncEventMask = 0xf4, + FuncPresetState = 0xf8, + IBCR0 = 0xf8, + IBCR2 = 0xf9, + IBIMR0 = 0xfa, + IBISR0 = 0xfb, + FuncForceEvent = 0xfc, +}; + +enum rtl8168_8101_registers { + CSIDR = 0x64, + CSIAR = 0x68, +#define CSIAR_FLAG 0x80000000 +#define CSIAR_WRITE_CMD 0x80000000 +#define CSIAR_BYTE_ENABLE 0x0000f000 +#define CSIAR_ADDR_MASK 0x00000fff + PMCH = 0x6f, +#define D3COLD_NO_PLL_DOWN BIT(7) +#define D3HOT_NO_PLL_DOWN BIT(6) +#define D3_NO_PLL_DOWN (BIT(7) | BIT(6)) + EPHYAR = 0x80, +#define EPHYAR_FLAG 0x80000000 +#define EPHYAR_WRITE_CMD 0x80000000 +#define EPHYAR_REG_MASK 0x1f +#define EPHYAR_REG_SHIFT 16 +#define EPHYAR_DATA_MASK 0xffff + DLLPR = 0xd0, +#define PFM_EN (1 << 6) +#define TX_10M_PS_EN (1 << 7) + DBG_REG = 0xd1, +#define FIX_NAK_1 (1 << 4) +#define FIX_NAK_2 (1 << 3) + TWSI = 0xd2, + MCU = 0xd3, +#define NOW_IS_OOB (1 << 7) +#define TX_EMPTY (1 << 5) +#define RX_EMPTY (1 << 4) +#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY) +#define EN_NDP (1 << 3) +#define EN_OOB_RESET (1 << 2) +#define LINK_LIST_RDY (1 << 1) + EFUSEAR = 0xdc, +#define EFUSEAR_FLAG 0x80000000 +#define EFUSEAR_WRITE_CMD 0x80000000 +#define EFUSEAR_READ_CMD 0x00000000 +#define EFUSEAR_REG_MASK 0x03ff +#define EFUSEAR_REG_SHIFT 8 +#define EFUSEAR_DATA_MASK 0xff + MISC_1 = 0xf2, +#define PFM_D3COLD_EN (1 << 6) +}; + +enum rtl8168_registers { + LED_FREQ = 0x1a, + EEE_LED = 0x1b, + ERIDR = 0x70, + ERIAR = 0x74, +#define ERIAR_FLAG 0x80000000 +#define ERIAR_WRITE_CMD 0x80000000 +#define ERIAR_READ_CMD 0x00000000 +#define ERIAR_ADDR_BYTE_ALIGN 4 +#define ERIAR_TYPE_SHIFT 16 +#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) +#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) +#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_OOB (0x02 << ERIAR_TYPE_SHIFT) 
+#define ERIAR_MASK_SHIFT 12 +#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) + EPHY_RXER_NUM = 0x7c, + OCPDR = 0xb0, /* OCP GPHY access */ +#define OCPDR_WRITE_CMD 0x80000000 +#define OCPDR_READ_CMD 0x00000000 +#define OCPDR_REG_MASK 0x7f +#define OCPDR_GPHY_REG_SHIFT 16 +#define OCPDR_DATA_MASK 0xffff + OCPAR = 0xb4, +#define OCPAR_FLAG 0x80000000 +#define OCPAR_GPHY_WRITE_CMD 0x8000f060 +#define OCPAR_GPHY_READ_CMD 0x0000f060 + GPHY_OCP = 0xb8, + RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ + MISC = 0xf0, /* 8168e only. */ +#define TXPLA_RST (1 << 29) +#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */ +#define PWM_EN (1 << 22) +#define RXDV_GATED_EN (1 << 19) +#define EARLY_TALLY_EN (1 << 16) +}; + +enum rtl8125_registers { + IntrMask_8125 = 0x38, + IntrStatus_8125 = 0x3c, + TxPoll_8125 = 0x90, + MAC0_BKP = 0x19e0, + EEE_TXIDLE_TIMER_8125 = 0x6048, +}; + +#define RX_VLAN_INNER_8125 BIT(22) +#define RX_VLAN_OUTER_8125 BIT(23) +#define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125) + +#define RX_FETCH_DFLT_8125 (8 << 27) + +enum rtl_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxOverflow = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* TXPoll register p.5 */ + HPQ = 0x80, /* Poll cmd on the high prio queue */ + NPQ = 0x40, /* Poll cmd on the low prio queue */ + FSWInt = 0x01, /* Forced software interrupt */ + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xc0, + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, +#define RX_CONFIG_ACCEPT_ERR_MASK 0x30 + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +#define RX_CONFIG_ACCEPT_OK_MASK 0x0f +#define RX_CONFIG_ACCEPT_MASK 0x3f + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + /* Config1 register p.24 */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register p. 25 */ + ClkReqEn = (1 << 7), /* Clock Request Enable */ + MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ + PCI_Clock_66MHz = 0x01, + PCI_Clock_33MHz = 0x00, + + /* Config3 register p.25 */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Rdy_to_L23 = (1 << 1), /* L23 Enable */ + Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* 8168 only. 
Reserved in the 8168b */ + + /* Config5 register p.27 */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + Spi_en = (1 << 3), + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + ASPM_en = (1 << 0), /* ASPM enable */ + + /* CPlusCmd p.31 */ + EnableBist = (1 << 15), // 8168 8101 + Mac_dbgo_oe = (1 << 14), // 8168 8101 + EnAnaPLL = (1 << 14), // 8169 + Normal_mode = (1 << 13), // unused + Force_half_dup = (1 << 12), // 8168 8101 + Force_rxflow_en = (1 << 11), // 8168 8101 + Force_txflow_en = (1 << 10), // 8168 8101 + Cxpl_dbg_sel = (1 << 9), // 8168 8101 + ASF = (1 << 8), // 8168 8101 + PktCntrDisable = (1 << 7), // 8168 8101 + Mac_dbgo_sel = 0x001c, // 8168 + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + PCIDAC = (1 << 4), + PCIMulRW = (1 << 3), +#define INTT_MASK GENMASK(1, 0) +#define CPCMD_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK) + + /* rtl8169_PHYstatus */ + TBI_Enable = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* ResetCounterCommand */ + CounterReset = 0x1, + + /* DumpCounterCommand */ + CounterDump = 0x8, + + /* magic enable v2 */ + MagicPacket_v2 = (1 << 16), /* Wake up when receives a Magic Packet */ +}; + +enum rtl_desc_bit { + /* First doubleword. */ + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ +}; + +/* Generic case. */ +enum rtl_tx_desc_bit { + /* First doubleword. */ + TD_LSO = (1 << 27), /* Large Send Offload */ +#define TD_MSS_MAX 0x07ffu /* MSS value */ + + /* Second doubleword. */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ +}; + +/* 8169, 8168b and 810x except 8102e. */ +enum rtl_tx_desc_bit_0 { + /* First doubleword. */ +#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */ + TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */ + TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */ + TD0_IP_CS = (1 << 18), /* Calculate IP checksum */ +}; + +/* 8102e, 8168c and beyond. */ +enum rtl_tx_desc_bit_1 { + /* First doubleword. */ + TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */ + TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */ +#define GTTCPHO_SHIFT 18 +#define GTTCPHO_MAX 0x7f + + /* Second doubleword. 
*/ +#define TCPHO_SHIFT 18 +#define TCPHO_MAX 0x3ff +#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ + TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */ + TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */ + TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */ + TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */ +}; + +enum rtl_rx_desc_bit { + /* Rx private */ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 0/2 */ + +#define RxProtoUDP (PID1) +#define RxProtoTCP (PID0) +#define RxProtoIP (PID1 | PID0) +#define RxProtoMask RxProtoIP + + IPFail = (1 << 16), /* IP checksum failed */ + UDPFail = (1 << 15), /* UDP/IP checksum failed */ + TCPFail = (1 << 14), /* TCP/IP checksum failed */ + +#define RxCSFailMask (IPFail | UDPFail | TCPFail) + + RxVlanTag = (1 << 16), /* VLAN tag available */ +}; + +#define RTL_GSO_MAX_SIZE_V1 32000 +#define RTL_GSO_MAX_SEGS_V1 24 +#define RTL_GSO_MAX_SIZE_V2 64000 +#define RTL_GSO_MAX_SEGS_V2 64 + +struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; +}; + +struct rtl8169_tc_offsets { + bool inited; + __le64 tx_errors; + __le32 tx_multi_collision; + __le16 tx_aborted; + __le16 rx_missed; +}; + +enum rtl_flag { + RTL_FLAG_TASK_ENABLED = 0, + RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_MAX +}; + +enum rtl_dash_type { + RTL_DASH_NONE, + RTL_DASH_DP, + RTL_DASH_EP, +}; + +struct rtl8169_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; + struct net_device *dev; + struct phy_device *phydev; + struct napi_struct napi; + enum mac_version mac_version; + enum rtl_dash_type dash_type; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. 
*/ + u32 dirty_tx; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + u16 cp_cmd; + u32 irq_mask; + struct clk *clk; + + struct { + DECLARE_BITMAP(flags, RTL_FLAG_MAX); + struct work_struct work; + } wk; + + unsigned supports_gmii:1; + unsigned aspm_manageable:1; + dma_addr_t counters_phys_addr; + struct rtl8169_counters *counters; + struct rtl8169_tc_offsets tc_offset; + u32 saved_wolopts; + int eee_adv; + + const char *fw_name; + struct rtl_fw *rtl_fw; + + u32 ocp_base; +}; + +typedef void (*rtl_generic_fct)(struct rtl8169_private *tp); + +MODULE_AUTHOR("Realtek and the Linux r8169 crew "); +MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); +MODULE_SOFTDEP("pre: realtek"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE_8168D_1); +MODULE_FIRMWARE(FIRMWARE_8168D_2); +MODULE_FIRMWARE(FIRMWARE_8168E_1); +MODULE_FIRMWARE(FIRMWARE_8168E_2); +MODULE_FIRMWARE(FIRMWARE_8168E_3); +MODULE_FIRMWARE(FIRMWARE_8105E_1); +MODULE_FIRMWARE(FIRMWARE_8168F_1); +MODULE_FIRMWARE(FIRMWARE_8168F_2); +MODULE_FIRMWARE(FIRMWARE_8402_1); +MODULE_FIRMWARE(FIRMWARE_8411_1); +MODULE_FIRMWARE(FIRMWARE_8411_2); +MODULE_FIRMWARE(FIRMWARE_8106E_1); +MODULE_FIRMWARE(FIRMWARE_8106E_2); +MODULE_FIRMWARE(FIRMWARE_8168G_2); +MODULE_FIRMWARE(FIRMWARE_8168G_3); +MODULE_FIRMWARE(FIRMWARE_8168H_1); +MODULE_FIRMWARE(FIRMWARE_8168H_2); +MODULE_FIRMWARE(FIRMWARE_8168FP_3); +MODULE_FIRMWARE(FIRMWARE_8107E_1); +MODULE_FIRMWARE(FIRMWARE_8107E_2); +MODULE_FIRMWARE(FIRMWARE_8125A_3); +MODULE_FIRMWARE(FIRMWARE_8125B_2); + +static inline struct device *tp_to_dev(struct rtl8169_private *tp) +{ + return &tp->pci_dev->dev; +} + +static void rtl_lock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Lock); +} + +static void rtl_unlock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); +} + +static void rtl_pci_commit(struct rtl8169_private *tp) +{ + /* Read an arbitrary register to commit a preceding PCI write */ + RTL_R8(tp, ChipCmd); +} + +static bool rtl_is_8125(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_60; +} + +static bool rtl_is_8168evl_up(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_39 && + tp->mac_version <= RTL_GIGA_MAC_VER_53; +} + +static bool rtl_supports_eee(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_37 && + tp->mac_version != RTL_GIGA_MAC_VER_39; +} + +static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + mac[i] = RTL_R8(tp, reg + i); +} + +struct rtl_cond { + bool (*check)(struct rtl8169_private *); + const char *msg; +}; + +static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c, + unsigned long usecs, int n, bool high) +{ + int i; + + for (i = 0; i < n; i++) { + if (c->check(tp) == high) + return true; + fsleep(usecs); + } + + if (net_ratelimit()) + netdev_err(tp->dev, "%s == %d (loop: %d, delay: %lu).\n", + c->msg, !high, n, usecs); + return false; +} + +static bool rtl_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned long d, int n) +{ + return rtl_loop_wait(tp, c, d, n, true); +} + +static 
bool rtl_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned long d, int n) +{ + return rtl_loop_wait(tp, c, d, n, false); +} + +#define DECLARE_RTL_COND(name) \ +static bool name ## _check(struct rtl8169_private *); \ + \ +static const struct rtl_cond name = { \ + .check = name ## _check, \ + .msg = #name \ +}; \ + \ +static bool name ## _check(struct rtl8169_private *tp) + +static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type) +{ + /* based on RTL8168FP_OOBMAC_BASE in vendor driver */ + if (type == ERIAR_OOB && + (tp->mac_version == RTL_GIGA_MAC_VER_52 || + tp->mac_version == RTL_GIGA_MAC_VER_53)) + *cmd |= 0xf70 << 18; +} + +DECLARE_RTL_COND(rtl_eriar_cond) +{ + return RTL_R32(tp, ERIAR) & ERIAR_FLAG; +} + +static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val, int type) +{ + u32 cmd = ERIAR_WRITE_CMD | type | mask | addr; + + if (WARN(addr & 3 || !mask, "addr: 0x%x, mask: 0x%08x\n", addr, mask)) + return; + + RTL_W32(tp, ERIDR, val); + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); + + rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); +} + +static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val) +{ + _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC); +} + +static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type) +{ + u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr; + + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); + + return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? + RTL_R32(tp, ERIDR) : ~0; +} + +static u32 rtl_eri_read(struct rtl8169_private *tp, int addr) +{ + return _rtl_eri_read(tp, addr, ERIAR_EXGMAC); +} + +static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m) +{ + u32 val = rtl_eri_read(tp, addr); + + rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p); +} + +static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p) +{ + rtl_w0w1_eri(tp, addr, p, 0); +} + +static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m) +{ + rtl_w0w1_eri(tp, addr, 0, m); +} + +static bool rtl_ocp_reg_failure(u32 reg) +{ + return WARN_ONCE(reg & 0xffff0001, "Invalid ocp reg %x!\n", reg); +} + +DECLARE_RTL_COND(rtl_ocp_gphy_cond) +{ + return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG; +} + +static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(reg)) + return; + + RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); + + rtl_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10); +} + +static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(reg)) + return 0; + + RTL_W32(tp, GPHY_OCP, reg << 15); + + return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ? 
+ (RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT; +} + +static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(reg)) + return; + + RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data); +} + +static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(reg)) + return 0; + + RTL_W32(tp, OCPDR, reg << 15); + + return RTL_R32(tp, OCPDR); +} + +static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask, + u16 set) +{ + u16 data = r8168_mac_ocp_read(tp, reg); + + r8168_mac_ocp_write(tp, reg, (data & ~mask) | set); +} + +/* Work around a hw issue with RTL8168g PHY, the quirk disables + * PHY MCU interrupts before PHY power-down. + */ +static void rtl8168g_phy_suspend_quirk(struct rtl8169_private *tp, int value) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_49: + if (value & BMCR_RESET || !(value & BMCR_PDOWN)) + rtl_eri_set_bits(tp, 0x1a8, 0xfc000000); + else + rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000); + break; + default: + break; + } +}; + +static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE; + return; + } + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + if (tp->ocp_base == OCP_STD_PHY_BASE && reg == MII_BMCR) + rtl8168g_phy_suspend_quirk(tp, value); + + r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value); +} + +static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) +{ + if (reg == 0x1f) + return tp->ocp_base == OCP_STD_PHY_BASE ? 0 : tp->ocp_base >> 4; + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2); +} + +static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value << 4; + return; + } + + r8168_mac_ocp_write(tp, tp->ocp_base + reg, value); +} + +static int mac_mcu_read(struct rtl8169_private *tp, int reg) +{ + return r8168_mac_ocp_read(tp, tp->ocp_base + reg); +} + +DECLARE_RTL_COND(rtl_phyar_cond) +{ + return RTL_R32(tp, PHYAR) & 0x80000000; +} + +static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); + + rtl_loop_wait_low(tp, &rtl_phyar_cond, 25, 20); + /* + * According to hardware specs a 20us delay is required after write + * complete indication, but before sending next command. + */ + udelay(20); +} + +static int r8169_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16); + + value = rtl_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ? + RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT; + + /* + * According to hardware specs a 20us delay is required after read + * complete indication, but before sending next command. 
+ */ + udelay(20); + + return value; +} + +DECLARE_RTL_COND(rtl_ocpar_cond) +{ + return RTL_R32(tp, OCPAR) & OCPAR_FLAG; +} + +static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) +{ + RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); + RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); +} + +static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_1_mdio_access(tp, reg, + OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK)); +} + +static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) +{ + r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); + + mdelay(1); + RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? + RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT; +} + +#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 + +static void r8168dp_2_mdio_start(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_stop(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_2_mdio_start(tp); + + r8169_mdio_write(tp, reg, value); + + r8168dp_2_mdio_stop(tp); +} + +static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + /* Work around issue with chip reporting wrong PHY ID */ + if (reg == MII_PHYSID2) + return 0xc912; + + r8168dp_2_mdio_start(tp); + + value = r8169_mdio_read(tp, reg); + + r8168dp_2_mdio_stop(tp); + + return value; +} + +static void rtl_writephy(struct rtl8169_private *tp, int location, int val) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + r8168dp_1_mdio_write(tp, location, val); + break; + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + r8168dp_2_mdio_write(tp, location, val); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + r8168g_mdio_write(tp, location, val); + break; + default: + r8169_mdio_write(tp, location, val); + break; + } +} + +static int rtl_readphy(struct rtl8169_private *tp, int location) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + return r8168dp_1_mdio_read(tp, location); + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_2_mdio_read(tp, location); + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + return r8168g_mdio_read(tp, location); + default: + return r8169_mdio_read(tp, location); + } +} + +DECLARE_RTL_COND(rtl_ephyar_cond) +{ + return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; +} + +static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) +{ + RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + rtl_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100); + + udelay(10); +} + +static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + return rtl_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ? + RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0; +} + +static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg) +{ + RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff)); + return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? 
+ RTL_R32(tp, OCPDR) : ~0; +} + +static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u16 reg) +{ + return _rtl_eri_read(tp, reg, ERIAR_OOB); +} + +static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + RTL_W32(tp, OCPDR, data); + RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + rtl_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); +} + +static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + _rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, + data, ERIAR_OOB); +} + +static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd) +{ + rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd); + + r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001); +} + +#define OOB_CMD_RESET 0x00 +#define OOB_CMD_DRIVER_START 0x05 +#define OOB_CMD_DRIVER_STOP 0x06 + +static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) +{ + return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10; +} + +DECLARE_RTL_COND(rtl_dp_ocp_read_cond) +{ + u16 reg; + + reg = rtl8168_get_ocp_reg(tp); + + return r8168dp_ocp_read(tp, reg) & 0x00000800; +} + +DECLARE_RTL_COND(rtl_ep_ocp_read_cond) +{ + return r8168ep_ocp_read(tp, 0x124) & 0x00000001; +} + +DECLARE_RTL_COND(rtl_ocp_tx_cond) +{ + return RTL_R8(tp, IBISR0) & 0x20; +} + +static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) +{ + RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01); + rtl_loop_wait_high(tp, &rtl_ocp_tx_cond, 50000, 2000); + RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20); + RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01); +} + +static void rtl8168dp_driver_start(struct rtl8169_private *tp) +{ + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START); + rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10); +} + +static void rtl8168ep_driver_start(struct rtl8169_private *tp) +{ + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); + rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10); +} + +static void rtl8168_driver_start(struct rtl8169_private *tp) +{ + if (tp->dash_type == RTL_DASH_DP) + rtl8168dp_driver_start(tp); + else + rtl8168ep_driver_start(tp); +} + +static void rtl8168dp_driver_stop(struct rtl8169_private *tp) +{ + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP); + rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10); +} + +static void rtl8168ep_driver_stop(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); + rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10); +} + +static void rtl8168_driver_stop(struct rtl8169_private *tp) +{ + if (tp->dash_type == RTL_DASH_DP) + rtl8168dp_driver_stop(tp); + else + rtl8168ep_driver_stop(tp); +} + +static bool r8168dp_check_dash(struct rtl8169_private *tp) +{ + u16 reg = rtl8168_get_ocp_reg(tp); + + return r8168dp_ocp_read(tp, reg) & BIT(15); +} + +static bool r8168ep_check_dash(struct rtl8169_private *tp) +{ + return r8168ep_ocp_read(tp, 0x128) & BIT(0); +} + +static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE; + case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_53: + return r8168ep_check_dash(tp) ? 
RTL_DASH_EP : RTL_DASH_NONE; + default: + return RTL_DASH_NONE; + } +} + +static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: + if (enable) + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN); + else + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | D3_NO_PLL_DOWN); + break; + default: + break; + } +} + +static void rtl_reset_packet_filter(struct rtl8169_private *tp) +{ + rtl_eri_clear_bits(tp, 0xdc, BIT(0)); + rtl_eri_set_bits(tp, 0xdc, BIT(0)); +} + +DECLARE_RTL_COND(rtl_efusear_cond) +{ + return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG; +} + +u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + return rtl_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ? + RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0; +} + +static u32 rtl_get_events(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + return RTL_R32(tp, IntrStatus_8125); + else + return RTL_R16(tp, IntrStatus); +} + +static void rtl_ack_events(struct rtl8169_private *tp, u32 bits) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrStatus_8125, bits); + else + RTL_W16(tp, IntrStatus, bits); +} + +static void rtl_irq_disable(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, 0); + else + RTL_W16(tp, IntrMask, 0); +} + +static void rtl_irq_enable(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, tp->irq_mask); + else + RTL_W16(tp, IntrMask, tp->irq_mask); +} + +static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) +{ + rtl_irq_disable(tp); + rtl_ack_events(tp, 0xffffffff); + rtl_pci_commit(tp); +} + +static void rtl_link_chg_patch(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + + if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else if (phydev->speed == SPEED_100) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); + } + rtl_reset_packet_filter(tp); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); + } + } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { + if (phydev->speed == SPEED_10) { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a); + } else { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); + } + } +} + +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + wol->supported = WAKE_ANY; + wol->wolopts = tp->saved_wolopts; +} + +static void __rtl8169_set_wol(struct rtl8169_private 
*tp, u32 wolopts) +{ + static const struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_PHY, Config3, LinkUp }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake }, + { WAKE_MAGIC, Config3, MagicPacket } + }; + unsigned int i, tmp = ARRAY_SIZE(cfg); + u8 options; + + rtl_unlock_config_regs(tp); + + if (rtl_is_8168evl_up(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2); + else + rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2); + } else if (rtl_is_8125(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0)); + else + r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0); + } + + for (i = 0; i < tmp; i++) { + options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(tp, cfg[i].reg, options); + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + options = RTL_R8(tp, Config1) & ~PMEnable; + if (wolopts) + options |= PMEnable; + RTL_W8(tp, Config1, options); + break; + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: + options = RTL_R8(tp, Config2) & ~PME_SIGNAL; + if (wolopts) + options |= PME_SIGNAL; + RTL_W8(tp, Config2, options); + break; + default: + break; + } + + rtl_lock_config_regs(tp); + + device_set_wakeup_enable(tp_to_dev(tp), wolopts); + rtl_set_d3_pll_down(tp, !wolopts); + tp->dev->wol_enabled = wolopts ? 1 : 0; +} + +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (wol->wolopts & ~WAKE_ANY) + return -EINVAL; + + tp->saved_wolopts = wol->wolopts; + __rtl8169_set_wol(tp, tp->saved_wolopts); + + return 0; +} + +static void rtl8169_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl_fw *rtl_fw = tp->rtl_fw; + + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + if (rtl_fw) + strlcpy(info->fw_version, rtl_fw->version, + sizeof(info->fw_version)); +} + +static int rtl8169_get_regs_len(struct net_device *dev) +{ + return R8169_REGS_SIZE; +} + +static netdev_features_t rtl8169_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > TD_MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + + if (dev->mtu > ETH_DATA_LEN && + tp->mac_version > RTL_GIGA_MAC_VER_06) + features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO); + + return features; +} + +static void rtl_set_rx_config_features(struct rtl8169_private *tp, + netdev_features_t features) +{ + u32 rx_config = RTL_R32(tp, RxConfig); + + if (features & NETIF_F_RXALL) + rx_config |= RX_CONFIG_ACCEPT_ERR_MASK; + else + rx_config &= ~RX_CONFIG_ACCEPT_ERR_MASK; + + if (rtl_is_8125(tp)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rx_config |= RX_VLAN_8125; + else + rx_config &= ~RX_VLAN_8125; + } + + RTL_W32(tp, RxConfig, rx_config); +} + +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_set_rx_config_features(tp, features); + + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + if (!rtl_is_8125(tp)) { + if (features & 
NETIF_F_HW_VLAN_CTAG_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + } + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_pci_commit(tp); + + return 0; +} + +static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) +{ + return (skb_vlan_tag_present(skb)) ? + TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; +} + +static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); +} + +static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 __iomem *data = tp->mmio_addr; + u32 *dw = p; + int i; + + for (i = 0; i < R8169_REGS_SIZE; i += 4) + memcpy_fromio(dw++, data++, 4); +} + +static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", +}; + +static int rtl8169_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8169_gstrings); + default: + return -EOPNOTSUPP; + } +} + +DECLARE_RTL_COND(rtl_counters_cond) +{ + return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump); +} + +static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd) +{ + u32 cmd = lower_32_bits(tp->counters_phys_addr); + + RTL_W32(tp, CounterAddrHigh, upper_32_bits(tp->counters_phys_addr)); + rtl_pci_commit(tp); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | counter_cmd); + + rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); +} + +static void rtl8169_update_counters(struct rtl8169_private *tp) +{ + u8 val = RTL_R8(tp, ChipCmd); + + /* + * Some chips are unable to dump tally counters when the receiver + * is disabled. If 0xff chip may be in a PCI power-save state. + */ + if (val & CmdRxEnb && val != 0xff) + rtl8169_do_counters(tp, CounterDump); +} + +static void rtl8169_init_counter_offsets(struct rtl8169_private *tp) +{ + struct rtl8169_counters *counters = tp->counters; + + /* + * rtl8169_init_counter_offsets is called from rtl_open. On chip + * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only + * reset by a power cycle, while the counter values collected by the + * driver are reset at every driver unload/load cycle. + * + * To make sure the HW values returned by @get_stats64 match the SW + * values, we collect the initial values at first open(*) and use them + * as offsets to normalize the values returned by @get_stats64. + * + * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one + * for the reason stated in rtl8169_update_counters; CmdRxEnb is only + * set at open time by rtl_hw_start. 
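+ *
+ * For example, on an older chip such as RTL_GIGA_MAC_VER_05: if the
+ * hardware tally for tx_errors already reads 7 at first open, 7 is
+ * stored in tc_offset.tx_errors and subtracted from the dumped value
+ * afterwards, so @get_stats64 starts from 0 just like the SW counters.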
+ */ + + if (tp->tc_offset.inited) + return; + + if (tp->mac_version >= RTL_GIGA_MAC_VER_19) { + rtl8169_do_counters(tp, CounterReset); + } else { + rtl8169_update_counters(tp); + tp->tc_offset.tx_errors = counters->tx_errors; + tp->tc_offset.tx_multi_collision = counters->tx_multi_collision; + tp->tc_offset.tx_aborted = counters->tx_aborted; + tp->tc_offset.rx_missed = counters->rx_missed; + } + + tp->tc_offset.inited = true; +} + +static void rtl8169_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_counters *counters; + + counters = tp->counters; + rtl8169_update_counters(tp); + + data[0] = le64_to_cpu(counters->tx_packets); + data[1] = le64_to_cpu(counters->rx_packets); + data[2] = le64_to_cpu(counters->tx_errors); + data[3] = le32_to_cpu(counters->rx_errors); + data[4] = le16_to_cpu(counters->rx_missed); + data[5] = le16_to_cpu(counters->align_errors); + data[6] = le32_to_cpu(counters->tx_one_collision); + data[7] = le32_to_cpu(counters->tx_multi_collision); + data[8] = le64_to_cpu(counters->rx_unicast); + data[9] = le64_to_cpu(counters->rx_broadcast); + data[10] = le32_to_cpu(counters->rx_multicast); + data[11] = le16_to_cpu(counters->tx_aborted); + data[12] = le16_to_cpu(counters->tx_underun); +} + +static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings)); + break; + } +} + +/* + * Interrupt coalescing + * + * > 1 - the availability of the IntrMitigate (0xe2) register through the + * > 8169, 8168 and 810x line of chipsets + * + * 8169, 8168, and 8136(810x) serial chipsets support it. + * + * > 2 - the Tx timer unit at gigabit speed + * + * The unit of the timer depends on both the speed and the setting of CPlusCmd + * (0xe0) bit 1 and bit 0. 
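+ *
+ * Rough worked example against the second table below: with
+ * CPlusCmd[1:0] = 00 at 1000M the unit is 5us on 8168/8136, so a value
+ * of 4 in the RTL_COALESCE_TX_USECS field of IntrMitigate corresponds
+ * to roughly 4 * 5us = 20us of tx coalescing delay.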
+ * + * For 8169 + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 320ns 2.56us 40.96us + * 0 1 2.56us 20.48us 327.7us + * 1 0 5.12us 40.96us 655.4us + * 1 1 10.24us 81.92us 1.31ms + * + * For the other + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 5us 2.56us 40.96us + * 0 1 40us 20.48us 327.7us + * 1 0 80us 40.96us 655.4us + * 1 1 160us 81.92us 1.31ms + */ + +/* rx/tx scale factors for all CPlusCmd[0:1] cases */ +struct rtl_coalesce_info { + u32 speed; + u32 scale_nsecs[4]; +}; + +/* produce array with base delay *1, *8, *8*2, *8*2*2 */ +#define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) } + +static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = { + { SPEED_1000, COALESCE_DELAY(320) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, + { 0 }, +}; + +static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = { + { SPEED_1000, COALESCE_DELAY(5000) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, + { 0 }, +}; +#undef COALESCE_DELAY + +/* get rx/tx scale vector corresponding to current speed */ +static const struct rtl_coalesce_info * +rtl_coalesce_info(struct rtl8169_private *tp) +{ + const struct rtl_coalesce_info *ci; + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + ci = rtl_coalesce_info_8169; + else + ci = rtl_coalesce_info_8168_8136; + + /* if speed is unknown assume highest one */ + if (tp->phydev->speed == SPEED_UNKNOWN) + return ci; + + for (; ci->speed; ci++) { + if (tp->phydev->speed == ci->speed) + return ci; + } + + return ERR_PTR(-ELNRNG); +} + +static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct rtl8169_private *tp = netdev_priv(dev); + const struct rtl_coalesce_info *ci; + u32 scale, c_us, c_fr; + u16 intrmit; + + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + + memset(ec, 0, sizeof(*ec)); + + /* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */ + ci = rtl_coalesce_info(tp); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + scale = ci->scale_nsecs[tp->cp_cmd & INTT_MASK]; + + intrmit = RTL_R16(tp, IntrMitigate); + + c_us = FIELD_GET(RTL_COALESCE_TX_USECS, intrmit); + ec->tx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000); + + c_fr = FIELD_GET(RTL_COALESCE_TX_FRAMES, intrmit); + /* ethtool_coalesce states usecs and max_frames must not both be 0 */ + ec->tx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1; + + c_us = FIELD_GET(RTL_COALESCE_RX_USECS, intrmit); + ec->rx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000); + + c_fr = FIELD_GET(RTL_COALESCE_RX_FRAMES, intrmit); + ec->rx_max_coalesced_frames = (c_us || c_fr) ? 
c_fr * 4 : 1; + + return 0; +} + +/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, usec) */ +static int rtl_coalesce_choose_scale(struct rtl8169_private *tp, u32 usec, + u16 *cp01) +{ + const struct rtl_coalesce_info *ci; + u16 i; + + ci = rtl_coalesce_info(tp); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + for (i = 0; i < 4; i++) { + if (usec <= ci->scale_nsecs[i] * RTL_COALESCE_T_MAX / 1000U) { + *cp01 = i; + return ci->scale_nsecs[i]; + } + } + + return -ERANGE; +} + +static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 tx_fr = ec->tx_max_coalesced_frames; + u32 rx_fr = ec->rx_max_coalesced_frames; + u32 coal_usec_max, units; + u16 w = 0, cp01 = 0; + int scale; + + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + + if (rx_fr > RTL_COALESCE_FRAME_MAX || tx_fr > RTL_COALESCE_FRAME_MAX) + return -ERANGE; + + coal_usec_max = max(ec->rx_coalesce_usecs, ec->tx_coalesce_usecs); + scale = rtl_coalesce_choose_scale(tp, coal_usec_max, &cp01); + if (scale < 0) + return scale; + + /* Accept max_frames=1 we returned in rtl_get_coalesce. Accept it + * not only when usecs=0 because of e.g. the following scenario: + * + * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX) + * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1 + * - then user does `ethtool -C eth0 rx-usecs 100` + * + * Since ethtool sends to kernel whole ethtool_coalesce settings, + * if we want to ignore rx_frames then it has to be set to 0. + */ + if (rx_fr == 1) + rx_fr = 0; + if (tx_fr == 1) + tx_fr = 0; + + /* HW requires time limit to be set if frame limit is set */ + if ((tx_fr && !ec->tx_coalesce_usecs) || + (rx_fr && !ec->rx_coalesce_usecs)) + return -EINVAL; + + w |= FIELD_PREP(RTL_COALESCE_TX_FRAMES, DIV_ROUND_UP(tx_fr, 4)); + w |= FIELD_PREP(RTL_COALESCE_RX_FRAMES, DIV_ROUND_UP(rx_fr, 4)); + + units = DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000U, scale); + w |= FIELD_PREP(RTL_COALESCE_TX_USECS, units); + units = DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000U, scale); + w |= FIELD_PREP(RTL_COALESCE_RX_USECS, units); + + RTL_W16(tp, IntrMitigate, w); + + /* Meaning of PktCntrDisable bit changed from RTL8168e-vl */ + if (rtl_is_8168evl_up(tp)) { + if (!rx_fr && !tx_fr) + /* disable packet counter */ + tp->cp_cmd |= PktCntrDisable; + else + tp->cp_cmd &= ~PktCntrDisable; + } + + tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_pci_commit(tp); + + return 0; +} + +static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + + return phy_ethtool_get_eee(tp->phydev, data); +} + +static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + + ret = phy_ethtool_set_eee(tp->phydev, data); + + if (!ret) + tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN, + MDIO_AN_EEE_ADV); + return ret; +} + +static void rtl8169_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *data) +{ + data->rx_max_pending = NUM_RX_DESC; + data->rx_pending = NUM_RX_DESC; + data->tx_max_pending = NUM_TX_DESC; + data->tx_pending = NUM_TX_DESC; +} + +static void rtl8169_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + bool tx_pause, rx_pause; + + phy_get_pause(tp->phydev, &tx_pause, 
&rx_pause); + + data->autoneg = tp->phydev->autoneg; + data->tx_pause = tx_pause ? 1 : 0; + data->rx_pause = rx_pause ? 1 : 0; +} + +static int rtl8169_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > ETH_DATA_LEN) + return -EOPNOTSUPP; + + phy_set_asym_pause(tp->phydev, data->rx_pause, data->tx_pause); + + return 0; +} + +static const struct ethtool_ops rtl8169_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .get_drvinfo = rtl8169_get_drvinfo, + .get_regs_len = rtl8169_get_regs_len, + .get_link = ethtool_op_get_link, + .get_coalesce = rtl_get_coalesce, + .set_coalesce = rtl_set_coalesce, + .get_regs = rtl8169_get_regs, + .get_wol = rtl8169_get_wol, + .set_wol = rtl8169_set_wol, + .get_strings = rtl8169_get_strings, + .get_sset_count = rtl8169_get_sset_count, + .get_ethtool_stats = rtl8169_get_ethtool_stats, + .get_ts_info = ethtool_op_get_ts_info, + .nway_reset = phy_ethtool_nway_reset, + .get_eee = rtl8169_get_eee, + .set_eee = rtl8169_set_eee, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_ringparam = rtl8169_get_ringparam, + .get_pauseparam = rtl8169_get_pauseparam, + .set_pauseparam = rtl8169_set_pauseparam, +}; + +static void rtl_enable_eee(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + int adv; + + /* respect EEE advertisement the user may have set */ + if (tp->eee_adv >= 0) + adv = tp->eee_adv; + else + adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); + + if (adv >= 0) + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv); +} + +static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) +{ + /* + * The driver currently handles the 8168Bf and the 8168Be identically + * but they can be identified more specifically through the test below + * if needed: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * + * Same thing for the 8101Eb and the 8101Ec: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + */ + static const struct rtl_mac_info { + u16 mask; + u16 val; + enum mac_version ver; + } mac_info[] = { + /* 8125B family. */ + { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 }, + + /* 8125A family. */ + { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 }, + { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 }, + + /* RTL8117 */ + { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 }, + { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 }, + + /* 8168EP family. */ + { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 }, + { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 }, + { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 }, + + /* 8168H family. */ + { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 }, + { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 }, + + /* 8168G family. */ + { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 }, + { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 }, + { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 }, + { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 }, + + /* 8168F family. */ + { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 }, + { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 }, + { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 }, + + /* 8168E family. */ + { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 }, + { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 }, + { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 }, + + /* 8168D family. */ + { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 }, + { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 }, + + /* 8168DP family. */ + /* It seems this early RTL8168dp version never made it to + * the wild. 
Let's see whether somebody complains, if not + * we'll remove support for this chip version completely. + * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 }, + */ + { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 }, + { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 }, + + /* 8168C family. */ + { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 }, + { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 }, + { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 }, + { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 }, + { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 }, + { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 }, + { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 }, + + /* 8168B family. */ + { 0x7cf, 0x380, RTL_GIGA_MAC_VER_12 }, + { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 }, + { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, + + /* 8101 family. */ + { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 }, + { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 }, + { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 }, + { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 }, + { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 }, + { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 }, + { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 }, + { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 }, + { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 }, + /* FIXME: where did these entries come from ? -- FR + * Not even r8101 vendor driver knows these id's, + * so let's disable detection for now. -- HK + * { 0xfc8, 0x388, RTL_GIGA_MAC_VER_13 }, + * { 0xfc8, 0x308, RTL_GIGA_MAC_VER_13 }, + */ + + /* 8110 family. */ + { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 }, + { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 }, + { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 }, + { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 }, + { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 }, + + /* Catch-all */ + { 0x000, 0x000, RTL_GIGA_MAC_NONE } + }; + const struct rtl_mac_info *p = mac_info; + enum mac_version ver; + + while ((xid & p->mask) != p->val) + p++; + ver = p->ver; + + if (ver != RTL_GIGA_MAC_NONE && !gmii) { + if (ver == RTL_GIGA_MAC_VER_42) + ver = RTL_GIGA_MAC_VER_43; + else if (ver == RTL_GIGA_MAC_VER_45) + ver = RTL_GIGA_MAC_VER_47; + else if (ver == RTL_GIGA_MAC_VER_46) + ver = RTL_GIGA_MAC_VER_48; + } + + return ver; +} + +static void rtl_release_firmware(struct rtl8169_private *tp) +{ + if (tp->rtl_fw) { + rtl_fw_release_firmware(tp->rtl_fw); + kfree(tp->rtl_fw); + tp->rtl_fw = NULL; + } +} + +void r8169_apply_firmware(struct rtl8169_private *tp) +{ + int val; + + /* TODO: release firmware if rtl_fw_write_firmware signals failure. */ + if (tp->rtl_fw) { + rtl_fw_write_firmware(tp, tp->rtl_fw); + /* At least one firmware doesn't reset tp->ocp_base. 
*/ + tp->ocp_base = OCP_STD_PHY_BASE; + + /* PHY soft reset may still be in progress */ + phy_read_poll_timeout(tp->phydev, MII_BMCR, val, + !(val & BMCR_RESET), + 50000, 600000, true); + } +} + +static void rtl8168_config_eee_mac(struct rtl8169_private *tp) +{ + /* Adjust EEE LED frequency */ + if (tp->mac_version != RTL_GIGA_MAC_VER_38) + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + + rtl_eri_set_bits(tp, 0x1b0, 0x0003); +} + +static void rtl8125a_config_eee_mac(struct rtl8169_private *tp) +{ + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); + r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1)); +} + +static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp) +{ + RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20); +} + +static void rtl8125b_config_eee_mac(struct rtl8169_private *tp) +{ + rtl8125_set_eee_txidle_timer(tp); + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); +} + +static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr) +{ + rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr)); + rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, get_unaligned_le16(addr + 4)); + rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, get_unaligned_le16(addr) << 16); + rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, get_unaligned_le32(addr + 2)); +} + +u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp) +{ + u16 data1, data2, ioffset; + + r8168_mac_ocp_write(tp, 0xdd02, 0x807d); + data1 = r8168_mac_ocp_read(tp, 0xdd02); + data2 = r8168_mac_ocp_read(tp, 0xdd00); + + ioffset = (data2 >> 1) & 0x7ff8; + ioffset |= data2 & 0x0007; + if (data1 & BIT(7)) + ioffset |= BIT(15); + + return ioffset; +} + +static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) +{ + set_bit(flag, tp->wk.flags); + schedule_work(&tp->wk.work); +} + +static void rtl8169_init_phy(struct rtl8169_private *tp) +{ + r8169_hw_phy_config(tp, tp->phydev, tp->mac_version); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); + /* set undocumented MAC Reg C+CR Offset 0x82h */ + RTL_W8(tp, 0x82, 0x01); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_05 && + tp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE && + tp->pci_dev->subsystem_device == 0xe000) + phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b); + + /* We may have called phy_speed_down before */ + phy_speed_up(tp->phydev); + + if (rtl_supports_eee(tp)) + rtl_enable_eee(tp); + + genphy_soft_reset(tp->phydev); +} + +static void rtl_rar_set(struct rtl8169_private *tp, const u8 *addr) +{ + rtl_unlock_config_regs(tp); + + RTL_W32(tp, MAC4, get_unaligned_le16(addr + 4)); + rtl_pci_commit(tp); + + RTL_W32(tp, MAC0, get_unaligned_le32(addr)); + rtl_pci_commit(tp); + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + rtl_rar_exgmac_set(tp, addr); + + rtl_lock_config_regs(tp); +} + +static int rtl_set_mac_address(struct net_device *dev, void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + ret = eth_mac_addr(dev, p); + if (ret) + return ret; + + rtl_rar_set(tp, dev->dev_addr); + + return 0; +} + +static void rtl_wol_enable_rx(struct rtl8169_private *tp) +{ + if (tp->mac_version >= RTL_GIGA_MAC_VER_25) + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | + AcceptBroadcast | AcceptMulticast | AcceptMyPhys); +} + +static void rtl_prepare_power_down(struct rtl8169_private *tp) +{ + if (tp->dash_type != RTL_DASH_NONE) + return; + + if (tp->mac_version == 
RTL_GIGA_MAC_VER_32 || + tp->mac_version == RTL_GIGA_MAC_VER_33) + rtl_ephy_write(tp, 0x19, 0xff64); + + if (device_may_wakeup(tp_to_dev(tp))) { + phy_speed_down(tp->phydev, false); + rtl_wol_enable_rx(tp); + } +} + +static void rtl_init_rxcfg(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: + RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63: + RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST); + break; + default: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); + break; + } +} + +static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) +{ + tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; +} + +static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); +} + +static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); +} + +static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); +} + +static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); +} + +static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x24); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); +} + +static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x3f); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); +} + +static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); +} + +static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); +} + +static void rtl_jumbo_config(struct rtl8169_private *tp) +{ + bool jumbo = tp->dev->mtu > ETH_DATA_LEN; + int readrq = 4096; + + rtl_unlock_config_regs(tp); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + if (jumbo) { + readrq = 512; + r8168b_1_hw_jumbo_enable(tp); + } else { + r8168b_1_hw_jumbo_disable(tp); + } + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26: + if (jumbo) { + readrq = 512; + r8168c_hw_jumbo_enable(tp); + } else { + r8168c_hw_jumbo_disable(tp); + } + break; + case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: + if (jumbo) + r8168dp_hw_jumbo_enable(tp); + else + r8168dp_hw_jumbo_disable(tp); + break; + case RTL_GIGA_MAC_VER_31 ... 
RTL_GIGA_MAC_VER_33: + if (jumbo) + r8168e_hw_jumbo_enable(tp); + else + r8168e_hw_jumbo_disable(tp); + break; + default: + break; + } + rtl_lock_config_regs(tp); + + if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii) + pcie_set_readrq(tp->pci_dev, readrq); + + /* Chip doesn't support pause in jumbo mode */ + if (jumbo) { + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + tp->phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + tp->phydev->advertising); + phy_start_aneg(tp->phydev); + } +} + +DECLARE_RTL_COND(rtl_chipcmd_cond) +{ + return RTL_R8(tp, ChipCmd) & CmdReset; +} + +static void rtl_hw_reset(struct rtl8169_private *tp) +{ + RTL_W8(tp, ChipCmd, CmdReset); + + rtl_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); +} + +static void rtl_request_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw; + + /* firmware loaded already or no firmware available */ + if (tp->rtl_fw || !tp->fw_name) + return; + + rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); + if (!rtl_fw) + return; + + rtl_fw->phy_write = rtl_writephy; + rtl_fw->phy_read = rtl_readphy; + rtl_fw->mac_mcu_write = mac_mcu_write; + rtl_fw->mac_mcu_read = mac_mcu_read; + rtl_fw->fw_name = tp->fw_name; + rtl_fw->dev = tp_to_dev(tp); + + if (rtl_fw_request_firmware(rtl_fw)) + kfree(rtl_fw); + else + tp->rtl_fw = rtl_fw; +} + +static void rtl_rx_close(struct rtl8169_private *tp) +{ + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK); +} + +DECLARE_RTL_COND(rtl_npq_cond) +{ + return RTL_R8(tp, TxPoll) & NPQ; +} + +DECLARE_RTL_COND(rtl_txcfg_empty_cond) +{ + return RTL_R32(tp, TxConfig) & TXCFG_EMPTY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond) +{ + return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond_2) +{ + /* IntrMitigate has new functionality on RTL8125 */ + return (RTL_R16(tp, IntrMitigate) & 0x0103) == 0x0103; +} + +static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61: + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + break; + case RTL_GIGA_MAC_VER_63: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42); + break; + default: + break; + } +} + +static void rtl_enable_rxdvgate(struct rtl8169_private *tp) +{ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN); + fsleep(2000); + rtl_wait_txrx_fifo_empty(tp); +} + +static void rtl_set_tx_config_registers(struct rtl8169_private *tp) +{ + u32 val = TX_DMA_BURST << TxDMAShift | + InterFrameGap << TxInterFrameGapShift; + + if (rtl_is_8168evl_up(tp)) + val |= TXCFG_AUTO_FIFO; + + RTL_W32(tp, TxConfig, val); +} + +static void rtl_set_rx_max_size(struct rtl8169_private *tp) +{ + /* Low hurts. Let's disable the filtering. */ + RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1); +} + +static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) +{ + /* + * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh + * register to be written before TxDescAddrLow to work. + * Switching from MMIO to I/O access fixes the issue as well. 
+ */ + RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); + RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); + RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); +} + +static void rtl8169_set_magic_reg(struct rtl8169_private *tp) +{ + u32 val; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + val = 0x000fff00; + else if (tp->mac_version == RTL_GIGA_MAC_VER_06) + val = 0x00ffff00; + else + return; + + if (RTL_R8(tp, Config2) & PCI_Clock_66MHz) + val |= 0xff; + + RTL_W32(tp, 0x7c, val); +} + +static void rtl_set_rx_mode(struct net_device *dev) +{ + u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast; + /* Multicast hash filter */ + u32 mc_filter[2] = { 0xffffffff, 0xffffffff }; + struct rtl8169_private *tp = netdev_priv(dev); + u32 tmp; + + if (dev->flags & IFF_PROMISC) { + rx_mode |= AcceptAllPhys; + } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT || + dev->flags & IFF_ALLMULTI || + tp->mac_version == RTL_GIGA_MAC_VER_35) { + /* accept all multicasts */ + } else if (netdev_mc_empty(dev)) { + rx_mode &= ~AcceptMulticast; + } else { + struct netdev_hw_addr *ha; + + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + u32 bit_nr = eth_hw_addr_crc(ha) >> 26; + mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31); + } + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + tmp = mc_filter[0]; + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(tmp); + } + } + + RTL_W32(tp, MAR0 + 4, mc_filter[1]); + RTL_W32(tp, MAR0 + 0, mc_filter[0]); + + tmp = RTL_R32(tp, RxConfig); + RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_OK_MASK) | rx_mode); +} + +DECLARE_RTL_COND(rtl_csiar_cond) +{ + return RTL_R32(tp, CSIAR) & CSIAR_FLAG; +} + +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIDR, value); + RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE | func << 16); + + rtl_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 | + CSIAR_BYTE_ENABLE); + + return rtl_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? + RTL_R32(tp, CSIDR) : ~0; +} + +static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) +{ + struct pci_dev *pdev = tp->pci_dev; + u32 csi; + + /* According to Realtek the value at config space address 0x070f + * controls the L0s/L1 entrance latency. We try standard ECAM access + * first and if it fails fall back to CSI. 
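+ *
+ * (pdev->cfg_size exceeds 0x70f only when extended config space is
+ * actually reachable, which is what the check below relies on.)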
+ */ + if (pdev->cfg_size > 0x070f && + pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL) + return; + + netdev_notice_once(tp->dev, + "No native access to PCI extended config space, falling back to CSI\n"); + csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; + rtl_csi_write(tp, 0x070c, csi | val << 24); +} + +static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) +{ + rtl_csi_access_enable(tp, 0x27); +} + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +static void __rtl_ephy_init(struct rtl8169_private *tp, + const struct ephy_info *e, int len) +{ + u16 w; + + while (len-- > 0) { + w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits; + rtl_ephy_write(tp, e->offset, w); + e++; + } +} + +#define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a)) + +static void rtl_disable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_enable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp) +{ + /* work around an issue when PCI reset occurs during L2/L3 state */ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23); +} + +static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) +{ + /* Don't enable ASPM in the chip if OS can't control ASPM */ + if (enable && tp->aspm_manageable) { + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); + } else { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + } + + udelay(10); +} + +static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat, + u16 tx_stat, u16 rx_dyn, u16 tx_dyn) +{ + /* Usage of dynamic vs. static FIFO is controlled by bit + * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known. + */ + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn); +} + +static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp, + u8 low, u8 high) +{ + /* FIFO thresholds for pause flow control */ + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high); +} + +static void rtl_hw_start_8168b(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168cp[] = { + { 0x01, 0, 0x0001 }, + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0042 }, + { 0x06, 0x0080, 0x0000 }, + { 0x07, 0, 0x2000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168cp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + /* Magic. 
*/ + RTL_W8(tp, DBG_REG, 0x20); +} + +static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_1[] = { + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0002 }, + { 0x06, 0x0080, 0x0000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); + + rtl_ephy_init(tp, e_info_8168c_1); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_2[] = { + { 0x01, 0, 0x0001 }, + { 0x03, 0x0400, 0x0020 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168c_2); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168d(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168d_4[] = { + { 0x0b, 0x0000, 0x0048 }, + { 0x19, 0x0020, 0x0050 }, + { 0x0c, 0x0100, 0x0020 }, + { 0x10, 0x0004, 0x0000 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168d_4); + + rtl_enable_clock_request(tp); +} + +static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_1[] = { + { 0x00, 0x0200, 0x0100 }, + { 0x00, 0x0000, 0x0004 }, + { 0x06, 0x0002, 0x0001 }, + { 0x06, 0x0000, 0x0030 }, + { 0x07, 0x0000, 0x2000 }, + { 0x00, 0x0000, 0x0020 }, + { 0x03, 0x5800, 0x2000 }, + { 0x03, 0x0000, 0x0001 }, + { 0x01, 0x0800, 0x1000 }, + { 0x07, 0x0000, 0x4000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x19, 0xffff, 0xfe6c }, + { 0x0a, 0x0000, 0x0040 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_1); + + rtl_disable_clock_request(tp); + + /* Reset tx FIFO pointer */ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST); + + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_2[] = { + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0004 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_2); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_eri_set_bits(tp, 0x0d4, 0x1f00); + rtl_eri_set_bits(tp, 0x1d0, BIT(1)); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168f(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_set_bits(tp, 0x1d0, BIT(4) | BIT(1)); + 
rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl8168_config_eee_mac(tp); +} + +static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x08, 0x0001, 0x0002 }, + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_hw_start_8168f(tp); + + rtl_ephy_init(tp, e_info_8168f_1); + + rtl_eri_set_bits(tp, 0x0d4, 0x1f00); +} + +static void rtl_hw_start_8411(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_hw_start_8168f(tp); + rtl_pcie_state_l2l3_disable(tp); + + rtl_ephy_init(tp, e_info_8168f_1); + + rtl_eri_set_bits(tp, 0x0d4, 0x0c00); +} + +static void rtl_hw_start_8168g(struct rtl8169_private *tp) +{ + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_eri_set_bits(tp, 0x0d4, 0x1f80); + + rtl8168_config_eee_mac(tp); + + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_1[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8000, 0x0000 } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_1); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_2[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x19, 0xffff, 0x7c00 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x0d, 0xffff, 0x1666 }, + { 0x00, 0xffff, 0x10a3 }, + { 0x06, 0xffff, 0xf050 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x4000, 0x0000 }, + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_2); +} + +static void rtl_hw_start_8411_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8411_2[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x37d0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x00, 0x0000, 0x0080 }, + { 0x06, 0x0000, 0x0010 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x0000, 0x4000 }, + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8411_2); + + /* The following Realtek-provided magic fixes an issue with the RX unit + * getting confused after the PHY having been powered-down. 
+ */ + r8168_mac_ocp_write(tp, 0xFC28, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2A, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0000); + r8168_mac_ocp_write(tp, 0xFC30, 0x0000); + r8168_mac_ocp_write(tp, 0xFC32, 0x0000); + r8168_mac_ocp_write(tp, 0xFC34, 0x0000); + r8168_mac_ocp_write(tp, 0xFC36, 0x0000); + mdelay(3); + r8168_mac_ocp_write(tp, 0xFC26, 0x0000); + + r8168_mac_ocp_write(tp, 0xF800, 0xE008); + r8168_mac_ocp_write(tp, 0xF802, 0xE00A); + r8168_mac_ocp_write(tp, 0xF804, 0xE00C); + r8168_mac_ocp_write(tp, 0xF806, 0xE00E); + r8168_mac_ocp_write(tp, 0xF808, 0xE027); + r8168_mac_ocp_write(tp, 0xF80A, 0xE04F); + r8168_mac_ocp_write(tp, 0xF80C, 0xE05E); + r8168_mac_ocp_write(tp, 0xF80E, 0xE065); + r8168_mac_ocp_write(tp, 0xF810, 0xC602); + r8168_mac_ocp_write(tp, 0xF812, 0xBE00); + r8168_mac_ocp_write(tp, 0xF814, 0x0000); + r8168_mac_ocp_write(tp, 0xF816, 0xC502); + r8168_mac_ocp_write(tp, 0xF818, 0xBD00); + r8168_mac_ocp_write(tp, 0xF81A, 0x074C); + r8168_mac_ocp_write(tp, 0xF81C, 0xC302); + r8168_mac_ocp_write(tp, 0xF81E, 0xBB00); + r8168_mac_ocp_write(tp, 0xF820, 0x080A); + r8168_mac_ocp_write(tp, 0xF822, 0x6420); + r8168_mac_ocp_write(tp, 0xF824, 0x48C2); + r8168_mac_ocp_write(tp, 0xF826, 0x8C20); + r8168_mac_ocp_write(tp, 0xF828, 0xC516); + r8168_mac_ocp_write(tp, 0xF82A, 0x64A4); + r8168_mac_ocp_write(tp, 0xF82C, 0x49C0); + r8168_mac_ocp_write(tp, 0xF82E, 0xF009); + r8168_mac_ocp_write(tp, 0xF830, 0x74A2); + r8168_mac_ocp_write(tp, 0xF832, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF834, 0x74A0); + r8168_mac_ocp_write(tp, 0xF836, 0xC50E); + r8168_mac_ocp_write(tp, 0xF838, 0x9CA2); + r8168_mac_ocp_write(tp, 0xF83A, 0x1C11); + r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF83E, 0xE006); + r8168_mac_ocp_write(tp, 0xF840, 0x74F8); + r8168_mac_ocp_write(tp, 0xF842, 0x48C4); + r8168_mac_ocp_write(tp, 0xF844, 0x8CF8); + r8168_mac_ocp_write(tp, 0xF846, 0xC404); + r8168_mac_ocp_write(tp, 0xF848, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84A, 0xC403); + r8168_mac_ocp_write(tp, 0xF84C, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2); + r8168_mac_ocp_write(tp, 0xF850, 0x0C0A); + r8168_mac_ocp_write(tp, 0xF852, 0xE434); + r8168_mac_ocp_write(tp, 0xF854, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF856, 0x49D9); + r8168_mac_ocp_write(tp, 0xF858, 0xF01F); + r8168_mac_ocp_write(tp, 0xF85A, 0xC526); + r8168_mac_ocp_write(tp, 0xF85C, 0x64A5); + r8168_mac_ocp_write(tp, 0xF85E, 0x1400); + r8168_mac_ocp_write(tp, 0xF860, 0xF007); + r8168_mac_ocp_write(tp, 0xF862, 0x0C01); + r8168_mac_ocp_write(tp, 0xF864, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF866, 0x1C15); + r8168_mac_ocp_write(tp, 0xF868, 0xC51B); + r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF86C, 0xE013); + r8168_mac_ocp_write(tp, 0xF86E, 0xC519); + r8168_mac_ocp_write(tp, 0xF870, 0x74A0); + r8168_mac_ocp_write(tp, 0xF872, 0x48C4); + r8168_mac_ocp_write(tp, 0xF874, 0x8CA0); + r8168_mac_ocp_write(tp, 0xF876, 0xC516); + r8168_mac_ocp_write(tp, 0xF878, 0x74A4); + r8168_mac_ocp_write(tp, 0xF87A, 0x48C8); + r8168_mac_ocp_write(tp, 0xF87C, 0x48CA); + r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4); + r8168_mac_ocp_write(tp, 0xF880, 0xC512); + r8168_mac_ocp_write(tp, 0xF882, 0x1B00); + r8168_mac_ocp_write(tp, 0xF884, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF886, 0x1B1C); + r8168_mac_ocp_write(tp, 0xF888, 0x483F); + r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2); + r8168_mac_ocp_write(tp, 0xF88C, 0x1B04); + r8168_mac_ocp_write(tp, 0xF88E, 0xC508); + r8168_mac_ocp_write(tp, 0xF890, 0x9BA0); + 
r8168_mac_ocp_write(tp, 0xF892, 0xC505); + r8168_mac_ocp_write(tp, 0xF894, 0xBD00); + r8168_mac_ocp_write(tp, 0xF896, 0xC502); + r8168_mac_ocp_write(tp, 0xF898, 0xBD00); + r8168_mac_ocp_write(tp, 0xF89A, 0x0300); + r8168_mac_ocp_write(tp, 0xF89C, 0x051E); + r8168_mac_ocp_write(tp, 0xF89E, 0xE434); + r8168_mac_ocp_write(tp, 0xF8A0, 0xE018); + r8168_mac_ocp_write(tp, 0xF8A2, 0xE092); + r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20); + r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F); + r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4); + r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3); + r8168_mac_ocp_write(tp, 0xF8AE, 0xF007); + r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0); + r8168_mac_ocp_write(tp, 0xF8B2, 0xF103); + r8168_mac_ocp_write(tp, 0xF8B4, 0xC607); + r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8B8, 0xC606); + r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8BC, 0xC602); + r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C); + r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28); + r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C); + r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00); + r8168_mac_ocp_write(tp, 0xF8C8, 0xC707); + r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00); + r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2); + r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1); + r8168_mac_ocp_write(tp, 0xF8D0, 0xC502); + r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA); + r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0); + r8168_mac_ocp_write(tp, 0xF8D8, 0xC502); + r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8DC, 0x0132); + + r8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + r8168_mac_ocp_write(tp, 0xFC2A, 0x0743); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0801); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9); + r8168_mac_ocp_write(tp, 0xFC30, 0x02FD); + r8168_mac_ocp_write(tp, 0xFC32, 0x0C25); + r8168_mac_ocp_write(tp, 0xFC34, 0x00A9); + r8168_mac_ocp_write(tp, 0xFC36, 0x012D); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168h_1[] = { + { 0x1e, 0x0800, 0x0001 }, + { 0x1d, 0x0000, 0x0800 }, + { 0x05, 0xffff, 0x2089 }, + { 0x06, 0xffff, 0x5881 }, + { 0x04, 0xffff, 0x854a }, + { 0x01, 0xffff, 0x068b } + }; + int rg_saw_cnt; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168h_1); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, 0x1f00); + rtl_eri_set_bits(tp, 0xdc, 0x001c); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = 16000000/rg_saw_cnt; + sw_cnt_1ms_ini &= 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_modify(tp, 
0xe052, 0x6000, 0x8008); + r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, 0x1f80); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_1[] = { + { 0x00, 0xffff, 0x10ab }, + { 0x06, 0xffff, 0xf030 }, + { 0x08, 0xffff, 0x2006 }, + { 0x0d, 0xffff, 0x1666 }, + { 0x0c, 0x3ff0, 0x0000 } + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_1); + + rtl_hw_start_8168ep(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_2[] = { + { 0x00, 0xffff, 0x10a3 }, + { 0x19, 0xffff, 0xfc00 }, + { 0x1e, 0xffff, 0x20ea } + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_2); + + rtl_hw_start_8168ep(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_3[] = { + { 0x00, 0x0000, 0x0080 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_3); + + rtl_hw_start_8168ep(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8117(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8117[] = { + { 0x19, 0x0040, 0x1100 }, + { 0x59, 0x0040, 0x1100 }, + }; + int rg_saw_cnt; + + rtl8168ep_stop_cmac(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8117); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, 0x1f90); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 
0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_write(tp, 0xea80, 0x0003); + r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + /* firmware is for MAC only */ + r8169_apply_firmware(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8102e_1[] = { + { 0x01, 0, 0x6e65 }, + { 0x02, 0, 0x091f }, + { 0x03, 0, 0xc2f9 }, + { 0x06, 0, 0xafb5 }, + { 0x07, 0, 0x0e00 }, + { 0x19, 0, 0xec80 }, + { 0x01, 0, 0x2e65 }, + { 0x01, 0, 0x6e65 } + }; + u8 cfg1; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, FIX_NAK_1); + + RTL_W8(tp, Config1, + LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + cfg1 = RTL_R8(tp, Config1); + if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) + RTL_W8(tp, Config1, cfg1 & ~LEDS0); + + rtl_ephy_init(tp, e_info_8102e_1); +} + +static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8102e_2(tp); + + rtl_ephy_write(tp, 0x03, 0xc2f9); +} + +static void rtl_hw_start_8401(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8401[] = { + { 0x01, 0xffff, 0x6fe5 }, + { 0x03, 0xffff, 0x0599 }, + { 0x06, 0xffff, 0xaf25 }, + { 0x07, 0xffff, 0x8e68 }, + }; + + rtl_ephy_init(tp, e_info_8401); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8105e_1[] = { + { 0x07, 0, 0x4000 }, + { 0x19, 0, 0x0200 }, + { 0x19, 0, 0x0020 }, + { 0x1e, 0, 0x2000 }, + { 0x03, 0, 0x0001 }, + { 0x19, 0, 0x0100 }, + { 0x19, 0, 0x0004 }, + { 0x0a, 0, 0x0020 } + }; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + /* Disable Early Tally Counter */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + + rtl_ephy_init(tp, e_info_8105e_1); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) +{ + rtl_hw_start_8105e_1(tp); + rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000); +} + +static void rtl_hw_start_8402(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8402[] = { + { 0x19, 0xffff, 0xff64 }, + { 0x1e, 0, 0x4000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, 
RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl_ephy_init(tp, e_info_8402); + + rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06); + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_w0w1_eri(tp, 0x0d4, 0x0e00, 0xff00); + + /* disable EEE */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + + rtl_pcie_state_l2l3_disable(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8106(struct rtl8169_private *tp) +{ + rtl_hw_aspm_clkreq_enable(tp, false); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + + /* The default value is 0x13. Change it to 0x2f */ + rtl_csi_access_enable(tp, 0x2f); + + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); + + /* disable EEE */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + + rtl_pcie_state_l2l3_disable(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond) +{ + return r8168_mac_ocp_read(tp, 0xe00e) & BIT(13); +} + +static void rtl_hw_start_8125_common(struct rtl8169_private *tp) +{ + rtl_pcie_state_l2l3_disable(tp); + + RTL_W16(tp, 0x382, 0x221b); + RTL_W8(tp, 0x4500, 0); + RTL_W16(tp, 0x4800, 0); + + /* disable UPS */ + r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10); + + r8168_mac_ocp_write(tp, 0xc140, 0xffff); + r8168_mac_ocp_write(tp, 0xc142, 0xffff); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x03a9); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + /* disable new tx descriptor format */ + r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200); + else + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000); + else + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020); + + r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c); + r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033); + r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040); + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030); + r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000); + r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); + r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403); + r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068); + r8168_mac_ocp_modify(tp, 0xc0ac, 0x0080, 0x1f00); + r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f); + + r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001); + udelay(1); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0001, 0x0000); + RTL_W16(tp, 0x1880, RTL_R16(tp, 0x1880) & ~0x0030); + + r8168_mac_ocp_write(tp, 0xe098, 0xc302); + + rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + rtl8125b_config_eee_mac(tp); + else + rtl8125a_config_eee_mac(tp); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + udelay(10); +} + +static void rtl_hw_start_8125a_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125a_1[] = { + { 0x01, 0xffff, 0xa812 }, + { 0x09, 0xffff, 0x520c }, + { 0x04, 0xffff, 0xd000 }, + { 0x0d, 
0xffff, 0xf702 }, + { 0x0a, 0xffff, 0x8653 }, + { 0x06, 0xffff, 0x001e }, + { 0x08, 0xffff, 0x3595 }, + { 0x20, 0xffff, 0x9455 }, + { 0x21, 0xffff, 0x99ff }, + { 0x02, 0xffff, 0x6046 }, + { 0x29, 0xffff, 0xfe00 }, + { 0x23, 0xffff, 0xab62 }, + + { 0x41, 0xffff, 0xa80c }, + { 0x49, 0xffff, 0x520c }, + { 0x44, 0xffff, 0xd000 }, + { 0x4d, 0xffff, 0xf702 }, + { 0x4a, 0xffff, 0x8653 }, + { 0x46, 0xffff, 0x001e }, + { 0x48, 0xffff, 0x3595 }, + { 0x60, 0xffff, 0x9455 }, + { 0x61, 0xffff, 0x99ff }, + { 0x42, 0xffff, 0x6046 }, + { 0x69, 0xffff, 0xfe00 }, + { 0x63, 0xffff, 0xab62 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8125a_1); + + rtl_hw_start_8125_common(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8125a_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125a_2[] = { + { 0x04, 0xffff, 0xd000 }, + { 0x0a, 0xffff, 0x8653 }, + { 0x23, 0xffff, 0xab66 }, + { 0x20, 0xffff, 0x9455 }, + { 0x21, 0xffff, 0x99ff }, + { 0x29, 0xffff, 0xfe04 }, + + { 0x44, 0xffff, 0xd000 }, + { 0x4a, 0xffff, 0x8653 }, + { 0x63, 0xffff, 0xab66 }, + { 0x60, 0xffff, 0x9455 }, + { 0x61, 0xffff, 0x99ff }, + { 0x69, 0xffff, 0xfe04 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8125a_2); + + rtl_hw_start_8125_common(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8125b(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125b[] = { + { 0x0b, 0xffff, 0xa908 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x4b, 0xffff, 0xa908 }, + { 0x5e, 0xffff, 0x20eb }, + { 0x22, 0x0030, 0x0020 }, + { 0x62, 0x0030, 0x0020 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + rtl_hw_aspm_clkreq_enable(tp, false); + + rtl_ephy_init(tp, e_info_8125b); + rtl_hw_start_8125_common(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_config(struct rtl8169_private *tp) +{ + static const rtl_generic_fct hw_configs[] = { + [RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1, + [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3, + [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401, + [RTL_GIGA_MAC_VER_16] = NULL, + [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1, + [RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1, + [RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2, + [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_2, + [RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4, + [RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2, + [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3, + [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4, + [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1, + [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2, + [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2, + [RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402, + [RTL_GIGA_MAC_VER_38] = 
rtl_hw_start_8411, + [RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106, + [RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2, + [RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1, + [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2, + [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3, + [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117, + [RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117, + [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125a_1, + [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2, + [RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b, + }; + + if (hw_configs[tp->mac_version]) + hw_configs[tp->mac_version](tp); +} + +static void rtl_hw_start_8125(struct rtl8169_private *tp) +{ + int i; + + /* disable interrupt coalescing */ + for (i = 0xa00; i < 0xb00; i += 4) + RTL_W32(tp, i, 0); + + rtl_hw_config(tp); +} + +static void rtl_hw_start_8168(struct rtl8169_private *tp) +{ + if (rtl_is_8168evl_up(tp)) + RTL_W8(tp, MaxTxPacketSize, EarlySize); + else + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + rtl_hw_config(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static void rtl_hw_start_8169(struct rtl8169_private *tp) +{ + RTL_W8(tp, EarlyTxThres, NoEarlyTx); + + tp->cp_cmd |= PCIMulRW; + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) + tp->cp_cmd |= EnAnaPLL; + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + rtl8169_set_magic_reg(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static void rtl_hw_start(struct rtl8169_private *tp) +{ + rtl_unlock_config_regs(tp); + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + rtl_hw_start_8169(tp); + else if (rtl_is_8125(tp)) + rtl_hw_start_8125(tp); + else + rtl_hw_start_8168(tp); + + rtl_set_rx_max_size(tp); + rtl_set_rx_tx_desc_registers(tp); + rtl_lock_config_regs(tp); + + rtl_jumbo_config(tp); + + /* Initially a 10 us delay. Turned it into a PCI commit. 
- FR */ + rtl_pci_commit(tp); + + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_init_rxcfg(tp); + rtl_set_tx_config_registers(tp); + rtl_set_rx_config_features(tp, tp->dev->features); + rtl_set_rx_mode(tp->dev); + rtl_irq_enable(tp); +} + +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + dev->mtu = new_mtu; + netdev_update_features(dev); + rtl_jumbo_config(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + rtl8125_set_eee_txidle_timer(tp); + break; + default: + break; + } + + return 0; +} + +static void rtl8169_mark_to_asic(struct RxDesc *desc) +{ + u32 eor = le32_to_cpu(desc->opts1) & RingEnd; + + desc->opts2 = 0; + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + WRITE_ONCE(desc->opts1, cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE)); +} + +static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp, + struct RxDesc *desc) +{ + struct device *d = tp_to_dev(tp); + int node = dev_to_node(d); + dma_addr_t mapping; + struct page *data; + + data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE)); + if (!data) + return NULL; + + mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + netdev_err(tp->dev, "Failed to map RX DMA!\n"); + __free_pages(data, get_order(R8169_RX_BUF_SIZE)); + return NULL; + } + + desc->addr = cpu_to_le64(mapping); + rtl8169_mark_to_asic(desc); + + return data; +} + +static void rtl8169_rx_clear(struct rtl8169_private *tp) +{ + int i; + + for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) { + dma_unmap_page(tp_to_dev(tp), + le64_to_cpu(tp->RxDescArray[i].addr), + R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + __free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE)); + tp->Rx_databuff[i] = NULL; + tp->RxDescArray[i].addr = 0; + tp->RxDescArray[i].opts1 = 0; + } +} + +static int rtl8169_rx_fill(struct rtl8169_private *tp) +{ + int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + struct page *data; + + data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); + if (!data) { + rtl8169_rx_clear(tp); + return -ENOMEM; + } + tp->Rx_databuff[i] = data; + } + + /* mark as last descriptor in the ring */ + tp->RxDescArray[NUM_RX_DESC - 1].opts1 |= cpu_to_le32(RingEnd); + + return 0; +} + +static int rtl8169_init_ring(struct rtl8169_private *tp) +{ + rtl8169_init_ring_indexes(tp); + + memset(tp->tx_skb, 0, sizeof(tp->tx_skb)); + memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff)); + + return rtl8169_rx_fill(tp); +} + +static void rtl8169_unmap_tx_skb(struct rtl8169_private *tp, unsigned int entry) +{ + struct ring_info *tx_skb = tp->tx_skb + entry; + struct TxDesc *desc = tp->TxDescArray + entry; + + dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), tx_skb->len, + DMA_TO_DEVICE); + memset(desc, 0, sizeof(*desc)); + memset(tx_skb, 0, sizeof(*tx_skb)); +} + +static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, + unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; i++) { + unsigned int entry = (start + i) % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + unsigned int len = tx_skb->len; + + if (len) { + struct sk_buff *skb = tx_skb->skb; + + rtl8169_unmap_tx_skb(tp, entry); + if (skb) + dev_consume_skb_any(skb); + } + } +} + +static void rtl8169_tx_clear(struct rtl8169_private *tp) +{ + rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); + netdev_reset_queue(tp->dev); +} + +static void 
rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) +{ + napi_disable(&tp->napi); + + /* Give a racing hard_start_xmit a few cycles to complete. */ + synchronize_net(); + + /* Disable interrupts */ + rtl8169_irq_mask_and_ack(tp); + + rtl_rx_close(tp); + + if (going_down && tp->dev->wol_enabled) + goto no_reset; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000); + break; + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + rtl_enable_rxdvgate(tp); + fsleep(2000); + break; + default: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + fsleep(100); + break; + } + + rtl_hw_reset(tp); +no_reset: + rtl8169_tx_clear(tp); + rtl8169_init_ring_indexes(tp); +} + +static void rtl_reset_work(struct rtl8169_private *tp) +{ + int i; + + netif_stop_queue(tp->dev); + + rtl8169_cleanup(tp, false); + + for (i = 0; i < NUM_RX_DESC; i++) + rtl8169_mark_to_asic(tp->RxDescArray + i); + + napi_enable(&tp->napi); + rtl_hw_start(tp); +} + +static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len, + void *addr, unsigned int entry, bool desc_own) +{ + struct TxDesc *txd = tp->TxDescArray + entry; + struct device *d = tp_to_dev(tp); + dma_addr_t mapping; + u32 opts1; + int ret; + + mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); + ret = dma_mapping_error(d, mapping); + if (unlikely(ret)) { + if (net_ratelimit()) + netdev_err(tp->dev, "Failed to map TX data!\n"); + return ret; + } + + txd->addr = cpu_to_le64(mapping); + txd->opts2 = cpu_to_le32(opts[1]); + + opts1 = opts[0] | len; + if (entry == NUM_TX_DESC - 1) + opts1 |= RingEnd; + if (desc_own) + opts1 |= DescOwn; + txd->opts1 = cpu_to_le32(opts1); + + tp->tx_skb[entry].len = len; + + return 0; +} + +static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, + const u32 *opts, unsigned int entry) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int cur_frag; + + for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { + const skb_frag_t *frag = info->frags + cur_frag; + void *addr = skb_frag_address(frag); + u32 len = skb_frag_size(frag); + + entry = (entry + 1) % NUM_TX_DESC; + + if (unlikely(rtl8169_tx_map(tp, opts, len, addr, entry, true))) + goto err_out; + } + + return 0; + +err_out: + rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); + return -EIO; +} + +static bool rtl_skb_is_udp(struct sk_buff *skb) +{ + int no = skb_network_offset(skb); + struct ipv6hdr *i6h, _i6h; + struct iphdr *ih, _ih; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih); + return ih && ih->protocol == IPPROTO_UDP; + case htons(ETH_P_IPV6): + i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h); + return i6h && i6h->nexthdr == IPPROTO_UDP; + default: + return false; + } +} + +#define RTL_MIN_PATCH_LEN 47 + +/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */ +static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + unsigned int padto = 0, len = skb->len; + + if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN && + 
rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) { + unsigned int trans_data_len = skb_tail_pointer(skb) - + skb_transport_header(skb); + + if (trans_data_len >= offsetof(struct udphdr, len) && + trans_data_len < RTL_MIN_PATCH_LEN) { + u16 dest = ntohs(udp_hdr(skb)->dest); + + /* dest is a standard PTP port */ + if (dest == 319 || dest == 320) + padto = len + RTL_MIN_PATCH_LEN - trans_data_len; + } + + if (trans_data_len < sizeof(struct udphdr)) + padto = max_t(unsigned int, padto, + len + sizeof(struct udphdr) - trans_data_len); + } + + return padto; +} + +static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + unsigned int padto; + + padto = rtl8125_quirk_udp_padto(tp, skb); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_60: + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + padto = max_t(unsigned int, padto, ETH_ZLEN); + break; + default: + break; + } + + return padto; +} + +static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts) +{ + u32 mss = skb_shinfo(skb)->gso_size; + + if (mss) { + opts[0] |= TD_LSO; + opts[0] |= mss << TD0_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + + if (ip->protocol == IPPROTO_TCP) + opts[0] |= TD0_IP_CS | TD0_TCP_CS; + else if (ip->protocol == IPPROTO_UDP) + opts[0] |= TD0_IP_CS | TD0_UDP_CS; + else + WARN_ON_ONCE(1); + } +} + +static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, + struct sk_buff *skb, u32 *opts) +{ + u32 transport_offset = (u32)skb_transport_offset(skb); + struct skb_shared_info *shinfo = skb_shinfo(skb); + u32 mss = shinfo->gso_size; + + if (mss) { + if (shinfo->gso_type & SKB_GSO_TCPV4) { + opts[0] |= TD1_GTSENV4; + } else if (shinfo->gso_type & SKB_GSO_TCPV6) { + if (skb_cow_head(skb, 0)) + return false; + + tcp_v6_gso_csum_prep(skb); + opts[0] |= TD1_GTSENV6; + } else { + WARN_ON_ONCE(1); + } + + opts[0] |= transport_offset << GTTCPHO_SHIFT; + opts[1] |= mss << TD1_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ip_protocol; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + opts[1] |= TD1_IPv4_CS; + ip_protocol = ip_hdr(skb)->protocol; + break; + + case htons(ETH_P_IPV6): + opts[1] |= TD1_IPv6_CS; + ip_protocol = ipv6_hdr(skb)->nexthdr; + break; + + default: + ip_protocol = IPPROTO_RAW; + break; + } + + if (ip_protocol == IPPROTO_TCP) + opts[1] |= TD1_TCP_CS; + else if (ip_protocol == IPPROTO_UDP) + opts[1] |= TD1_UDP_CS; + else + WARN_ON_ONCE(1); + + opts[1] |= transport_offset << TCPHO_SHIFT; + } else { + unsigned int padto = rtl_quirk_packet_padto(tp, skb); + + /* skb_padto would free the skb on error */ + return !__skb_put_padto(skb, padto, false); + } + + return true; +} + +static bool rtl_tx_slots_avail(struct rtl8169_private *tp) +{ + unsigned int slots_avail = READ_ONCE(tp->dirty_tx) + NUM_TX_DESC + - READ_ONCE(tp->cur_tx); + + /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ + return slots_avail > MAX_SKB_FRAGS; +} + +/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */ +static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... 
RTL_GIGA_MAC_VER_17: + return false; + default: + return true; + } +} + +static void rtl8169_doorbell(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W16(tp, TxPoll_8125, BIT(0)); + else + RTL_W8(tp, TxPoll, NPQ); +} + +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned int frags = skb_shinfo(skb)->nr_frags; + struct rtl8169_private *tp = netdev_priv(dev); + unsigned int entry = tp->cur_tx % NUM_TX_DESC; + struct TxDesc *txd_first, *txd_last; + bool stop_queue, door_bell; + u32 opts[2]; + + if (unlikely(!rtl_tx_slots_avail(tp))) { + if (net_ratelimit()) + netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); + goto err_stop_0; + } + + opts[1] = rtl8169_tx_vlan_tag(skb); + opts[0] = 0; + + if (!rtl_chip_supports_csum_v2(tp)) + rtl8169_tso_csum_v1(skb, opts); + else if (!rtl8169_tso_csum_v2(tp, skb, opts)) + goto err_dma_0; + + if (unlikely(rtl8169_tx_map(tp, opts, skb_headlen(skb), skb->data, + entry, false))) + goto err_dma_0; + + txd_first = tp->TxDescArray + entry; + + if (frags) { + if (rtl8169_xmit_frags(tp, skb, opts, entry)) + goto err_dma_1; + entry = (entry + frags) % NUM_TX_DESC; + } + + txd_last = tp->TxDescArray + entry; + txd_last->opts1 |= cpu_to_le32(LastFrag); + tp->tx_skb[entry].skb = skb; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + + door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more()); + + txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag); + + /* rtl_tx needs to see descriptor changes before updated tp->cur_tx */ + smp_wmb(); + + WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1); + + stop_queue = !rtl_tx_slots_avail(tp); + if (unlikely(stop_queue)) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); + netif_stop_queue(dev); + /* Sync with rtl_tx: + * - publish queue status and cur_tx ring index (write barrier) + * - refresh dirty_tx ring index (read barrier). + * May the current thread have a pessimistic view of the ring + * status and forget to wake up queue, a racing rtl_tx thread + * can't. 
+ */ + smp_mb__after_atomic(); + if (rtl_tx_slots_avail(tp)) + netif_start_queue(dev); + door_bell = true; + } + + if (door_bell) + rtl8169_doorbell(tp); + + return NETDEV_TX_OK; + +err_dma_1: + rtl8169_unmap_tx_skb(tp, entry); +err_dma_0: + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + +err_stop_0: + netif_stop_queue(dev); + dev->stats.tx_dropped++; + return NETDEV_TX_BUSY; +} + +static unsigned int rtl_last_frag_len(struct sk_buff *skb) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int nr_frags = info->nr_frags; + + if (!nr_frags) + return UINT_MAX; + + return skb_frag_size(info->frags + nr_frags - 1); +} + +/* Workaround for hw issues with TSO on RTL8168evl */ +static netdev_features_t rtl8168evl_fix_tso(struct sk_buff *skb, + netdev_features_t features) +{ + /* IPv4 header has options field */ + if (vlan_get_protocol(skb) == htons(ETH_P_IP) && + ip_hdrlen(skb) > sizeof(struct iphdr)) + features &= ~NETIF_F_ALL_TSO; + + /* IPv4 TCP header has options field */ + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 && + tcp_hdrlen(skb) > sizeof(struct tcphdr)) + features &= ~NETIF_F_ALL_TSO; + + else if (rtl_last_frag_len(skb) <= 6) + features &= ~NETIF_F_ALL_TSO; + + return features; +} + +static netdev_features_t rtl8169_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + int transport_offset = skb_transport_offset(skb); + struct rtl8169_private *tp = netdev_priv(dev); + + if (skb_is_gso(skb)) { + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + features = rtl8168evl_fix_tso(skb, features); + + if (transport_offset > GTTCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_ALL_TSO; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* work around hw bug on some chip versions */ + if (skb->len < ETH_ZLEN) + features &= ~NETIF_F_CSUM_MASK; + + if (rtl_quirk_packet_padto(tp, skb)) + features &= ~NETIF_F_CSUM_MASK; + + if (transport_offset > TCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_CSUM_MASK; + } + + return vlan_features_check(skb, features); +} + +static void rtl8169_pcierr_interrupt(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + int pci_status_errs; + u16 pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + + pci_status_errs = pci_status_get_and_clear_errors(pdev); + + if (net_ratelimit()) + netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n", + pci_cmd, pci_status_errs); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, + int budget) +{ + unsigned int dirty_tx, bytes_compl = 0, pkts_compl = 0; + struct sk_buff *skb; + + dirty_tx = tp->dirty_tx; + + while (READ_ONCE(tp->cur_tx) != dirty_tx) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + u32 status; + + status = le32_to_cpu(tp->TxDescArray[entry].opts1); + if (status & DescOwn) + break; + + skb = tp->tx_skb[entry].skb; + rtl8169_unmap_tx_skb(tp, entry); + + if (skb) { + pkts_compl++; + bytes_compl += skb->len; + napi_consume_skb(skb, budget); + } + dirty_tx++; + } + + if (tp->dirty_tx != dirty_tx) { + netdev_completed_queue(dev, pkts_compl, bytes_compl); + dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl); + + /* Sync with rtl8169_start_xmit: + * - publish dirty_tx ring index (write barrier) + * - refresh cur_tx ring index and queue status (read barrier) + * May the current thread miss the stopped queue condition, + * a racing 
xmit thread can only have a right view of the + * ring status. + */ + smp_store_mb(tp->dirty_tx, dirty_tx); + if (netif_queue_stopped(dev) && rtl_tx_slots_avail(tp)) + netif_wake_queue(dev); + /* + * 8168 hack: TxPoll requests are lost when the Tx packets are + * too close. Let's kick an extra TxPoll request when a burst + * of start_xmit activity is detected (if it is not detected, + * it is slow enough). -- FR + * If skb is NULL then we come here again once a tx irq is + * triggered after the last fragment is marked transmitted. + */ + if (tp->cur_tx != dirty_tx && skb) + rtl8169_doorbell(tp); + } +} + +static inline int rtl8169_fragmented_frame(u32 status) +{ + return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); +} + +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) +{ + u32 status = opts1 & (RxProtoMask | RxCSFailMask); + + if (status == RxProtoTCP || status == RxProtoUDP) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); +} + +static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget) +{ + struct device *d = tp_to_dev(tp); + int count; + + for (count = 0; count < budget; count++, tp->cur_rx++) { + unsigned int pkt_size, entry = tp->cur_rx % NUM_RX_DESC; + struct RxDesc *desc = tp->RxDescArray + entry; + struct sk_buff *skb; + const void *rx_buf; + dma_addr_t addr; + u32 status; + + status = le32_to_cpu(desc->opts1); + if (status & DescOwn) + break; + + /* This barrier is needed to keep us from reading + * any other fields out of the Rx descriptor until + * we know the status of DescOwn + */ + dma_rmb(); + + if (unlikely(status & RxRES)) { + if (net_ratelimit()) + netdev_warn(dev, "Rx ERROR. status = %08x\n", + status); + dev->stats.rx_errors++; + if (status & (RxRWT | RxRUNT)) + dev->stats.rx_length_errors++; + if (status & RxCRC) + dev->stats.rx_crc_errors++; + + if (!(dev->features & NETIF_F_RXALL)) + goto release_descriptor; + else if (status & RxRWT || !(status & (RxRUNT | RxCRC))) + goto release_descriptor; + } + + pkt_size = status & GENMASK(13, 0); + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size -= ETH_FCS_LEN; + + /* The driver does not support incoming fragmented frames. + * They are seen as a symptom of over-mtu sized frames. 
+ */ + if (unlikely(rtl8169_fragmented_frame(status))) { + dev->stats.rx_dropped++; + dev->stats.rx_length_errors++; + goto release_descriptor; + } + + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (unlikely(!skb)) { + dev->stats.rx_dropped++; + goto release_descriptor; + } + + addr = le64_to_cpu(desc->addr); + rx_buf = page_address(tp->Rx_databuff[entry]); + + dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); + prefetch(rx_buf); + skb_copy_to_linear_data(skb, rx_buf, pkt_size); + skb->tail += pkt_size; + skb->len = pkt_size; + dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); + + rtl8169_rx_csum(skb, status); + skb->protocol = eth_type_trans(skb, dev); + + rtl8169_rx_vlan_tag(desc, skb); + + if (skb->pkt_type == PACKET_MULTICAST) + dev->stats.multicast++; + + napi_gro_receive(&tp->napi, skb); + + dev_sw_netstats_rx_add(dev, pkt_size); +release_descriptor: + rtl8169_mark_to_asic(desc); + } + + return count; +} + +static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) +{ + struct rtl8169_private *tp = dev_instance; + u32 status = rtl_get_events(tp); + + if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask)) + return IRQ_NONE; + + if (unlikely(status & SYSErr)) { + rtl8169_pcierr_interrupt(tp->dev); + goto out; + } + + if (status & LinkChg) + phy_mac_interrupt(tp->phydev); + + if (unlikely(status & RxFIFOOver && + tp->mac_version == RTL_GIGA_MAC_VER_11)) { + netif_stop_queue(tp->dev); + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); + } + + if (napi_schedule_prep(&tp->napi)) { + rtl_irq_disable(tp); + __napi_schedule(&tp->napi); + } +out: + rtl_ack_events(tp, status); + + return IRQ_HANDLED; +} + +static void rtl_task(struct work_struct *work) +{ + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, wk.work); + + rtnl_lock(); + + if (!netif_running(tp->dev) || + !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) + goto out_unlock; + + if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) { + rtl_reset_work(tp); + netif_wake_queue(tp->dev); + } +out_unlock: + rtnl_unlock(); +} + +static int rtl8169_poll(struct napi_struct *napi, int budget) +{ + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); + struct net_device *dev = tp->dev; + int work_done; + + rtl_tx(dev, tp, budget); + + work_done = rtl_rx(dev, tp, budget); + + if (work_done < budget && napi_complete_done(napi, work_done)) + rtl_irq_enable(tp); + + return work_done; +} + +static void r8169_phylink_handler(struct net_device *ndev) +{ + struct rtl8169_private *tp = netdev_priv(ndev); + + if (netif_carrier_ok(ndev)) { + rtl_link_chg_patch(tp); + pm_request_resume(&tp->pci_dev->dev); + } else { + pm_runtime_idle(&tp->pci_dev->dev); + } + + if (net_ratelimit()) + phy_print_status(tp->phydev); +} + +static int r8169_phy_connect(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + phy_interface_t phy_mode; + int ret; + + phy_mode = tp->supports_gmii ? 
PHY_INTERFACE_MODE_GMII : + PHY_INTERFACE_MODE_MII; + + ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler, + phy_mode); + if (ret) + return ret; + + if (!tp->supports_gmii) + phy_set_max_speed(phydev, SPEED_100); + + phy_attached_info(phydev); + + return 0; +} + +static void rtl8169_down(struct rtl8169_private *tp) +{ + /* Clear all task flags */ + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); + + phy_stop(tp->phydev); + + rtl8169_update_counters(tp); + + pci_clear_master(tp->pci_dev); + rtl_pci_commit(tp); + + rtl8169_cleanup(tp, true); + + rtl_prepare_power_down(tp); +} + +static void rtl8169_up(struct rtl8169_private *tp) +{ + pci_set_master(tp->pci_dev); + phy_init_hw(tp->phydev); + phy_resume(tp->phydev); + rtl8169_init_phy(tp); + napi_enable(&tp->napi); + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_reset_work(tp); + + phy_start(tp->phydev); +} + +static int rtl8169_close(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + pm_runtime_get_sync(&pdev->dev); + + netif_stop_queue(dev); + rtl8169_down(tp); + rtl8169_rx_clear(tp); + + cancel_work_sync(&tp->wk.work); + + free_irq(pci_irq_vector(pdev, 0), tp); + + phy_disconnect(tp->phydev); + + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + tp->RxDescArray = NULL; + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8169_netpoll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp); +} +#endif + +static int rtl_open(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + unsigned long irqflags; + int retval = -ENOMEM; + + pm_runtime_get_sync(&pdev->dev); + + /* + * Rx and Tx descriptors needs 256 bytes alignment. + * dma_alloc_coherent provides more. + */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto out; + + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_tx_0; + + retval = rtl8169_init_ring(tp); + if (retval < 0) + goto err_free_rx_1; + + rtl_request_firmware(tp); + + irqflags = pci_dev_msi_enabled(pdev) ? 
IRQF_NO_THREAD : IRQF_SHARED; + retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt, + irqflags, dev->name, tp); + if (retval < 0) + goto err_release_fw_2; + + retval = r8169_phy_connect(tp); + if (retval) + goto err_free_irq; + + rtl8169_up(tp); + rtl8169_init_counter_offsets(tp); + netif_start_queue(dev); +out: + pm_runtime_put_sync(&pdev->dev); + + return retval; + +err_free_irq: + free_irq(pci_irq_vector(pdev, 0), tp); +err_release_fw_2: + rtl_release_firmware(tp); + rtl8169_rx_clear(tp); +err_free_rx_1: + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; +err_free_tx_0: + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + goto out; +} + +static void +rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + struct rtl8169_counters *counters = tp->counters; + + pm_runtime_get_noresume(&pdev->dev); + + netdev_stats_to_stats64(stats, &dev->stats); + dev_fetch_sw_netstats(stats, dev->tstats); + + /* + * Fetch additional counter values missing in stats collected by driver + * from tally counters. + */ + if (pm_runtime_active(&pdev->dev)) + rtl8169_update_counters(tp); + + /* + * Subtract values fetched during initalization. + * See rtl8169_init_counter_offsets for a description why we do that. + */ + stats->tx_errors = le64_to_cpu(counters->tx_errors) - + le64_to_cpu(tp->tc_offset.tx_errors); + stats->collisions = le32_to_cpu(counters->tx_multi_collision) - + le32_to_cpu(tp->tc_offset.tx_multi_collision); + stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) - + le16_to_cpu(tp->tc_offset.tx_aborted); + stats->rx_missed_errors = le16_to_cpu(counters->rx_missed) - + le16_to_cpu(tp->tc_offset.rx_missed); + + pm_runtime_put_noidle(&pdev->dev); +} + +static void rtl8169_net_suspend(struct rtl8169_private *tp) +{ + netif_device_detach(tp->dev); + + if (netif_running(tp->dev)) + rtl8169_down(tp); +} + +#ifdef CONFIG_PM + +static int rtl8169_runtime_resume(struct device *dev) +{ + struct rtl8169_private *tp = dev_get_drvdata(dev); + + rtl_rar_set(tp, tp->dev->dev_addr); + __rtl8169_set_wol(tp, tp->saved_wolopts); + + if (tp->TxDescArray) + rtl8169_up(tp); + + netif_device_attach(tp->dev); + + return 0; +} + +static int __maybe_unused rtl8169_suspend(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + rtnl_lock(); + rtl8169_net_suspend(tp); + if (!device_may_wakeup(tp_to_dev(tp))) + clk_disable_unprepare(tp->clk); + rtnl_unlock(); + + return 0; +} + +static int __maybe_unused rtl8169_resume(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + if (!device_may_wakeup(tp_to_dev(tp))) + clk_prepare_enable(tp->clk); + + /* Reportedly at least Asus X453MA truncates packets otherwise */ + if (tp->mac_version == RTL_GIGA_MAC_VER_37) + rtl_init_rxcfg(tp); + + return rtl8169_runtime_resume(device); +} + +static int rtl8169_runtime_suspend(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + if (!tp->TxDescArray) { + netif_device_detach(tp->dev); + return 0; + } + + rtnl_lock(); + __rtl8169_set_wol(tp, WAKE_PHY); + rtl8169_net_suspend(tp); + rtnl_unlock(); + + return 0; +} + +static int rtl8169_runtime_idle(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev)) + 
pm_schedule_suspend(device, 10000); + + return -EBUSY; +} + +static const struct dev_pm_ops rtl8169_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume) + SET_RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume, + rtl8169_runtime_idle) +}; + +#endif /* CONFIG_PM */ + +static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) +{ + /* WoL fails with 8168b when the receiver is disabled. */ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + pci_clear_master(tp->pci_dev); + + RTL_W8(tp, ChipCmd, CmdRxEnb); + rtl_pci_commit(tp); + break; + default: + break; + } +} + +static void rtl_shutdown(struct pci_dev *pdev) +{ + struct rtl8169_private *tp = pci_get_drvdata(pdev); + + rtnl_lock(); + rtl8169_net_suspend(tp); + rtnl_unlock(); + + /* Restore original MAC address */ + rtl_rar_set(tp, tp->dev->perm_addr); + + if (system_state == SYSTEM_POWER_OFF) { + if (tp->saved_wolopts) + rtl_wol_shutdown_quirk(tp); + + pci_wake_from_d3(pdev, tp->saved_wolopts); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +static void rtl_remove_one(struct pci_dev *pdev) +{ + struct rtl8169_private *tp = pci_get_drvdata(pdev); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + unregister_netdev(tp->dev); + + if (tp->dash_type != RTL_DASH_NONE) + rtl8168_driver_stop(tp); + + rtl_release_firmware(tp); + + /* restore original MAC address */ + rtl_rar_set(tp, tp->dev->perm_addr); +} + +static const struct net_device_ops rtl_netdev_ops = { + .ndo_open = rtl_open, + .ndo_stop = rtl8169_close, + .ndo_get_stats64 = rtl8169_get_stats64, + .ndo_start_xmit = rtl8169_start_xmit, + .ndo_features_check = rtl8169_features_check, + .ndo_tx_timeout = rtl8169_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = rtl8169_change_mtu, + .ndo_fix_features = rtl8169_fix_features, + .ndo_set_features = rtl8169_set_features, + .ndo_set_mac_address = rtl_set_mac_address, + .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_set_rx_mode = rtl_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8169_netpoll, +#endif + +}; + +static void rtl_set_irq_mask(struct rtl8169_private *tp) +{ + tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg; + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver; + else if (tp->mac_version == RTL_GIGA_MAC_VER_11) + /* special workaround needed */ + tp->irq_mask |= RxFIFOOver; + else + tp->irq_mask |= RxOverflow; +} + +static int rtl_alloc_irq(struct rtl8169_private *tp) +{ + unsigned int flags; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + rtl_unlock_config_regs(tp); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); + rtl_lock_config_regs(tp); + fallthrough; + case RTL_GIGA_MAC_VER_07 ... 
RTL_GIGA_MAC_VER_17: + flags = PCI_IRQ_LEGACY; + break; + default: + flags = PCI_IRQ_ALL_TYPES; + break; + } + + return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); +} + +static void rtl_read_mac_address(struct rtl8169_private *tp, + u8 mac_addr[ETH_ALEN]) +{ + /* Get MAC address */ + if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) { + u32 value; + + value = rtl_eri_read(tp, 0xe0); + put_unaligned_le32(value, mac_addr); + value = rtl_eri_read(tp, 0xe4); + put_unaligned_le16(value, mac_addr + 4); + } else if (rtl_is_8125(tp)) { + rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP); + } +} + +DECLARE_RTL_COND(rtl_link_list_ready_cond) +{ + return RTL_R8(tp, MCU) & LINK_LIST_RDY; +} + +static void r8168g_wait_ll_share_fifo_ready(struct rtl8169_private *tp) +{ + rtl_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42); +} + +static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + return rtl_readphy(tp, phyreg); +} + +static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr, + int phyreg, u16 val) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + rtl_writephy(tp, phyreg, val); + + return 0; +} + +static int r8169_mdio_register(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + struct mii_bus *new_bus; + int ret; + + new_bus = devm_mdiobus_alloc(&pdev->dev); + if (!new_bus) + return -ENOMEM; + + new_bus->name = "r8169"; + new_bus->priv = tp; + new_bus->parent = &pdev->dev; + new_bus->irq[0] = PHY_MAC_INTERRUPT; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x", + pci_domain_nr(pdev->bus), pci_dev_id(pdev)); + + new_bus->read = r8169_mdio_read_reg; + new_bus->write = r8169_mdio_write_reg; + + ret = devm_mdiobus_register(&pdev->dev, new_bus); + if (ret) + return ret; + + tp->phydev = mdiobus_get_phy(new_bus, 0); + if (!tp->phydev) { + return -ENODEV; + } else if (!tp->phydev->drv) { + /* Most chip versions fail with the genphy driver. + * Therefore ensure that the dedicated PHY driver is loaded. + */ + dev_err(&pdev->dev, "no dedicated PHY driver found for PHY ID 0x%08x, maybe realtek.ko needs to be added to initramfs?\n", + tp->phydev->phy_id); + return -EUNATCH; + } + + tp->phydev->mac_managed_pm = 1; + + phy_support_asym_pause(tp->phydev); + + /* PHY will be woken up in rtl_open() */ + phy_suspend(tp->phydev); + + return 0; +} + +static void rtl_hw_init_8168g(struct rtl8169_private *tp) +{ + rtl_enable_rxdvgate(tp); + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + r8168g_wait_ll_share_fifo_ready(tp); + + r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15)); + r8168g_wait_ll_share_fifo_ready(tp); +} + +static void rtl_hw_init_8125(struct rtl8169_private *tp) +{ + rtl_enable_rxdvgate(tp); + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + r8168g_wait_ll_share_fifo_ready(tp); + + r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0); + r8168_mac_ocp_write(tp, 0xc0a6, 0x0150); + r8168_mac_ocp_write(tp, 0xc01e, 0x5555); + r8168g_wait_ll_share_fifo_ready(tp); +} + +static void rtl_hw_initialize(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_49 ... 
RTL_GIGA_MAC_VER_53: + rtl8168ep_stop_cmac(tp); + fallthrough; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: + rtl_hw_init_8168g(tp); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63: + rtl_hw_init_8125(tp); + break; + default: + break; + } +} + +static int rtl_jumbo_max(struct rtl8169_private *tp) +{ + /* Non-GBit versions don't support jumbo frames */ + if (!tp->supports_gmii) + return 0; + + switch (tp->mac_version) { + /* RTL8169 */ + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + return JUMBO_7K; + /* RTL8168b */ + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + return JUMBO_4K; + /* RTL8168c */ + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: + return JUMBO_6K; + default: + return JUMBO_9K; + } +} + +static void rtl_disable_clk(void *data) +{ + clk_disable_unprepare(data); +} + +static int rtl_get_ether_clk(struct rtl8169_private *tp) +{ + struct device *d = tp_to_dev(tp); + struct clk *clk; + int rc; + + clk = devm_clk_get(d, "ether_clk"); + if (IS_ERR(clk)) { + rc = PTR_ERR(clk); + if (rc == -ENOENT) + /* clk-core allows NULL (for suspend / resume) */ + rc = 0; + else + dev_err_probe(d, rc, "failed to get clk\n"); + } else { + tp->clk = clk; + rc = clk_prepare_enable(clk); + if (rc) + dev_err(d, "failed to enable clk: %d\n", rc); + else + rc = devm_add_action_or_reset(d, rtl_disable_clk, clk); + } + + return rc; +} + +static void rtl_init_mac_address(struct rtl8169_private *tp) +{ + struct net_device *dev = tp->dev; + u8 *mac_addr = dev->dev_addr; + int rc; + + rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr); + if (!rc) + goto done; + + rtl_read_mac_address(tp, mac_addr); + if (is_valid_ether_addr(mac_addr)) + goto done; + + rtl_read_mac_from_reg(tp, mac_addr, MAC0); + if (is_valid_ether_addr(mac_addr)) + goto done; + + eth_hw_addr_random(dev); + dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n"); +done: + rtl_rar_set(tp, mac_addr); +} + +static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct rtl8169_private *tp; + int jumbo_max, region, rc; + enum mac_version chipset; + struct net_device *dev; + u16 xid; + + dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp)); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &rtl_netdev_ops; + tp = netdev_priv(dev); + tp->dev = dev; + tp->pci_dev = pdev; + tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1; + tp->eee_adv = -1; + tp->ocp_base = OCP_STD_PHY_BASE; + + dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev, + struct pcpu_sw_netstats); + if (!dev->tstats) + return -ENOMEM; + + /* Get the *optional* external "ether_clk" used on some boards */ + rc = rtl_get_ether_clk(tp); + if (rc) + return rc; + + /* Disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users. + */ + rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | + PCIE_LINK_STATE_L1); + tp->aspm_manageable = !rc; + + /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ + rc = pcim_enable_device(pdev); + if (rc < 0) { + dev_err(&pdev->dev, "enable failure\n"); + return rc; + } + + if (pcim_set_mwi(pdev) < 0) + dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n"); + + /* use first MMIO region */ + region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1; + if (region < 0) { + dev_err(&pdev->dev, "no MMIO resource found\n"); + return -ENODEV; + } + + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { + dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n"); + return -ENODEV; + } + + rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME); + if (rc < 0) { + dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); + return rc; + } + + tp->mmio_addr = pcim_iomap_table(pdev)[region]; + + xid = (RTL_R32(tp, TxConfig) >> 20) & 0xfcf; + + /* Identify chip attached to board */ + chipset = rtl8169_get_mac_version(xid, tp->supports_gmii); + if (chipset == RTL_GIGA_MAC_NONE) { + dev_err(&pdev->dev, "unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n", xid); + return -ENODEV; + } + + tp->mac_version = chipset; + + tp->dash_type = rtl_check_dash(tp); + + tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK; + + if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) + dev->features |= NETIF_F_HIGHDMA; + + rtl_init_rxcfg(tp); + + rtl8169_irq_mask_and_ack(tp); + + rtl_hw_initialize(tp); + + rtl_hw_reset(tp); + + rc = rtl_alloc_irq(tp); + if (rc < 0) { + dev_err(&pdev->dev, "Can't allocate interrupt\n"); + return rc; + } + + INIT_WORK(&tp->wk.work, rtl_task); + + rtl_init_mac_address(tp); + + dev->ethtool_ops = &rtl8169_ethtool_ops; + + netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT); + + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. + */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + /* Disallow toggling */ + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (rtl_chip_supports_csum_v2(tp)) + dev->hw_features |= NETIF_F_IPV6_CSUM; + + dev->features |= dev->hw_features; + + /* There has been a number of reports that using SG/TSO results in + * tx timeouts. However for a lot of people SG/TSO works fine. + * Therefore disable both features by default, but allow users to + * enable them. Use at own risk! 
+ */ + if (rtl_chip_supports_csum_v2(tp)) { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; + dev->gso_max_size = RTL_GSO_MAX_SIZE_V2; + dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2; + } else { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO; + dev->gso_max_size = RTL_GSO_MAX_SIZE_V1; + dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1; + } + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + /* configure chip for default features */ + rtl8169_set_features(dev, dev->features); + + rtl_set_d3_pll_down(tp, true); + + jumbo_max = rtl_jumbo_max(tp); + if (jumbo_max) + dev->max_mtu = jumbo_max; + + rtl_set_irq_mask(tp); + + tp->fw_name = rtl_chip_infos[chipset].fw_name; + + tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), + &tp->counters_phys_addr, + GFP_KERNEL); + if (!tp->counters) + return -ENOMEM; + + pci_set_drvdata(pdev, tp); + + rc = r8169_mdio_register(tp); + if (rc) + return rc; + + rc = register_netdev(dev); + if (rc) + return rc; + + netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n", + rtl_chip_infos[chipset].name, dev->dev_addr, xid, + pci_irq_vector(pdev, 0)); + + if (jumbo_max) + netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n", + jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ? + "ok" : "ko"); + + if (tp->dash_type != RTL_DASH_NONE) { + netdev_info(dev, "DASH enabled\n"); + rtl8168_driver_start(tp); + } + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +static struct pci_driver rtl8169_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = rtl8169_pci_tbl, + .probe = rtl_init_one, + .remove = rtl_remove_one, + .shutdown = rtl_shutdown, + .driver.pm = pm_ptr(&rtl8169_pm_ops), +}; + +module_pci_driver(rtl8169_pci_driver); diff --git a/devices/r8169/r8169_main-5.15-ethercat.c b/devices/r8169/r8169_main-5.15-ethercat.c new file mode 100644 index 00000000..f2bc5230 --- /dev/null +++ b/devices/r8169/r8169_main-5.15-ethercat.c @@ -0,0 +1,5574 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * r8169.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/phy.h> +#include <linux/if_vlan.h> +#include <linux/in.h> +#include <linux/io.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/pm_runtime.h> +#include <linux/bitfield.h> +#include <linux/prefetch.h> +#include <linux/ipv6.h> +#include <asm/unaligned.h> +#include <net/ip6_checksum.h> + +#include "r8169-5.15-ethercat.h" +#include "r8169_firmware-5.15-ethercat.h" + + +#include "../ecdev.h" + + + +#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" +#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" +#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" +#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" +#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" +#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" +#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" +#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" +#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" +#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" +#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw" +#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw" +#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" +#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" +#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" +#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw" +#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" +#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw" +#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" +#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" +#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw" +#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw" + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. */ +#define MC_FILTER_LIMIT 32 + +#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ +#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ + +#define R8169_REGS_SIZE 256 +#define R8169_RX_BUF_SIZE (SZ_16K - 1) +#define NUM_TX_DESC 256 /* Number of Tx descriptor registers */ +#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */ +#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) +#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + +#define OCP_STD_PHY_BASE 0xa400 + +#define RTL_CFG_NO_GBIT 1 + +/* write/read MMIO register */ +#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) +#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) +#define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg)) +#define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg)) +#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg)) +#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg)) + +#define JUMBO_4K (4 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) + +static const struct { + const char *name; + const char *fw_name; +} rtl_chip_infos[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_02] = {"RTL8169s" }, + [RTL_GIGA_MAC_VER_03] = {"RTL8110s" }, + [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" }, + [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" }, + [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" }, + /* PCI-E devices. 
*/ + [RTL_GIGA_MAC_VER_07] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_08] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" }, + [RTL_GIGA_MAC_VER_10] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_13] = {"RTL8101e/RTL8100e" }, + [RTL_GIGA_MAC_VER_14] = {"RTL8401" }, + [RTL_GIGA_MAC_VER_16] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1}, + [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2}, + [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1}, + [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2}, + [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3}, + [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1}, + [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2}, + [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 }, + [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 }, + [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1}, + [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2}, + [RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" }, + [RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3}, + [RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2}, + [RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 }, + [RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1}, + [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2}, + [RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1}, + [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2}, + [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3}, + [RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", }, + [RTL_GIGA_MAC_VER_60] = {"RTL8125A" }, + [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3}, + /* reserve 62 for CFG_METHOD_4 in the vendor driver */ + [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2}, +}; + +static const struct pci_device_id rtl8169_pci_tbl[] = { + { PCI_VDEVICE(REALTEK, 0x2502) }, + { PCI_VDEVICE(REALTEK, 0x2600) }, + { PCI_VDEVICE(REALTEK, 0x8129) }, + { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT }, + { PCI_VDEVICE(REALTEK, 0x8161) }, + { PCI_VDEVICE(REALTEK, 0x8162) }, + { PCI_VDEVICE(REALTEK, 0x8167) }, + { PCI_VDEVICE(REALTEK, 0x8168) }, + { PCI_VDEVICE(NCUBE, 0x8168) }, + { PCI_VDEVICE(REALTEK, 0x8169) }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 }, + { PCI_VDEVICE(DLINK, 0x4300) }, + { PCI_VDEVICE(DLINK, 0x4302) }, + { PCI_VDEVICE(AT, 0xc107) }, + { PCI_VDEVICE(USR, 0x0116) }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 }, + { 0x0001, 0x8168, PCI_ANY_ID, 0x2410 }, + { PCI_VDEVICE(REALTEK, 0x8125) }, + { PCI_VDEVICE(REALTEK, 0x3000) }, + {} +}; + +enum rtl_registers { + MAC0 = 0, /* 
Ethernet hardware address. */ + MAC4 = 4, + MAR0 = 8, /* Multicast filter. */ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3c, + IntrStatus = 0x3e, + + TxConfig = 0x40, +#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */ +#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */ + + RxConfig = 0x44, +#define RX128_INT_EN (1 << 15) /* 8111c and later */ +#define RX_MULTI_EN (1 << 14) /* 8111c only */ +#define RXCFG_FIFO_SHIFT 13 + /* No threshold before first PCI xfer */ +#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) +#define RX_EARLY_OFF (1 << 11) +#define RXCFG_DMA_SHIFT 8 + /* Unlimited maximum PCI burst. */ +#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) + + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, +#define PME_SIGNAL (1 << 5) /* 8168c and later */ + + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + PHYAR = 0x60, + PHYstatus = 0x6c, + RxMaxSize = 0xda, + CPlusCmd = 0xe0, + IntrMitigate = 0xe2, + +#define RTL_COALESCE_TX_USECS GENMASK(15, 12) +#define RTL_COALESCE_TX_FRAMES GENMASK(11, 8) +#define RTL_COALESCE_RX_USECS GENMASK(7, 4) +#define RTL_COALESCE_RX_FRAMES GENMASK(3, 0) + +#define RTL_COALESCE_T_MAX 0x0fU +#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_T_MAX * 4) + + RxDescAddrLow = 0xe4, + RxDescAddrHigh = 0xe8, + EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ + +#define NoEarlyTx 0x3f /* Max value : no early transmit. */ + + MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */ + +#define TxPacketMax (8064 >> 7) +#define EarlySize 0x27 + + FuncEvent = 0xf0, + FuncEventMask = 0xf4, + FuncPresetState = 0xf8, + IBCR0 = 0xf8, + IBCR2 = 0xf9, + IBIMR0 = 0xfa, + IBISR0 = 0xfb, + FuncForceEvent = 0xfc, +}; + +enum rtl8168_8101_registers { + CSIDR = 0x64, + CSIAR = 0x68, +#define CSIAR_FLAG 0x80000000 +#define CSIAR_WRITE_CMD 0x80000000 +#define CSIAR_BYTE_ENABLE 0x0000f000 +#define CSIAR_ADDR_MASK 0x00000fff + PMCH = 0x6f, +#define D3COLD_NO_PLL_DOWN BIT(7) +#define D3HOT_NO_PLL_DOWN BIT(6) +#define D3_NO_PLL_DOWN (BIT(7) | BIT(6)) + EPHYAR = 0x80, +#define EPHYAR_FLAG 0x80000000 +#define EPHYAR_WRITE_CMD 0x80000000 +#define EPHYAR_REG_MASK 0x1f +#define EPHYAR_REG_SHIFT 16 +#define EPHYAR_DATA_MASK 0xffff + DLLPR = 0xd0, +#define PFM_EN (1 << 6) +#define TX_10M_PS_EN (1 << 7) + DBG_REG = 0xd1, +#define FIX_NAK_1 (1 << 4) +#define FIX_NAK_2 (1 << 3) + TWSI = 0xd2, + MCU = 0xd3, +#define NOW_IS_OOB (1 << 7) +#define TX_EMPTY (1 << 5) +#define RX_EMPTY (1 << 4) +#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY) +#define EN_NDP (1 << 3) +#define EN_OOB_RESET (1 << 2) +#define LINK_LIST_RDY (1 << 1) + EFUSEAR = 0xdc, +#define EFUSEAR_FLAG 0x80000000 +#define EFUSEAR_WRITE_CMD 0x80000000 +#define EFUSEAR_READ_CMD 0x00000000 +#define EFUSEAR_REG_MASK 0x03ff +#define EFUSEAR_REG_SHIFT 8 +#define EFUSEAR_DATA_MASK 0xff + MISC_1 = 0xf2, +#define PFM_D3COLD_EN (1 << 6) +}; + +enum rtl8168_registers { + LED_FREQ = 0x1a, + EEE_LED = 0x1b, + ERIDR = 0x70, + ERIAR = 0x74, +#define ERIAR_FLAG 0x80000000 +#define ERIAR_WRITE_CMD 0x80000000 +#define ERIAR_READ_CMD 0x00000000 +#define ERIAR_ADDR_BYTE_ALIGN 4 +#define ERIAR_TYPE_SHIFT 16 +#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) +#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) +#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_OOB (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_MASK_SHIFT 12 +#define ERIAR_MASK_0001 (0x1 << 
ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) + EPHY_RXER_NUM = 0x7c, + OCPDR = 0xb0, /* OCP GPHY access */ +#define OCPDR_WRITE_CMD 0x80000000 +#define OCPDR_READ_CMD 0x00000000 +#define OCPDR_REG_MASK 0x7f +#define OCPDR_GPHY_REG_SHIFT 16 +#define OCPDR_DATA_MASK 0xffff + OCPAR = 0xb4, +#define OCPAR_FLAG 0x80000000 +#define OCPAR_GPHY_WRITE_CMD 0x8000f060 +#define OCPAR_GPHY_READ_CMD 0x0000f060 + GPHY_OCP = 0xb8, + RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ + MISC = 0xf0, /* 8168e only. */ +#define TXPLA_RST (1 << 29) +#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */ +#define PWM_EN (1 << 22) +#define RXDV_GATED_EN (1 << 19) +#define EARLY_TALLY_EN (1 << 16) +}; + +enum rtl8125_registers { + IntrMask_8125 = 0x38, + IntrStatus_8125 = 0x3c, + TxPoll_8125 = 0x90, + MAC0_BKP = 0x19e0, + EEE_TXIDLE_TIMER_8125 = 0x6048, +}; + +#define RX_VLAN_INNER_8125 BIT(22) +#define RX_VLAN_OUTER_8125 BIT(23) +#define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125) + +#define RX_FETCH_DFLT_8125 (8 << 27) + +enum rtl_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxOverflow = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* TXPoll register p.5 */ + HPQ = 0x80, /* Poll cmd on the high prio queue */ + NPQ = 0x40, /* Poll cmd on the low prio queue */ + FSWInt = 0x01, /* Forced software interrupt */ + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xc0, + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, +#define RX_CONFIG_ACCEPT_ERR_MASK 0x30 + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +#define RX_CONFIG_ACCEPT_OK_MASK 0x0f +#define RX_CONFIG_ACCEPT_MASK 0x3f + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + /* Config1 register p.24 */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register p. 25 */ + ClkReqEn = (1 << 7), /* Clock Request Enable */ + MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ + PCI_Clock_66MHz = 0x01, + PCI_Clock_33MHz = 0x00, + + /* Config3 register p.25 */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Rdy_to_L23 = (1 << 1), /* L23 Enable */ + Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* 8168 only. 
Reserved in the 8168b */ + + /* Config5 register p.27 */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + Spi_en = (1 << 3), + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + ASPM_en = (1 << 0), /* ASPM enable */ + + /* CPlusCmd p.31 */ + EnableBist = (1 << 15), // 8168 8101 + Mac_dbgo_oe = (1 << 14), // 8168 8101 + EnAnaPLL = (1 << 14), // 8169 + Normal_mode = (1 << 13), // unused + Force_half_dup = (1 << 12), // 8168 8101 + Force_rxflow_en = (1 << 11), // 8168 8101 + Force_txflow_en = (1 << 10), // 8168 8101 + Cxpl_dbg_sel = (1 << 9), // 8168 8101 + ASF = (1 << 8), // 8168 8101 + PktCntrDisable = (1 << 7), // 8168 8101 + Mac_dbgo_sel = 0x001c, // 8168 + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + PCIDAC = (1 << 4), + PCIMulRW = (1 << 3), +#define INTT_MASK GENMASK(1, 0) +#define CPCMD_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK) + + /* rtl8169_PHYstatus */ + TBI_Enable = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* ResetCounterCommand */ + CounterReset = 0x1, + + /* DumpCounterCommand */ + CounterDump = 0x8, + + /* magic enable v2 */ + MagicPacket_v2 = (1 << 16), /* Wake up when receives a Magic Packet */ +}; + +enum rtl_desc_bit { + /* First doubleword. */ + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ +}; + +/* Generic case. */ +enum rtl_tx_desc_bit { + /* First doubleword. */ + TD_LSO = (1 << 27), /* Large Send Offload */ +#define TD_MSS_MAX 0x07ffu /* MSS value */ + + /* Second doubleword. */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ +}; + +/* 8169, 8168b and 810x except 8102e. */ +enum rtl_tx_desc_bit_0 { + /* First doubleword. */ +#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */ + TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */ + TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */ + TD0_IP_CS = (1 << 18), /* Calculate IP checksum */ +}; + +/* 8102e, 8168c and beyond. */ +enum rtl_tx_desc_bit_1 { + /* First doubleword. */ + TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */ + TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */ +#define GTTCPHO_SHIFT 18 +#define GTTCPHO_MAX 0x7f + + /* Second doubleword. 
*/ +#define TCPHO_SHIFT 18 +#define TCPHO_MAX 0x3ff +#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ + TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */ + TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */ + TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */ + TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */ +}; + +enum rtl_rx_desc_bit { + /* Rx private */ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 0/2 */ + +#define RxProtoUDP (PID1) +#define RxProtoTCP (PID0) +#define RxProtoIP (PID1 | PID0) +#define RxProtoMask RxProtoIP + + IPFail = (1 << 16), /* IP checksum failed */ + UDPFail = (1 << 15), /* UDP/IP checksum failed */ + TCPFail = (1 << 14), /* TCP/IP checksum failed */ + +#define RxCSFailMask (IPFail | UDPFail | TCPFail) + + RxVlanTag = (1 << 16), /* VLAN tag available */ +}; + +#define RTL_GSO_MAX_SIZE_V1 32000 +#define RTL_GSO_MAX_SEGS_V1 24 +#define RTL_GSO_MAX_SIZE_V2 64000 +#define RTL_GSO_MAX_SEGS_V2 64 + +struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; +}; + +struct rtl8169_tc_offsets { + bool inited; + __le64 tx_errors; + __le32 tx_multi_collision; + __le16 tx_aborted; + __le16 rx_missed; +}; + +enum rtl_flag { + RTL_FLAG_TASK_ENABLED = 0, + RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_MAX +}; + +enum rtl_dash_type { + RTL_DASH_NONE, + RTL_DASH_DP, + RTL_DASH_EP, +}; + +struct rtl8169_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; + struct net_device *dev; + struct phy_device *phydev; + struct napi_struct napi; + enum mac_version mac_version; + enum rtl_dash_type dash_type; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. 
*/ + u32 dirty_tx; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + u16 cp_cmd; + u32 irq_mask; + struct clk *clk; + + struct { + DECLARE_BITMAP(flags, RTL_FLAG_MAX); + struct work_struct work; + } wk; + + unsigned supports_gmii:1; + unsigned aspm_manageable:1; + dma_addr_t counters_phys_addr; + struct rtl8169_counters *counters; + struct rtl8169_tc_offsets tc_offset; + u32 saved_wolopts; + int eee_adv; + + const char *fw_name; + struct rtl_fw *rtl_fw; + + u32 ocp_base; + + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +typedef void (*rtl_generic_fct)(struct rtl8169_private *tp); + +MODULE_AUTHOR("Realtek and the Linux r8169 crew "); +MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver (EtherCAT enabled)"); +MODULE_SOFTDEP("pre: realtek"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE_8168D_1); +MODULE_FIRMWARE(FIRMWARE_8168D_2); +MODULE_FIRMWARE(FIRMWARE_8168E_1); +MODULE_FIRMWARE(FIRMWARE_8168E_2); +MODULE_FIRMWARE(FIRMWARE_8168E_3); +MODULE_FIRMWARE(FIRMWARE_8105E_1); +MODULE_FIRMWARE(FIRMWARE_8168F_1); +MODULE_FIRMWARE(FIRMWARE_8168F_2); +MODULE_FIRMWARE(FIRMWARE_8402_1); +MODULE_FIRMWARE(FIRMWARE_8411_1); +MODULE_FIRMWARE(FIRMWARE_8411_2); +MODULE_FIRMWARE(FIRMWARE_8106E_1); +MODULE_FIRMWARE(FIRMWARE_8106E_2); +MODULE_FIRMWARE(FIRMWARE_8168G_2); +MODULE_FIRMWARE(FIRMWARE_8168G_3); +MODULE_FIRMWARE(FIRMWARE_8168H_1); +MODULE_FIRMWARE(FIRMWARE_8168H_2); +MODULE_FIRMWARE(FIRMWARE_8168FP_3); +MODULE_FIRMWARE(FIRMWARE_8107E_1); +MODULE_FIRMWARE(FIRMWARE_8107E_2); +MODULE_FIRMWARE(FIRMWARE_8125A_3); +MODULE_FIRMWARE(FIRMWARE_8125B_2); + +static inline struct device *tp_to_dev(struct rtl8169_private *tp) +{ + return &tp->pci_dev->dev; +} + +static void rtl_lock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Lock); +} + +static void rtl_unlock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); +} + +static void rtl_pci_commit(struct rtl8169_private *tp) +{ + /* Read an arbitrary register to commit a preceding PCI write */ + RTL_R8(tp, ChipCmd); +} + +static bool rtl_is_8125(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_60; +} + +static bool rtl_is_8168evl_up(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_39 && + tp->mac_version <= RTL_GIGA_MAC_VER_53; +} + +static bool rtl_supports_eee(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_37 && + tp->mac_version != RTL_GIGA_MAC_VER_39; +} + +static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + mac[i] = RTL_R8(tp, reg + i); +} + +struct rtl_cond { + bool (*check)(struct rtl8169_private *); + const char *msg; +}; + +static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c, + unsigned long usecs, int n, bool high) +{ + int i; + + for (i = 0; i < n; i++) { + if (c->check(tp) == high) + return true; + fsleep(usecs); + } + + if (net_ratelimit()) + netdev_err(tp->dev, "%s == %d (loop: %d, delay: %lu).\n", + c->msg, !high, n, usecs); + return false; +} + +static bool rtl_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + 
unsigned long d, int n) +{ + return rtl_loop_wait(tp, c, d, n, true); +} + +static bool rtl_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned long d, int n) +{ + return rtl_loop_wait(tp, c, d, n, false); +} + +#define DECLARE_RTL_COND(name) \ +static bool name ## _check(struct rtl8169_private *); \ + \ +static const struct rtl_cond name = { \ + .check = name ## _check, \ + .msg = #name \ +}; \ + \ +static bool name ## _check(struct rtl8169_private *tp) + +static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type) +{ + /* based on RTL8168FP_OOBMAC_BASE in vendor driver */ + if (type == ERIAR_OOB && + (tp->mac_version == RTL_GIGA_MAC_VER_52 || + tp->mac_version == RTL_GIGA_MAC_VER_53)) + *cmd |= 0xf70 << 18; +} + +DECLARE_RTL_COND(rtl_eriar_cond) +{ + return RTL_R32(tp, ERIAR) & ERIAR_FLAG; +} + +static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val, int type) +{ + u32 cmd = ERIAR_WRITE_CMD | type | mask | addr; + + if (WARN(addr & 3 || !mask, "addr: 0x%x, mask: 0x%08x\n", addr, mask)) + return; + + RTL_W32(tp, ERIDR, val); + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); + + rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); +} + +static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val) +{ + _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC); +} + +static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type) +{ + u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr; + + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); + + return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? + RTL_R32(tp, ERIDR) : ~0; +} + +static u32 rtl_eri_read(struct rtl8169_private *tp, int addr) +{ + return _rtl_eri_read(tp, addr, ERIAR_EXGMAC); +} + +static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m) +{ + u32 val = rtl_eri_read(tp, addr); + + rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p); +} + +static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p) +{ + rtl_w0w1_eri(tp, addr, p, 0); +} + +static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m) +{ + rtl_w0w1_eri(tp, addr, 0, m); +} + +static bool rtl_ocp_reg_failure(u32 reg) +{ + return WARN_ONCE(reg & 0xffff0001, "Invalid ocp reg %x!\n", reg); +} + +DECLARE_RTL_COND(rtl_ocp_gphy_cond) +{ + return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG; +} + +static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(reg)) + return; + + RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); + + rtl_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10); +} + +static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(reg)) + return 0; + + RTL_W32(tp, GPHY_OCP, reg << 15); + + return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ? 
+ (RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT; +} + +static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(reg)) + return; + + RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data); +} + +static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(reg)) + return 0; + + RTL_W32(tp, OCPDR, reg << 15); + + return RTL_R32(tp, OCPDR); +} + +static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask, + u16 set) +{ + u16 data = r8168_mac_ocp_read(tp, reg); + + r8168_mac_ocp_write(tp, reg, (data & ~mask) | set); +} + +/* Work around a hw issue with RTL8168g PHY, the quirk disables + * PHY MCU interrupts before PHY power-down. + */ +static void rtl8168g_phy_suspend_quirk(struct rtl8169_private *tp, int value) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_49: + if (value & BMCR_RESET || !(value & BMCR_PDOWN)) + rtl_eri_set_bits(tp, 0x1a8, 0xfc000000); + else + rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000); + break; + default: + break; + } +}; + +static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE; + return; + } + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + if (tp->ocp_base == OCP_STD_PHY_BASE && reg == MII_BMCR) + rtl8168g_phy_suspend_quirk(tp, value); + + r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value); +} + +static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) +{ + if (reg == 0x1f) + return tp->ocp_base == OCP_STD_PHY_BASE ? 0 : tp->ocp_base >> 4; + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2); +} + +static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value << 4; + return; + } + + r8168_mac_ocp_write(tp, tp->ocp_base + reg, value); +} + +static int mac_mcu_read(struct rtl8169_private *tp, int reg) +{ + return r8168_mac_ocp_read(tp, tp->ocp_base + reg); +} + +DECLARE_RTL_COND(rtl_phyar_cond) +{ + return RTL_R32(tp, PHYAR) & 0x80000000; +} + +static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); + + rtl_loop_wait_low(tp, &rtl_phyar_cond, 25, 20); + /* + * According to hardware specs a 20us delay is required after write + * complete indication, but before sending next command. + */ + udelay(20); +} + +static int r8169_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16); + + value = rtl_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ? + RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT; + + /* + * According to hardware specs a 20us delay is required after read + * complete indication, but before sending next command. 
+ */ + udelay(20); + + return value; +} + +DECLARE_RTL_COND(rtl_ocpar_cond) +{ + return RTL_R32(tp, OCPAR) & OCPAR_FLAG; +} + +static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) +{ + RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); + RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); +} + +static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_1_mdio_access(tp, reg, + OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK)); +} + +static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) +{ + r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); + + mdelay(1); + RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? + RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT; +} + +#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 + +static void r8168dp_2_mdio_start(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_stop(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_2_mdio_start(tp); + + r8169_mdio_write(tp, reg, value); + + r8168dp_2_mdio_stop(tp); +} + +static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + /* Work around issue with chip reporting wrong PHY ID */ + if (reg == MII_PHYSID2) + return 0xc912; + + r8168dp_2_mdio_start(tp); + + value = r8169_mdio_read(tp, reg); + + r8168dp_2_mdio_stop(tp); + + return value; +} + +static void rtl_writephy(struct rtl8169_private *tp, int location, int val) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + r8168dp_1_mdio_write(tp, location, val); + break; + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + r8168dp_2_mdio_write(tp, location, val); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + r8168g_mdio_write(tp, location, val); + break; + default: + r8169_mdio_write(tp, location, val); + break; + } +} + +static int rtl_readphy(struct rtl8169_private *tp, int location) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + return r8168dp_1_mdio_read(tp, location); + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_2_mdio_read(tp, location); + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + return r8168g_mdio_read(tp, location); + default: + return r8169_mdio_read(tp, location); + } +} + +DECLARE_RTL_COND(rtl_ephyar_cond) +{ + return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; +} + +static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) +{ + RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + rtl_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100); + + udelay(10); +} + +static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + return rtl_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ? + RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0; +} + +static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg) +{ + RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff)); + return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? 
+ RTL_R32(tp, OCPDR) : ~0; +} + +static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u16 reg) +{ + return _rtl_eri_read(tp, reg, ERIAR_OOB); +} + +static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + RTL_W32(tp, OCPDR, data); + RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + rtl_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); +} + +static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + _rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, + data, ERIAR_OOB); +} + +static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd) +{ + rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd); + + r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001); +} + +#define OOB_CMD_RESET 0x00 +#define OOB_CMD_DRIVER_START 0x05 +#define OOB_CMD_DRIVER_STOP 0x06 + +static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) +{ + return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10; +} + +DECLARE_RTL_COND(rtl_dp_ocp_read_cond) +{ + u16 reg; + + reg = rtl8168_get_ocp_reg(tp); + + return r8168dp_ocp_read(tp, reg) & 0x00000800; +} + +DECLARE_RTL_COND(rtl_ep_ocp_read_cond) +{ + return r8168ep_ocp_read(tp, 0x124) & 0x00000001; +} + +DECLARE_RTL_COND(rtl_ocp_tx_cond) +{ + return RTL_R8(tp, IBISR0) & 0x20; +} + +static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) +{ + RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01); + rtl_loop_wait_high(tp, &rtl_ocp_tx_cond, 50000, 2000); + RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20); + RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01); +} + +static void rtl8168dp_driver_start(struct rtl8169_private *tp) +{ + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START); + rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10); +} + +static void rtl8168ep_driver_start(struct rtl8169_private *tp) +{ + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); + rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10); +} + +static void rtl8168_driver_start(struct rtl8169_private *tp) +{ + if (tp->dash_type == RTL_DASH_DP) + rtl8168dp_driver_start(tp); + else + rtl8168ep_driver_start(tp); +} + +static void rtl8168dp_driver_stop(struct rtl8169_private *tp) +{ + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP); + rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10); +} + +static void rtl8168ep_driver_stop(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); + rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10); +} + +static void rtl8168_driver_stop(struct rtl8169_private *tp) +{ + if (tp->dash_type == RTL_DASH_DP) + rtl8168dp_driver_stop(tp); + else + rtl8168ep_driver_stop(tp); +} + +static bool r8168dp_check_dash(struct rtl8169_private *tp) +{ + u16 reg = rtl8168_get_ocp_reg(tp); + + return r8168dp_ocp_read(tp, reg) & BIT(15); +} + +static bool r8168ep_check_dash(struct rtl8169_private *tp) +{ + return r8168ep_ocp_read(tp, 0x128) & BIT(0); +} + +static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE; + case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_53: + return r8168ep_check_dash(tp) ? 
RTL_DASH_EP : RTL_DASH_NONE; + default: + return RTL_DASH_NONE; + } +} + +static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: + if (enable) + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN); + else + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | D3_NO_PLL_DOWN); + break; + default: + break; + } +} + +static void rtl_reset_packet_filter(struct rtl8169_private *tp) +{ + rtl_eri_clear_bits(tp, 0xdc, BIT(0)); + rtl_eri_set_bits(tp, 0xdc, BIT(0)); +} + +DECLARE_RTL_COND(rtl_efusear_cond) +{ + return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG; +} + +u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + return rtl_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ? + RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0; +} + +static u32 rtl_get_events(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + return RTL_R32(tp, IntrStatus_8125); + else + return RTL_R16(tp, IntrStatus); +} + +static void rtl_ack_events(struct rtl8169_private *tp, u32 bits) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrStatus_8125, bits); + else + RTL_W16(tp, IntrStatus, bits); +} + +static void rtl_irq_disable(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, 0); + else + RTL_W16(tp, IntrMask, 0); +} + +static void rtl_irq_enable(struct rtl8169_private *tp) +{ + if (tp->ecdev) + return; + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, tp->irq_mask); + else + RTL_W16(tp, IntrMask, tp->irq_mask); +} + +static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) +{ + rtl_irq_disable(tp); + rtl_ack_events(tp, 0xffffffff); + rtl_pci_commit(tp); +} + +static void rtl_link_chg_patch(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + + if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else if (phydev->speed == SPEED_100) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); + } + rtl_reset_packet_filter(tp); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); + } + } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { + if (phydev->speed == SPEED_10) { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a); + } else { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); + } + } +} + +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + wol->supported = WAKE_ANY; + wol->wolopts = tp->saved_wolopts; +} + +static void 
__rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) +{ + static const struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_PHY, Config3, LinkUp }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake }, + { WAKE_MAGIC, Config3, MagicPacket } + }; + unsigned int i, tmp = ARRAY_SIZE(cfg); + u8 options; + + rtl_unlock_config_regs(tp); + + if (rtl_is_8168evl_up(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2); + else + rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2); + } else if (rtl_is_8125(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0)); + else + r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0); + } + + for (i = 0; i < tmp; i++) { + options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(tp, cfg[i].reg, options); + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + options = RTL_R8(tp, Config1) & ~PMEnable; + if (wolopts) + options |= PMEnable; + RTL_W8(tp, Config1, options); + break; + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: + options = RTL_R8(tp, Config2) & ~PME_SIGNAL; + if (wolopts) + options |= PME_SIGNAL; + RTL_W8(tp, Config2, options); + break; + default: + break; + } + + rtl_lock_config_regs(tp); + + device_set_wakeup_enable(tp_to_dev(tp), wolopts); + rtl_set_d3_pll_down(tp, !wolopts); + tp->dev->wol_enabled = wolopts ? 1 : 0; +} + +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (wol->wolopts & ~WAKE_ANY) + return -EINVAL; + + tp->saved_wolopts = wol->wolopts; + __rtl8169_set_wol(tp, tp->saved_wolopts); + + return 0; +} + +static void rtl8169_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl_fw *rtl_fw = tp->rtl_fw; + + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + if (rtl_fw) + strlcpy(info->fw_version, rtl_fw->version, + sizeof(info->fw_version)); +} + +static int rtl8169_get_regs_len(struct net_device *dev) +{ + return R8169_REGS_SIZE; +} + +static netdev_features_t rtl8169_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > TD_MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + + if (dev->mtu > ETH_DATA_LEN && + tp->mac_version > RTL_GIGA_MAC_VER_06) + features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO); + + return features; +} + +static void rtl_set_rx_config_features(struct rtl8169_private *tp, + netdev_features_t features) +{ + u32 rx_config = RTL_R32(tp, RxConfig); + + if (features & NETIF_F_RXALL) + rx_config |= RX_CONFIG_ACCEPT_ERR_MASK; + else + rx_config &= ~RX_CONFIG_ACCEPT_ERR_MASK; + + if (rtl_is_8125(tp)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rx_config |= RX_VLAN_8125; + else + rx_config &= ~RX_VLAN_8125; + } + + RTL_W32(tp, RxConfig, rx_config); +} + +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_set_rx_config_features(tp, features); + + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + if 
(!rtl_is_8125(tp)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + } + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_pci_commit(tp); + + return 0; +} + +static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) +{ + return (skb_vlan_tag_present(skb)) ? + TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; +} + +static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); +} + +static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 __iomem *data = tp->mmio_addr; + u32 *dw = p; + int i; + + for (i = 0; i < R8169_REGS_SIZE; i += 4) + memcpy_fromio(dw++, data++, 4); +} + +static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", +}; + +static int rtl8169_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8169_gstrings); + default: + return -EOPNOTSUPP; + } +} + +DECLARE_RTL_COND(rtl_counters_cond) +{ + return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump); +} + +static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd) +{ + u32 cmd = lower_32_bits(tp->counters_phys_addr); + + RTL_W32(tp, CounterAddrHigh, upper_32_bits(tp->counters_phys_addr)); + rtl_pci_commit(tp); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | counter_cmd); + + rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); +} + +static void rtl8169_update_counters(struct rtl8169_private *tp) +{ + u8 val = RTL_R8(tp, ChipCmd); + + /* + * Some chips are unable to dump tally counters when the receiver + * is disabled. If 0xff chip may be in a PCI power-save state. + */ + if (val & CmdRxEnb && val != 0xff) + rtl8169_do_counters(tp, CounterDump); +} + +static void rtl8169_init_counter_offsets(struct rtl8169_private *tp) +{ + struct rtl8169_counters *counters = tp->counters; + + /* + * rtl8169_init_counter_offsets is called from rtl_open. On chip + * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only + * reset by a power cycle, while the counter values collected by the + * driver are reset at every driver unload/load cycle. + * + * To make sure the HW values returned by @get_stats64 match the SW + * values, we collect the initial values at first open(*) and use them + * as offsets to normalize the values returned by @get_stats64. + * + * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one + * for the reason stated in rtl8169_update_counters; CmdRxEnb is only + * set at open time by rtl_hw_start. 
+ */ + + if (tp->tc_offset.inited) + return; + + if (tp->mac_version >= RTL_GIGA_MAC_VER_19) { + rtl8169_do_counters(tp, CounterReset); + } else { + rtl8169_update_counters(tp); + tp->tc_offset.tx_errors = counters->tx_errors; + tp->tc_offset.tx_multi_collision = counters->tx_multi_collision; + tp->tc_offset.tx_aborted = counters->tx_aborted; + tp->tc_offset.rx_missed = counters->rx_missed; + } + + tp->tc_offset.inited = true; +} + +static void rtl8169_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_counters *counters; + + counters = tp->counters; + rtl8169_update_counters(tp); + + data[0] = le64_to_cpu(counters->tx_packets); + data[1] = le64_to_cpu(counters->rx_packets); + data[2] = le64_to_cpu(counters->tx_errors); + data[3] = le32_to_cpu(counters->rx_errors); + data[4] = le16_to_cpu(counters->rx_missed); + data[5] = le16_to_cpu(counters->align_errors); + data[6] = le32_to_cpu(counters->tx_one_collision); + data[7] = le32_to_cpu(counters->tx_multi_collision); + data[8] = le64_to_cpu(counters->rx_unicast); + data[9] = le64_to_cpu(counters->rx_broadcast); + data[10] = le32_to_cpu(counters->rx_multicast); + data[11] = le16_to_cpu(counters->tx_aborted); + data[12] = le16_to_cpu(counters->tx_underun); +} + +static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings)); + break; + } +} + +/* + * Interrupt coalescing + * + * > 1 - the availability of the IntrMitigate (0xe2) register through the + * > 8169, 8168 and 810x line of chipsets + * + * 8169, 8168, and 8136(810x) serial chipsets support it. + * + * > 2 - the Tx timer unit at gigabit speed + * + * The unit of the timer depends on both the speed and the setting of CPlusCmd + * (0xe0) bit 1 and bit 0. 
+ * + * For 8169 + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 320ns 2.56us 40.96us + * 0 1 2.56us 20.48us 327.7us + * 1 0 5.12us 40.96us 655.4us + * 1 1 10.24us 81.92us 1.31ms + * + * For the other + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 5us 2.56us 40.96us + * 0 1 40us 20.48us 327.7us + * 1 0 80us 40.96us 655.4us + * 1 1 160us 81.92us 1.31ms + */ + +/* rx/tx scale factors for all CPlusCmd[0:1] cases */ +struct rtl_coalesce_info { + u32 speed; + u32 scale_nsecs[4]; +}; + +/* produce array with base delay *1, *8, *8*2, *8*2*2 */ +#define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) } + +static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = { + { SPEED_1000, COALESCE_DELAY(320) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, + { 0 }, +}; + +static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = { + { SPEED_1000, COALESCE_DELAY(5000) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, + { 0 }, +}; +#undef COALESCE_DELAY + +/* get rx/tx scale vector corresponding to current speed */ +static const struct rtl_coalesce_info * +rtl_coalesce_info(struct rtl8169_private *tp) +{ + const struct rtl_coalesce_info *ci; + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + ci = rtl_coalesce_info_8169; + else + ci = rtl_coalesce_info_8168_8136; + + /* if speed is unknown assume highest one */ + if (tp->phydev->speed == SPEED_UNKNOWN) + return ci; + + for (; ci->speed; ci++) { + if (tp->phydev->speed == ci->speed) + return ci; + } + + return ERR_PTR(-ELNRNG); +} + +static int rtl_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct rtl8169_private *tp = netdev_priv(dev); + const struct rtl_coalesce_info *ci; + u32 scale, c_us, c_fr; + u16 intrmit; + + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + + memset(ec, 0, sizeof(*ec)); + + /* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */ + ci = rtl_coalesce_info(tp); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + scale = ci->scale_nsecs[tp->cp_cmd & INTT_MASK]; + + intrmit = RTL_R16(tp, IntrMitigate); + + c_us = FIELD_GET(RTL_COALESCE_TX_USECS, intrmit); + ec->tx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000); + + c_fr = FIELD_GET(RTL_COALESCE_TX_FRAMES, intrmit); + /* ethtool_coalesce states usecs and max_frames must not both be 0 */ + ec->tx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1; + + c_us = FIELD_GET(RTL_COALESCE_RX_USECS, intrmit); + ec->rx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000); + + c_fr = FIELD_GET(RTL_COALESCE_RX_FRAMES, intrmit); + ec->rx_max_coalesced_frames = (c_us || c_fr) ? 
c_fr * 4 : 1; + + return 0; +} + +/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, usec) */ +static int rtl_coalesce_choose_scale(struct rtl8169_private *tp, u32 usec, + u16 *cp01) +{ + const struct rtl_coalesce_info *ci; + u16 i; + + ci = rtl_coalesce_info(tp); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + for (i = 0; i < 4; i++) { + if (usec <= ci->scale_nsecs[i] * RTL_COALESCE_T_MAX / 1000U) { + *cp01 = i; + return ci->scale_nsecs[i]; + } + } + + return -ERANGE; +} + +static int rtl_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 tx_fr = ec->tx_max_coalesced_frames; + u32 rx_fr = ec->rx_max_coalesced_frames; + u32 coal_usec_max, units; + u16 w = 0, cp01 = 0; + int scale; + + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + + if (rx_fr > RTL_COALESCE_FRAME_MAX || tx_fr > RTL_COALESCE_FRAME_MAX) + return -ERANGE; + + coal_usec_max = max(ec->rx_coalesce_usecs, ec->tx_coalesce_usecs); + scale = rtl_coalesce_choose_scale(tp, coal_usec_max, &cp01); + if (scale < 0) + return scale; + + /* Accept max_frames=1 we returned in rtl_get_coalesce. Accept it + * not only when usecs=0 because of e.g. the following scenario: + * + * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX) + * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1 + * - then user does `ethtool -C eth0 rx-usecs 100` + * + * Since ethtool sends to kernel whole ethtool_coalesce settings, + * if we want to ignore rx_frames then it has to be set to 0. + */ + if (rx_fr == 1) + rx_fr = 0; + if (tx_fr == 1) + tx_fr = 0; + + /* HW requires time limit to be set if frame limit is set */ + if ((tx_fr && !ec->tx_coalesce_usecs) || + (rx_fr && !ec->rx_coalesce_usecs)) + return -EINVAL; + + w |= FIELD_PREP(RTL_COALESCE_TX_FRAMES, DIV_ROUND_UP(tx_fr, 4)); + w |= FIELD_PREP(RTL_COALESCE_RX_FRAMES, DIV_ROUND_UP(rx_fr, 4)); + + units = DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000U, scale); + w |= FIELD_PREP(RTL_COALESCE_TX_USECS, units); + units = DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000U, scale); + w |= FIELD_PREP(RTL_COALESCE_RX_USECS, units); + + RTL_W16(tp, IntrMitigate, w); + + /* Meaning of PktCntrDisable bit changed from RTL8168e-vl */ + if (rtl_is_8168evl_up(tp)) { + if (!rx_fr && !tx_fr) + /* disable packet counter */ + tp->cp_cmd |= PktCntrDisable; + else + tp->cp_cmd &= ~PktCntrDisable; + } + + tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_pci_commit(tp); + + return 0; +} + +static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + + return phy_ethtool_get_eee(tp->phydev, data); +} + +static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + + ret = phy_ethtool_set_eee(tp->phydev, data); + + if (!ret) + tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN, + MDIO_AN_EEE_ADV); + return ret; +} + +static void rtl8169_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *data) +{ + data->rx_max_pending = NUM_RX_DESC; + data->rx_pending = NUM_RX_DESC; + data->tx_max_pending = NUM_TX_DESC; + data->tx_pending = NUM_TX_DESC; +} + +static void rtl8169_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *data) +{ + struct rtl8169_private *tp = 
netdev_priv(dev); + bool tx_pause, rx_pause; + + phy_get_pause(tp->phydev, &tx_pause, &rx_pause); + + data->autoneg = tp->phydev->autoneg; + data->tx_pause = tx_pause ? 1 : 0; + data->rx_pause = rx_pause ? 1 : 0; +} + +static int rtl8169_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > ETH_DATA_LEN) + return -EOPNOTSUPP; + + phy_set_asym_pause(tp->phydev, data->rx_pause, data->tx_pause); + + return 0; +} + +static const struct ethtool_ops rtl8169_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .get_drvinfo = rtl8169_get_drvinfo, + .get_regs_len = rtl8169_get_regs_len, + .get_link = ethtool_op_get_link, + .get_coalesce = rtl_get_coalesce, + .set_coalesce = rtl_set_coalesce, + .get_regs = rtl8169_get_regs, + .get_wol = rtl8169_get_wol, + .set_wol = rtl8169_set_wol, + .get_strings = rtl8169_get_strings, + .get_sset_count = rtl8169_get_sset_count, + .get_ethtool_stats = rtl8169_get_ethtool_stats, + .get_ts_info = ethtool_op_get_ts_info, + .nway_reset = phy_ethtool_nway_reset, + .get_eee = rtl8169_get_eee, + .set_eee = rtl8169_set_eee, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_ringparam = rtl8169_get_ringparam, + .get_pauseparam = rtl8169_get_pauseparam, + .set_pauseparam = rtl8169_set_pauseparam, +}; + +static void rtl_enable_eee(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + int adv; + + /* respect EEE advertisement the user may have set */ + if (tp->eee_adv >= 0) + adv = tp->eee_adv; + else + adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); + + if (adv >= 0) + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv); +} + +static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) +{ + /* + * The driver currently handles the 8168Bf and the 8168Be identically + * but they can be identified more specifically through the test below + * if needed: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * + * Same thing for the 8101Eb and the 8101Ec: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + */ + static const struct rtl_mac_info { + u16 mask; + u16 val; + enum mac_version ver; + } mac_info[] = { + /* 8125B family. */ + { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 }, + + /* 8125A family. */ + { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 }, + { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 }, + + /* RTL8117 */ + { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 }, + { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 }, + + /* 8168EP family. */ + { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 }, + { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 }, + { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 }, + + /* 8168H family. */ + { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 }, + { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 }, + + /* 8168G family. */ + { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 }, + { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 }, + { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 }, + { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 }, + + /* 8168F family. */ + { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 }, + { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 }, + { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 }, + + /* 8168E family. */ + { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 }, + { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 }, + { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 }, + + /* 8168D family. */ + { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 }, + { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 }, + + /* 8168DP family. 
*/ + /* It seems this early RTL8168dp version never made it to + * the wild. Let's see whether somebody complains, if not + * we'll remove support for this chip version completely. + * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 }, + */ + { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 }, + { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 }, + + /* 8168C family. */ + { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 }, + { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 }, + { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 }, + { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 }, + { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 }, + { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 }, + { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 }, + + /* 8168B family. */ + { 0x7cf, 0x380, RTL_GIGA_MAC_VER_12 }, + { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 }, + { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, + + /* 8101 family. */ + { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 }, + { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 }, + { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 }, + { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 }, + { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 }, + { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 }, + { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 }, + { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 }, + { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 }, + /* FIXME: where did these entries come from ? -- FR + * Not even r8101 vendor driver knows these id's, + * so let's disable detection for now. -- HK + * { 0xfc8, 0x388, RTL_GIGA_MAC_VER_13 }, + * { 0xfc8, 0x308, RTL_GIGA_MAC_VER_13 }, + */ + + /* 8110 family. */ + { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 }, + { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 }, + { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 }, + { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 }, + { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 }, + + /* Catch-all */ + { 0x000, 0x000, RTL_GIGA_MAC_NONE } + }; + const struct rtl_mac_info *p = mac_info; + enum mac_version ver; + + while ((xid & p->mask) != p->val) + p++; + ver = p->ver; + + if (ver != RTL_GIGA_MAC_NONE && !gmii) { + if (ver == RTL_GIGA_MAC_VER_42) + ver = RTL_GIGA_MAC_VER_43; + else if (ver == RTL_GIGA_MAC_VER_45) + ver = RTL_GIGA_MAC_VER_47; + else if (ver == RTL_GIGA_MAC_VER_46) + ver = RTL_GIGA_MAC_VER_48; + } + + return ver; +} + +static void rtl_release_firmware(struct rtl8169_private *tp) +{ + if (tp->rtl_fw) { + rtl_fw_release_firmware(tp->rtl_fw); + kfree(tp->rtl_fw); + tp->rtl_fw = NULL; + } +} + +void r8169_apply_firmware(struct rtl8169_private *tp) +{ + int val; + + /* TODO: release firmware if rtl_fw_write_firmware signals failure. */ + if (tp->rtl_fw) { + rtl_fw_write_firmware(tp, tp->rtl_fw); + /* At least one firmware doesn't reset tp->ocp_base. 
*/ + tp->ocp_base = OCP_STD_PHY_BASE; + + /* PHY soft reset may still be in progress */ + phy_read_poll_timeout(tp->phydev, MII_BMCR, val, + !(val & BMCR_RESET), + 50000, 600000, true); + } +} + +static void rtl8168_config_eee_mac(struct rtl8169_private *tp) +{ + /* Adjust EEE LED frequency */ + if (tp->mac_version != RTL_GIGA_MAC_VER_38) + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + + rtl_eri_set_bits(tp, 0x1b0, 0x0003); +} + +static void rtl8125a_config_eee_mac(struct rtl8169_private *tp) +{ + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); + r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1)); +} + +static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp) +{ + RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20); +} + +static void rtl8125b_config_eee_mac(struct rtl8169_private *tp) +{ + rtl8125_set_eee_txidle_timer(tp); + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); +} + +static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr) +{ + rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr)); + rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, get_unaligned_le16(addr + 4)); + rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, get_unaligned_le16(addr) << 16); + rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, get_unaligned_le32(addr + 2)); +} + +u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp) +{ + u16 data1, data2, ioffset; + + r8168_mac_ocp_write(tp, 0xdd02, 0x807d); + data1 = r8168_mac_ocp_read(tp, 0xdd02); + data2 = r8168_mac_ocp_read(tp, 0xdd00); + + ioffset = (data2 >> 1) & 0x7ff8; + ioffset |= data2 & 0x0007; + if (data1 & BIT(7)) + ioffset |= BIT(15); + + return ioffset; +} + +static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) +{ + set_bit(flag, tp->wk.flags); + if (!tp->ecdev) + schedule_work(&tp->wk.work); +} + +static void rtl8169_init_phy(struct rtl8169_private *tp) +{ + r8169_hw_phy_config(tp, tp->phydev, tp->mac_version); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); + /* set undocumented MAC Reg C+CR Offset 0x82h */ + RTL_W8(tp, 0x82, 0x01); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_05 && + tp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE && + tp->pci_dev->subsystem_device == 0xe000) + phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b); + + /* We may have called phy_speed_down before */ + phy_speed_up(tp->phydev); + + if (rtl_supports_eee(tp)) + rtl_enable_eee(tp); + + genphy_soft_reset(tp->phydev); +} + +static void rtl_rar_set(struct rtl8169_private *tp, const u8 *addr) +{ + rtl_unlock_config_regs(tp); + + RTL_W32(tp, MAC4, get_unaligned_le16(addr + 4)); + rtl_pci_commit(tp); + + RTL_W32(tp, MAC0, get_unaligned_le32(addr)); + rtl_pci_commit(tp); + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + rtl_rar_exgmac_set(tp, addr); + + rtl_lock_config_regs(tp); +} + +static int rtl_set_mac_address(struct net_device *dev, void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + ret = eth_mac_addr(dev, p); + if (ret) + return ret; + + rtl_rar_set(tp, dev->dev_addr); + + return 0; +} + +static void rtl_wol_enable_rx(struct rtl8169_private *tp) +{ + if (tp->mac_version >= RTL_GIGA_MAC_VER_25) + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | + AcceptBroadcast | AcceptMulticast | AcceptMyPhys); +} + +static void rtl_prepare_power_down(struct rtl8169_private *tp) +{ + if (tp->dash_type != RTL_DASH_NONE) + return; + + if (tp->mac_version 
== RTL_GIGA_MAC_VER_32 || + tp->mac_version == RTL_GIGA_MAC_VER_33) + rtl_ephy_write(tp, 0x19, 0xff64); + + if (device_may_wakeup(tp_to_dev(tp))) { + phy_speed_down(tp->phydev, false); + rtl_wol_enable_rx(tp); + } +} + +static void rtl_init_rxcfg(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: + RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63: + RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST); + break; + default: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); + break; + } +} + +static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) +{ + tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; +} + +static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); +} + +static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); +} + +static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); +} + +static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); +} + +static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x24); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); +} + +static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x3f); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); +} + +static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); +} + +static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); +} + +static void rtl_jumbo_config(struct rtl8169_private *tp) +{ + bool jumbo = tp->dev->mtu > ETH_DATA_LEN; + int readrq = 4096; + + rtl_unlock_config_regs(tp); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + if (jumbo) { + readrq = 512; + r8168b_1_hw_jumbo_enable(tp); + } else { + r8168b_1_hw_jumbo_disable(tp); + } + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26: + if (jumbo) { + readrq = 512; + r8168c_hw_jumbo_enable(tp); + } else { + r8168c_hw_jumbo_disable(tp); + } + break; + case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: + if (jumbo) + r8168dp_hw_jumbo_enable(tp); + else + r8168dp_hw_jumbo_disable(tp); + break; + case RTL_GIGA_MAC_VER_31 ... 
RTL_GIGA_MAC_VER_33: + if (jumbo) + r8168e_hw_jumbo_enable(tp); + else + r8168e_hw_jumbo_disable(tp); + break; + default: + break; + } + rtl_lock_config_regs(tp); + + if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii) + pcie_set_readrq(tp->pci_dev, readrq); + + /* Chip doesn't support pause in jumbo mode */ + if (jumbo) { + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + tp->phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + tp->phydev->advertising); + phy_start_aneg(tp->phydev); + } +} + +DECLARE_RTL_COND(rtl_chipcmd_cond) +{ + return RTL_R8(tp, ChipCmd) & CmdReset; +} + +static void rtl_hw_reset(struct rtl8169_private *tp) +{ + RTL_W8(tp, ChipCmd, CmdReset); + + rtl_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); +} + +static void rtl_request_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw; + + /* firmware loaded already or no firmware available */ + if (tp->rtl_fw || !tp->fw_name) + return; + + rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); + if (!rtl_fw) + return; + + rtl_fw->phy_write = rtl_writephy; + rtl_fw->phy_read = rtl_readphy; + rtl_fw->mac_mcu_write = mac_mcu_write; + rtl_fw->mac_mcu_read = mac_mcu_read; + rtl_fw->fw_name = tp->fw_name; + rtl_fw->dev = tp_to_dev(tp); + + if (rtl_fw_request_firmware(rtl_fw)) + kfree(rtl_fw); + else + tp->rtl_fw = rtl_fw; +} + +static void rtl_rx_close(struct rtl8169_private *tp) +{ + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK); +} + +DECLARE_RTL_COND(rtl_npq_cond) +{ + return RTL_R8(tp, TxPoll) & NPQ; +} + +DECLARE_RTL_COND(rtl_txcfg_empty_cond) +{ + return RTL_R32(tp, TxConfig) & TXCFG_EMPTY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond) +{ + return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond_2) +{ + /* IntrMitigate has new functionality on RTL8125 */ + return (RTL_R16(tp, IntrMitigate) & 0x0103) == 0x0103; +} + +static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61: + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + break; + case RTL_GIGA_MAC_VER_63: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42); + break; + default: + break; + } +} + +static void rtl_enable_rxdvgate(struct rtl8169_private *tp) +{ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN); + fsleep(2000); + rtl_wait_txrx_fifo_empty(tp); +} + +static void rtl_set_tx_config_registers(struct rtl8169_private *tp) +{ + u32 val = TX_DMA_BURST << TxDMAShift | + InterFrameGap << TxInterFrameGapShift; + + if (rtl_is_8168evl_up(tp)) + val |= TXCFG_AUTO_FIFO; + + RTL_W32(tp, TxConfig, val); +} + +static void rtl_set_rx_max_size(struct rtl8169_private *tp) +{ + /* Low hurts. Let's disable the filtering. */ + RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1); +} + +static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) +{ + /* + * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh + * register to be written before TxDescAddrLow to work. + * Switching from MMIO to I/O access fixes the issue as well. 
+ */ + RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); + RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); + RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); +} + +static void rtl8169_set_magic_reg(struct rtl8169_private *tp) +{ + u32 val; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + val = 0x000fff00; + else if (tp->mac_version == RTL_GIGA_MAC_VER_06) + val = 0x00ffff00; + else + return; + + if (RTL_R8(tp, Config2) & PCI_Clock_66MHz) + val |= 0xff; + + RTL_W32(tp, 0x7c, val); +} + +static void rtl_set_rx_mode(struct net_device *dev) +{ + u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast; + /* Multicast hash filter */ + u32 mc_filter[2] = { 0xffffffff, 0xffffffff }; + struct rtl8169_private *tp = netdev_priv(dev); + u32 tmp; + + if (dev->flags & IFF_PROMISC) { + rx_mode |= AcceptAllPhys; + } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT || + dev->flags & IFF_ALLMULTI || + tp->mac_version == RTL_GIGA_MAC_VER_35) { + /* accept all multicasts */ + } else if (netdev_mc_empty(dev)) { + rx_mode &= ~AcceptMulticast; + } else { + struct netdev_hw_addr *ha; + + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + u32 bit_nr = eth_hw_addr_crc(ha) >> 26; + mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31); + } + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + tmp = mc_filter[0]; + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(tmp); + } + } + + RTL_W32(tp, MAR0 + 4, mc_filter[1]); + RTL_W32(tp, MAR0 + 0, mc_filter[0]); + + tmp = RTL_R32(tp, RxConfig); + RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_OK_MASK) | rx_mode); +} + +DECLARE_RTL_COND(rtl_csiar_cond) +{ + return RTL_R32(tp, CSIAR) & CSIAR_FLAG; +} + +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIDR, value); + RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE | func << 16); + + rtl_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 | + CSIAR_BYTE_ENABLE); + + return rtl_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? + RTL_R32(tp, CSIDR) : ~0; +} + +static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val) +{ + struct pci_dev *pdev = tp->pci_dev; + u32 csi; + + /* According to Realtek the value at config space address 0x070f + * controls the L0s/L1 entrance latency. We try standard ECAM access + * first and if it fails fall back to CSI. + * bit 0..2: L0: 0 = 1us, 1 = 2us .. 6 = 7us, 7 = 7us (no typo) + * bit 3..5: L1: 0 = 1us, 1 = 2us .. 
6 = 64us, 7 = 64us + */ + if (pdev->cfg_size > 0x070f && + pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL) + return; + + netdev_notice_once(tp->dev, + "No native access to PCI extended config space, falling back to CSI\n"); + csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; + rtl_csi_write(tp, 0x070c, csi | val << 24); +} + +static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) +{ + /* L0 7us, L1 16us */ + rtl_set_aspm_entry_latency(tp, 0x27); +} + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +static void __rtl_ephy_init(struct rtl8169_private *tp, + const struct ephy_info *e, int len) +{ + u16 w; + + while (len-- > 0) { + w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits; + rtl_ephy_write(tp, e->offset, w); + e++; + } +} + +#define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a)) + +static void rtl_disable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_enable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp) +{ + /* work around an issue when PCI reset occurs during L2/L3 state */ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23); +} + +static void rtl_enable_exit_l1(struct rtl8169_private *tp) +{ + /* Bits control which events trigger ASPM L1 exit: + * Bit 12: rxdv + * Bit 11: ltr_msg + * Bit 10: txdma_poll + * Bit 9: xadm + * Bit 8: pktavi + * Bit 7: txpla + */ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: + rtl_eri_set_bits(tp, 0xd4, 0x1f00); + break; + case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38: + rtl_eri_set_bits(tp, 0xd4, 0x0c00); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + rtl_eri_set_bits(tp, 0xd4, 0x1f80); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63: + r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80); + break; + default: + break; + } +} + +static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) +{ + /* Don't enable ASPM in the chip if OS can't control ASPM */ + if (enable && tp->aspm_manageable) { + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); + } else { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + } + + udelay(10); +} + +static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat, + u16 tx_stat, u16 rx_dyn, u16 tx_dyn) +{ + /* Usage of dynamic vs. static FIFO is controlled by bit + * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known. 
+ */ + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn); +} + +static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp, + u8 low, u8 high) +{ + /* FIFO thresholds for pause flow control */ + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high); +} + +static void rtl_hw_start_8168b(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168cp[] = { + { 0x01, 0, 0x0001 }, + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0042 }, + { 0x06, 0x0080, 0x0000 }, + { 0x07, 0, 0x2000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168cp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + /* Magic. */ + RTL_W8(tp, DBG_REG, 0x20); +} + +static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_1[] = { + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0002 }, + { 0x06, 0x0080, 0x0000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); + + rtl_ephy_init(tp, e_info_8168c_1); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_2[] = { + { 0x01, 0, 0x0001 }, + { 0x03, 0x0400, 0x0020 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168c_2); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168d(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168d_4[] = { + { 0x0b, 0x0000, 0x0048 }, + { 0x19, 0x0020, 0x0050 }, + { 0x0c, 0x0100, 0x0020 }, + { 0x10, 0x0004, 0x0000 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168d_4); + + rtl_enable_clock_request(tp); +} + +static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_1[] = { + { 0x00, 0x0200, 0x0100 }, + { 0x00, 0x0000, 0x0004 }, + { 0x06, 0x0002, 0x0001 }, + { 0x06, 0x0000, 0x0030 }, + { 0x07, 0x0000, 0x2000 }, + { 0x00, 0x0000, 0x0020 }, + { 0x03, 0x5800, 0x2000 }, + { 0x03, 0x0000, 0x0001 }, + { 0x01, 0x0800, 0x1000 }, + { 0x07, 0x0000, 0x4000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x19, 0xffff, 0xfe6c }, + { 0x0a, 0x0000, 0x0040 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_1); + + rtl_disable_clock_request(tp); + + /* Reset tx FIFO pointer */ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST); + + RTL_W8(tp, Config5, RTL_R8(tp, 
Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_2[] = { + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0004 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_2); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_eri_set_bits(tp, 0x1d0, BIT(1)); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168f(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_set_bits(tp, 0x1d0, BIT(4) | BIT(1)); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl8168_config_eee_mac(tp); +} + +static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x08, 0x0001, 0x0002 }, + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_hw_start_8168f(tp); + + rtl_ephy_init(tp, e_info_8168f_1); +} + +static void rtl_hw_start_8411(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_hw_start_8168f(tp); + rtl_pcie_state_l2l3_disable(tp); + + rtl_ephy_init(tp, e_info_8168f_1); +} + +static void rtl_hw_start_8168g(struct rtl8169_private *tp) +{ + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_1[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8000, 0x0000 } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_1); + 
rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_2[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x19, 0xffff, 0x7c00 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x0d, 0xffff, 0x1666 }, + { 0x00, 0xffff, 0x10a3 }, + { 0x06, 0xffff, 0xf050 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x4000, 0x0000 }, + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_2); +} + +static void rtl_hw_start_8411_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8411_2[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x37d0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x00, 0x0000, 0x0080 }, + { 0x06, 0x0000, 0x0010 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x0000, 0x4000 }, + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8411_2); + + /* The following Realtek-provided magic fixes an issue with the RX unit + * getting confused after the PHY having been powered-down. + */ + r8168_mac_ocp_write(tp, 0xFC28, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2A, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0000); + r8168_mac_ocp_write(tp, 0xFC30, 0x0000); + r8168_mac_ocp_write(tp, 0xFC32, 0x0000); + r8168_mac_ocp_write(tp, 0xFC34, 0x0000); + r8168_mac_ocp_write(tp, 0xFC36, 0x0000); + mdelay(3); + r8168_mac_ocp_write(tp, 0xFC26, 0x0000); + + r8168_mac_ocp_write(tp, 0xF800, 0xE008); + r8168_mac_ocp_write(tp, 0xF802, 0xE00A); + r8168_mac_ocp_write(tp, 0xF804, 0xE00C); + r8168_mac_ocp_write(tp, 0xF806, 0xE00E); + r8168_mac_ocp_write(tp, 0xF808, 0xE027); + r8168_mac_ocp_write(tp, 0xF80A, 0xE04F); + r8168_mac_ocp_write(tp, 0xF80C, 0xE05E); + r8168_mac_ocp_write(tp, 0xF80E, 0xE065); + r8168_mac_ocp_write(tp, 0xF810, 0xC602); + r8168_mac_ocp_write(tp, 0xF812, 0xBE00); + r8168_mac_ocp_write(tp, 0xF814, 0x0000); + r8168_mac_ocp_write(tp, 0xF816, 0xC502); + r8168_mac_ocp_write(tp, 0xF818, 0xBD00); + r8168_mac_ocp_write(tp, 0xF81A, 0x074C); + r8168_mac_ocp_write(tp, 0xF81C, 0xC302); + r8168_mac_ocp_write(tp, 0xF81E, 0xBB00); + r8168_mac_ocp_write(tp, 0xF820, 0x080A); + r8168_mac_ocp_write(tp, 0xF822, 0x6420); + r8168_mac_ocp_write(tp, 0xF824, 0x48C2); + r8168_mac_ocp_write(tp, 0xF826, 0x8C20); + r8168_mac_ocp_write(tp, 0xF828, 0xC516); + r8168_mac_ocp_write(tp, 0xF82A, 0x64A4); + r8168_mac_ocp_write(tp, 0xF82C, 0x49C0); + r8168_mac_ocp_write(tp, 0xF82E, 0xF009); + r8168_mac_ocp_write(tp, 0xF830, 0x74A2); + r8168_mac_ocp_write(tp, 0xF832, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF834, 0x74A0); + r8168_mac_ocp_write(tp, 0xF836, 0xC50E); + r8168_mac_ocp_write(tp, 0xF838, 0x9CA2); + r8168_mac_ocp_write(tp, 0xF83A, 0x1C11); + r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF83E, 0xE006); + r8168_mac_ocp_write(tp, 0xF840, 0x74F8); + r8168_mac_ocp_write(tp, 0xF842, 0x48C4); + r8168_mac_ocp_write(tp, 0xF844, 0x8CF8); + r8168_mac_ocp_write(tp, 0xF846, 0xC404); + r8168_mac_ocp_write(tp, 0xF848, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84A, 0xC403); + r8168_mac_ocp_write(tp, 0xF84C, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2); + r8168_mac_ocp_write(tp, 0xF850, 0x0C0A); + r8168_mac_ocp_write(tp, 0xF852, 0xE434); + r8168_mac_ocp_write(tp, 0xF854, 0xD3C0); + 
r8168_mac_ocp_write(tp, 0xF856, 0x49D9); + r8168_mac_ocp_write(tp, 0xF858, 0xF01F); + r8168_mac_ocp_write(tp, 0xF85A, 0xC526); + r8168_mac_ocp_write(tp, 0xF85C, 0x64A5); + r8168_mac_ocp_write(tp, 0xF85E, 0x1400); + r8168_mac_ocp_write(tp, 0xF860, 0xF007); + r8168_mac_ocp_write(tp, 0xF862, 0x0C01); + r8168_mac_ocp_write(tp, 0xF864, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF866, 0x1C15); + r8168_mac_ocp_write(tp, 0xF868, 0xC51B); + r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF86C, 0xE013); + r8168_mac_ocp_write(tp, 0xF86E, 0xC519); + r8168_mac_ocp_write(tp, 0xF870, 0x74A0); + r8168_mac_ocp_write(tp, 0xF872, 0x48C4); + r8168_mac_ocp_write(tp, 0xF874, 0x8CA0); + r8168_mac_ocp_write(tp, 0xF876, 0xC516); + r8168_mac_ocp_write(tp, 0xF878, 0x74A4); + r8168_mac_ocp_write(tp, 0xF87A, 0x48C8); + r8168_mac_ocp_write(tp, 0xF87C, 0x48CA); + r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4); + r8168_mac_ocp_write(tp, 0xF880, 0xC512); + r8168_mac_ocp_write(tp, 0xF882, 0x1B00); + r8168_mac_ocp_write(tp, 0xF884, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF886, 0x1B1C); + r8168_mac_ocp_write(tp, 0xF888, 0x483F); + r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2); + r8168_mac_ocp_write(tp, 0xF88C, 0x1B04); + r8168_mac_ocp_write(tp, 0xF88E, 0xC508); + r8168_mac_ocp_write(tp, 0xF890, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF892, 0xC505); + r8168_mac_ocp_write(tp, 0xF894, 0xBD00); + r8168_mac_ocp_write(tp, 0xF896, 0xC502); + r8168_mac_ocp_write(tp, 0xF898, 0xBD00); + r8168_mac_ocp_write(tp, 0xF89A, 0x0300); + r8168_mac_ocp_write(tp, 0xF89C, 0x051E); + r8168_mac_ocp_write(tp, 0xF89E, 0xE434); + r8168_mac_ocp_write(tp, 0xF8A0, 0xE018); + r8168_mac_ocp_write(tp, 0xF8A2, 0xE092); + r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20); + r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F); + r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4); + r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3); + r8168_mac_ocp_write(tp, 0xF8AE, 0xF007); + r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0); + r8168_mac_ocp_write(tp, 0xF8B2, 0xF103); + r8168_mac_ocp_write(tp, 0xF8B4, 0xC607); + r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8B8, 0xC606); + r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8BC, 0xC602); + r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C); + r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28); + r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C); + r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00); + r8168_mac_ocp_write(tp, 0xF8C8, 0xC707); + r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00); + r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2); + r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1); + r8168_mac_ocp_write(tp, 0xF8D0, 0xC502); + r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA); + r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0); + r8168_mac_ocp_write(tp, 0xF8D8, 0xC502); + r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8DC, 0x0132); + + r8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + r8168_mac_ocp_write(tp, 0xFC2A, 0x0743); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0801); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9); + r8168_mac_ocp_write(tp, 0xFC30, 0x02FD); + r8168_mac_ocp_write(tp, 0xFC32, 0x0C25); + r8168_mac_ocp_write(tp, 0xFC34, 0x00A9); + r8168_mac_ocp_write(tp, 0xFC36, 0x012D); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168h_1[] = { + { 0x1e, 0x0800, 0x0001 }, + { 0x1d, 0x0000, 0x0800 }, + { 0x05, 0xffff, 0x2089 }, + { 0x06, 0xffff, 0x5881 }, + { 
0x04, 0xffff, 0x854a }, + { 0x01, 0xffff, 0x068b } + }; + int rg_saw_cnt; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168h_1); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xdc, 0x001c); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = 16000000/rg_saw_cnt; + sw_cnt_1ms_ini &= 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008); + r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_1[] = { + { 0x00, 0xffff, 0x10ab }, + { 0x06, 0xffff, 0xf030 }, + { 0x08, 0xffff, 0x2006 }, + { 0x0d, 0xffff, 0x1666 }, + { 0x0c, 0x3ff0, 0x0000 } + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_1); + + rtl_hw_start_8168ep(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_2[] = { + { 0x00, 0xffff, 0x10a3 }, + { 0x19, 0xffff, 0xfc00 }, + { 0x1e, 0xffff, 0x20ea } + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_2); + + rtl_hw_start_8168ep(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_3[] = { + { 0x00, 0x0000, 0x0080 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + }; + + /* disable aspm and clock request before 
access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_3); + + rtl_hw_start_8168ep(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8117(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8117[] = { + { 0x19, 0x0040, 0x1100 }, + { 0x59, 0x0040, 0x1100 }, + }; + int rg_saw_cnt; + + rtl8168ep_stop_cmac(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8117); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, 0x0010); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_write(tp, 0xea80, 0x0003); + r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + /* firmware is for MAC only */ + r8169_apply_firmware(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8102e_1[] = { + { 0x01, 0, 0x6e65 }, + { 0x02, 0, 0x091f }, + { 0x03, 0, 0xc2f9 }, + { 0x06, 0, 0xafb5 }, + { 0x07, 0, 0x0e00 }, + { 0x19, 0, 0xec80 }, + { 0x01, 0, 0x2e65 }, + { 0x01, 0, 0x6e65 } + }; + u8 cfg1; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, FIX_NAK_1); + + RTL_W8(tp, Config1, + LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + cfg1 = RTL_R8(tp, Config1); + if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) + RTL_W8(tp, Config1, cfg1 & ~LEDS0); + + rtl_ephy_init(tp, e_info_8102e_1); +} + +static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8102e_2(tp); + + rtl_ephy_write(tp, 0x03, 0xc2f9); +} + +static void rtl_hw_start_8401(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8401[] = { + { 0x01, 0xffff, 0x6fe5 }, + { 0x03, 0xffff, 0x0599 }, + { 0x06, 0xffff, 0xaf25 }, + { 0x07, 0xffff, 0x8e68 }, + }; + + rtl_ephy_init(tp, e_info_8401); + RTL_W8(tp, 
Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8105e_1[] = { + { 0x07, 0, 0x4000 }, + { 0x19, 0, 0x0200 }, + { 0x19, 0, 0x0020 }, + { 0x1e, 0, 0x2000 }, + { 0x03, 0, 0x0001 }, + { 0x19, 0, 0x0100 }, + { 0x19, 0, 0x0004 }, + { 0x0a, 0, 0x0020 } + }; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + /* Disable Early Tally Counter */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + + rtl_ephy_init(tp, e_info_8105e_1); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) +{ + rtl_hw_start_8105e_1(tp); + rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000); +} + +static void rtl_hw_start_8402(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8402[] = { + { 0x19, 0xffff, 0xff64 }, + { 0x1e, 0, 0x4000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl_ephy_init(tp, e_info_8402); + + rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06); + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_w0w1_eri(tp, 0x0d4, 0x0e00, 0xff00); + + /* disable EEE */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8106(struct rtl8169_private *tp) +{ + rtl_hw_aspm_clkreq_enable(tp, false); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + + /* L0 7us, L1 32us - needed to avoid issues with link-up detection */ + rtl_set_aspm_entry_latency(tp, 0x2f); + + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); + + /* disable EEE */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + + rtl_pcie_state_l2l3_disable(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond) +{ + return r8168_mac_ocp_read(tp, 0xe00e) & BIT(13); +} + +static void rtl_hw_start_8125_common(struct rtl8169_private *tp) +{ + rtl_pcie_state_l2l3_disable(tp); + + RTL_W16(tp, 0x382, 0x221b); + RTL_W8(tp, 0x4500, 0); + RTL_W16(tp, 0x4800, 0); + + /* disable UPS */ + r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10); + + r8168_mac_ocp_write(tp, 0xc140, 0xffff); + r8168_mac_ocp_write(tp, 0xc142, 0xffff); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x03a9); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + /* disable new tx descriptor format */ + r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200); + else + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000); + else + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020); + + r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c); + 
r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033); + r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040); + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030); + r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000); + r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); + r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403); + r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068); + r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f); + + r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001); + udelay(1); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0001, 0x0000); + RTL_W16(tp, 0x1880, RTL_R16(tp, 0x1880) & ~0x0030); + + r8168_mac_ocp_write(tp, 0xe098, 0xc302); + + rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + rtl8125b_config_eee_mac(tp); + else + rtl8125a_config_eee_mac(tp); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + udelay(10); +} + +static void rtl_hw_start_8125a_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125a_1[] = { + { 0x01, 0xffff, 0xa812 }, + { 0x09, 0xffff, 0x520c }, + { 0x04, 0xffff, 0xd000 }, + { 0x0d, 0xffff, 0xf702 }, + { 0x0a, 0xffff, 0x8653 }, + { 0x06, 0xffff, 0x001e }, + { 0x08, 0xffff, 0x3595 }, + { 0x20, 0xffff, 0x9455 }, + { 0x21, 0xffff, 0x99ff }, + { 0x02, 0xffff, 0x6046 }, + { 0x29, 0xffff, 0xfe00 }, + { 0x23, 0xffff, 0xab62 }, + + { 0x41, 0xffff, 0xa80c }, + { 0x49, 0xffff, 0x520c }, + { 0x44, 0xffff, 0xd000 }, + { 0x4d, 0xffff, 0xf702 }, + { 0x4a, 0xffff, 0x8653 }, + { 0x46, 0xffff, 0x001e }, + { 0x48, 0xffff, 0x3595 }, + { 0x60, 0xffff, 0x9455 }, + { 0x61, 0xffff, 0x99ff }, + { 0x42, 0xffff, 0x6046 }, + { 0x69, 0xffff, 0xfe00 }, + { 0x63, 0xffff, 0xab62 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8125a_1); + + rtl_hw_start_8125_common(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8125a_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125a_2[] = { + { 0x04, 0xffff, 0xd000 }, + { 0x0a, 0xffff, 0x8653 }, + { 0x23, 0xffff, 0xab66 }, + { 0x20, 0xffff, 0x9455 }, + { 0x21, 0xffff, 0x99ff }, + { 0x29, 0xffff, 0xfe04 }, + + { 0x44, 0xffff, 0xd000 }, + { 0x4a, 0xffff, 0x8653 }, + { 0x63, 0xffff, 0xab66 }, + { 0x60, 0xffff, 0x9455 }, + { 0x61, 0xffff, 0x99ff }, + { 0x69, 0xffff, 0xfe04 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8125a_2); + + rtl_hw_start_8125_common(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8125b(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125b[] = { + { 0x0b, 0xffff, 0xa908 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x4b, 0xffff, 0xa908 }, + { 0x5e, 0xffff, 0x20eb }, + { 0x22, 0x0030, 0x0020 }, + { 0x62, 0x0030, 0x0020 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + rtl_hw_aspm_clkreq_enable(tp, false); + + rtl_ephy_init(tp, e_info_8125b); + rtl_hw_start_8125_common(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_config(struct rtl8169_private *tp) +{ + static const rtl_generic_fct hw_configs[] = { + [RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1, + [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3, + [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_12] = 
rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401, + [RTL_GIGA_MAC_VER_16] = NULL, + [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1, + [RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1, + [RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2, + [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_2, + [RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4, + [RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2, + [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3, + [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4, + [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1, + [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2, + [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2, + [RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402, + [RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411, + [RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106, + [RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2, + [RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1, + [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2, + [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3, + [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117, + [RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117, + [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125a_1, + [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2, + [RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b, + }; + + if (hw_configs[tp->mac_version]) + hw_configs[tp->mac_version](tp); +} + +static void rtl_hw_start_8125(struct rtl8169_private *tp) +{ + int i; + + /* disable interrupt coalescing */ + for (i = 0xa00; i < 0xb00; i += 4) + RTL_W32(tp, i, 0); + + rtl_hw_config(tp); +} + +static void rtl_hw_start_8168(struct rtl8169_private *tp) +{ + if (rtl_is_8168evl_up(tp)) + RTL_W8(tp, MaxTxPacketSize, EarlySize); + else + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + rtl_hw_config(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static void rtl_hw_start_8169(struct rtl8169_private *tp) +{ + RTL_W8(tp, EarlyTxThres, NoEarlyTx); + + tp->cp_cmd |= PCIMulRW; + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) + tp->cp_cmd |= EnAnaPLL; + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + rtl8169_set_magic_reg(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static void rtl_hw_start(struct rtl8169_private *tp) +{ + rtl_unlock_config_regs(tp); + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + rtl_hw_start_8169(tp); + else if (rtl_is_8125(tp)) + rtl_hw_start_8125(tp); + else + rtl_hw_start_8168(tp); + + rtl_enable_exit_l1(tp); + rtl_set_rx_max_size(tp); + rtl_set_rx_tx_desc_registers(tp); + rtl_lock_config_regs(tp); + + rtl_jumbo_config(tp); + + /* Initially a 10 us delay. Turned it into a PCI commit. 
- FR */ + rtl_pci_commit(tp); + + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_init_rxcfg(tp); + rtl_set_tx_config_registers(tp); + rtl_set_rx_config_features(tp, tp->dev->features); + rtl_set_rx_mode(tp->dev); + rtl_irq_enable(tp); +} + +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + dev->mtu = new_mtu; + netdev_update_features(dev); + rtl_jumbo_config(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + rtl8125_set_eee_txidle_timer(tp); + break; + default: + break; + } + + return 0; +} + +static void rtl8169_mark_to_asic(struct RxDesc *desc) +{ + u32 eor = le32_to_cpu(desc->opts1) & RingEnd; + + desc->opts2 = 0; + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + WRITE_ONCE(desc->opts1, cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE)); +} + +static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp, + struct RxDesc *desc) +{ + struct device *d = tp_to_dev(tp); + int node = dev_to_node(d); + dma_addr_t mapping; + struct page *data; + + data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE)); + if (!data) + return NULL; + + mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + netdev_err(tp->dev, "Failed to map RX DMA!\n"); + __free_pages(data, get_order(R8169_RX_BUF_SIZE)); + return NULL; + } + + desc->addr = cpu_to_le64(mapping); + rtl8169_mark_to_asic(desc); + + return data; +} + +static void rtl8169_rx_clear(struct rtl8169_private *tp) +{ + int i; + + for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) { + dma_unmap_page(tp_to_dev(tp), + le64_to_cpu(tp->RxDescArray[i].addr), + R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + __free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE)); + tp->Rx_databuff[i] = NULL; + tp->RxDescArray[i].addr = 0; + tp->RxDescArray[i].opts1 = 0; + } +} + +static int rtl8169_rx_fill(struct rtl8169_private *tp) +{ + int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + struct page *data; + + data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); + if (!data) { + rtl8169_rx_clear(tp); + return -ENOMEM; + } + tp->Rx_databuff[i] = data; + } + + /* mark as last descriptor in the ring */ + tp->RxDescArray[NUM_RX_DESC - 1].opts1 |= cpu_to_le32(RingEnd); + + return 0; +} + +static int rtl8169_init_ring(struct rtl8169_private *tp) +{ + rtl8169_init_ring_indexes(tp); + + memset(tp->tx_skb, 0, sizeof(tp->tx_skb)); + memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff)); + + return rtl8169_rx_fill(tp); +} + +static void rtl8169_unmap_tx_skb(struct rtl8169_private *tp, unsigned int entry) +{ + struct ring_info *tx_skb = tp->tx_skb + entry; + struct TxDesc *desc = tp->TxDescArray + entry; + + dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), tx_skb->len, + DMA_TO_DEVICE); + memset(desc, 0, sizeof(*desc)); + memset(tx_skb, 0, sizeof(*tx_skb)); +} + +static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, + unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; i++) { + unsigned int entry = (start + i) % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + unsigned int len = tx_skb->len; + + if (len) { + struct sk_buff *skb = tx_skb->skb; + + rtl8169_unmap_tx_skb(tp, entry); + if (!tp->ecdev && skb) + dev_consume_skb_any(skb); + } + } +} + +static void rtl8169_tx_clear(struct rtl8169_private *tp) +{ + rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); + if (!tp->ecdev) + 
netdev_reset_queue(tp->dev); +} + +static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) +{ + if (!tp->ecdev) + napi_disable(&tp->napi); + + /* Give a racing hard_start_xmit a few cycles to complete. */ + synchronize_net(); + + /* Disable interrupts */ + rtl8169_irq_mask_and_ack(tp); + + rtl_rx_close(tp); + + if (going_down && tp->dev->wol_enabled) + goto no_reset; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000); + break; + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + rtl_enable_rxdvgate(tp); + fsleep(2000); + break; + default: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + fsleep(100); + break; + } + + rtl_hw_reset(tp); +no_reset: + rtl8169_tx_clear(tp); + rtl8169_init_ring_indexes(tp); +} + +static void rtl_reset_work(struct rtl8169_private *tp) +{ + int i; + + if (!tp->ecdev) + netif_stop_queue(tp->dev); + + rtl8169_cleanup(tp, false); + + for (i = 0; i < NUM_RX_DESC; i++) + rtl8169_mark_to_asic(tp->RxDescArray + i); + + if (!tp->ecdev) + napi_enable(&tp->napi); + rtl_hw_start(tp); +} + +static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len, + void *addr, unsigned int entry, bool desc_own) +{ + struct TxDesc *txd = tp->TxDescArray + entry; + struct device *d = tp_to_dev(tp); + dma_addr_t mapping; + u32 opts1; + int ret; + + mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); + ret = dma_mapping_error(d, mapping); + if (unlikely(ret)) { + if (net_ratelimit()) + netdev_err(tp->dev, "Failed to map TX data!\n"); + return ret; + } + + txd->addr = cpu_to_le64(mapping); + txd->opts2 = cpu_to_le32(opts[1]); + + opts1 = opts[0] | len; + if (entry == NUM_TX_DESC - 1) + opts1 |= RingEnd; + if (desc_own) + opts1 |= DescOwn; + txd->opts1 = cpu_to_le32(opts1); + + tp->tx_skb[entry].len = len; + + return 0; +} + +static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, + const u32 *opts, unsigned int entry) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int cur_frag; + + for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { + const skb_frag_t *frag = info->frags + cur_frag; + void *addr = skb_frag_address(frag); + u32 len = skb_frag_size(frag); + + entry = (entry + 1) % NUM_TX_DESC; + + if (unlikely(rtl8169_tx_map(tp, opts, len, addr, entry, true))) + goto err_out; + } + + return 0; + +err_out: + rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); + return -EIO; +} + +static bool rtl_skb_is_udp(struct sk_buff *skb) +{ + int no = skb_network_offset(skb); + struct ipv6hdr *i6h, _i6h; + struct iphdr *ih, _ih; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih); + return ih && ih->protocol == IPPROTO_UDP; + case htons(ETH_P_IPV6): + i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h); + return i6h && i6h->nexthdr == IPPROTO_UDP; + default: + return false; + } +} + +#define RTL_MIN_PATCH_LEN 47 + +/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */ +static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + 
unsigned int padto = 0, len = skb->len; + + if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN && + rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) { + unsigned int trans_data_len = skb_tail_pointer(skb) - + skb_transport_header(skb); + + if (trans_data_len >= offsetof(struct udphdr, len) && + trans_data_len < RTL_MIN_PATCH_LEN) { + u16 dest = ntohs(udp_hdr(skb)->dest); + + /* dest is a standard PTP port */ + if (dest == 319 || dest == 320) + padto = len + RTL_MIN_PATCH_LEN - trans_data_len; + } + + if (trans_data_len < sizeof(struct udphdr)) + padto = max_t(unsigned int, padto, + len + sizeof(struct udphdr) - trans_data_len); + } + + return padto; +} + +static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + unsigned int padto; + + padto = rtl8125_quirk_udp_padto(tp, skb); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_60: + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + padto = max_t(unsigned int, padto, ETH_ZLEN); + break; + default: + break; + } + + return padto; +} + +static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts) +{ + u32 mss = skb_shinfo(skb)->gso_size; + + if (mss) { + opts[0] |= TD_LSO; + opts[0] |= mss << TD0_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + + if (ip->protocol == IPPROTO_TCP) + opts[0] |= TD0_IP_CS | TD0_TCP_CS; + else if (ip->protocol == IPPROTO_UDP) + opts[0] |= TD0_IP_CS | TD0_UDP_CS; + else + WARN_ON_ONCE(1); + } +} + +static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, + struct sk_buff *skb, u32 *opts) +{ + u32 transport_offset = (u32)skb_transport_offset(skb); + struct skb_shared_info *shinfo = skb_shinfo(skb); + u32 mss = shinfo->gso_size; + + if (mss) { + if (shinfo->gso_type & SKB_GSO_TCPV4) { + opts[0] |= TD1_GTSENV4; + } else if (shinfo->gso_type & SKB_GSO_TCPV6) { + if (skb_cow_head(skb, 0)) + return false; + + tcp_v6_gso_csum_prep(skb); + opts[0] |= TD1_GTSENV6; + } else { + WARN_ON_ONCE(1); + } + + opts[0] |= transport_offset << GTTCPHO_SHIFT; + opts[1] |= mss << TD1_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ip_protocol; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + opts[1] |= TD1_IPv4_CS; + ip_protocol = ip_hdr(skb)->protocol; + break; + + case htons(ETH_P_IPV6): + opts[1] |= TD1_IPv6_CS; + ip_protocol = ipv6_hdr(skb)->nexthdr; + break; + + default: + ip_protocol = IPPROTO_RAW; + break; + } + + if (ip_protocol == IPPROTO_TCP) + opts[1] |= TD1_TCP_CS; + else if (ip_protocol == IPPROTO_UDP) + opts[1] |= TD1_UDP_CS; + else + WARN_ON_ONCE(1); + + opts[1] |= transport_offset << TCPHO_SHIFT; + } else { + unsigned int padto = rtl_quirk_packet_padto(tp, skb); + + /* skb_padto would free the skb on error */ + return !__skb_put_padto(skb, padto, false); + } + + return true; +} + +static bool rtl_tx_slots_avail(struct rtl8169_private *tp) +{ + unsigned int slots_avail = READ_ONCE(tp->dirty_tx) + NUM_TX_DESC + - READ_ONCE(tp->cur_tx); + + /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ + return slots_avail > MAX_SKB_FRAGS; +} + +/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */ +static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... 
RTL_GIGA_MAC_VER_17: + return false; + default: + return true; + } +} + +static void rtl8169_doorbell(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W16(tp, TxPoll_8125, BIT(0)); + else + RTL_W8(tp, TxPoll, NPQ); +} + +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned int frags = skb_shinfo(skb)->nr_frags; + struct rtl8169_private *tp = netdev_priv(dev); + unsigned int entry = tp->cur_tx % NUM_TX_DESC; + struct TxDesc *txd_first, *txd_last; + bool stop_queue = 0, door_bell = 0; + u32 opts[2]; + + if (unlikely(!rtl_tx_slots_avail(tp))) { + if (net_ratelimit()) + netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); + goto err_stop_0; + } + + opts[1] = rtl8169_tx_vlan_tag(skb); + opts[0] = 0; + + if (!rtl_chip_supports_csum_v2(tp)) + rtl8169_tso_csum_v1(skb, opts); + else if (!rtl8169_tso_csum_v2(tp, skb, opts)) + goto err_dma_0; + + if (unlikely(rtl8169_tx_map(tp, opts, skb_headlen(skb), skb->data, + entry, false))) + goto err_dma_0; + + txd_first = tp->TxDescArray + entry; + + if (frags) { + if (rtl8169_xmit_frags(tp, skb, opts, entry)) + goto err_dma_1; + entry = (entry + frags) % NUM_TX_DESC; + } + + txd_last = tp->TxDescArray + entry; + txd_last->opts1 |= cpu_to_le32(LastFrag); + tp->tx_skb[entry].skb = skb; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + + door_bell = tp->ecdev || __netdev_sent_queue(dev, skb->len, netdev_xmit_more()); + + txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag); + + /* rtl_tx needs to see descriptor changes before updated tp->cur_tx */ + smp_wmb(); + + WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1); + + stop_queue = !tp->ecdev && !rtl_tx_slots_avail(tp); + if (unlikely(stop_queue)) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); + netif_stop_queue(dev); + /* Sync with rtl_tx: + * - publish queue status and cur_tx ring index (write barrier) + * - refresh dirty_tx ring index (read barrier). + * May the current thread have a pessimistic view of the ring + * status and forget to wake up queue, a racing rtl_tx thread + * can't. 
+ */ + smp_mb__after_atomic(); + if (rtl_tx_slots_avail(tp)) + netif_start_queue(dev); + door_bell = true; + } + + if (door_bell) + rtl8169_doorbell(tp); + + return NETDEV_TX_OK; + +err_dma_1: + rtl8169_unmap_tx_skb(tp, entry); +err_dma_0: + if (!tp->ecdev) + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + +err_stop_0: + if (!tp->ecdev) + netif_stop_queue(dev); + dev->stats.tx_dropped++; + return NETDEV_TX_BUSY; +} + +static unsigned int rtl_last_frag_len(struct sk_buff *skb) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int nr_frags = info->nr_frags; + + if (!nr_frags) + return UINT_MAX; + + return skb_frag_size(info->frags + nr_frags - 1); +} + +/* Workaround for hw issues with TSO on RTL8168evl */ +static netdev_features_t rtl8168evl_fix_tso(struct sk_buff *skb, + netdev_features_t features) +{ + /* IPv4 header has options field */ + if (vlan_get_protocol(skb) == htons(ETH_P_IP) && + ip_hdrlen(skb) > sizeof(struct iphdr)) + features &= ~NETIF_F_ALL_TSO; + + /* IPv4 TCP header has options field */ + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 && + tcp_hdrlen(skb) > sizeof(struct tcphdr)) + features &= ~NETIF_F_ALL_TSO; + + else if (rtl_last_frag_len(skb) <= 6) + features &= ~NETIF_F_ALL_TSO; + + return features; +} + +static netdev_features_t rtl8169_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + int transport_offset = skb_transport_offset(skb); + struct rtl8169_private *tp = netdev_priv(dev); + + if (skb_is_gso(skb)) { + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + features = rtl8168evl_fix_tso(skb, features); + + if (transport_offset > GTTCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_ALL_TSO; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* work around hw bug on some chip versions */ + if (skb->len < ETH_ZLEN) + features &= ~NETIF_F_CSUM_MASK; + + if (rtl_quirk_packet_padto(tp, skb)) + features &= ~NETIF_F_CSUM_MASK; + + if (transport_offset > TCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_CSUM_MASK; + } + + return vlan_features_check(skb, features); +} + +static void rtl8169_pcierr_interrupt(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + int pci_status_errs; + u16 pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + + pci_status_errs = pci_status_get_and_clear_errors(pdev); + + if (net_ratelimit()) + netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n", + pci_cmd, pci_status_errs); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, + int budget) +{ + unsigned int dirty_tx, bytes_compl = 0, pkts_compl = 0; + struct sk_buff *skb; + + dirty_tx = tp->dirty_tx; + + while (READ_ONCE(tp->cur_tx) != dirty_tx) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + u32 status; + + status = le32_to_cpu(tp->TxDescArray[entry].opts1); + if (status & DescOwn) + break; + + skb = tp->tx_skb[entry].skb; + rtl8169_unmap_tx_skb(tp, entry); + + if (skb) { + pkts_compl++; + bytes_compl += skb->len; + if (!tp->ecdev) + napi_consume_skb(skb, budget); + } + dirty_tx++; + } + + if (tp->dirty_tx != dirty_tx) { + if (!tp->ecdev) { + netdev_completed_queue(dev, pkts_compl, bytes_compl); + dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl); + } + + /* Sync with rtl8169_start_xmit: + * - publish dirty_tx ring index (write barrier) + * - refresh cur_tx ring index and queue status (read 
barrier) + * May the current thread miss the stopped queue condition, + * a racing xmit thread can only have a right view of the + * ring status. + */ + smp_store_mb(tp->dirty_tx, dirty_tx); + if (!tp->ecdev && netif_queue_stopped(dev) && rtl_tx_slots_avail(tp)) + netif_wake_queue(dev); + /* + * 8168 hack: TxPoll requests are lost when the Tx packets are + * too close. Let's kick an extra TxPoll request when a burst + * of start_xmit activity is detected (if it is not detected, + * it is slow enough). -- FR + * If skb is NULL then we come here again once a tx irq is + * triggered after the last fragment is marked transmitted. + */ + if (tp->cur_tx != dirty_tx && skb) + rtl8169_doorbell(tp); + } +} + +static inline int rtl8169_fragmented_frame(u32 status) +{ + return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); +} + +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) +{ + u32 status = opts1 & (RxProtoMask | RxCSFailMask); + + if (status == RxProtoTCP || status == RxProtoUDP) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); +} + +static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget) +{ + struct device *d = tp_to_dev(tp); + int count; + + for (count = 0; count < budget; count++, tp->cur_rx++) { + unsigned int pkt_size, entry = tp->cur_rx % NUM_RX_DESC; + struct RxDesc *desc = tp->RxDescArray + entry; + struct sk_buff *skb; + const void *rx_buf; + dma_addr_t addr; + u32 status; + + status = le32_to_cpu(desc->opts1); + if (status & DescOwn) + break; + + /* This barrier is needed to keep us from reading + * any other fields out of the Rx descriptor until + * we know the status of DescOwn + */ + dma_rmb(); + + if (unlikely(status & RxRES)) { + if (net_ratelimit()) + netdev_warn(dev, "Rx ERROR. status = %08x\n", + status); + dev->stats.rx_errors++; + if (status & (RxRWT | RxRUNT)) + dev->stats.rx_length_errors++; + if (status & RxCRC) + dev->stats.rx_crc_errors++; + + if (!(dev->features & NETIF_F_RXALL)) + goto release_descriptor; + else if (status & RxRWT || !(status & (RxRUNT | RxCRC))) + goto release_descriptor; + } + + pkt_size = status & GENMASK(13, 0); + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size -= ETH_FCS_LEN; + + /* The driver does not support incoming fragmented frames. + * They are seen as a symptom of over-mtu sized frames. + */ + if (unlikely(rtl8169_fragmented_frame(status))) { + dev->stats.rx_dropped++; + dev->stats.rx_length_errors++; + goto release_descriptor; + } + + if (!tp->ecdev) { + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (unlikely(!skb)) { + dev->stats.rx_dropped++; + goto release_descriptor; + } + } else { + skb = NULL; + } + + addr = le64_to_cpu(desc->addr); + rx_buf = page_address(tp->Rx_databuff[entry]); + + dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); + prefetch(rx_buf); + if (tp->ecdev) { + ecdev_receive(tp->ecdev, rx_buf, pkt_size); + + // No need to detect link status as + // long as frames are received: Reset watchdog. 
+ tp->ec_watchdog_jiffies = jiffies; + + } else { + skb_copy_to_linear_data(skb, rx_buf, pkt_size); + skb->tail += pkt_size; + skb->len = pkt_size; + } + dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); + + if (!tp->ecdev) { + rtl8169_rx_csum(skb, status); + skb->protocol = eth_type_trans(skb, dev); + + rtl8169_rx_vlan_tag(desc, skb); + + if (skb->pkt_type == PACKET_MULTICAST) + dev->stats.multicast++; + + napi_gro_receive(&tp->napi, skb); + dev_sw_netstats_rx_add(dev, pkt_size); + } +release_descriptor: + rtl8169_mark_to_asic(desc); + } + + return count; +} + +static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) +{ + struct rtl8169_private *tp = dev_instance; + u32 status = rtl_get_events(tp); + + if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask)) + return IRQ_NONE; + + if (unlikely(status & SYSErr)) { + rtl8169_pcierr_interrupt(tp->dev); + goto out; + } + + if (status & LinkChg) + phy_mac_interrupt(tp->phydev); + + if (unlikely(status & RxFIFOOver && + tp->mac_version == RTL_GIGA_MAC_VER_11)) { + netif_stop_queue(tp->dev); + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); + } + + if (napi_schedule_prep(&tp->napi)) { + rtl_irq_disable(tp); + __napi_schedule(&tp->napi); + } +out: + rtl_ack_events(tp, status); + + return IRQ_HANDLED; +} + +static void rtl_task(struct work_struct *work) +{ + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, wk.work); + + rtnl_lock(); + + if (!netif_running(tp->dev) || + !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) + goto out_unlock; + + if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) { + rtl_reset_work(tp); + netif_wake_queue(tp->dev); + } +out_unlock: + rtnl_unlock(); +} + +static int rtl8169_poll(struct napi_struct *napi, int budget) +{ + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); + struct net_device *dev = tp->dev; + int work_done; + + rtl_tx(dev, tp, budget); + + work_done = rtl_rx(dev, tp, budget); + + if (work_done < budget && napi_complete_done(napi, work_done)) + rtl_irq_enable(tp); + + return work_done; +} + +static void r8169_phylink_handler(struct net_device *ndev) +{ + struct rtl8169_private *tp = netdev_priv(ndev); + + if (netif_carrier_ok(ndev)) { + rtl_link_chg_patch(tp); + pm_request_resume(&tp->pci_dev->dev); + } else { + pm_runtime_idle(&tp->pci_dev->dev); + } + + if (net_ratelimit()) + phy_print_status(tp->phydev); +} + +static int r8169_phy_connect(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + phy_interface_t phy_mode; + int ret; + + phy_mode = tp->supports_gmii ? 
PHY_INTERFACE_MODE_GMII : + PHY_INTERFACE_MODE_MII; + + ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler, + phy_mode); + if (ret) + return ret; + + if (!tp->supports_gmii) + phy_set_max_speed(phydev, SPEED_100); + + phy_attached_info(phydev); + + return 0; +} + +static void rtl8169_down(struct rtl8169_private *tp) +{ + /* Clear all task flags */ + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); + + phy_stop(tp->phydev); + + rtl8169_update_counters(tp); + + pci_clear_master(tp->pci_dev); + rtl_pci_commit(tp); + + rtl8169_cleanup(tp, true); + + rtl_prepare_power_down(tp); +} + +static void rtl8169_up(struct rtl8169_private *tp) +{ + pci_set_master(tp->pci_dev); + phy_init_hw(tp->phydev); + phy_resume(tp->phydev); + rtl8169_init_phy(tp); + if (!tp->ecdev) + napi_enable(&tp->napi); + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_reset_work(tp); + + phy_start(tp->phydev); +} + +static int rtl8169_close(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + pm_runtime_get_sync(&pdev->dev); + + if (!tp->ecdev) + netif_stop_queue(dev); + rtl8169_down(tp); + rtl8169_rx_clear(tp); + + cancel_work_sync(&tp->wk.work); + + if (!tp->ecdev) + free_irq(pci_irq_vector(pdev, 0), tp); + + phy_disconnect(tp->phydev); + + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + tp->RxDescArray = NULL; + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8169_netpoll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp); +} +#endif + +static int rtl_open(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + unsigned long irqflags; + int retval = -ENOMEM; + + pm_runtime_get_sync(&pdev->dev); + + /* + * Rx and Tx descriptors needs 256 bytes alignment. + * dma_alloc_coherent provides more. + */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto out; + + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_tx_0; + + retval = rtl8169_init_ring(tp); + if (retval < 0) + goto err_free_rx_1; + + rtl_request_firmware(tp); + + irqflags = pci_dev_msi_enabled(pdev) ? 
IRQF_NO_THREAD : IRQF_SHARED; + if (!tp->ecdev) { + retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt, + irqflags, dev->name, tp); + if (retval < 0) + goto err_release_fw_2; + } + + retval = r8169_phy_connect(tp); + if (retval) + goto err_free_irq; + + rtl8169_up(tp); + rtl8169_init_counter_offsets(tp); + if (!tp->ecdev) + netif_start_queue(dev); + else + ecdev_set_link(tp->ecdev, netif_carrier_ok(dev)); + +out: + pm_runtime_put_sync(&pdev->dev); + + return retval; + +err_free_irq: + free_irq(pci_irq_vector(pdev, 0), tp); +err_release_fw_2: + rtl_release_firmware(tp); + rtl8169_rx_clear(tp); +err_free_rx_1: + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; +err_free_tx_0: + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + goto out; +} + +static void +rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + struct rtl8169_counters *counters = tp->counters; + + pm_runtime_get_noresume(&pdev->dev); + + netdev_stats_to_stats64(stats, &dev->stats); + dev_fetch_sw_netstats(stats, dev->tstats); + + /* + * Fetch additional counter values missing in stats collected by driver + * from tally counters. + */ + if (pm_runtime_active(&pdev->dev)) + rtl8169_update_counters(tp); + + /* + * Subtract values fetched during initalization. + * See rtl8169_init_counter_offsets for a description why we do that. + */ + stats->tx_errors = le64_to_cpu(counters->tx_errors) - + le64_to_cpu(tp->tc_offset.tx_errors); + stats->collisions = le32_to_cpu(counters->tx_multi_collision) - + le32_to_cpu(tp->tc_offset.tx_multi_collision); + stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) - + le16_to_cpu(tp->tc_offset.tx_aborted); + stats->rx_missed_errors = le16_to_cpu(counters->rx_missed) - + le16_to_cpu(tp->tc_offset.rx_missed); + + pm_runtime_put_noidle(&pdev->dev); +} + +static void rtl8169_net_suspend(struct rtl8169_private *tp) +{ + netif_device_detach(tp->dev); + + if (netif_running(tp->dev)) + rtl8169_down(tp); +} + +#ifdef CONFIG_PM + +static int rtl8169_runtime_resume(struct device *dev) +{ + struct rtl8169_private *tp = dev_get_drvdata(dev); + + rtl_rar_set(tp, tp->dev->dev_addr); + __rtl8169_set_wol(tp, tp->saved_wolopts); + + if (tp->TxDescArray) + rtl8169_up(tp); + + netif_device_attach(tp->dev); + + return 0; +} + +static int __maybe_unused rtl8169_suspend(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + if (tp->ecdev) { + return -EBUSY; + } + + rtnl_lock(); + rtl8169_net_suspend(tp); + if (!device_may_wakeup(tp_to_dev(tp))) + clk_disable_unprepare(tp->clk); + rtnl_unlock(); + + return 0; +} + +static int __maybe_unused rtl8169_resume(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + if (tp->ecdev) { + return -EBUSY; + } + + if (!device_may_wakeup(tp_to_dev(tp))) + clk_prepare_enable(tp->clk); + + /* Reportedly at least Asus X453MA truncates packets otherwise */ + if (tp->mac_version == RTL_GIGA_MAC_VER_37) + rtl_init_rxcfg(tp); + + return rtl8169_runtime_resume(device); +} + +static int rtl8169_runtime_suspend(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + if (tp->ecdev) { + return -EBUSY; + } + + if (!tp->TxDescArray) { + netif_device_detach(tp->dev); + return 0; + } + + rtnl_lock(); + __rtl8169_set_wol(tp, WAKE_PHY); + rtl8169_net_suspend(tp); 
+ rtnl_unlock(); + + return 0; +} + +static int rtl8169_runtime_idle(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev)) + pm_schedule_suspend(device, 10000); + + return -EBUSY; +} + +static const struct dev_pm_ops rtl8169_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume) + SET_RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume, + rtl8169_runtime_idle) +}; + +#endif /* CONFIG_PM */ + +static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) +{ + /* WoL fails with 8168b when the receiver is disabled. */ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + pci_clear_master(tp->pci_dev); + + RTL_W8(tp, ChipCmd, CmdRxEnb); + rtl_pci_commit(tp); + break; + default: + break; + } +} + +static void rtl_shutdown(struct pci_dev *pdev) +{ + struct rtl8169_private *tp = pci_get_drvdata(pdev); + + rtnl_lock(); + rtl8169_net_suspend(tp); + rtnl_unlock(); + + /* Restore original MAC address */ + rtl_rar_set(tp, tp->dev->perm_addr); + + if (system_state == SYSTEM_POWER_OFF) { + if (tp->saved_wolopts) + rtl_wol_shutdown_quirk(tp); + + pci_wake_from_d3(pdev, tp->saved_wolopts); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +static void rtl_remove_one(struct pci_dev *pdev) +{ + struct rtl8169_private *tp = pci_get_drvdata(pdev); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + if (tp->ecdev) { + ecdev_close(tp->ecdev); + ecdev_withdraw(tp->ecdev); + } else { + unregister_netdev(tp->dev); + } + + if (tp->dash_type != RTL_DASH_NONE) + rtl8168_driver_stop(tp); + + rtl_release_firmware(tp); + + /* restore original MAC address */ + rtl_rar_set(tp, tp->dev->perm_addr); +} + +static const struct net_device_ops rtl_netdev_ops = { + .ndo_open = rtl_open, + .ndo_stop = rtl8169_close, + .ndo_get_stats64 = rtl8169_get_stats64, + .ndo_start_xmit = rtl8169_start_xmit, + .ndo_features_check = rtl8169_features_check, + .ndo_tx_timeout = rtl8169_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = rtl8169_change_mtu, + .ndo_fix_features = rtl8169_fix_features, + .ndo_set_features = rtl8169_set_features, + .ndo_set_mac_address = rtl_set_mac_address, + .ndo_eth_ioctl = phy_do_ioctl_running, + .ndo_set_rx_mode = rtl_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8169_netpoll, +#endif + +}; + +static void rtl_set_irq_mask(struct rtl8169_private *tp) +{ + tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg; + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver; + else if (tp->mac_version == RTL_GIGA_MAC_VER_11) + /* special workaround needed */ + tp->irq_mask |= RxFIFOOver; + else + tp->irq_mask |= RxOverflow; +} + +static int rtl_alloc_irq(struct rtl8169_private *tp) +{ + unsigned int flags; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + rtl_unlock_config_regs(tp); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); + rtl_lock_config_regs(tp); + fallthrough; + case RTL_GIGA_MAC_VER_07 ... 
RTL_GIGA_MAC_VER_17: + flags = PCI_IRQ_LEGACY; + break; + default: + flags = PCI_IRQ_ALL_TYPES; + break; + } + + return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); +} + +static void rtl_read_mac_address(struct rtl8169_private *tp, + u8 mac_addr[ETH_ALEN]) +{ + /* Get MAC address */ + if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) { + u32 value; + + value = rtl_eri_read(tp, 0xe0); + put_unaligned_le32(value, mac_addr); + value = rtl_eri_read(tp, 0xe4); + put_unaligned_le16(value, mac_addr + 4); + } else if (rtl_is_8125(tp)) { + rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP); + } +} + +DECLARE_RTL_COND(rtl_link_list_ready_cond) +{ + return RTL_R8(tp, MCU) & LINK_LIST_RDY; +} + +static void r8168g_wait_ll_share_fifo_ready(struct rtl8169_private *tp) +{ + rtl_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42); +} + +static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + return rtl_readphy(tp, phyreg); +} + +static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr, + int phyreg, u16 val) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + rtl_writephy(tp, phyreg, val); + + return 0; +} + +static int r8169_mdio_register(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + struct mii_bus *new_bus; + int ret; + + new_bus = devm_mdiobus_alloc(&pdev->dev); + if (!new_bus) + return -ENOMEM; + + new_bus->name = "r8169"; + new_bus->priv = tp; + new_bus->parent = &pdev->dev; + new_bus->irq[0] = PHY_MAC_INTERRUPT; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x", + pci_domain_nr(pdev->bus), pci_dev_id(pdev)); + + new_bus->read = r8169_mdio_read_reg; + new_bus->write = r8169_mdio_write_reg; + + ret = devm_mdiobus_register(&pdev->dev, new_bus); + if (ret) + return ret; + + tp->phydev = mdiobus_get_phy(new_bus, 0); + if (!tp->phydev) { + return -ENODEV; + } else if (!tp->phydev->drv) { + /* Most chip versions fail with the genphy driver. + * Therefore ensure that the dedicated PHY driver is loaded. + */ + dev_err(&pdev->dev, "no dedicated PHY driver found for PHY ID 0x%08x, maybe realtek.ko needs to be added to initramfs?\n", + tp->phydev->phy_id); + return -EUNATCH; + } + + tp->phydev->mac_managed_pm = 1; + + phy_support_asym_pause(tp->phydev); + + /* PHY will be woken up in rtl_open() */ + phy_suspend(tp->phydev); + + return 0; +} + +static void rtl_hw_init_8168g(struct rtl8169_private *tp) +{ + rtl_enable_rxdvgate(tp); + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + r8168g_wait_ll_share_fifo_ready(tp); + + r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15)); + r8168g_wait_ll_share_fifo_ready(tp); +} + +static void rtl_hw_init_8125(struct rtl8169_private *tp) +{ + rtl_enable_rxdvgate(tp); + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + r8168g_wait_ll_share_fifo_ready(tp); + + r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0); + r8168_mac_ocp_write(tp, 0xc0a6, 0x0150); + r8168_mac_ocp_write(tp, 0xc01e, 0x5555); + r8168g_wait_ll_share_fifo_ready(tp); +} + +static void rtl_hw_initialize(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_49 ... 
RTL_GIGA_MAC_VER_53:
+		rtl8168ep_stop_cmac(tp);
+		fallthrough;
+	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
+		rtl_hw_init_8168g(tp);
+		break;
+	case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+		rtl_hw_init_8125(tp);
+		break;
+	default:
+		break;
+	}
+}
+
+static int rtl_jumbo_max(struct rtl8169_private *tp)
+{
+	/* Non-GBit versions don't support jumbo frames */
+	if (!tp->supports_gmii)
+		return 0;
+
+	switch (tp->mac_version) {
+	/* RTL8169 */
+	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
+		return JUMBO_7K;
+	/* RTL8168b */
+	case RTL_GIGA_MAC_VER_11:
+	case RTL_GIGA_MAC_VER_12:
+	case RTL_GIGA_MAC_VER_17:
+		return JUMBO_4K;
+	/* RTL8168c */
+	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
+		return JUMBO_6K;
+	default:
+		return JUMBO_9K;
+	}
+}
+
+static void rtl_disable_clk(void *data)
+{
+	clk_disable_unprepare(data);
+}
+
+static int rtl_get_ether_clk(struct rtl8169_private *tp)
+{
+	struct device *d = tp_to_dev(tp);
+	struct clk *clk;
+	int rc;
+
+	clk = devm_clk_get(d, "ether_clk");
+	if (IS_ERR(clk)) {
+		rc = PTR_ERR(clk);
+		if (rc == -ENOENT)
+			/* clk-core allows NULL (for suspend / resume) */
+			rc = 0;
+		else
+			dev_err_probe(d, rc, "failed to get clk\n");
+	} else {
+		tp->clk = clk;
+		rc = clk_prepare_enable(clk);
+		if (rc)
+			dev_err(d, "failed to enable clk: %d\n", rc);
+		else
+			rc = devm_add_action_or_reset(d, rtl_disable_clk, clk);
+	}
+
+	return rc;
+}
+
+static void rtl_init_mac_address(struct rtl8169_private *tp)
+{
+	struct net_device *dev = tp->dev;
+	u8 *mac_addr = dev->dev_addr;
+	int rc;
+
+	rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
+	if (!rc)
+		goto done;
+
+	rtl_read_mac_address(tp, mac_addr);
+	if (is_valid_ether_addr(mac_addr))
+		goto done;
+
+	rtl_read_mac_from_reg(tp, mac_addr, MAC0);
+	if (is_valid_ether_addr(mac_addr))
+		goto done;
+
+	eth_hw_addr_random(dev);
+	dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n");
+done:
+	rtl_rar_set(tp, mac_addr);
+}
+
+static void ec_poll(struct net_device *dev)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+	u16 status = rtl_get_events(tp);
+
+	if (jiffies - tp->ec_watchdog_jiffies >= 2 * HZ) {
+		ecdev_set_link(tp->ecdev, netif_carrier_ok(dev));
+		tp->ec_watchdog_jiffies = jiffies;
+	}
+
+	if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask))
+		return;
+
+	rtl_tx(dev, tp, 100);
+
+	rtl_rx(dev, tp, 100);
+
+	if (status & LinkChg)
+		phy_mac_interrupt(tp->phydev);
+
+	rtl_ack_events(tp, status);
+}
+
+
+static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct rtl8169_private *tp;
+	int jumbo_max, region, rc;
+	enum mac_version chipset;
+	struct net_device *dev;
+	u16 xid;
+
+	dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
+	if (!dev)
+		return -ENOMEM;
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	dev->netdev_ops = &rtl_netdev_ops;
+	tp = netdev_priv(dev);
+	tp->dev = dev;
+	tp->pci_dev = pdev;
+	tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
+	tp->eee_adv = -1;
+	tp->ocp_base = OCP_STD_PHY_BASE;
+
+	dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
+						   struct pcpu_sw_netstats);
+	if (!dev->tstats)
+		return -ENOMEM;
+
+	/* Get the *optional* external "ether_clk" used on some boards */
+	rc = rtl_get_ether_clk(tp);
+	if (rc)
+		return rc;
+
+	/* Disable ASPM L1 as that cause random device stop working
+	 * problems as well as full system hangs for some PCIe devices users.
+	 */
+	rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+	tp->aspm_manageable = !rc;
+
+	/* enable device (incl.
PCI PM wakeup and hotplug setup) */ + rc = pcim_enable_device(pdev); + if (rc < 0) { + dev_err(&pdev->dev, "enable failure\n"); + return rc; + } + + if (pcim_set_mwi(pdev) < 0) + dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n"); + + /* use first MMIO region */ + region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1; + if (region < 0) { + dev_err(&pdev->dev, "no MMIO resource found\n"); + return -ENODEV; + } + + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { + dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n"); + return -ENODEV; + } + + rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME); + if (rc < 0) { + dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); + return rc; + } + + tp->mmio_addr = pcim_iomap_table(pdev)[region]; + + xid = (RTL_R32(tp, TxConfig) >> 20) & 0xfcf; + + /* Identify chip attached to board */ + chipset = rtl8169_get_mac_version(xid, tp->supports_gmii); + if (chipset == RTL_GIGA_MAC_NONE) { + dev_err(&pdev->dev, "unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n", xid); + return -ENODEV; + } + + tp->mac_version = chipset; + + tp->dash_type = rtl_check_dash(tp); + + tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK; + + if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) + dev->features |= NETIF_F_HIGHDMA; + + rtl_init_rxcfg(tp); + + rtl8169_irq_mask_and_ack(tp); + + rtl_hw_initialize(tp); + + rtl_hw_reset(tp); + + rc = rtl_alloc_irq(tp); + if (rc < 0) { + dev_err(&pdev->dev, "Can't allocate interrupt\n"); + return rc; + } + + INIT_WORK(&tp->wk.work, rtl_task); + + rtl_init_mac_address(tp); + + dev->ethtool_ops = &rtl8169_ethtool_ops; + + netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT); + + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. + */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + /* Disallow toggling */ + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (rtl_chip_supports_csum_v2(tp)) + dev->hw_features |= NETIF_F_IPV6_CSUM; + + dev->features |= dev->hw_features; + + /* There has been a number of reports that using SG/TSO results in + * tx timeouts. However for a lot of people SG/TSO works fine. + * Therefore disable both features by default, but allow users to + * enable them. Use at own risk! 
+ */ + if (rtl_chip_supports_csum_v2(tp)) { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; + dev->gso_max_size = RTL_GSO_MAX_SIZE_V2; + dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2; + } else { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO; + dev->gso_max_size = RTL_GSO_MAX_SIZE_V1; + dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1; + } + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + /* configure chip for default features */ + rtl8169_set_features(dev, dev->features); + + rtl_set_d3_pll_down(tp, true); + + jumbo_max = rtl_jumbo_max(tp); + if (jumbo_max) + dev->max_mtu = jumbo_max; + + rtl_set_irq_mask(tp); + + tp->fw_name = rtl_chip_infos[chipset].fw_name; + + tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), + &tp->counters_phys_addr, + GFP_KERNEL); + if (!tp->counters) + return -ENOMEM; + + pci_set_drvdata(pdev, tp); + + rc = r8169_mdio_register(tp); + if (rc) + return rc; + + tp->ecdev = ecdev_offer(dev, ec_poll, THIS_MODULE); + tp->ec_watchdog_jiffies = jiffies; + + if (!tp->ecdev) { + rc = register_netdev(dev); + if (rc) + return rc; + } + + netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n", + rtl_chip_infos[chipset].name, dev->dev_addr, xid, + pci_irq_vector(pdev, 0)); + + if (jumbo_max) + netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n", + jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ? + "ok" : "ko"); + + if (tp->dash_type != RTL_DASH_NONE) { + netdev_info(dev, "DASH enabled\n"); + rtl8168_driver_start(tp); + } + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_sync(&pdev->dev); + + if (tp->ecdev) { + rc = ecdev_open(tp->ecdev); + if (rc) { + ecdev_withdraw(tp->ecdev); + return rc; + } + } + + return 0; +} + +static struct pci_driver rtl8169_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = rtl8169_pci_tbl, + .probe = rtl_init_one, + .remove = rtl_remove_one, + .shutdown = rtl_shutdown, + .driver.pm = pm_ptr(&rtl8169_pm_ops), +}; + +module_pci_driver(rtl8169_pci_driver); diff --git a/devices/r8169/r8169_main-5.15-orig.c b/devices/r8169/r8169_main-5.15-orig.c new file mode 100644 index 00000000..2918947d --- /dev/null +++ b/devices/r8169/r8169_main-5.15-orig.c @@ -0,0 +1,5482 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * r8169.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "r8169.h" +#include "r8169_firmware.h" + +#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" +#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" +#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" +#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" +#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" +#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" +#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" +#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" +#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" +#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" +#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw" +#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw" +#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" +#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" +#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" +#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw" +#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" +#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw" +#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" +#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" +#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw" +#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw" + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. */ +#define MC_FILTER_LIMIT 32 + +#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ +#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ + +#define R8169_REGS_SIZE 256 +#define R8169_RX_BUF_SIZE (SZ_16K - 1) +#define NUM_TX_DESC 256 /* Number of Tx descriptor registers */ +#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */ +#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) +#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + +#define OCP_STD_PHY_BASE 0xa400 + +#define RTL_CFG_NO_GBIT 1 + +/* write/read MMIO register */ +#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) +#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) +#define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg)) +#define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg)) +#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg)) +#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg)) + +#define JUMBO_4K (4 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) + +static const struct { + const char *name; + const char *fw_name; +} rtl_chip_infos[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_02] = {"RTL8169s" }, + [RTL_GIGA_MAC_VER_03] = {"RTL8110s" }, + [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" }, + [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" }, + [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" }, + /* PCI-E devices. 
*/ + [RTL_GIGA_MAC_VER_07] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_08] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" }, + [RTL_GIGA_MAC_VER_10] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_13] = {"RTL8101e/RTL8100e" }, + [RTL_GIGA_MAC_VER_14] = {"RTL8401" }, + [RTL_GIGA_MAC_VER_16] = {"RTL8101e" }, + [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1}, + [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2}, + [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1}, + [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2}, + [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3}, + [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1}, + [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2}, + [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 }, + [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 }, + [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1}, + [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2}, + [RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" }, + [RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3}, + [RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2}, + [RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 }, + [RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1}, + [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2}, + [RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1}, + [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2}, + [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3}, + [RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", }, + [RTL_GIGA_MAC_VER_60] = {"RTL8125A" }, + [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3}, + /* reserve 62 for CFG_METHOD_4 in the vendor driver */ + [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2}, +}; + +static const struct pci_device_id rtl8169_pci_tbl[] = { + { PCI_VDEVICE(REALTEK, 0x2502) }, + { PCI_VDEVICE(REALTEK, 0x2600) }, + { PCI_VDEVICE(REALTEK, 0x8129) }, + { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT }, + { PCI_VDEVICE(REALTEK, 0x8161) }, + { PCI_VDEVICE(REALTEK, 0x8162) }, + { PCI_VDEVICE(REALTEK, 0x8167) }, + { PCI_VDEVICE(REALTEK, 0x8168) }, + { PCI_VDEVICE(NCUBE, 0x8168) }, + { PCI_VDEVICE(REALTEK, 0x8169) }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 }, + { PCI_VDEVICE(DLINK, 0x4300) }, + { PCI_VDEVICE(DLINK, 0x4302) }, + { PCI_VDEVICE(AT, 0xc107) }, + { PCI_VDEVICE(USR, 0x0116) }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 }, + { 0x0001, 0x8168, PCI_ANY_ID, 0x2410 }, + { PCI_VDEVICE(REALTEK, 0x8125) }, + { PCI_VDEVICE(REALTEK, 0x3000) }, + {} +}; + +MODULE_DEVICE_TABLE(pci, 
rtl8169_pci_tbl); + +enum rtl_registers { + MAC0 = 0, /* Ethernet hardware address. */ + MAC4 = 4, + MAR0 = 8, /* Multicast filter. */ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3c, + IntrStatus = 0x3e, + + TxConfig = 0x40, +#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */ +#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */ + + RxConfig = 0x44, +#define RX128_INT_EN (1 << 15) /* 8111c and later */ +#define RX_MULTI_EN (1 << 14) /* 8111c only */ +#define RXCFG_FIFO_SHIFT 13 + /* No threshold before first PCI xfer */ +#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) +#define RX_EARLY_OFF (1 << 11) +#define RXCFG_DMA_SHIFT 8 + /* Unlimited maximum PCI burst. */ +#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) + + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, +#define PME_SIGNAL (1 << 5) /* 8168c and later */ + + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + PHYAR = 0x60, + PHYstatus = 0x6c, + RxMaxSize = 0xda, + CPlusCmd = 0xe0, + IntrMitigate = 0xe2, + +#define RTL_COALESCE_TX_USECS GENMASK(15, 12) +#define RTL_COALESCE_TX_FRAMES GENMASK(11, 8) +#define RTL_COALESCE_RX_USECS GENMASK(7, 4) +#define RTL_COALESCE_RX_FRAMES GENMASK(3, 0) + +#define RTL_COALESCE_T_MAX 0x0fU +#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_T_MAX * 4) + + RxDescAddrLow = 0xe4, + RxDescAddrHigh = 0xe8, + EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ + +#define NoEarlyTx 0x3f /* Max value : no early transmit. */ + + MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */ + +#define TxPacketMax (8064 >> 7) +#define EarlySize 0x27 + + FuncEvent = 0xf0, + FuncEventMask = 0xf4, + FuncPresetState = 0xf8, + IBCR0 = 0xf8, + IBCR2 = 0xf9, + IBIMR0 = 0xfa, + IBISR0 = 0xfb, + FuncForceEvent = 0xfc, +}; + +enum rtl8168_8101_registers { + CSIDR = 0x64, + CSIAR = 0x68, +#define CSIAR_FLAG 0x80000000 +#define CSIAR_WRITE_CMD 0x80000000 +#define CSIAR_BYTE_ENABLE 0x0000f000 +#define CSIAR_ADDR_MASK 0x00000fff + PMCH = 0x6f, +#define D3COLD_NO_PLL_DOWN BIT(7) +#define D3HOT_NO_PLL_DOWN BIT(6) +#define D3_NO_PLL_DOWN (BIT(7) | BIT(6)) + EPHYAR = 0x80, +#define EPHYAR_FLAG 0x80000000 +#define EPHYAR_WRITE_CMD 0x80000000 +#define EPHYAR_REG_MASK 0x1f +#define EPHYAR_REG_SHIFT 16 +#define EPHYAR_DATA_MASK 0xffff + DLLPR = 0xd0, +#define PFM_EN (1 << 6) +#define TX_10M_PS_EN (1 << 7) + DBG_REG = 0xd1, +#define FIX_NAK_1 (1 << 4) +#define FIX_NAK_2 (1 << 3) + TWSI = 0xd2, + MCU = 0xd3, +#define NOW_IS_OOB (1 << 7) +#define TX_EMPTY (1 << 5) +#define RX_EMPTY (1 << 4) +#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY) +#define EN_NDP (1 << 3) +#define EN_OOB_RESET (1 << 2) +#define LINK_LIST_RDY (1 << 1) + EFUSEAR = 0xdc, +#define EFUSEAR_FLAG 0x80000000 +#define EFUSEAR_WRITE_CMD 0x80000000 +#define EFUSEAR_READ_CMD 0x00000000 +#define EFUSEAR_REG_MASK 0x03ff +#define EFUSEAR_REG_SHIFT 8 +#define EFUSEAR_DATA_MASK 0xff + MISC_1 = 0xf2, +#define PFM_D3COLD_EN (1 << 6) +}; + +enum rtl8168_registers { + LED_FREQ = 0x1a, + EEE_LED = 0x1b, + ERIDR = 0x70, + ERIAR = 0x74, +#define ERIAR_FLAG 0x80000000 +#define ERIAR_WRITE_CMD 0x80000000 +#define ERIAR_READ_CMD 0x00000000 +#define ERIAR_ADDR_BYTE_ALIGN 4 +#define ERIAR_TYPE_SHIFT 16 +#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) +#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) +#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_OOB (0x02 << ERIAR_TYPE_SHIFT) 
+#define ERIAR_MASK_SHIFT 12 +#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) + EPHY_RXER_NUM = 0x7c, + OCPDR = 0xb0, /* OCP GPHY access */ +#define OCPDR_WRITE_CMD 0x80000000 +#define OCPDR_READ_CMD 0x00000000 +#define OCPDR_REG_MASK 0x7f +#define OCPDR_GPHY_REG_SHIFT 16 +#define OCPDR_DATA_MASK 0xffff + OCPAR = 0xb4, +#define OCPAR_FLAG 0x80000000 +#define OCPAR_GPHY_WRITE_CMD 0x8000f060 +#define OCPAR_GPHY_READ_CMD 0x0000f060 + GPHY_OCP = 0xb8, + RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ + MISC = 0xf0, /* 8168e only. */ +#define TXPLA_RST (1 << 29) +#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */ +#define PWM_EN (1 << 22) +#define RXDV_GATED_EN (1 << 19) +#define EARLY_TALLY_EN (1 << 16) +}; + +enum rtl8125_registers { + IntrMask_8125 = 0x38, + IntrStatus_8125 = 0x3c, + TxPoll_8125 = 0x90, + MAC0_BKP = 0x19e0, + EEE_TXIDLE_TIMER_8125 = 0x6048, +}; + +#define RX_VLAN_INNER_8125 BIT(22) +#define RX_VLAN_OUTER_8125 BIT(23) +#define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125) + +#define RX_FETCH_DFLT_8125 (8 << 27) + +enum rtl_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxOverflow = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* TXPoll register p.5 */ + HPQ = 0x80, /* Poll cmd on the high prio queue */ + NPQ = 0x40, /* Poll cmd on the low prio queue */ + FSWInt = 0x01, /* Forced software interrupt */ + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xc0, + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, +#define RX_CONFIG_ACCEPT_ERR_MASK 0x30 + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +#define RX_CONFIG_ACCEPT_OK_MASK 0x0f +#define RX_CONFIG_ACCEPT_MASK 0x3f + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + /* Config1 register p.24 */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register p. 25 */ + ClkReqEn = (1 << 7), /* Clock Request Enable */ + MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ + PCI_Clock_66MHz = 0x01, + PCI_Clock_33MHz = 0x00, + + /* Config3 register p.25 */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Rdy_to_L23 = (1 << 1), /* L23 Enable */ + Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* 8168 only. 
Reserved in the 8168b */ + + /* Config5 register p.27 */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + Spi_en = (1 << 3), + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + ASPM_en = (1 << 0), /* ASPM enable */ + + /* CPlusCmd p.31 */ + EnableBist = (1 << 15), // 8168 8101 + Mac_dbgo_oe = (1 << 14), // 8168 8101 + EnAnaPLL = (1 << 14), // 8169 + Normal_mode = (1 << 13), // unused + Force_half_dup = (1 << 12), // 8168 8101 + Force_rxflow_en = (1 << 11), // 8168 8101 + Force_txflow_en = (1 << 10), // 8168 8101 + Cxpl_dbg_sel = (1 << 9), // 8168 8101 + ASF = (1 << 8), // 8168 8101 + PktCntrDisable = (1 << 7), // 8168 8101 + Mac_dbgo_sel = 0x001c, // 8168 + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + PCIDAC = (1 << 4), + PCIMulRW = (1 << 3), +#define INTT_MASK GENMASK(1, 0) +#define CPCMD_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK) + + /* rtl8169_PHYstatus */ + TBI_Enable = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* ResetCounterCommand */ + CounterReset = 0x1, + + /* DumpCounterCommand */ + CounterDump = 0x8, + + /* magic enable v2 */ + MagicPacket_v2 = (1 << 16), /* Wake up when receives a Magic Packet */ +}; + +enum rtl_desc_bit { + /* First doubleword. */ + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ +}; + +/* Generic case. */ +enum rtl_tx_desc_bit { + /* First doubleword. */ + TD_LSO = (1 << 27), /* Large Send Offload */ +#define TD_MSS_MAX 0x07ffu /* MSS value */ + + /* Second doubleword. */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ +}; + +/* 8169, 8168b and 810x except 8102e. */ +enum rtl_tx_desc_bit_0 { + /* First doubleword. */ +#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */ + TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */ + TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */ + TD0_IP_CS = (1 << 18), /* Calculate IP checksum */ +}; + +/* 8102e, 8168c and beyond. */ +enum rtl_tx_desc_bit_1 { + /* First doubleword. */ + TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */ + TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */ +#define GTTCPHO_SHIFT 18 +#define GTTCPHO_MAX 0x7f + + /* Second doubleword. 
*/ +#define TCPHO_SHIFT 18 +#define TCPHO_MAX 0x3ff +#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ + TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */ + TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */ + TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */ + TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */ +}; + +enum rtl_rx_desc_bit { + /* Rx private */ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 0/2 */ + +#define RxProtoUDP (PID1) +#define RxProtoTCP (PID0) +#define RxProtoIP (PID1 | PID0) +#define RxProtoMask RxProtoIP + + IPFail = (1 << 16), /* IP checksum failed */ + UDPFail = (1 << 15), /* UDP/IP checksum failed */ + TCPFail = (1 << 14), /* TCP/IP checksum failed */ + +#define RxCSFailMask (IPFail | UDPFail | TCPFail) + + RxVlanTag = (1 << 16), /* VLAN tag available */ +}; + +#define RTL_GSO_MAX_SIZE_V1 32000 +#define RTL_GSO_MAX_SEGS_V1 24 +#define RTL_GSO_MAX_SIZE_V2 64000 +#define RTL_GSO_MAX_SEGS_V2 64 + +struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; +}; + +struct rtl8169_tc_offsets { + bool inited; + __le64 tx_errors; + __le32 tx_multi_collision; + __le16 tx_aborted; + __le16 rx_missed; +}; + +enum rtl_flag { + RTL_FLAG_TASK_ENABLED = 0, + RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_MAX +}; + +enum rtl_dash_type { + RTL_DASH_NONE, + RTL_DASH_DP, + RTL_DASH_EP, +}; + +struct rtl8169_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; + struct net_device *dev; + struct phy_device *phydev; + struct napi_struct napi; + enum mac_version mac_version; + enum rtl_dash_type dash_type; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. 
*/ + u32 dirty_tx; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + u16 cp_cmd; + u32 irq_mask; + struct clk *clk; + + struct { + DECLARE_BITMAP(flags, RTL_FLAG_MAX); + struct work_struct work; + } wk; + + unsigned supports_gmii:1; + unsigned aspm_manageable:1; + dma_addr_t counters_phys_addr; + struct rtl8169_counters *counters; + struct rtl8169_tc_offsets tc_offset; + u32 saved_wolopts; + int eee_adv; + + const char *fw_name; + struct rtl_fw *rtl_fw; + + u32 ocp_base; +}; + +typedef void (*rtl_generic_fct)(struct rtl8169_private *tp); + +MODULE_AUTHOR("Realtek and the Linux r8169 crew "); +MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); +MODULE_SOFTDEP("pre: realtek"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE_8168D_1); +MODULE_FIRMWARE(FIRMWARE_8168D_2); +MODULE_FIRMWARE(FIRMWARE_8168E_1); +MODULE_FIRMWARE(FIRMWARE_8168E_2); +MODULE_FIRMWARE(FIRMWARE_8168E_3); +MODULE_FIRMWARE(FIRMWARE_8105E_1); +MODULE_FIRMWARE(FIRMWARE_8168F_1); +MODULE_FIRMWARE(FIRMWARE_8168F_2); +MODULE_FIRMWARE(FIRMWARE_8402_1); +MODULE_FIRMWARE(FIRMWARE_8411_1); +MODULE_FIRMWARE(FIRMWARE_8411_2); +MODULE_FIRMWARE(FIRMWARE_8106E_1); +MODULE_FIRMWARE(FIRMWARE_8106E_2); +MODULE_FIRMWARE(FIRMWARE_8168G_2); +MODULE_FIRMWARE(FIRMWARE_8168G_3); +MODULE_FIRMWARE(FIRMWARE_8168H_1); +MODULE_FIRMWARE(FIRMWARE_8168H_2); +MODULE_FIRMWARE(FIRMWARE_8168FP_3); +MODULE_FIRMWARE(FIRMWARE_8107E_1); +MODULE_FIRMWARE(FIRMWARE_8107E_2); +MODULE_FIRMWARE(FIRMWARE_8125A_3); +MODULE_FIRMWARE(FIRMWARE_8125B_2); + +static inline struct device *tp_to_dev(struct rtl8169_private *tp) +{ + return &tp->pci_dev->dev; +} + +static void rtl_lock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Lock); +} + +static void rtl_unlock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); +} + +static void rtl_pci_commit(struct rtl8169_private *tp) +{ + /* Read an arbitrary register to commit a preceding PCI write */ + RTL_R8(tp, ChipCmd); +} + +static bool rtl_is_8125(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_60; +} + +static bool rtl_is_8168evl_up(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_39 && + tp->mac_version <= RTL_GIGA_MAC_VER_53; +} + +static bool rtl_supports_eee(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_37 && + tp->mac_version != RTL_GIGA_MAC_VER_39; +} + +static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + mac[i] = RTL_R8(tp, reg + i); +} + +struct rtl_cond { + bool (*check)(struct rtl8169_private *); + const char *msg; +}; + +static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c, + unsigned long usecs, int n, bool high) +{ + int i; + + for (i = 0; i < n; i++) { + if (c->check(tp) == high) + return true; + fsleep(usecs); + } + + if (net_ratelimit()) + netdev_err(tp->dev, "%s == %d (loop: %d, delay: %lu).\n", + c->msg, !high, n, usecs); + return false; +} + +static bool rtl_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned long d, int n) +{ + return rtl_loop_wait(tp, c, d, n, true); +} + +static 
bool rtl_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned long d, int n) +{ + return rtl_loop_wait(tp, c, d, n, false); +} + +#define DECLARE_RTL_COND(name) \ +static bool name ## _check(struct rtl8169_private *); \ + \ +static const struct rtl_cond name = { \ + .check = name ## _check, \ + .msg = #name \ +}; \ + \ +static bool name ## _check(struct rtl8169_private *tp) + +static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type) +{ + /* based on RTL8168FP_OOBMAC_BASE in vendor driver */ + if (type == ERIAR_OOB && + (tp->mac_version == RTL_GIGA_MAC_VER_52 || + tp->mac_version == RTL_GIGA_MAC_VER_53)) + *cmd |= 0xf70 << 18; +} + +DECLARE_RTL_COND(rtl_eriar_cond) +{ + return RTL_R32(tp, ERIAR) & ERIAR_FLAG; +} + +static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val, int type) +{ + u32 cmd = ERIAR_WRITE_CMD | type | mask | addr; + + if (WARN(addr & 3 || !mask, "addr: 0x%x, mask: 0x%08x\n", addr, mask)) + return; + + RTL_W32(tp, ERIDR, val); + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); + + rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); +} + +static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val) +{ + _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC); +} + +static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type) +{ + u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr; + + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); + + return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? + RTL_R32(tp, ERIDR) : ~0; +} + +static u32 rtl_eri_read(struct rtl8169_private *tp, int addr) +{ + return _rtl_eri_read(tp, addr, ERIAR_EXGMAC); +} + +static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m) +{ + u32 val = rtl_eri_read(tp, addr); + + rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p); +} + +static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p) +{ + rtl_w0w1_eri(tp, addr, p, 0); +} + +static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m) +{ + rtl_w0w1_eri(tp, addr, 0, m); +} + +static bool rtl_ocp_reg_failure(u32 reg) +{ + return WARN_ONCE(reg & 0xffff0001, "Invalid ocp reg %x!\n", reg); +} + +DECLARE_RTL_COND(rtl_ocp_gphy_cond) +{ + return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG; +} + +static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(reg)) + return; + + RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); + + rtl_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10); +} + +static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(reg)) + return 0; + + RTL_W32(tp, GPHY_OCP, reg << 15); + + return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ? 
+ (RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT; +} + +static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(reg)) + return; + + RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data); +} + +static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(reg)) + return 0; + + RTL_W32(tp, OCPDR, reg << 15); + + return RTL_R32(tp, OCPDR); +} + +static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask, + u16 set) +{ + u16 data = r8168_mac_ocp_read(tp, reg); + + r8168_mac_ocp_write(tp, reg, (data & ~mask) | set); +} + +/* Work around a hw issue with RTL8168g PHY, the quirk disables + * PHY MCU interrupts before PHY power-down. + */ +static void rtl8168g_phy_suspend_quirk(struct rtl8169_private *tp, int value) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_49: + if (value & BMCR_RESET || !(value & BMCR_PDOWN)) + rtl_eri_set_bits(tp, 0x1a8, 0xfc000000); + else + rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000); + break; + default: + break; + } +}; + +static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE; + return; + } + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + if (tp->ocp_base == OCP_STD_PHY_BASE && reg == MII_BMCR) + rtl8168g_phy_suspend_quirk(tp, value); + + r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value); +} + +static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) +{ + if (reg == 0x1f) + return tp->ocp_base == OCP_STD_PHY_BASE ? 0 : tp->ocp_base >> 4; + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2); +} + +static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value << 4; + return; + } + + r8168_mac_ocp_write(tp, tp->ocp_base + reg, value); +} + +static int mac_mcu_read(struct rtl8169_private *tp, int reg) +{ + return r8168_mac_ocp_read(tp, tp->ocp_base + reg); +} + +DECLARE_RTL_COND(rtl_phyar_cond) +{ + return RTL_R32(tp, PHYAR) & 0x80000000; +} + +static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); + + rtl_loop_wait_low(tp, &rtl_phyar_cond, 25, 20); + /* + * According to hardware specs a 20us delay is required after write + * complete indication, but before sending next command. + */ + udelay(20); +} + +static int r8169_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16); + + value = rtl_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ? + RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT; + + /* + * According to hardware specs a 20us delay is required after read + * complete indication, but before sending next command. 
+ */ + udelay(20); + + return value; +} + +DECLARE_RTL_COND(rtl_ocpar_cond) +{ + return RTL_R32(tp, OCPAR) & OCPAR_FLAG; +} + +static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) +{ + RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); + RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); +} + +static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_1_mdio_access(tp, reg, + OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK)); +} + +static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) +{ + r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); + + mdelay(1); + RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? + RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT; +} + +#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 + +static void r8168dp_2_mdio_start(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_stop(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_2_mdio_start(tp); + + r8169_mdio_write(tp, reg, value); + + r8168dp_2_mdio_stop(tp); +} + +static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + /* Work around issue with chip reporting wrong PHY ID */ + if (reg == MII_PHYSID2) + return 0xc912; + + r8168dp_2_mdio_start(tp); + + value = r8169_mdio_read(tp, reg); + + r8168dp_2_mdio_stop(tp); + + return value; +} + +static void rtl_writephy(struct rtl8169_private *tp, int location, int val) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + r8168dp_1_mdio_write(tp, location, val); + break; + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + r8168dp_2_mdio_write(tp, location, val); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + r8168g_mdio_write(tp, location, val); + break; + default: + r8169_mdio_write(tp, location, val); + break; + } +} + +static int rtl_readphy(struct rtl8169_private *tp, int location) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + return r8168dp_1_mdio_read(tp, location); + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_2_mdio_read(tp, location); + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + return r8168g_mdio_read(tp, location); + default: + return r8169_mdio_read(tp, location); + } +} + +DECLARE_RTL_COND(rtl_ephyar_cond) +{ + return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; +} + +static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) +{ + RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + rtl_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100); + + udelay(10); +} + +static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + return rtl_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ? + RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0; +} + +static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg) +{ + RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff)); + return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? 
+ RTL_R32(tp, OCPDR) : ~0; +} + +static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u16 reg) +{ + return _rtl_eri_read(tp, reg, ERIAR_OOB); +} + +static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + RTL_W32(tp, OCPDR, data); + RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + rtl_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); +} + +static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + _rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, + data, ERIAR_OOB); +} + +static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd) +{ + rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd); + + r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001); +} + +#define OOB_CMD_RESET 0x00 +#define OOB_CMD_DRIVER_START 0x05 +#define OOB_CMD_DRIVER_STOP 0x06 + +static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) +{ + return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10; +} + +DECLARE_RTL_COND(rtl_dp_ocp_read_cond) +{ + u16 reg; + + reg = rtl8168_get_ocp_reg(tp); + + return r8168dp_ocp_read(tp, reg) & 0x00000800; +} + +DECLARE_RTL_COND(rtl_ep_ocp_read_cond) +{ + return r8168ep_ocp_read(tp, 0x124) & 0x00000001; +} + +DECLARE_RTL_COND(rtl_ocp_tx_cond) +{ + return RTL_R8(tp, IBISR0) & 0x20; +} + +static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) +{ + RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01); + rtl_loop_wait_high(tp, &rtl_ocp_tx_cond, 50000, 2000); + RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20); + RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01); +} + +static void rtl8168dp_driver_start(struct rtl8169_private *tp) +{ + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START); + rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10); +} + +static void rtl8168ep_driver_start(struct rtl8169_private *tp) +{ + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); + rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10); +} + +static void rtl8168_driver_start(struct rtl8169_private *tp) +{ + if (tp->dash_type == RTL_DASH_DP) + rtl8168dp_driver_start(tp); + else + rtl8168ep_driver_start(tp); +} + +static void rtl8168dp_driver_stop(struct rtl8169_private *tp) +{ + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP); + rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10); +} + +static void rtl8168ep_driver_stop(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); + rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10); +} + +static void rtl8168_driver_stop(struct rtl8169_private *tp) +{ + if (tp->dash_type == RTL_DASH_DP) + rtl8168dp_driver_stop(tp); + else + rtl8168ep_driver_stop(tp); +} + +static bool r8168dp_check_dash(struct rtl8169_private *tp) +{ + u16 reg = rtl8168_get_ocp_reg(tp); + + return r8168dp_ocp_read(tp, reg) & BIT(15); +} + +static bool r8168ep_check_dash(struct rtl8169_private *tp) +{ + return r8168ep_ocp_read(tp, 0x128) & BIT(0); +} + +static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE; + case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_53: + return r8168ep_check_dash(tp) ? 
RTL_DASH_EP : RTL_DASH_NONE; + default: + return RTL_DASH_NONE; + } +} + +static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: + if (enable) + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN); + else + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | D3_NO_PLL_DOWN); + break; + default: + break; + } +} + +static void rtl_reset_packet_filter(struct rtl8169_private *tp) +{ + rtl_eri_clear_bits(tp, 0xdc, BIT(0)); + rtl_eri_set_bits(tp, 0xdc, BIT(0)); +} + +DECLARE_RTL_COND(rtl_efusear_cond) +{ + return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG; +} + +u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + return rtl_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ? + RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0; +} + +static u32 rtl_get_events(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + return RTL_R32(tp, IntrStatus_8125); + else + return RTL_R16(tp, IntrStatus); +} + +static void rtl_ack_events(struct rtl8169_private *tp, u32 bits) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrStatus_8125, bits); + else + RTL_W16(tp, IntrStatus, bits); +} + +static void rtl_irq_disable(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, 0); + else + RTL_W16(tp, IntrMask, 0); +} + +static void rtl_irq_enable(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, tp->irq_mask); + else + RTL_W16(tp, IntrMask, tp->irq_mask); +} + +static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) +{ + rtl_irq_disable(tp); + rtl_ack_events(tp, 0xffffffff); + rtl_pci_commit(tp); +} + +static void rtl_link_chg_patch(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + + if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else if (phydev->speed == SPEED_100) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); + } + rtl_reset_packet_filter(tp); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); + } + } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { + if (phydev->speed == SPEED_10) { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a); + } else { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); + } + } +} + +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + wol->supported = WAKE_ANY; + wol->wolopts = tp->saved_wolopts; +} + +static void __rtl8169_set_wol(struct rtl8169_private 
*tp, u32 wolopts) +{ + static const struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_PHY, Config3, LinkUp }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake }, + { WAKE_MAGIC, Config3, MagicPacket } + }; + unsigned int i, tmp = ARRAY_SIZE(cfg); + u8 options; + + rtl_unlock_config_regs(tp); + + if (rtl_is_8168evl_up(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2); + else + rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2); + } else if (rtl_is_8125(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0)); + else + r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0); + } + + for (i = 0; i < tmp; i++) { + options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(tp, cfg[i].reg, options); + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + options = RTL_R8(tp, Config1) & ~PMEnable; + if (wolopts) + options |= PMEnable; + RTL_W8(tp, Config1, options); + break; + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: + options = RTL_R8(tp, Config2) & ~PME_SIGNAL; + if (wolopts) + options |= PME_SIGNAL; + RTL_W8(tp, Config2, options); + break; + default: + break; + } + + rtl_lock_config_regs(tp); + + device_set_wakeup_enable(tp_to_dev(tp), wolopts); + rtl_set_d3_pll_down(tp, !wolopts); + tp->dev->wol_enabled = wolopts ? 1 : 0; +} + +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (wol->wolopts & ~WAKE_ANY) + return -EINVAL; + + tp->saved_wolopts = wol->wolopts; + __rtl8169_set_wol(tp, tp->saved_wolopts); + + return 0; +} + +static void rtl8169_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl_fw *rtl_fw = tp->rtl_fw; + + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + if (rtl_fw) + strlcpy(info->fw_version, rtl_fw->version, + sizeof(info->fw_version)); +} + +static int rtl8169_get_regs_len(struct net_device *dev) +{ + return R8169_REGS_SIZE; +} + +static netdev_features_t rtl8169_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > TD_MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + + if (dev->mtu > ETH_DATA_LEN && + tp->mac_version > RTL_GIGA_MAC_VER_06) + features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO); + + return features; +} + +static void rtl_set_rx_config_features(struct rtl8169_private *tp, + netdev_features_t features) +{ + u32 rx_config = RTL_R32(tp, RxConfig); + + if (features & NETIF_F_RXALL) + rx_config |= RX_CONFIG_ACCEPT_ERR_MASK; + else + rx_config &= ~RX_CONFIG_ACCEPT_ERR_MASK; + + if (rtl_is_8125(tp)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rx_config |= RX_VLAN_8125; + else + rx_config &= ~RX_VLAN_8125; + } + + RTL_W32(tp, RxConfig, rx_config); +} + +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_set_rx_config_features(tp, features); + + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + if (!rtl_is_8125(tp)) { + if (features & 
NETIF_F_HW_VLAN_CTAG_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + } + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_pci_commit(tp); + + return 0; +} + +static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) +{ + return (skb_vlan_tag_present(skb)) ? + TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; +} + +static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); +} + +static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 __iomem *data = tp->mmio_addr; + u32 *dw = p; + int i; + + for (i = 0; i < R8169_REGS_SIZE; i += 4) + memcpy_fromio(dw++, data++, 4); +} + +static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", +}; + +static int rtl8169_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8169_gstrings); + default: + return -EOPNOTSUPP; + } +} + +DECLARE_RTL_COND(rtl_counters_cond) +{ + return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump); +} + +static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd) +{ + u32 cmd = lower_32_bits(tp->counters_phys_addr); + + RTL_W32(tp, CounterAddrHigh, upper_32_bits(tp->counters_phys_addr)); + rtl_pci_commit(tp); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | counter_cmd); + + rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); +} + +static void rtl8169_update_counters(struct rtl8169_private *tp) +{ + u8 val = RTL_R8(tp, ChipCmd); + + /* + * Some chips are unable to dump tally counters when the receiver + * is disabled. If 0xff chip may be in a PCI power-save state. + */ + if (val & CmdRxEnb && val != 0xff) + rtl8169_do_counters(tp, CounterDump); +} + +static void rtl8169_init_counter_offsets(struct rtl8169_private *tp) +{ + struct rtl8169_counters *counters = tp->counters; + + /* + * rtl8169_init_counter_offsets is called from rtl_open. On chip + * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only + * reset by a power cycle, while the counter values collected by the + * driver are reset at every driver unload/load cycle. + * + * To make sure the HW values returned by @get_stats64 match the SW + * values, we collect the initial values at first open(*) and use them + * as offsets to normalize the values returned by @get_stats64. + * + * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one + * for the reason stated in rtl8169_update_counters; CmdRxEnb is only + * set at open time by rtl_hw_start. 
+ */ + + if (tp->tc_offset.inited) + return; + + if (tp->mac_version >= RTL_GIGA_MAC_VER_19) { + rtl8169_do_counters(tp, CounterReset); + } else { + rtl8169_update_counters(tp); + tp->tc_offset.tx_errors = counters->tx_errors; + tp->tc_offset.tx_multi_collision = counters->tx_multi_collision; + tp->tc_offset.tx_aborted = counters->tx_aborted; + tp->tc_offset.rx_missed = counters->rx_missed; + } + + tp->tc_offset.inited = true; +} + +static void rtl8169_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_counters *counters; + + counters = tp->counters; + rtl8169_update_counters(tp); + + data[0] = le64_to_cpu(counters->tx_packets); + data[1] = le64_to_cpu(counters->rx_packets); + data[2] = le64_to_cpu(counters->tx_errors); + data[3] = le32_to_cpu(counters->rx_errors); + data[4] = le16_to_cpu(counters->rx_missed); + data[5] = le16_to_cpu(counters->align_errors); + data[6] = le32_to_cpu(counters->tx_one_collision); + data[7] = le32_to_cpu(counters->tx_multi_collision); + data[8] = le64_to_cpu(counters->rx_unicast); + data[9] = le64_to_cpu(counters->rx_broadcast); + data[10] = le32_to_cpu(counters->rx_multicast); + data[11] = le16_to_cpu(counters->tx_aborted); + data[12] = le16_to_cpu(counters->tx_underun); +} + +static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings)); + break; + } +} + +/* + * Interrupt coalescing + * + * > 1 - the availability of the IntrMitigate (0xe2) register through the + * > 8169, 8168 and 810x line of chipsets + * + * 8169, 8168, and 8136(810x) serial chipsets support it. + * + * > 2 - the Tx timer unit at gigabit speed + * + * The unit of the timer depends on both the speed and the setting of CPlusCmd + * (0xe0) bit 1 and bit 0. 
+ * + * For 8169 + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 320ns 2.56us 40.96us + * 0 1 2.56us 20.48us 327.7us + * 1 0 5.12us 40.96us 655.4us + * 1 1 10.24us 81.92us 1.31ms + * + * For the other + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 5us 2.56us 40.96us + * 0 1 40us 20.48us 327.7us + * 1 0 80us 40.96us 655.4us + * 1 1 160us 81.92us 1.31ms + */ + +/* rx/tx scale factors for all CPlusCmd[0:1] cases */ +struct rtl_coalesce_info { + u32 speed; + u32 scale_nsecs[4]; +}; + +/* produce array with base delay *1, *8, *8*2, *8*2*2 */ +#define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) } + +static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = { + { SPEED_1000, COALESCE_DELAY(320) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, + { 0 }, +}; + +static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = { + { SPEED_1000, COALESCE_DELAY(5000) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, + { 0 }, +}; +#undef COALESCE_DELAY + +/* get rx/tx scale vector corresponding to current speed */ +static const struct rtl_coalesce_info * +rtl_coalesce_info(struct rtl8169_private *tp) +{ + const struct rtl_coalesce_info *ci; + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + ci = rtl_coalesce_info_8169; + else + ci = rtl_coalesce_info_8168_8136; + + /* if speed is unknown assume highest one */ + if (tp->phydev->speed == SPEED_UNKNOWN) + return ci; + + for (; ci->speed; ci++) { + if (tp->phydev->speed == ci->speed) + return ci; + } + + return ERR_PTR(-ELNRNG); +} + +static int rtl_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct rtl8169_private *tp = netdev_priv(dev); + const struct rtl_coalesce_info *ci; + u32 scale, c_us, c_fr; + u16 intrmit; + + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + + memset(ec, 0, sizeof(*ec)); + + /* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */ + ci = rtl_coalesce_info(tp); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + scale = ci->scale_nsecs[tp->cp_cmd & INTT_MASK]; + + intrmit = RTL_R16(tp, IntrMitigate); + + c_us = FIELD_GET(RTL_COALESCE_TX_USECS, intrmit); + ec->tx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000); + + c_fr = FIELD_GET(RTL_COALESCE_TX_FRAMES, intrmit); + /* ethtool_coalesce states usecs and max_frames must not both be 0 */ + ec->tx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1; + + c_us = FIELD_GET(RTL_COALESCE_RX_USECS, intrmit); + ec->rx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000); + + c_fr = FIELD_GET(RTL_COALESCE_RX_FRAMES, intrmit); + ec->rx_max_coalesced_frames = (c_us || c_fr) ? 
c_fr * 4 : 1; + + return 0; +} + +/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, usec) */ +static int rtl_coalesce_choose_scale(struct rtl8169_private *tp, u32 usec, + u16 *cp01) +{ + const struct rtl_coalesce_info *ci; + u16 i; + + ci = rtl_coalesce_info(tp); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + for (i = 0; i < 4; i++) { + if (usec <= ci->scale_nsecs[i] * RTL_COALESCE_T_MAX / 1000U) { + *cp01 = i; + return ci->scale_nsecs[i]; + } + } + + return -ERANGE; +} + +static int rtl_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 tx_fr = ec->tx_max_coalesced_frames; + u32 rx_fr = ec->rx_max_coalesced_frames; + u32 coal_usec_max, units; + u16 w = 0, cp01 = 0; + int scale; + + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + + if (rx_fr > RTL_COALESCE_FRAME_MAX || tx_fr > RTL_COALESCE_FRAME_MAX) + return -ERANGE; + + coal_usec_max = max(ec->rx_coalesce_usecs, ec->tx_coalesce_usecs); + scale = rtl_coalesce_choose_scale(tp, coal_usec_max, &cp01); + if (scale < 0) + return scale; + + /* Accept max_frames=1 we returned in rtl_get_coalesce. Accept it + * not only when usecs=0 because of e.g. the following scenario: + * + * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX) + * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1 + * - then user does `ethtool -C eth0 rx-usecs 100` + * + * Since ethtool sends to kernel whole ethtool_coalesce settings, + * if we want to ignore rx_frames then it has to be set to 0. + */ + if (rx_fr == 1) + rx_fr = 0; + if (tx_fr == 1) + tx_fr = 0; + + /* HW requires time limit to be set if frame limit is set */ + if ((tx_fr && !ec->tx_coalesce_usecs) || + (rx_fr && !ec->rx_coalesce_usecs)) + return -EINVAL; + + w |= FIELD_PREP(RTL_COALESCE_TX_FRAMES, DIV_ROUND_UP(tx_fr, 4)); + w |= FIELD_PREP(RTL_COALESCE_RX_FRAMES, DIV_ROUND_UP(rx_fr, 4)); + + units = DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000U, scale); + w |= FIELD_PREP(RTL_COALESCE_TX_USECS, units); + units = DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000U, scale); + w |= FIELD_PREP(RTL_COALESCE_RX_USECS, units); + + RTL_W16(tp, IntrMitigate, w); + + /* Meaning of PktCntrDisable bit changed from RTL8168e-vl */ + if (rtl_is_8168evl_up(tp)) { + if (!rx_fr && !tx_fr) + /* disable packet counter */ + tp->cp_cmd |= PktCntrDisable; + else + tp->cp_cmd &= ~PktCntrDisable; + } + + tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_pci_commit(tp); + + return 0; +} + +static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + + return phy_ethtool_get_eee(tp->phydev, data); +} + +static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + + ret = phy_ethtool_set_eee(tp->phydev, data); + + if (!ret) + tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN, + MDIO_AN_EEE_ADV); + return ret; +} + +static void rtl8169_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *data) +{ + data->rx_max_pending = NUM_RX_DESC; + data->rx_pending = NUM_RX_DESC; + data->tx_max_pending = NUM_TX_DESC; + data->tx_pending = NUM_TX_DESC; +} + +static void rtl8169_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *data) +{ + struct rtl8169_private *tp = 
netdev_priv(dev); + bool tx_pause, rx_pause; + + phy_get_pause(tp->phydev, &tx_pause, &rx_pause); + + data->autoneg = tp->phydev->autoneg; + data->tx_pause = tx_pause ? 1 : 0; + data->rx_pause = rx_pause ? 1 : 0; +} + +static int rtl8169_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > ETH_DATA_LEN) + return -EOPNOTSUPP; + + phy_set_asym_pause(tp->phydev, data->rx_pause, data->tx_pause); + + return 0; +} + +static const struct ethtool_ops rtl8169_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .get_drvinfo = rtl8169_get_drvinfo, + .get_regs_len = rtl8169_get_regs_len, + .get_link = ethtool_op_get_link, + .get_coalesce = rtl_get_coalesce, + .set_coalesce = rtl_set_coalesce, + .get_regs = rtl8169_get_regs, + .get_wol = rtl8169_get_wol, + .set_wol = rtl8169_set_wol, + .get_strings = rtl8169_get_strings, + .get_sset_count = rtl8169_get_sset_count, + .get_ethtool_stats = rtl8169_get_ethtool_stats, + .get_ts_info = ethtool_op_get_ts_info, + .nway_reset = phy_ethtool_nway_reset, + .get_eee = rtl8169_get_eee, + .set_eee = rtl8169_set_eee, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_ringparam = rtl8169_get_ringparam, + .get_pauseparam = rtl8169_get_pauseparam, + .set_pauseparam = rtl8169_set_pauseparam, +}; + +static void rtl_enable_eee(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + int adv; + + /* respect EEE advertisement the user may have set */ + if (tp->eee_adv >= 0) + adv = tp->eee_adv; + else + adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); + + if (adv >= 0) + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv); +} + +static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) +{ + /* + * The driver currently handles the 8168Bf and the 8168Be identically + * but they can be identified more specifically through the test below + * if needed: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * + * Same thing for the 8101Eb and the 8101Ec: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + */ + static const struct rtl_mac_info { + u16 mask; + u16 val; + enum mac_version ver; + } mac_info[] = { + /* 8125B family. */ + { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 }, + + /* 8125A family. */ + { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 }, + { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 }, + + /* RTL8117 */ + { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 }, + { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 }, + + /* 8168EP family. */ + { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 }, + { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 }, + { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 }, + + /* 8168H family. */ + { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 }, + { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 }, + + /* 8168G family. */ + { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 }, + { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 }, + { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 }, + { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 }, + + /* 8168F family. */ + { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 }, + { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 }, + { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 }, + + /* 8168E family. */ + { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 }, + { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 }, + { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 }, + + /* 8168D family. */ + { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 }, + { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 }, + + /* 8168DP family. 
*/ + /* It seems this early RTL8168dp version never made it to + * the wild. Let's see whether somebody complains, if not + * we'll remove support for this chip version completely. + * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 }, + */ + { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 }, + { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 }, + + /* 8168C family. */ + { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 }, + { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 }, + { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 }, + { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 }, + { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 }, + { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 }, + { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 }, + + /* 8168B family. */ + { 0x7cf, 0x380, RTL_GIGA_MAC_VER_12 }, + { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 }, + { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, + + /* 8101 family. */ + { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 }, + { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 }, + { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 }, + { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 }, + { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 }, + { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 }, + { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 }, + { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 }, + { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 }, + /* FIXME: where did these entries come from ? -- FR + * Not even r8101 vendor driver knows these id's, + * so let's disable detection for now. -- HK + * { 0xfc8, 0x388, RTL_GIGA_MAC_VER_13 }, + * { 0xfc8, 0x308, RTL_GIGA_MAC_VER_13 }, + */ + + /* 8110 family. */ + { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 }, + { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 }, + { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 }, + { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 }, + { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 }, + + /* Catch-all */ + { 0x000, 0x000, RTL_GIGA_MAC_NONE } + }; + const struct rtl_mac_info *p = mac_info; + enum mac_version ver; + + while ((xid & p->mask) != p->val) + p++; + ver = p->ver; + + if (ver != RTL_GIGA_MAC_NONE && !gmii) { + if (ver == RTL_GIGA_MAC_VER_42) + ver = RTL_GIGA_MAC_VER_43; + else if (ver == RTL_GIGA_MAC_VER_45) + ver = RTL_GIGA_MAC_VER_47; + else if (ver == RTL_GIGA_MAC_VER_46) + ver = RTL_GIGA_MAC_VER_48; + } + + return ver; +} + +static void rtl_release_firmware(struct rtl8169_private *tp) +{ + if (tp->rtl_fw) { + rtl_fw_release_firmware(tp->rtl_fw); + kfree(tp->rtl_fw); + tp->rtl_fw = NULL; + } +} + +void r8169_apply_firmware(struct rtl8169_private *tp) +{ + int val; + + /* TODO: release firmware if rtl_fw_write_firmware signals failure. */ + if (tp->rtl_fw) { + rtl_fw_write_firmware(tp, tp->rtl_fw); + /* At least one firmware doesn't reset tp->ocp_base. 
*/ + tp->ocp_base = OCP_STD_PHY_BASE; + + /* PHY soft reset may still be in progress */ + phy_read_poll_timeout(tp->phydev, MII_BMCR, val, + !(val & BMCR_RESET), + 50000, 600000, true); + } +} + +static void rtl8168_config_eee_mac(struct rtl8169_private *tp) +{ + /* Adjust EEE LED frequency */ + if (tp->mac_version != RTL_GIGA_MAC_VER_38) + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + + rtl_eri_set_bits(tp, 0x1b0, 0x0003); +} + +static void rtl8125a_config_eee_mac(struct rtl8169_private *tp) +{ + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); + r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1)); +} + +static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp) +{ + RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20); +} + +static void rtl8125b_config_eee_mac(struct rtl8169_private *tp) +{ + rtl8125_set_eee_txidle_timer(tp); + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); +} + +static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr) +{ + rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr)); + rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, get_unaligned_le16(addr + 4)); + rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, get_unaligned_le16(addr) << 16); + rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, get_unaligned_le32(addr + 2)); +} + +u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp) +{ + u16 data1, data2, ioffset; + + r8168_mac_ocp_write(tp, 0xdd02, 0x807d); + data1 = r8168_mac_ocp_read(tp, 0xdd02); + data2 = r8168_mac_ocp_read(tp, 0xdd00); + + ioffset = (data2 >> 1) & 0x7ff8; + ioffset |= data2 & 0x0007; + if (data1 & BIT(7)) + ioffset |= BIT(15); + + return ioffset; +} + +static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) +{ + set_bit(flag, tp->wk.flags); + schedule_work(&tp->wk.work); +} + +static void rtl8169_init_phy(struct rtl8169_private *tp) +{ + r8169_hw_phy_config(tp, tp->phydev, tp->mac_version); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); + /* set undocumented MAC Reg C+CR Offset 0x82h */ + RTL_W8(tp, 0x82, 0x01); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_05 && + tp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE && + tp->pci_dev->subsystem_device == 0xe000) + phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b); + + /* We may have called phy_speed_down before */ + phy_speed_up(tp->phydev); + + if (rtl_supports_eee(tp)) + rtl_enable_eee(tp); + + genphy_soft_reset(tp->phydev); +} + +static void rtl_rar_set(struct rtl8169_private *tp, const u8 *addr) +{ + rtl_unlock_config_regs(tp); + + RTL_W32(tp, MAC4, get_unaligned_le16(addr + 4)); + rtl_pci_commit(tp); + + RTL_W32(tp, MAC0, get_unaligned_le32(addr)); + rtl_pci_commit(tp); + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + rtl_rar_exgmac_set(tp, addr); + + rtl_lock_config_regs(tp); +} + +static int rtl_set_mac_address(struct net_device *dev, void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + ret = eth_mac_addr(dev, p); + if (ret) + return ret; + + rtl_rar_set(tp, dev->dev_addr); + + return 0; +} + +static void rtl_wol_enable_rx(struct rtl8169_private *tp) +{ + if (tp->mac_version >= RTL_GIGA_MAC_VER_25) + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | + AcceptBroadcast | AcceptMulticast | AcceptMyPhys); +} + +static void rtl_prepare_power_down(struct rtl8169_private *tp) +{ + if (tp->dash_type != RTL_DASH_NONE) + return; + + if (tp->mac_version == 
RTL_GIGA_MAC_VER_32 || + tp->mac_version == RTL_GIGA_MAC_VER_33) + rtl_ephy_write(tp, 0x19, 0xff64); + + if (device_may_wakeup(tp_to_dev(tp))) { + phy_speed_down(tp->phydev, false); + rtl_wol_enable_rx(tp); + } +} + +static void rtl_init_rxcfg(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: + RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63: + RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST); + break; + default: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); + break; + } +} + +static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) +{ + tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; +} + +static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); +} + +static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); +} + +static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); +} + +static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); +} + +static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x24); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); +} + +static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x3f); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); +} + +static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); +} + +static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); +} + +static void rtl_jumbo_config(struct rtl8169_private *tp) +{ + bool jumbo = tp->dev->mtu > ETH_DATA_LEN; + int readrq = 4096; + + rtl_unlock_config_regs(tp); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + if (jumbo) { + readrq = 512; + r8168b_1_hw_jumbo_enable(tp); + } else { + r8168b_1_hw_jumbo_disable(tp); + } + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26: + if (jumbo) { + readrq = 512; + r8168c_hw_jumbo_enable(tp); + } else { + r8168c_hw_jumbo_disable(tp); + } + break; + case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28: + if (jumbo) + r8168dp_hw_jumbo_enable(tp); + else + r8168dp_hw_jumbo_disable(tp); + break; + case RTL_GIGA_MAC_VER_31 ... 
RTL_GIGA_MAC_VER_33: + if (jumbo) + r8168e_hw_jumbo_enable(tp); + else + r8168e_hw_jumbo_disable(tp); + break; + default: + break; + } + rtl_lock_config_regs(tp); + + if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii) + pcie_set_readrq(tp->pci_dev, readrq); + + /* Chip doesn't support pause in jumbo mode */ + if (jumbo) { + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + tp->phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + tp->phydev->advertising); + phy_start_aneg(tp->phydev); + } +} + +DECLARE_RTL_COND(rtl_chipcmd_cond) +{ + return RTL_R8(tp, ChipCmd) & CmdReset; +} + +static void rtl_hw_reset(struct rtl8169_private *tp) +{ + RTL_W8(tp, ChipCmd, CmdReset); + + rtl_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); +} + +static void rtl_request_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw; + + /* firmware loaded already or no firmware available */ + if (tp->rtl_fw || !tp->fw_name) + return; + + rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); + if (!rtl_fw) + return; + + rtl_fw->phy_write = rtl_writephy; + rtl_fw->phy_read = rtl_readphy; + rtl_fw->mac_mcu_write = mac_mcu_write; + rtl_fw->mac_mcu_read = mac_mcu_read; + rtl_fw->fw_name = tp->fw_name; + rtl_fw->dev = tp_to_dev(tp); + + if (rtl_fw_request_firmware(rtl_fw)) + kfree(rtl_fw); + else + tp->rtl_fw = rtl_fw; +} + +static void rtl_rx_close(struct rtl8169_private *tp) +{ + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK); +} + +DECLARE_RTL_COND(rtl_npq_cond) +{ + return RTL_R8(tp, TxPoll) & NPQ; +} + +DECLARE_RTL_COND(rtl_txcfg_empty_cond) +{ + return RTL_R32(tp, TxConfig) & TXCFG_EMPTY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond) +{ + return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond_2) +{ + /* IntrMitigate has new functionality on RTL8125 */ + return (RTL_R16(tp, IntrMitigate) & 0x0103) == 0x0103; +} + +static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61: + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + break; + case RTL_GIGA_MAC_VER_63: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42); + break; + default: + break; + } +} + +static void rtl_enable_rxdvgate(struct rtl8169_private *tp) +{ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN); + fsleep(2000); + rtl_wait_txrx_fifo_empty(tp); +} + +static void rtl_set_tx_config_registers(struct rtl8169_private *tp) +{ + u32 val = TX_DMA_BURST << TxDMAShift | + InterFrameGap << TxInterFrameGapShift; + + if (rtl_is_8168evl_up(tp)) + val |= TXCFG_AUTO_FIFO; + + RTL_W32(tp, TxConfig, val); +} + +static void rtl_set_rx_max_size(struct rtl8169_private *tp) +{ + /* Low hurts. Let's disable the filtering. */ + RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1); +} + +static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) +{ + /* + * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh + * register to be written before TxDescAddrLow to work. + * Switching from MMIO to I/O access fixes the issue as well. 
+ */ + RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); + RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); + RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); +} + +static void rtl8169_set_magic_reg(struct rtl8169_private *tp) +{ + u32 val; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + val = 0x000fff00; + else if (tp->mac_version == RTL_GIGA_MAC_VER_06) + val = 0x00ffff00; + else + return; + + if (RTL_R8(tp, Config2) & PCI_Clock_66MHz) + val |= 0xff; + + RTL_W32(tp, 0x7c, val); +} + +static void rtl_set_rx_mode(struct net_device *dev) +{ + u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast; + /* Multicast hash filter */ + u32 mc_filter[2] = { 0xffffffff, 0xffffffff }; + struct rtl8169_private *tp = netdev_priv(dev); + u32 tmp; + + if (dev->flags & IFF_PROMISC) { + rx_mode |= AcceptAllPhys; + } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT || + dev->flags & IFF_ALLMULTI || + tp->mac_version == RTL_GIGA_MAC_VER_35) { + /* accept all multicasts */ + } else if (netdev_mc_empty(dev)) { + rx_mode &= ~AcceptMulticast; + } else { + struct netdev_hw_addr *ha; + + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + u32 bit_nr = eth_hw_addr_crc(ha) >> 26; + mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31); + } + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + tmp = mc_filter[0]; + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(tmp); + } + } + + RTL_W32(tp, MAR0 + 4, mc_filter[1]); + RTL_W32(tp, MAR0 + 0, mc_filter[0]); + + tmp = RTL_R32(tp, RxConfig); + RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_OK_MASK) | rx_mode); +} + +DECLARE_RTL_COND(rtl_csiar_cond) +{ + return RTL_R32(tp, CSIAR) & CSIAR_FLAG; +} + +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIDR, value); + RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE | func << 16); + + rtl_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 | + CSIAR_BYTE_ENABLE); + + return rtl_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? + RTL_R32(tp, CSIDR) : ~0; +} + +static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val) +{ + struct pci_dev *pdev = tp->pci_dev; + u32 csi; + + /* According to Realtek the value at config space address 0x070f + * controls the L0s/L1 entrance latency. We try standard ECAM access + * first and if it fails fall back to CSI. + * bit 0..2: L0: 0 = 1us, 1 = 2us .. 6 = 7us, 7 = 7us (no typo) + * bit 3..5: L1: 0 = 1us, 1 = 2us .. 
6 = 64us, 7 = 64us + */ + if (pdev->cfg_size > 0x070f && + pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL) + return; + + netdev_notice_once(tp->dev, + "No native access to PCI extended config space, falling back to CSI\n"); + csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; + rtl_csi_write(tp, 0x070c, csi | val << 24); +} + +static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) +{ + /* L0 7us, L1 16us */ + rtl_set_aspm_entry_latency(tp, 0x27); +} + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +static void __rtl_ephy_init(struct rtl8169_private *tp, + const struct ephy_info *e, int len) +{ + u16 w; + + while (len-- > 0) { + w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits; + rtl_ephy_write(tp, e->offset, w); + e++; + } +} + +#define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a)) + +static void rtl_disable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_enable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp) +{ + /* work around an issue when PCI reset occurs during L2/L3 state */ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23); +} + +static void rtl_enable_exit_l1(struct rtl8169_private *tp) +{ + /* Bits control which events trigger ASPM L1 exit: + * Bit 12: rxdv + * Bit 11: ltr_msg + * Bit 10: txdma_poll + * Bit 9: xadm + * Bit 8: pktavi + * Bit 7: txpla + */ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: + rtl_eri_set_bits(tp, 0xd4, 0x1f00); + break; + case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38: + rtl_eri_set_bits(tp, 0xd4, 0x0c00); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + rtl_eri_set_bits(tp, 0xd4, 0x1f80); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63: + r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80); + break; + default: + break; + } +} + +static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) +{ + /* Don't enable ASPM in the chip if OS can't control ASPM */ + if (enable && tp->aspm_manageable) { + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); + } else { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + } + + udelay(10); +} + +static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat, + u16 tx_stat, u16 rx_dyn, u16 tx_dyn) +{ + /* Usage of dynamic vs. static FIFO is controlled by bit + * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known. 
+ */ + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn); +} + +static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp, + u8 low, u8 high) +{ + /* FIFO thresholds for pause flow control */ + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high); +} + +static void rtl_hw_start_8168b(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168cp[] = { + { 0x01, 0, 0x0001 }, + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0042 }, + { 0x06, 0x0080, 0x0000 }, + { 0x07, 0, 0x2000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168cp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + /* Magic. */ + RTL_W8(tp, DBG_REG, 0x20); +} + +static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_1[] = { + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0002 }, + { 0x06, 0x0080, 0x0000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); + + rtl_ephy_init(tp, e_info_8168c_1); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_2[] = { + { 0x01, 0, 0x0001 }, + { 0x03, 0x0400, 0x0020 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168c_2); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168d(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168d_4[] = { + { 0x0b, 0x0000, 0x0048 }, + { 0x19, 0x0020, 0x0050 }, + { 0x0c, 0x0100, 0x0020 }, + { 0x10, 0x0004, 0x0000 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168d_4); + + rtl_enable_clock_request(tp); +} + +static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_1[] = { + { 0x00, 0x0200, 0x0100 }, + { 0x00, 0x0000, 0x0004 }, + { 0x06, 0x0002, 0x0001 }, + { 0x06, 0x0000, 0x0030 }, + { 0x07, 0x0000, 0x2000 }, + { 0x00, 0x0000, 0x0020 }, + { 0x03, 0x5800, 0x2000 }, + { 0x03, 0x0000, 0x0001 }, + { 0x01, 0x0800, 0x1000 }, + { 0x07, 0x0000, 0x4000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x19, 0xffff, 0xfe6c }, + { 0x0a, 0x0000, 0x0040 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_1); + + rtl_disable_clock_request(tp); + + /* Reset tx FIFO pointer */ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST); + + RTL_W8(tp, Config5, RTL_R8(tp, 
Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_2[] = { + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0004 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_2); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_eri_set_bits(tp, 0x1d0, BIT(1)); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168f(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_set_bits(tp, 0x1d0, BIT(4) | BIT(1)); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl8168_config_eee_mac(tp); +} + +static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x08, 0x0001, 0x0002 }, + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_hw_start_8168f(tp); + + rtl_ephy_init(tp, e_info_8168f_1); +} + +static void rtl_hw_start_8411(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_hw_start_8168f(tp); + rtl_pcie_state_l2l3_disable(tp); + + rtl_ephy_init(tp, e_info_8168f_1); +} + +static void rtl_hw_start_8168g(struct rtl8169_private *tp) +{ + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_1[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8000, 0x0000 } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_1); + 
rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_2[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x19, 0xffff, 0x7c00 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x0d, 0xffff, 0x1666 }, + { 0x00, 0xffff, 0x10a3 }, + { 0x06, 0xffff, 0xf050 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x4000, 0x0000 }, + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_2); +} + +static void rtl_hw_start_8411_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8411_2[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x37d0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x00, 0x0000, 0x0080 }, + { 0x06, 0x0000, 0x0010 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x0000, 0x4000 }, + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8411_2); + + /* The following Realtek-provided magic fixes an issue with the RX unit + * getting confused after the PHY having been powered-down. + */ + r8168_mac_ocp_write(tp, 0xFC28, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2A, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0000); + r8168_mac_ocp_write(tp, 0xFC30, 0x0000); + r8168_mac_ocp_write(tp, 0xFC32, 0x0000); + r8168_mac_ocp_write(tp, 0xFC34, 0x0000); + r8168_mac_ocp_write(tp, 0xFC36, 0x0000); + mdelay(3); + r8168_mac_ocp_write(tp, 0xFC26, 0x0000); + + r8168_mac_ocp_write(tp, 0xF800, 0xE008); + r8168_mac_ocp_write(tp, 0xF802, 0xE00A); + r8168_mac_ocp_write(tp, 0xF804, 0xE00C); + r8168_mac_ocp_write(tp, 0xF806, 0xE00E); + r8168_mac_ocp_write(tp, 0xF808, 0xE027); + r8168_mac_ocp_write(tp, 0xF80A, 0xE04F); + r8168_mac_ocp_write(tp, 0xF80C, 0xE05E); + r8168_mac_ocp_write(tp, 0xF80E, 0xE065); + r8168_mac_ocp_write(tp, 0xF810, 0xC602); + r8168_mac_ocp_write(tp, 0xF812, 0xBE00); + r8168_mac_ocp_write(tp, 0xF814, 0x0000); + r8168_mac_ocp_write(tp, 0xF816, 0xC502); + r8168_mac_ocp_write(tp, 0xF818, 0xBD00); + r8168_mac_ocp_write(tp, 0xF81A, 0x074C); + r8168_mac_ocp_write(tp, 0xF81C, 0xC302); + r8168_mac_ocp_write(tp, 0xF81E, 0xBB00); + r8168_mac_ocp_write(tp, 0xF820, 0x080A); + r8168_mac_ocp_write(tp, 0xF822, 0x6420); + r8168_mac_ocp_write(tp, 0xF824, 0x48C2); + r8168_mac_ocp_write(tp, 0xF826, 0x8C20); + r8168_mac_ocp_write(tp, 0xF828, 0xC516); + r8168_mac_ocp_write(tp, 0xF82A, 0x64A4); + r8168_mac_ocp_write(tp, 0xF82C, 0x49C0); + r8168_mac_ocp_write(tp, 0xF82E, 0xF009); + r8168_mac_ocp_write(tp, 0xF830, 0x74A2); + r8168_mac_ocp_write(tp, 0xF832, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF834, 0x74A0); + r8168_mac_ocp_write(tp, 0xF836, 0xC50E); + r8168_mac_ocp_write(tp, 0xF838, 0x9CA2); + r8168_mac_ocp_write(tp, 0xF83A, 0x1C11); + r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF83E, 0xE006); + r8168_mac_ocp_write(tp, 0xF840, 0x74F8); + r8168_mac_ocp_write(tp, 0xF842, 0x48C4); + r8168_mac_ocp_write(tp, 0xF844, 0x8CF8); + r8168_mac_ocp_write(tp, 0xF846, 0xC404); + r8168_mac_ocp_write(tp, 0xF848, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84A, 0xC403); + r8168_mac_ocp_write(tp, 0xF84C, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2); + r8168_mac_ocp_write(tp, 0xF850, 0x0C0A); + r8168_mac_ocp_write(tp, 0xF852, 0xE434); + r8168_mac_ocp_write(tp, 0xF854, 0xD3C0); + 
r8168_mac_ocp_write(tp, 0xF856, 0x49D9); + r8168_mac_ocp_write(tp, 0xF858, 0xF01F); + r8168_mac_ocp_write(tp, 0xF85A, 0xC526); + r8168_mac_ocp_write(tp, 0xF85C, 0x64A5); + r8168_mac_ocp_write(tp, 0xF85E, 0x1400); + r8168_mac_ocp_write(tp, 0xF860, 0xF007); + r8168_mac_ocp_write(tp, 0xF862, 0x0C01); + r8168_mac_ocp_write(tp, 0xF864, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF866, 0x1C15); + r8168_mac_ocp_write(tp, 0xF868, 0xC51B); + r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF86C, 0xE013); + r8168_mac_ocp_write(tp, 0xF86E, 0xC519); + r8168_mac_ocp_write(tp, 0xF870, 0x74A0); + r8168_mac_ocp_write(tp, 0xF872, 0x48C4); + r8168_mac_ocp_write(tp, 0xF874, 0x8CA0); + r8168_mac_ocp_write(tp, 0xF876, 0xC516); + r8168_mac_ocp_write(tp, 0xF878, 0x74A4); + r8168_mac_ocp_write(tp, 0xF87A, 0x48C8); + r8168_mac_ocp_write(tp, 0xF87C, 0x48CA); + r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4); + r8168_mac_ocp_write(tp, 0xF880, 0xC512); + r8168_mac_ocp_write(tp, 0xF882, 0x1B00); + r8168_mac_ocp_write(tp, 0xF884, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF886, 0x1B1C); + r8168_mac_ocp_write(tp, 0xF888, 0x483F); + r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2); + r8168_mac_ocp_write(tp, 0xF88C, 0x1B04); + r8168_mac_ocp_write(tp, 0xF88E, 0xC508); + r8168_mac_ocp_write(tp, 0xF890, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF892, 0xC505); + r8168_mac_ocp_write(tp, 0xF894, 0xBD00); + r8168_mac_ocp_write(tp, 0xF896, 0xC502); + r8168_mac_ocp_write(tp, 0xF898, 0xBD00); + r8168_mac_ocp_write(tp, 0xF89A, 0x0300); + r8168_mac_ocp_write(tp, 0xF89C, 0x051E); + r8168_mac_ocp_write(tp, 0xF89E, 0xE434); + r8168_mac_ocp_write(tp, 0xF8A0, 0xE018); + r8168_mac_ocp_write(tp, 0xF8A2, 0xE092); + r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20); + r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F); + r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4); + r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3); + r8168_mac_ocp_write(tp, 0xF8AE, 0xF007); + r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0); + r8168_mac_ocp_write(tp, 0xF8B2, 0xF103); + r8168_mac_ocp_write(tp, 0xF8B4, 0xC607); + r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8B8, 0xC606); + r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8BC, 0xC602); + r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C); + r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28); + r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C); + r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00); + r8168_mac_ocp_write(tp, 0xF8C8, 0xC707); + r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00); + r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2); + r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1); + r8168_mac_ocp_write(tp, 0xF8D0, 0xC502); + r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA); + r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0); + r8168_mac_ocp_write(tp, 0xF8D8, 0xC502); + r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8DC, 0x0132); + + r8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + r8168_mac_ocp_write(tp, 0xFC2A, 0x0743); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0801); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9); + r8168_mac_ocp_write(tp, 0xFC30, 0x02FD); + r8168_mac_ocp_write(tp, 0xFC32, 0x0C25); + r8168_mac_ocp_write(tp, 0xFC34, 0x00A9); + r8168_mac_ocp_write(tp, 0xFC36, 0x012D); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168h_1[] = { + { 0x1e, 0x0800, 0x0001 }, + { 0x1d, 0x0000, 0x0800 }, + { 0x05, 0xffff, 0x2089 }, + { 0x06, 0xffff, 0x5881 }, + { 
0x04, 0xffff, 0x854a }, + { 0x01, 0xffff, 0x068b } + }; + int rg_saw_cnt; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168h_1); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xdc, 0x001c); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = 16000000/rg_saw_cnt; + sw_cnt_1ms_ini &= 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008); + r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_1[] = { + { 0x00, 0xffff, 0x10ab }, + { 0x06, 0xffff, 0xf030 }, + { 0x08, 0xffff, 0x2006 }, + { 0x0d, 0xffff, 0x1666 }, + { 0x0c, 0x3ff0, 0x0000 } + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_1); + + rtl_hw_start_8168ep(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_2[] = { + { 0x00, 0xffff, 0x10a3 }, + { 0x19, 0xffff, 0xfc00 }, + { 0x1e, 0xffff, 0x20ea } + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_2); + + rtl_hw_start_8168ep(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_3[] = { + { 0x00, 0x0000, 0x0080 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + }; + + /* disable aspm and clock request before 
access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_3); + + rtl_hw_start_8168ep(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8117(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8117[] = { + { 0x19, 0x0040, 0x1100 }, + { 0x59, 0x0040, 0x1100 }, + }; + int rg_saw_cnt; + + rtl8168ep_stop_cmac(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8117); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, 0x0010); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_write(tp, 0xea80, 0x0003); + r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + /* firmware is for MAC only */ + r8169_apply_firmware(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8102e_1[] = { + { 0x01, 0, 0x6e65 }, + { 0x02, 0, 0x091f }, + { 0x03, 0, 0xc2f9 }, + { 0x06, 0, 0xafb5 }, + { 0x07, 0, 0x0e00 }, + { 0x19, 0, 0xec80 }, + { 0x01, 0, 0x2e65 }, + { 0x01, 0, 0x6e65 } + }; + u8 cfg1; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, FIX_NAK_1); + + RTL_W8(tp, Config1, + LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + cfg1 = RTL_R8(tp, Config1); + if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) + RTL_W8(tp, Config1, cfg1 & ~LEDS0); + + rtl_ephy_init(tp, e_info_8102e_1); +} + +static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8102e_2(tp); + + rtl_ephy_write(tp, 0x03, 0xc2f9); +} + +static void rtl_hw_start_8401(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8401[] = { + { 0x01, 0xffff, 0x6fe5 }, + { 0x03, 0xffff, 0x0599 }, + { 0x06, 0xffff, 0xaf25 }, + { 0x07, 0xffff, 0x8e68 }, + }; + + rtl_ephy_init(tp, e_info_8401); + RTL_W8(tp, 
Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8105e_1[] = { + { 0x07, 0, 0x4000 }, + { 0x19, 0, 0x0200 }, + { 0x19, 0, 0x0020 }, + { 0x1e, 0, 0x2000 }, + { 0x03, 0, 0x0001 }, + { 0x19, 0, 0x0100 }, + { 0x19, 0, 0x0004 }, + { 0x0a, 0, 0x0020 } + }; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + /* Disable Early Tally Counter */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + + rtl_ephy_init(tp, e_info_8105e_1); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) +{ + rtl_hw_start_8105e_1(tp); + rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000); +} + +static void rtl_hw_start_8402(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8402[] = { + { 0x19, 0xffff, 0xff64 }, + { 0x1e, 0, 0x4000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl_ephy_init(tp, e_info_8402); + + rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06); + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_w0w1_eri(tp, 0x0d4, 0x0e00, 0xff00); + + /* disable EEE */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8106(struct rtl8169_private *tp) +{ + rtl_hw_aspm_clkreq_enable(tp, false); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + + /* L0 7us, L1 32us - needed to avoid issues with link-up detection */ + rtl_set_aspm_entry_latency(tp, 0x2f); + + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); + + /* disable EEE */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + + rtl_pcie_state_l2l3_disable(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond) +{ + return r8168_mac_ocp_read(tp, 0xe00e) & BIT(13); +} + +static void rtl_hw_start_8125_common(struct rtl8169_private *tp) +{ + rtl_pcie_state_l2l3_disable(tp); + + RTL_W16(tp, 0x382, 0x221b); + RTL_W8(tp, 0x4500, 0); + RTL_W16(tp, 0x4800, 0); + + /* disable UPS */ + r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10); + + r8168_mac_ocp_write(tp, 0xc140, 0xffff); + r8168_mac_ocp_write(tp, 0xc142, 0xffff); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x03a9); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + /* disable new tx descriptor format */ + r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200); + else + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000); + else + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020); + + r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c); + 
r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033); + r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040); + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030); + r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000); + r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); + r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403); + r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068); + r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f); + + r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001); + udelay(1); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0001, 0x0000); + RTL_W16(tp, 0x1880, RTL_R16(tp, 0x1880) & ~0x0030); + + r8168_mac_ocp_write(tp, 0xe098, 0xc302); + + rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + rtl8125b_config_eee_mac(tp); + else + rtl8125a_config_eee_mac(tp); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + udelay(10); +} + +static void rtl_hw_start_8125a_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125a_1[] = { + { 0x01, 0xffff, 0xa812 }, + { 0x09, 0xffff, 0x520c }, + { 0x04, 0xffff, 0xd000 }, + { 0x0d, 0xffff, 0xf702 }, + { 0x0a, 0xffff, 0x8653 }, + { 0x06, 0xffff, 0x001e }, + { 0x08, 0xffff, 0x3595 }, + { 0x20, 0xffff, 0x9455 }, + { 0x21, 0xffff, 0x99ff }, + { 0x02, 0xffff, 0x6046 }, + { 0x29, 0xffff, 0xfe00 }, + { 0x23, 0xffff, 0xab62 }, + + { 0x41, 0xffff, 0xa80c }, + { 0x49, 0xffff, 0x520c }, + { 0x44, 0xffff, 0xd000 }, + { 0x4d, 0xffff, 0xf702 }, + { 0x4a, 0xffff, 0x8653 }, + { 0x46, 0xffff, 0x001e }, + { 0x48, 0xffff, 0x3595 }, + { 0x60, 0xffff, 0x9455 }, + { 0x61, 0xffff, 0x99ff }, + { 0x42, 0xffff, 0x6046 }, + { 0x69, 0xffff, 0xfe00 }, + { 0x63, 0xffff, 0xab62 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8125a_1); + + rtl_hw_start_8125_common(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8125a_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125a_2[] = { + { 0x04, 0xffff, 0xd000 }, + { 0x0a, 0xffff, 0x8653 }, + { 0x23, 0xffff, 0xab66 }, + { 0x20, 0xffff, 0x9455 }, + { 0x21, 0xffff, 0x99ff }, + { 0x29, 0xffff, 0xfe04 }, + + { 0x44, 0xffff, 0xd000 }, + { 0x4a, 0xffff, 0x8653 }, + { 0x63, 0xffff, 0xab66 }, + { 0x60, 0xffff, 0x9455 }, + { 0x61, 0xffff, 0x99ff }, + { 0x69, 0xffff, 0xfe04 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8125a_2); + + rtl_hw_start_8125_common(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8125b(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125b[] = { + { 0x0b, 0xffff, 0xa908 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x4b, 0xffff, 0xa908 }, + { 0x5e, 0xffff, 0x20eb }, + { 0x22, 0x0030, 0x0020 }, + { 0x62, 0x0030, 0x0020 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + rtl_hw_aspm_clkreq_enable(tp, false); + + rtl_ephy_init(tp, e_info_8125b); + rtl_hw_start_8125_common(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_config(struct rtl8169_private *tp) +{ + static const rtl_generic_fct hw_configs[] = { + [RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1, + [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3, + [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_12] = 
rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401, + [RTL_GIGA_MAC_VER_16] = NULL, + [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1, + [RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1, + [RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2, + [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_2, + [RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4, + [RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2, + [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3, + [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4, + [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1, + [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2, + [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2, + [RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402, + [RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411, + [RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106, + [RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2, + [RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1, + [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2, + [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3, + [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117, + [RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117, + [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125a_1, + [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2, + [RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b, + }; + + if (hw_configs[tp->mac_version]) + hw_configs[tp->mac_version](tp); +} + +static void rtl_hw_start_8125(struct rtl8169_private *tp) +{ + int i; + + /* disable interrupt coalescing */ + for (i = 0xa00; i < 0xb00; i += 4) + RTL_W32(tp, i, 0); + + rtl_hw_config(tp); +} + +static void rtl_hw_start_8168(struct rtl8169_private *tp) +{ + if (rtl_is_8168evl_up(tp)) + RTL_W8(tp, MaxTxPacketSize, EarlySize); + else + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + rtl_hw_config(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static void rtl_hw_start_8169(struct rtl8169_private *tp) +{ + RTL_W8(tp, EarlyTxThres, NoEarlyTx); + + tp->cp_cmd |= PCIMulRW; + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) + tp->cp_cmd |= EnAnaPLL; + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + rtl8169_set_magic_reg(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static void rtl_hw_start(struct rtl8169_private *tp) +{ + rtl_unlock_config_regs(tp); + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + rtl_hw_start_8169(tp); + else if (rtl_is_8125(tp)) + rtl_hw_start_8125(tp); + else + rtl_hw_start_8168(tp); + + rtl_enable_exit_l1(tp); + rtl_set_rx_max_size(tp); + rtl_set_rx_tx_desc_registers(tp); + rtl_lock_config_regs(tp); + + rtl_jumbo_config(tp); + + /* Initially a 10 us delay. Turned it into a PCI commit. 
- FR */ + rtl_pci_commit(tp); + + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_init_rxcfg(tp); + rtl_set_tx_config_registers(tp); + rtl_set_rx_config_features(tp, tp->dev->features); + rtl_set_rx_mode(tp->dev); + rtl_irq_enable(tp); +} + +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + dev->mtu = new_mtu; + netdev_update_features(dev); + rtl_jumbo_config(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + rtl8125_set_eee_txidle_timer(tp); + break; + default: + break; + } + + return 0; +} + +static void rtl8169_mark_to_asic(struct RxDesc *desc) +{ + u32 eor = le32_to_cpu(desc->opts1) & RingEnd; + + desc->opts2 = 0; + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + WRITE_ONCE(desc->opts1, cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE)); +} + +static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp, + struct RxDesc *desc) +{ + struct device *d = tp_to_dev(tp); + int node = dev_to_node(d); + dma_addr_t mapping; + struct page *data; + + data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE)); + if (!data) + return NULL; + + mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + netdev_err(tp->dev, "Failed to map RX DMA!\n"); + __free_pages(data, get_order(R8169_RX_BUF_SIZE)); + return NULL; + } + + desc->addr = cpu_to_le64(mapping); + rtl8169_mark_to_asic(desc); + + return data; +} + +static void rtl8169_rx_clear(struct rtl8169_private *tp) +{ + int i; + + for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) { + dma_unmap_page(tp_to_dev(tp), + le64_to_cpu(tp->RxDescArray[i].addr), + R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + __free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE)); + tp->Rx_databuff[i] = NULL; + tp->RxDescArray[i].addr = 0; + tp->RxDescArray[i].opts1 = 0; + } +} + +static int rtl8169_rx_fill(struct rtl8169_private *tp) +{ + int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + struct page *data; + + data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); + if (!data) { + rtl8169_rx_clear(tp); + return -ENOMEM; + } + tp->Rx_databuff[i] = data; + } + + /* mark as last descriptor in the ring */ + tp->RxDescArray[NUM_RX_DESC - 1].opts1 |= cpu_to_le32(RingEnd); + + return 0; +} + +static int rtl8169_init_ring(struct rtl8169_private *tp) +{ + rtl8169_init_ring_indexes(tp); + + memset(tp->tx_skb, 0, sizeof(tp->tx_skb)); + memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff)); + + return rtl8169_rx_fill(tp); +} + +static void rtl8169_unmap_tx_skb(struct rtl8169_private *tp, unsigned int entry) +{ + struct ring_info *tx_skb = tp->tx_skb + entry; + struct TxDesc *desc = tp->TxDescArray + entry; + + dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), tx_skb->len, + DMA_TO_DEVICE); + memset(desc, 0, sizeof(*desc)); + memset(tx_skb, 0, sizeof(*tx_skb)); +} + +static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, + unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; i++) { + unsigned int entry = (start + i) % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + unsigned int len = tx_skb->len; + + if (len) { + struct sk_buff *skb = tx_skb->skb; + + rtl8169_unmap_tx_skb(tp, entry); + if (skb) + dev_consume_skb_any(skb); + } + } +} + +static void rtl8169_tx_clear(struct rtl8169_private *tp) +{ + rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); + netdev_reset_queue(tp->dev); +} + +static void 
rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) +{ + napi_disable(&tp->napi); + + /* Give a racing hard_start_xmit a few cycles to complete. */ + synchronize_net(); + + /* Disable interrupts */ + rtl8169_irq_mask_and_ack(tp); + + rtl_rx_close(tp); + + if (going_down && tp->dev->wol_enabled) + goto no_reset; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000); + break; + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + rtl_enable_rxdvgate(tp); + fsleep(2000); + break; + default: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + fsleep(100); + break; + } + + rtl_hw_reset(tp); +no_reset: + rtl8169_tx_clear(tp); + rtl8169_init_ring_indexes(tp); +} + +static void rtl_reset_work(struct rtl8169_private *tp) +{ + int i; + + netif_stop_queue(tp->dev); + + rtl8169_cleanup(tp, false); + + for (i = 0; i < NUM_RX_DESC; i++) + rtl8169_mark_to_asic(tp->RxDescArray + i); + + napi_enable(&tp->napi); + rtl_hw_start(tp); +} + +static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len, + void *addr, unsigned int entry, bool desc_own) +{ + struct TxDesc *txd = tp->TxDescArray + entry; + struct device *d = tp_to_dev(tp); + dma_addr_t mapping; + u32 opts1; + int ret; + + mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); + ret = dma_mapping_error(d, mapping); + if (unlikely(ret)) { + if (net_ratelimit()) + netdev_err(tp->dev, "Failed to map TX data!\n"); + return ret; + } + + txd->addr = cpu_to_le64(mapping); + txd->opts2 = cpu_to_le32(opts[1]); + + opts1 = opts[0] | len; + if (entry == NUM_TX_DESC - 1) + opts1 |= RingEnd; + if (desc_own) + opts1 |= DescOwn; + txd->opts1 = cpu_to_le32(opts1); + + tp->tx_skb[entry].len = len; + + return 0; +} + +static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, + const u32 *opts, unsigned int entry) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int cur_frag; + + for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { + const skb_frag_t *frag = info->frags + cur_frag; + void *addr = skb_frag_address(frag); + u32 len = skb_frag_size(frag); + + entry = (entry + 1) % NUM_TX_DESC; + + if (unlikely(rtl8169_tx_map(tp, opts, len, addr, entry, true))) + goto err_out; + } + + return 0; + +err_out: + rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); + return -EIO; +} + +static bool rtl_skb_is_udp(struct sk_buff *skb) +{ + int no = skb_network_offset(skb); + struct ipv6hdr *i6h, _i6h; + struct iphdr *ih, _ih; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih); + return ih && ih->protocol == IPPROTO_UDP; + case htons(ETH_P_IPV6): + i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h); + return i6h && i6h->nexthdr == IPPROTO_UDP; + default: + return false; + } +} + +#define RTL_MIN_PATCH_LEN 47 + +/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */ +static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + unsigned int padto = 0, len = skb->len; + + if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN && + 
rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) { + unsigned int trans_data_len = skb_tail_pointer(skb) - + skb_transport_header(skb); + + if (trans_data_len >= offsetof(struct udphdr, len) && + trans_data_len < RTL_MIN_PATCH_LEN) { + u16 dest = ntohs(udp_hdr(skb)->dest); + + /* dest is a standard PTP port */ + if (dest == 319 || dest == 320) + padto = len + RTL_MIN_PATCH_LEN - trans_data_len; + } + + if (trans_data_len < sizeof(struct udphdr)) + padto = max_t(unsigned int, padto, + len + sizeof(struct udphdr) - trans_data_len); + } + + return padto; +} + +static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + unsigned int padto; + + padto = rtl8125_quirk_udp_padto(tp, skb); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_60: + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + padto = max_t(unsigned int, padto, ETH_ZLEN); + break; + default: + break; + } + + return padto; +} + +static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts) +{ + u32 mss = skb_shinfo(skb)->gso_size; + + if (mss) { + opts[0] |= TD_LSO; + opts[0] |= mss << TD0_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + + if (ip->protocol == IPPROTO_TCP) + opts[0] |= TD0_IP_CS | TD0_TCP_CS; + else if (ip->protocol == IPPROTO_UDP) + opts[0] |= TD0_IP_CS | TD0_UDP_CS; + else + WARN_ON_ONCE(1); + } +} + +static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, + struct sk_buff *skb, u32 *opts) +{ + u32 transport_offset = (u32)skb_transport_offset(skb); + struct skb_shared_info *shinfo = skb_shinfo(skb); + u32 mss = shinfo->gso_size; + + if (mss) { + if (shinfo->gso_type & SKB_GSO_TCPV4) { + opts[0] |= TD1_GTSENV4; + } else if (shinfo->gso_type & SKB_GSO_TCPV6) { + if (skb_cow_head(skb, 0)) + return false; + + tcp_v6_gso_csum_prep(skb); + opts[0] |= TD1_GTSENV6; + } else { + WARN_ON_ONCE(1); + } + + opts[0] |= transport_offset << GTTCPHO_SHIFT; + opts[1] |= mss << TD1_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ip_protocol; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + opts[1] |= TD1_IPv4_CS; + ip_protocol = ip_hdr(skb)->protocol; + break; + + case htons(ETH_P_IPV6): + opts[1] |= TD1_IPv6_CS; + ip_protocol = ipv6_hdr(skb)->nexthdr; + break; + + default: + ip_protocol = IPPROTO_RAW; + break; + } + + if (ip_protocol == IPPROTO_TCP) + opts[1] |= TD1_TCP_CS; + else if (ip_protocol == IPPROTO_UDP) + opts[1] |= TD1_UDP_CS; + else + WARN_ON_ONCE(1); + + opts[1] |= transport_offset << TCPHO_SHIFT; + } else { + unsigned int padto = rtl_quirk_packet_padto(tp, skb); + + /* skb_padto would free the skb on error */ + return !__skb_put_padto(skb, padto, false); + } + + return true; +} + +static bool rtl_tx_slots_avail(struct rtl8169_private *tp) +{ + unsigned int slots_avail = READ_ONCE(tp->dirty_tx) + NUM_TX_DESC + - READ_ONCE(tp->cur_tx); + + /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ + return slots_avail > MAX_SKB_FRAGS; +} + +/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */ +static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... 
RTL_GIGA_MAC_VER_17: + return false; + default: + return true; + } +} + +static void rtl8169_doorbell(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W16(tp, TxPoll_8125, BIT(0)); + else + RTL_W8(tp, TxPoll, NPQ); +} + +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned int frags = skb_shinfo(skb)->nr_frags; + struct rtl8169_private *tp = netdev_priv(dev); + unsigned int entry = tp->cur_tx % NUM_TX_DESC; + struct TxDesc *txd_first, *txd_last; + bool stop_queue, door_bell; + u32 opts[2]; + + if (unlikely(!rtl_tx_slots_avail(tp))) { + if (net_ratelimit()) + netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); + goto err_stop_0; + } + + opts[1] = rtl8169_tx_vlan_tag(skb); + opts[0] = 0; + + if (!rtl_chip_supports_csum_v2(tp)) + rtl8169_tso_csum_v1(skb, opts); + else if (!rtl8169_tso_csum_v2(tp, skb, opts)) + goto err_dma_0; + + if (unlikely(rtl8169_tx_map(tp, opts, skb_headlen(skb), skb->data, + entry, false))) + goto err_dma_0; + + txd_first = tp->TxDescArray + entry; + + if (frags) { + if (rtl8169_xmit_frags(tp, skb, opts, entry)) + goto err_dma_1; + entry = (entry + frags) % NUM_TX_DESC; + } + + txd_last = tp->TxDescArray + entry; + txd_last->opts1 |= cpu_to_le32(LastFrag); + tp->tx_skb[entry].skb = skb; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + + door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more()); + + txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag); + + /* rtl_tx needs to see descriptor changes before updated tp->cur_tx */ + smp_wmb(); + + WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1); + + stop_queue = !rtl_tx_slots_avail(tp); + if (unlikely(stop_queue)) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); + netif_stop_queue(dev); + /* Sync with rtl_tx: + * - publish queue status and cur_tx ring index (write barrier) + * - refresh dirty_tx ring index (read barrier). + * May the current thread have a pessimistic view of the ring + * status and forget to wake up queue, a racing rtl_tx thread + * can't. 
+ */ + smp_mb__after_atomic(); + if (rtl_tx_slots_avail(tp)) + netif_start_queue(dev); + door_bell = true; + } + + if (door_bell) + rtl8169_doorbell(tp); + + return NETDEV_TX_OK; + +err_dma_1: + rtl8169_unmap_tx_skb(tp, entry); +err_dma_0: + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + +err_stop_0: + netif_stop_queue(dev); + dev->stats.tx_dropped++; + return NETDEV_TX_BUSY; +} + +static unsigned int rtl_last_frag_len(struct sk_buff *skb) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int nr_frags = info->nr_frags; + + if (!nr_frags) + return UINT_MAX; + + return skb_frag_size(info->frags + nr_frags - 1); +} + +/* Workaround for hw issues with TSO on RTL8168evl */ +static netdev_features_t rtl8168evl_fix_tso(struct sk_buff *skb, + netdev_features_t features) +{ + /* IPv4 header has options field */ + if (vlan_get_protocol(skb) == htons(ETH_P_IP) && + ip_hdrlen(skb) > sizeof(struct iphdr)) + features &= ~NETIF_F_ALL_TSO; + + /* IPv4 TCP header has options field */ + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 && + tcp_hdrlen(skb) > sizeof(struct tcphdr)) + features &= ~NETIF_F_ALL_TSO; + + else if (rtl_last_frag_len(skb) <= 6) + features &= ~NETIF_F_ALL_TSO; + + return features; +} + +static netdev_features_t rtl8169_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + int transport_offset = skb_transport_offset(skb); + struct rtl8169_private *tp = netdev_priv(dev); + + if (skb_is_gso(skb)) { + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + features = rtl8168evl_fix_tso(skb, features); + + if (transport_offset > GTTCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_ALL_TSO; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* work around hw bug on some chip versions */ + if (skb->len < ETH_ZLEN) + features &= ~NETIF_F_CSUM_MASK; + + if (rtl_quirk_packet_padto(tp, skb)) + features &= ~NETIF_F_CSUM_MASK; + + if (transport_offset > TCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_CSUM_MASK; + } + + return vlan_features_check(skb, features); +} + +static void rtl8169_pcierr_interrupt(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + int pci_status_errs; + u16 pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + + pci_status_errs = pci_status_get_and_clear_errors(pdev); + + if (net_ratelimit()) + netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n", + pci_cmd, pci_status_errs); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, + int budget) +{ + unsigned int dirty_tx, bytes_compl = 0, pkts_compl = 0; + struct sk_buff *skb; + + dirty_tx = tp->dirty_tx; + + while (READ_ONCE(tp->cur_tx) != dirty_tx) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + u32 status; + + status = le32_to_cpu(tp->TxDescArray[entry].opts1); + if (status & DescOwn) + break; + + skb = tp->tx_skb[entry].skb; + rtl8169_unmap_tx_skb(tp, entry); + + if (skb) { + pkts_compl++; + bytes_compl += skb->len; + napi_consume_skb(skb, budget); + } + dirty_tx++; + } + + if (tp->dirty_tx != dirty_tx) { + netdev_completed_queue(dev, pkts_compl, bytes_compl); + dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl); + + /* Sync with rtl8169_start_xmit: + * - publish dirty_tx ring index (write barrier) + * - refresh cur_tx ring index and queue status (read barrier) + * May the current thread miss the stopped queue condition, + * a racing 
xmit thread can only have a right view of the + * ring status. + */ + smp_store_mb(tp->dirty_tx, dirty_tx); + if (netif_queue_stopped(dev) && rtl_tx_slots_avail(tp)) + netif_wake_queue(dev); + /* + * 8168 hack: TxPoll requests are lost when the Tx packets are + * too close. Let's kick an extra TxPoll request when a burst + * of start_xmit activity is detected (if it is not detected, + * it is slow enough). -- FR + * If skb is NULL then we come here again once a tx irq is + * triggered after the last fragment is marked transmitted. + */ + if (tp->cur_tx != dirty_tx && skb) + rtl8169_doorbell(tp); + } +} + +static inline int rtl8169_fragmented_frame(u32 status) +{ + return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); +} + +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) +{ + u32 status = opts1 & (RxProtoMask | RxCSFailMask); + + if (status == RxProtoTCP || status == RxProtoUDP) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); +} + +static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget) +{ + struct device *d = tp_to_dev(tp); + int count; + + for (count = 0; count < budget; count++, tp->cur_rx++) { + unsigned int pkt_size, entry = tp->cur_rx % NUM_RX_DESC; + struct RxDesc *desc = tp->RxDescArray + entry; + struct sk_buff *skb; + const void *rx_buf; + dma_addr_t addr; + u32 status; + + status = le32_to_cpu(desc->opts1); + if (status & DescOwn) + break; + + /* This barrier is needed to keep us from reading + * any other fields out of the Rx descriptor until + * we know the status of DescOwn + */ + dma_rmb(); + + if (unlikely(status & RxRES)) { + if (net_ratelimit()) + netdev_warn(dev, "Rx ERROR. status = %08x\n", + status); + dev->stats.rx_errors++; + if (status & (RxRWT | RxRUNT)) + dev->stats.rx_length_errors++; + if (status & RxCRC) + dev->stats.rx_crc_errors++; + + if (!(dev->features & NETIF_F_RXALL)) + goto release_descriptor; + else if (status & RxRWT || !(status & (RxRUNT | RxCRC))) + goto release_descriptor; + } + + pkt_size = status & GENMASK(13, 0); + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size -= ETH_FCS_LEN; + + /* The driver does not support incoming fragmented frames. + * They are seen as a symptom of over-mtu sized frames. 
+ */ + if (unlikely(rtl8169_fragmented_frame(status))) { + dev->stats.rx_dropped++; + dev->stats.rx_length_errors++; + goto release_descriptor; + } + + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (unlikely(!skb)) { + dev->stats.rx_dropped++; + goto release_descriptor; + } + + addr = le64_to_cpu(desc->addr); + rx_buf = page_address(tp->Rx_databuff[entry]); + + dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); + prefetch(rx_buf); + skb_copy_to_linear_data(skb, rx_buf, pkt_size); + skb->tail += pkt_size; + skb->len = pkt_size; + dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); + + rtl8169_rx_csum(skb, status); + skb->protocol = eth_type_trans(skb, dev); + + rtl8169_rx_vlan_tag(desc, skb); + + if (skb->pkt_type == PACKET_MULTICAST) + dev->stats.multicast++; + + napi_gro_receive(&tp->napi, skb); + + dev_sw_netstats_rx_add(dev, pkt_size); +release_descriptor: + rtl8169_mark_to_asic(desc); + } + + return count; +} + +static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) +{ + struct rtl8169_private *tp = dev_instance; + u32 status = rtl_get_events(tp); + + if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask)) + return IRQ_NONE; + + if (unlikely(status & SYSErr)) { + rtl8169_pcierr_interrupt(tp->dev); + goto out; + } + + if (status & LinkChg) + phy_mac_interrupt(tp->phydev); + + if (unlikely(status & RxFIFOOver && + tp->mac_version == RTL_GIGA_MAC_VER_11)) { + netif_stop_queue(tp->dev); + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); + } + + if (napi_schedule_prep(&tp->napi)) { + rtl_irq_disable(tp); + __napi_schedule(&tp->napi); + } +out: + rtl_ack_events(tp, status); + + return IRQ_HANDLED; +} + +static void rtl_task(struct work_struct *work) +{ + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, wk.work); + + rtnl_lock(); + + if (!netif_running(tp->dev) || + !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) + goto out_unlock; + + if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) { + rtl_reset_work(tp); + netif_wake_queue(tp->dev); + } +out_unlock: + rtnl_unlock(); +} + +static int rtl8169_poll(struct napi_struct *napi, int budget) +{ + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); + struct net_device *dev = tp->dev; + int work_done; + + rtl_tx(dev, tp, budget); + + work_done = rtl_rx(dev, tp, budget); + + if (work_done < budget && napi_complete_done(napi, work_done)) + rtl_irq_enable(tp); + + return work_done; +} + +static void r8169_phylink_handler(struct net_device *ndev) +{ + struct rtl8169_private *tp = netdev_priv(ndev); + + if (netif_carrier_ok(ndev)) { + rtl_link_chg_patch(tp); + pm_request_resume(&tp->pci_dev->dev); + } else { + pm_runtime_idle(&tp->pci_dev->dev); + } + + if (net_ratelimit()) + phy_print_status(tp->phydev); +} + +static int r8169_phy_connect(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + phy_interface_t phy_mode; + int ret; + + phy_mode = tp->supports_gmii ? 
PHY_INTERFACE_MODE_GMII : + PHY_INTERFACE_MODE_MII; + + ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler, + phy_mode); + if (ret) + return ret; + + if (!tp->supports_gmii) + phy_set_max_speed(phydev, SPEED_100); + + phy_attached_info(phydev); + + return 0; +} + +static void rtl8169_down(struct rtl8169_private *tp) +{ + /* Clear all task flags */ + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); + + phy_stop(tp->phydev); + + rtl8169_update_counters(tp); + + pci_clear_master(tp->pci_dev); + rtl_pci_commit(tp); + + rtl8169_cleanup(tp, true); + + rtl_prepare_power_down(tp); +} + +static void rtl8169_up(struct rtl8169_private *tp) +{ + pci_set_master(tp->pci_dev); + phy_init_hw(tp->phydev); + phy_resume(tp->phydev); + rtl8169_init_phy(tp); + napi_enable(&tp->napi); + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_reset_work(tp); + + phy_start(tp->phydev); +} + +static int rtl8169_close(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + pm_runtime_get_sync(&pdev->dev); + + netif_stop_queue(dev); + rtl8169_down(tp); + rtl8169_rx_clear(tp); + + cancel_work_sync(&tp->wk.work); + + free_irq(pci_irq_vector(pdev, 0), tp); + + phy_disconnect(tp->phydev); + + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + tp->RxDescArray = NULL; + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8169_netpoll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp); +} +#endif + +static int rtl_open(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + unsigned long irqflags; + int retval = -ENOMEM; + + pm_runtime_get_sync(&pdev->dev); + + /* + * Rx and Tx descriptors needs 256 bytes alignment. + * dma_alloc_coherent provides more. + */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto out; + + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_tx_0; + + retval = rtl8169_init_ring(tp); + if (retval < 0) + goto err_free_rx_1; + + rtl_request_firmware(tp); + + irqflags = pci_dev_msi_enabled(pdev) ? 
IRQF_NO_THREAD : IRQF_SHARED; + retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt, + irqflags, dev->name, tp); + if (retval < 0) + goto err_release_fw_2; + + retval = r8169_phy_connect(tp); + if (retval) + goto err_free_irq; + + rtl8169_up(tp); + rtl8169_init_counter_offsets(tp); + netif_start_queue(dev); +out: + pm_runtime_put_sync(&pdev->dev); + + return retval; + +err_free_irq: + free_irq(pci_irq_vector(pdev, 0), tp); +err_release_fw_2: + rtl_release_firmware(tp); + rtl8169_rx_clear(tp); +err_free_rx_1: + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; +err_free_tx_0: + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + goto out; +} + +static void +rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + struct rtl8169_counters *counters = tp->counters; + + pm_runtime_get_noresume(&pdev->dev); + + netdev_stats_to_stats64(stats, &dev->stats); + dev_fetch_sw_netstats(stats, dev->tstats); + + /* + * Fetch additional counter values missing in stats collected by driver + * from tally counters. + */ + if (pm_runtime_active(&pdev->dev)) + rtl8169_update_counters(tp); + + /* + * Subtract values fetched during initalization. + * See rtl8169_init_counter_offsets for a description why we do that. + */ + stats->tx_errors = le64_to_cpu(counters->tx_errors) - + le64_to_cpu(tp->tc_offset.tx_errors); + stats->collisions = le32_to_cpu(counters->tx_multi_collision) - + le32_to_cpu(tp->tc_offset.tx_multi_collision); + stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) - + le16_to_cpu(tp->tc_offset.tx_aborted); + stats->rx_missed_errors = le16_to_cpu(counters->rx_missed) - + le16_to_cpu(tp->tc_offset.rx_missed); + + pm_runtime_put_noidle(&pdev->dev); +} + +static void rtl8169_net_suspend(struct rtl8169_private *tp) +{ + netif_device_detach(tp->dev); + + if (netif_running(tp->dev)) + rtl8169_down(tp); +} + +#ifdef CONFIG_PM + +static int rtl8169_runtime_resume(struct device *dev) +{ + struct rtl8169_private *tp = dev_get_drvdata(dev); + + rtl_rar_set(tp, tp->dev->dev_addr); + __rtl8169_set_wol(tp, tp->saved_wolopts); + + if (tp->TxDescArray) + rtl8169_up(tp); + + netif_device_attach(tp->dev); + + return 0; +} + +static int __maybe_unused rtl8169_suspend(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + rtnl_lock(); + rtl8169_net_suspend(tp); + if (!device_may_wakeup(tp_to_dev(tp))) + clk_disable_unprepare(tp->clk); + rtnl_unlock(); + + return 0; +} + +static int __maybe_unused rtl8169_resume(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + if (!device_may_wakeup(tp_to_dev(tp))) + clk_prepare_enable(tp->clk); + + /* Reportedly at least Asus X453MA truncates packets otherwise */ + if (tp->mac_version == RTL_GIGA_MAC_VER_37) + rtl_init_rxcfg(tp); + + return rtl8169_runtime_resume(device); +} + +static int rtl8169_runtime_suspend(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + if (!tp->TxDescArray) { + netif_device_detach(tp->dev); + return 0; + } + + rtnl_lock(); + __rtl8169_set_wol(tp, WAKE_PHY); + rtl8169_net_suspend(tp); + rtnl_unlock(); + + return 0; +} + +static int rtl8169_runtime_idle(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev)) + 
pm_schedule_suspend(device, 10000); + + return -EBUSY; +} + +static const struct dev_pm_ops rtl8169_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume) + SET_RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume, + rtl8169_runtime_idle) +}; + +#endif /* CONFIG_PM */ + +static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) +{ + /* WoL fails with 8168b when the receiver is disabled. */ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + pci_clear_master(tp->pci_dev); + + RTL_W8(tp, ChipCmd, CmdRxEnb); + rtl_pci_commit(tp); + break; + default: + break; + } +} + +static void rtl_shutdown(struct pci_dev *pdev) +{ + struct rtl8169_private *tp = pci_get_drvdata(pdev); + + rtnl_lock(); + rtl8169_net_suspend(tp); + rtnl_unlock(); + + /* Restore original MAC address */ + rtl_rar_set(tp, tp->dev->perm_addr); + + if (system_state == SYSTEM_POWER_OFF) { + if (tp->saved_wolopts) + rtl_wol_shutdown_quirk(tp); + + pci_wake_from_d3(pdev, tp->saved_wolopts); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +static void rtl_remove_one(struct pci_dev *pdev) +{ + struct rtl8169_private *tp = pci_get_drvdata(pdev); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + unregister_netdev(tp->dev); + + if (tp->dash_type != RTL_DASH_NONE) + rtl8168_driver_stop(tp); + + rtl_release_firmware(tp); + + /* restore original MAC address */ + rtl_rar_set(tp, tp->dev->perm_addr); +} + +static const struct net_device_ops rtl_netdev_ops = { + .ndo_open = rtl_open, + .ndo_stop = rtl8169_close, + .ndo_get_stats64 = rtl8169_get_stats64, + .ndo_start_xmit = rtl8169_start_xmit, + .ndo_features_check = rtl8169_features_check, + .ndo_tx_timeout = rtl8169_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = rtl8169_change_mtu, + .ndo_fix_features = rtl8169_fix_features, + .ndo_set_features = rtl8169_set_features, + .ndo_set_mac_address = rtl_set_mac_address, + .ndo_eth_ioctl = phy_do_ioctl_running, + .ndo_set_rx_mode = rtl_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8169_netpoll, +#endif + +}; + +static void rtl_set_irq_mask(struct rtl8169_private *tp) +{ + tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg; + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver; + else if (tp->mac_version == RTL_GIGA_MAC_VER_11) + /* special workaround needed */ + tp->irq_mask |= RxFIFOOver; + else + tp->irq_mask |= RxOverflow; +} + +static int rtl_alloc_irq(struct rtl8169_private *tp) +{ + unsigned int flags; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + rtl_unlock_config_regs(tp); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); + rtl_lock_config_regs(tp); + fallthrough; + case RTL_GIGA_MAC_VER_07 ... 
RTL_GIGA_MAC_VER_17: + flags = PCI_IRQ_LEGACY; + break; + default: + flags = PCI_IRQ_ALL_TYPES; + break; + } + + return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); +} + +static void rtl_read_mac_address(struct rtl8169_private *tp, + u8 mac_addr[ETH_ALEN]) +{ + /* Get MAC address */ + if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) { + u32 value; + + value = rtl_eri_read(tp, 0xe0); + put_unaligned_le32(value, mac_addr); + value = rtl_eri_read(tp, 0xe4); + put_unaligned_le16(value, mac_addr + 4); + } else if (rtl_is_8125(tp)) { + rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP); + } +} + +DECLARE_RTL_COND(rtl_link_list_ready_cond) +{ + return RTL_R8(tp, MCU) & LINK_LIST_RDY; +} + +static void r8168g_wait_ll_share_fifo_ready(struct rtl8169_private *tp) +{ + rtl_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42); +} + +static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + return rtl_readphy(tp, phyreg); +} + +static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr, + int phyreg, u16 val) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + rtl_writephy(tp, phyreg, val); + + return 0; +} + +static int r8169_mdio_register(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + struct mii_bus *new_bus; + int ret; + + new_bus = devm_mdiobus_alloc(&pdev->dev); + if (!new_bus) + return -ENOMEM; + + new_bus->name = "r8169"; + new_bus->priv = tp; + new_bus->parent = &pdev->dev; + new_bus->irq[0] = PHY_MAC_INTERRUPT; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x", + pci_domain_nr(pdev->bus), pci_dev_id(pdev)); + + new_bus->read = r8169_mdio_read_reg; + new_bus->write = r8169_mdio_write_reg; + + ret = devm_mdiobus_register(&pdev->dev, new_bus); + if (ret) + return ret; + + tp->phydev = mdiobus_get_phy(new_bus, 0); + if (!tp->phydev) { + return -ENODEV; + } else if (!tp->phydev->drv) { + /* Most chip versions fail with the genphy driver. + * Therefore ensure that the dedicated PHY driver is loaded. + */ + dev_err(&pdev->dev, "no dedicated PHY driver found for PHY ID 0x%08x, maybe realtek.ko needs to be added to initramfs?\n", + tp->phydev->phy_id); + return -EUNATCH; + } + + tp->phydev->mac_managed_pm = 1; + + phy_support_asym_pause(tp->phydev); + + /* PHY will be woken up in rtl_open() */ + phy_suspend(tp->phydev); + + return 0; +} + +static void rtl_hw_init_8168g(struct rtl8169_private *tp) +{ + rtl_enable_rxdvgate(tp); + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + r8168g_wait_ll_share_fifo_ready(tp); + + r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15)); + r8168g_wait_ll_share_fifo_ready(tp); +} + +static void rtl_hw_init_8125(struct rtl8169_private *tp) +{ + rtl_enable_rxdvgate(tp); + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + r8168g_wait_ll_share_fifo_ready(tp); + + r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0); + r8168_mac_ocp_write(tp, 0xc0a6, 0x0150); + r8168_mac_ocp_write(tp, 0xc01e, 0x5555); + r8168g_wait_ll_share_fifo_ready(tp); +} + +static void rtl_hw_initialize(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_49 ... 
RTL_GIGA_MAC_VER_53: + rtl8168ep_stop_cmac(tp); + fallthrough; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: + rtl_hw_init_8168g(tp); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63: + rtl_hw_init_8125(tp); + break; + default: + break; + } +} + +static int rtl_jumbo_max(struct rtl8169_private *tp) +{ + /* Non-GBit versions don't support jumbo frames */ + if (!tp->supports_gmii) + return 0; + + switch (tp->mac_version) { + /* RTL8169 */ + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + return JUMBO_7K; + /* RTL8168b */ + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + return JUMBO_4K; + /* RTL8168c */ + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: + return JUMBO_6K; + default: + return JUMBO_9K; + } +} + +static void rtl_disable_clk(void *data) +{ + clk_disable_unprepare(data); +} + +static int rtl_get_ether_clk(struct rtl8169_private *tp) +{ + struct device *d = tp_to_dev(tp); + struct clk *clk; + int rc; + + clk = devm_clk_get(d, "ether_clk"); + if (IS_ERR(clk)) { + rc = PTR_ERR(clk); + if (rc == -ENOENT) + /* clk-core allows NULL (for suspend / resume) */ + rc = 0; + else + dev_err_probe(d, rc, "failed to get clk\n"); + } else { + tp->clk = clk; + rc = clk_prepare_enable(clk); + if (rc) + dev_err(d, "failed to enable clk: %d\n", rc); + else + rc = devm_add_action_or_reset(d, rtl_disable_clk, clk); + } + + return rc; +} + +static void rtl_init_mac_address(struct rtl8169_private *tp) +{ + struct net_device *dev = tp->dev; + u8 *mac_addr = dev->dev_addr; + int rc; + + rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr); + if (!rc) + goto done; + + rtl_read_mac_address(tp, mac_addr); + if (is_valid_ether_addr(mac_addr)) + goto done; + + rtl_read_mac_from_reg(tp, mac_addr, MAC0); + if (is_valid_ether_addr(mac_addr)) + goto done; + + eth_hw_addr_random(dev); + dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n"); +done: + rtl_rar_set(tp, mac_addr); +} + +static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct rtl8169_private *tp; + int jumbo_max, region, rc; + enum mac_version chipset; + struct net_device *dev; + u16 xid; + + dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp)); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &rtl_netdev_ops; + tp = netdev_priv(dev); + tp->dev = dev; + tp->pci_dev = pdev; + tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1; + tp->eee_adv = -1; + tp->ocp_base = OCP_STD_PHY_BASE; + + dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev, + struct pcpu_sw_netstats); + if (!dev->tstats) + return -ENOMEM; + + /* Get the *optional* external "ether_clk" used on some boards */ + rc = rtl_get_ether_clk(tp); + if (rc) + return rc; + + /* Disable ASPM L1 as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users. + */ + rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1); + tp->aspm_manageable = !rc; + + /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ + rc = pcim_enable_device(pdev); + if (rc < 0) { + dev_err(&pdev->dev, "enable failure\n"); + return rc; + } + + if (pcim_set_mwi(pdev) < 0) + dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n"); + + /* use first MMIO region */ + region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1; + if (region < 0) { + dev_err(&pdev->dev, "no MMIO resource found\n"); + return -ENODEV; + } + + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { + dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n"); + return -ENODEV; + } + + rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME); + if (rc < 0) { + dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); + return rc; + } + + tp->mmio_addr = pcim_iomap_table(pdev)[region]; + + xid = (RTL_R32(tp, TxConfig) >> 20) & 0xfcf; + + /* Identify chip attached to board */ + chipset = rtl8169_get_mac_version(xid, tp->supports_gmii); + if (chipset == RTL_GIGA_MAC_NONE) { + dev_err(&pdev->dev, "unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n", xid); + return -ENODEV; + } + + tp->mac_version = chipset; + + tp->dash_type = rtl_check_dash(tp); + + tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK; + + if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) + dev->features |= NETIF_F_HIGHDMA; + + rtl_init_rxcfg(tp); + + rtl8169_irq_mask_and_ack(tp); + + rtl_hw_initialize(tp); + + rtl_hw_reset(tp); + + rc = rtl_alloc_irq(tp); + if (rc < 0) { + dev_err(&pdev->dev, "Can't allocate interrupt\n"); + return rc; + } + + INIT_WORK(&tp->wk.work, rtl_task); + + rtl_init_mac_address(tp); + + dev->ethtool_ops = &rtl8169_ethtool_ops; + + netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT); + + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. + */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + /* Disallow toggling */ + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (rtl_chip_supports_csum_v2(tp)) + dev->hw_features |= NETIF_F_IPV6_CSUM; + + dev->features |= dev->hw_features; + + /* There has been a number of reports that using SG/TSO results in + * tx timeouts. However for a lot of people SG/TSO works fine. + * Therefore disable both features by default, but allow users to + * enable them. Use at own risk! 
+ */ + if (rtl_chip_supports_csum_v2(tp)) { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; + dev->gso_max_size = RTL_GSO_MAX_SIZE_V2; + dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2; + } else { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO; + dev->gso_max_size = RTL_GSO_MAX_SIZE_V1; + dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1; + } + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + /* configure chip for default features */ + rtl8169_set_features(dev, dev->features); + + rtl_set_d3_pll_down(tp, true); + + jumbo_max = rtl_jumbo_max(tp); + if (jumbo_max) + dev->max_mtu = jumbo_max; + + rtl_set_irq_mask(tp); + + tp->fw_name = rtl_chip_infos[chipset].fw_name; + + tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), + &tp->counters_phys_addr, + GFP_KERNEL); + if (!tp->counters) + return -ENOMEM; + + pci_set_drvdata(pdev, tp); + + rc = r8169_mdio_register(tp); + if (rc) + return rc; + + rc = register_netdev(dev); + if (rc) + return rc; + + netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n", + rtl_chip_infos[chipset].name, dev->dev_addr, xid, + pci_irq_vector(pdev, 0)); + + if (jumbo_max) + netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n", + jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ? + "ok" : "ko"); + + if (tp->dash_type != RTL_DASH_NONE) { + netdev_info(dev, "DASH enabled\n"); + rtl8168_driver_start(tp); + } + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +static struct pci_driver rtl8169_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = rtl8169_pci_tbl, + .probe = rtl_init_one, + .remove = rtl_remove_one, + .shutdown = rtl_shutdown, + .driver.pm = pm_ptr(&rtl8169_pm_ops), +}; + +module_pci_driver(rtl8169_pci_driver); diff --git a/devices/r8169/r8169_main-6.1-ethercat.c b/devices/r8169/r8169_main-6.1-ethercat.c new file mode 100644 index 00000000..82405979 --- /dev/null +++ b/devices/r8169/r8169_main-6.1-ethercat.c @@ -0,0 +1,5438 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * r8169.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "r8169-6.1-ethercat.h" +#include "r8169_firmware-6.1-ethercat.h" + + +#include "../ecdev.h" + + + +#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" +#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" +#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" +#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" +#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" +#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" +#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" +#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" +#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" +#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" +#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw" +#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw" +#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" +#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" +#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" +#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" +#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw" +#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" +#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw" +#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw" + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. */ +#define MC_FILTER_LIMIT 32 + +#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ +#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ + +#define R8169_REGS_SIZE 256 +#define R8169_RX_BUF_SIZE (SZ_16K - 1) +#define NUM_TX_DESC 256 /* Number of Tx descriptor registers */ +#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */ +#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) +#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + +#define OCP_STD_PHY_BASE 0xa400 + +#define RTL_CFG_NO_GBIT 1 + +/* write/read MMIO register */ +#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) +#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) +#define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg)) +#define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg)) +#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg)) +#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg)) + +#define JUMBO_4K (4 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) + +static const struct { + const char *name; + const char *fw_name; +} rtl_chip_infos[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_02] = {"RTL8169s" }, + [RTL_GIGA_MAC_VER_03] = {"RTL8110s" }, + [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" }, + [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" }, + [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" }, + /* PCI-E devices. 
*/ + [RTL_GIGA_MAC_VER_07] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_08] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" }, + [RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" }, + [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_14] = {"RTL8401" }, + [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1}, + [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2}, + [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1}, + [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2}, + [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3}, + [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1}, + [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2}, + [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 }, + [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 }, + [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1}, + [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2}, + [RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3}, + [RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2}, + [RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 }, + [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2}, + [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2}, + [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3}, + [RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", }, + [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3}, + /* reserve 62 for CFG_METHOD_4 in the vendor driver */ + [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2}, +}; + +static const struct pci_device_id rtl8169_pci_tbl[] = { + { PCI_VDEVICE(REALTEK, 0x2502) }, + { PCI_VDEVICE(REALTEK, 0x2600) }, + { PCI_VDEVICE(REALTEK, 0x8129) }, + { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT }, + { PCI_VDEVICE(REALTEK, 0x8161) }, + { PCI_VDEVICE(REALTEK, 0x8162) }, + { PCI_VDEVICE(REALTEK, 0x8167) }, + { PCI_VDEVICE(REALTEK, 0x8168) }, + { PCI_VDEVICE(NCUBE, 0x8168) }, + { PCI_VDEVICE(REALTEK, 0x8169) }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 }, + { PCI_VDEVICE(DLINK, 0x4300) }, + { PCI_VDEVICE(DLINK, 0x4302) }, + { PCI_VDEVICE(AT, 0xc107) }, + { PCI_VDEVICE(USR, 0x0116) }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 }, + { 0x0001, 0x8168, PCI_ANY_ID, 0x2410 }, + { PCI_VDEVICE(REALTEK, 0x8125) }, + { PCI_VDEVICE(REALTEK, 0x3000) }, + {} +}; + +enum rtl_registers { + MAC0 = 0, /* Ethernet hardware address. */ + MAC4 = 4, + MAR0 = 8, /* Multicast filter. 
*/ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3c, + IntrStatus = 0x3e, + + TxConfig = 0x40, +#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */ +#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */ + + RxConfig = 0x44, +#define RX128_INT_EN (1 << 15) /* 8111c and later */ +#define RX_MULTI_EN (1 << 14) /* 8111c only */ +#define RXCFG_FIFO_SHIFT 13 + /* No threshold before first PCI xfer */ +#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) +#define RX_EARLY_OFF (1 << 11) +#define RXCFG_DMA_SHIFT 8 + /* Unlimited maximum PCI burst. */ +#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) + + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, +#define PME_SIGNAL (1 << 5) /* 8168c and later */ + + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + PHYAR = 0x60, + PHYstatus = 0x6c, + RxMaxSize = 0xda, + CPlusCmd = 0xe0, + IntrMitigate = 0xe2, + +#define RTL_COALESCE_TX_USECS GENMASK(15, 12) +#define RTL_COALESCE_TX_FRAMES GENMASK(11, 8) +#define RTL_COALESCE_RX_USECS GENMASK(7, 4) +#define RTL_COALESCE_RX_FRAMES GENMASK(3, 0) + +#define RTL_COALESCE_T_MAX 0x0fU +#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_T_MAX * 4) + + RxDescAddrLow = 0xe4, + RxDescAddrHigh = 0xe8, + EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ + +#define NoEarlyTx 0x3f /* Max value : no early transmit. */ + + MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */ + +#define TxPacketMax (8064 >> 7) +#define EarlySize 0x27 + + FuncEvent = 0xf0, + FuncEventMask = 0xf4, + FuncPresetState = 0xf8, + IBCR0 = 0xf8, + IBCR2 = 0xf9, + IBIMR0 = 0xfa, + IBISR0 = 0xfb, + FuncForceEvent = 0xfc, +}; + +enum rtl8168_8101_registers { + CSIDR = 0x64, + CSIAR = 0x68, +#define CSIAR_FLAG 0x80000000 +#define CSIAR_WRITE_CMD 0x80000000 +#define CSIAR_BYTE_ENABLE 0x0000f000 +#define CSIAR_ADDR_MASK 0x00000fff + PMCH = 0x6f, +#define D3COLD_NO_PLL_DOWN BIT(7) +#define D3HOT_NO_PLL_DOWN BIT(6) +#define D3_NO_PLL_DOWN (BIT(7) | BIT(6)) + EPHYAR = 0x80, +#define EPHYAR_FLAG 0x80000000 +#define EPHYAR_WRITE_CMD 0x80000000 +#define EPHYAR_REG_MASK 0x1f +#define EPHYAR_REG_SHIFT 16 +#define EPHYAR_DATA_MASK 0xffff + DLLPR = 0xd0, +#define PFM_EN (1 << 6) +#define TX_10M_PS_EN (1 << 7) + DBG_REG = 0xd1, +#define FIX_NAK_1 (1 << 4) +#define FIX_NAK_2 (1 << 3) + TWSI = 0xd2, + MCU = 0xd3, +#define NOW_IS_OOB (1 << 7) +#define TX_EMPTY (1 << 5) +#define RX_EMPTY (1 << 4) +#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY) +#define EN_NDP (1 << 3) +#define EN_OOB_RESET (1 << 2) +#define LINK_LIST_RDY (1 << 1) + EFUSEAR = 0xdc, +#define EFUSEAR_FLAG 0x80000000 +#define EFUSEAR_WRITE_CMD 0x80000000 +#define EFUSEAR_READ_CMD 0x00000000 +#define EFUSEAR_REG_MASK 0x03ff +#define EFUSEAR_REG_SHIFT 8 +#define EFUSEAR_DATA_MASK 0xff + MISC_1 = 0xf2, +#define PFM_D3COLD_EN (1 << 6) +}; + +enum rtl8168_registers { + LED_FREQ = 0x1a, + EEE_LED = 0x1b, + ERIDR = 0x70, + ERIAR = 0x74, +#define ERIAR_FLAG 0x80000000 +#define ERIAR_WRITE_CMD 0x80000000 +#define ERIAR_READ_CMD 0x00000000 +#define ERIAR_ADDR_BYTE_ALIGN 4 +#define ERIAR_TYPE_SHIFT 16 +#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) +#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) +#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_OOB (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_MASK_SHIFT 12 +#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) 
+#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) + EPHY_RXER_NUM = 0x7c, + OCPDR = 0xb0, /* OCP GPHY access */ +#define OCPDR_WRITE_CMD 0x80000000 +#define OCPDR_READ_CMD 0x00000000 +#define OCPDR_REG_MASK 0x7f +#define OCPDR_GPHY_REG_SHIFT 16 +#define OCPDR_DATA_MASK 0xffff + OCPAR = 0xb4, +#define OCPAR_FLAG 0x80000000 +#define OCPAR_GPHY_WRITE_CMD 0x8000f060 +#define OCPAR_GPHY_READ_CMD 0x0000f060 + GPHY_OCP = 0xb8, + RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ + MISC = 0xf0, /* 8168e only. */ +#define TXPLA_RST (1 << 29) +#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */ +#define PWM_EN (1 << 22) +#define RXDV_GATED_EN (1 << 19) +#define EARLY_TALLY_EN (1 << 16) +}; + +enum rtl8125_registers { + IntrMask_8125 = 0x38, + IntrStatus_8125 = 0x3c, + TxPoll_8125 = 0x90, + MAC0_BKP = 0x19e0, + EEE_TXIDLE_TIMER_8125 = 0x6048, +}; + +#define RX_VLAN_INNER_8125 BIT(22) +#define RX_VLAN_OUTER_8125 BIT(23) +#define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125) + +#define RX_FETCH_DFLT_8125 (8 << 27) + +enum rtl_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxOverflow = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* TXPoll register p.5 */ + HPQ = 0x80, /* Poll cmd on the high prio queue */ + NPQ = 0x40, /* Poll cmd on the low prio queue */ + FSWInt = 0x01, /* Forced software interrupt */ + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xc0, + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, +#define RX_CONFIG_ACCEPT_ERR_MASK 0x30 + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +#define RX_CONFIG_ACCEPT_OK_MASK 0x0f +#define RX_CONFIG_ACCEPT_MASK 0x3f + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + /* Config1 register p.24 */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register p. 25 */ + ClkReqEn = (1 << 7), /* Clock Request Enable */ + MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ + PCI_Clock_66MHz = 0x01, + PCI_Clock_33MHz = 0x00, + + /* Config3 register p.25 */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Rdy_to_L23 = (1 << 1), /* L23 Enable */ + Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* 8168 only. 
Reserved in the 8168b */ + + /* Config5 register p.27 */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + Spi_en = (1 << 3), + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + ASPM_en = (1 << 0), /* ASPM enable */ + + /* CPlusCmd p.31 */ + EnableBist = (1 << 15), // 8168 8101 + Mac_dbgo_oe = (1 << 14), // 8168 8101 + EnAnaPLL = (1 << 14), // 8169 + Normal_mode = (1 << 13), // unused + Force_half_dup = (1 << 12), // 8168 8101 + Force_rxflow_en = (1 << 11), // 8168 8101 + Force_txflow_en = (1 << 10), // 8168 8101 + Cxpl_dbg_sel = (1 << 9), // 8168 8101 + ASF = (1 << 8), // 8168 8101 + PktCntrDisable = (1 << 7), // 8168 8101 + Mac_dbgo_sel = 0x001c, // 8168 + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + PCIDAC = (1 << 4), + PCIMulRW = (1 << 3), +#define INTT_MASK GENMASK(1, 0) +#define CPCMD_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK) + + /* rtl8169_PHYstatus */ + TBI_Enable = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* ResetCounterCommand */ + CounterReset = 0x1, + + /* DumpCounterCommand */ + CounterDump = 0x8, + + /* magic enable v2 */ + MagicPacket_v2 = (1 << 16), /* Wake up when receives a Magic Packet */ +}; + +enum rtl_desc_bit { + /* First doubleword. */ + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ +}; + +/* Generic case. */ +enum rtl_tx_desc_bit { + /* First doubleword. */ + TD_LSO = (1 << 27), /* Large Send Offload */ +#define TD_MSS_MAX 0x07ffu /* MSS value */ + + /* Second doubleword. */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ +}; + +/* 8169, 8168b and 810x except 8102e. */ +enum rtl_tx_desc_bit_0 { + /* First doubleword. */ +#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */ + TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */ + TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */ + TD0_IP_CS = (1 << 18), /* Calculate IP checksum */ +}; + +/* 8102e, 8168c and beyond. */ +enum rtl_tx_desc_bit_1 { + /* First doubleword. */ + TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */ + TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */ +#define GTTCPHO_SHIFT 18 +#define GTTCPHO_MAX 0x7f + + /* Second doubleword. 
*/ +#define TCPHO_SHIFT 18 +#define TCPHO_MAX 0x3ff +#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ + TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */ + TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */ + TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */ + TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */ +}; + +enum rtl_rx_desc_bit { + /* Rx private */ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 0/2 */ + +#define RxProtoUDP (PID1) +#define RxProtoTCP (PID0) +#define RxProtoIP (PID1 | PID0) +#define RxProtoMask RxProtoIP + + IPFail = (1 << 16), /* IP checksum failed */ + UDPFail = (1 << 15), /* UDP/IP checksum failed */ + TCPFail = (1 << 14), /* TCP/IP checksum failed */ + +#define RxCSFailMask (IPFail | UDPFail | TCPFail) + + RxVlanTag = (1 << 16), /* VLAN tag available */ +}; + +#define RTL_GSO_MAX_SIZE_V1 32000 +#define RTL_GSO_MAX_SEGS_V1 24 +#define RTL_GSO_MAX_SIZE_V2 64000 +#define RTL_GSO_MAX_SEGS_V2 64 + +struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; +}; + +struct rtl8169_tc_offsets { + bool inited; + __le64 tx_errors; + __le32 tx_multi_collision; + __le16 tx_aborted; + __le16 rx_missed; +}; + +enum rtl_flag { + RTL_FLAG_TASK_ENABLED = 0, + RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_MAX +}; + +enum rtl_dash_type { + RTL_DASH_NONE, + RTL_DASH_DP, + RTL_DASH_EP, +}; + +struct rtl8169_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; + struct net_device *dev; + struct phy_device *phydev; + struct napi_struct napi; + enum mac_version mac_version; + enum rtl_dash_type dash_type; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. 
*/ + u32 dirty_tx; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + u16 cp_cmd; + u32 irq_mask; + int irq; + struct clk *clk; + + struct { + DECLARE_BITMAP(flags, RTL_FLAG_MAX); + struct work_struct work; + } wk; + + unsigned supports_gmii:1; + unsigned aspm_manageable:1; + dma_addr_t counters_phys_addr; + struct rtl8169_counters *counters; + struct rtl8169_tc_offsets tc_offset; + u32 saved_wolopts; + int eee_adv; + + const char *fw_name; + struct rtl_fw *rtl_fw; + + u32 ocp_base; + + ec_device_t *ecdev; + unsigned long ec_watchdog_jiffies; +}; + +typedef void (*rtl_generic_fct)(struct rtl8169_private *tp); + +MODULE_AUTHOR("Realtek and the Linux r8169 crew "); +MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver (EtherCAT enabled)"); +MODULE_SOFTDEP("pre: realtek"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE_8168D_1); +MODULE_FIRMWARE(FIRMWARE_8168D_2); +MODULE_FIRMWARE(FIRMWARE_8168E_1); +MODULE_FIRMWARE(FIRMWARE_8168E_2); +MODULE_FIRMWARE(FIRMWARE_8168E_3); +MODULE_FIRMWARE(FIRMWARE_8105E_1); +MODULE_FIRMWARE(FIRMWARE_8168F_1); +MODULE_FIRMWARE(FIRMWARE_8168F_2); +MODULE_FIRMWARE(FIRMWARE_8402_1); +MODULE_FIRMWARE(FIRMWARE_8411_1); +MODULE_FIRMWARE(FIRMWARE_8411_2); +MODULE_FIRMWARE(FIRMWARE_8106E_1); +MODULE_FIRMWARE(FIRMWARE_8106E_2); +MODULE_FIRMWARE(FIRMWARE_8168G_2); +MODULE_FIRMWARE(FIRMWARE_8168G_3); +MODULE_FIRMWARE(FIRMWARE_8168H_2); +MODULE_FIRMWARE(FIRMWARE_8168FP_3); +MODULE_FIRMWARE(FIRMWARE_8107E_2); +MODULE_FIRMWARE(FIRMWARE_8125A_3); +MODULE_FIRMWARE(FIRMWARE_8125B_2); + +static inline struct device *tp_to_dev(struct rtl8169_private *tp) +{ + return &tp->pci_dev->dev; +} + +static void rtl_lock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Lock); +} + +static void rtl_unlock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); +} + +static void rtl_pci_commit(struct rtl8169_private *tp) +{ + /* Read an arbitrary register to commit a preceding PCI write */ + RTL_R8(tp, ChipCmd); +} + +static bool rtl_is_8125(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_61; +} + +static bool rtl_is_8168evl_up(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_39 && + tp->mac_version <= RTL_GIGA_MAC_VER_53; +} + +static bool rtl_supports_eee(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_37 && + tp->mac_version != RTL_GIGA_MAC_VER_39; +} + +static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + mac[i] = RTL_R8(tp, reg + i); +} + +struct rtl_cond { + bool (*check)(struct rtl8169_private *); + const char *msg; +}; + +static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c, + unsigned long usecs, int n, bool high) +{ + int i; + + for (i = 0; i < n; i++) { + if (c->check(tp) == high) + return true; + fsleep(usecs); + } + + if (net_ratelimit()) + netdev_err(tp->dev, "%s == %d (loop: %d, delay: %lu).\n", + c->msg, !high, n, usecs); + return false; +} + +static bool rtl_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned long d, int n) +{ + return rtl_loop_wait(tp, c, d, n, 
true); +} + +static bool rtl_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned long d, int n) +{ + return rtl_loop_wait(tp, c, d, n, false); +} + +#define DECLARE_RTL_COND(name) \ +static bool name ## _check(struct rtl8169_private *); \ + \ +static const struct rtl_cond name = { \ + .check = name ## _check, \ + .msg = #name \ +}; \ + \ +static bool name ## _check(struct rtl8169_private *tp) + +static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type) +{ + /* based on RTL8168FP_OOBMAC_BASE in vendor driver */ + if (type == ERIAR_OOB && + (tp->mac_version == RTL_GIGA_MAC_VER_52 || + tp->mac_version == RTL_GIGA_MAC_VER_53)) + *cmd |= 0xf70 << 18; +} + +DECLARE_RTL_COND(rtl_eriar_cond) +{ + return RTL_R32(tp, ERIAR) & ERIAR_FLAG; +} + +static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val, int type) +{ + u32 cmd = ERIAR_WRITE_CMD | type | mask | addr; + + if (WARN(addr & 3 || !mask, "addr: 0x%x, mask: 0x%08x\n", addr, mask)) + return; + + RTL_W32(tp, ERIDR, val); + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); + + rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); +} + +static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val) +{ + _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC); +} + +static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type) +{ + u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr; + + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); + + return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? + RTL_R32(tp, ERIDR) : ~0; +} + +static u32 rtl_eri_read(struct rtl8169_private *tp, int addr) +{ + return _rtl_eri_read(tp, addr, ERIAR_EXGMAC); +} + +static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m) +{ + u32 val = rtl_eri_read(tp, addr); + + rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p); +} + +static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p) +{ + rtl_w0w1_eri(tp, addr, p, 0); +} + +static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m) +{ + rtl_w0w1_eri(tp, addr, 0, m); +} + +static bool rtl_ocp_reg_failure(u32 reg) +{ + return WARN_ONCE(reg & 0xffff0001, "Invalid ocp reg %x!\n", reg); +} + +DECLARE_RTL_COND(rtl_ocp_gphy_cond) +{ + return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG; +} + +static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(reg)) + return; + + RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); + + rtl_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10); +} + +static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(reg)) + return 0; + + RTL_W32(tp, GPHY_OCP, reg << 15); + + return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ? 
+ (RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT; +} + +static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(reg)) + return; + + RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data); +} + +static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(reg)) + return 0; + + RTL_W32(tp, OCPDR, reg << 15); + + return RTL_R32(tp, OCPDR); +} + +static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask, + u16 set) +{ + u16 data = r8168_mac_ocp_read(tp, reg); + + r8168_mac_ocp_write(tp, reg, (data & ~mask) | set); +} + +/* Work around a hw issue with RTL8168g PHY, the quirk disables + * PHY MCU interrupts before PHY power-down. + */ +static void rtl8168g_phy_suspend_quirk(struct rtl8169_private *tp, int value) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40: + if (value & BMCR_RESET || !(value & BMCR_PDOWN)) + rtl_eri_set_bits(tp, 0x1a8, 0xfc000000); + else + rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000); + break; + default: + break; + } +}; + +static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE; + return; + } + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + if (tp->ocp_base == OCP_STD_PHY_BASE && reg == MII_BMCR) + rtl8168g_phy_suspend_quirk(tp, value); + + r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value); +} + +static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) +{ + if (reg == 0x1f) + return tp->ocp_base == OCP_STD_PHY_BASE ? 0 : tp->ocp_base >> 4; + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2); +} + +static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value << 4; + return; + } + + r8168_mac_ocp_write(tp, tp->ocp_base + reg, value); +} + +static int mac_mcu_read(struct rtl8169_private *tp, int reg) +{ + return r8168_mac_ocp_read(tp, tp->ocp_base + reg); +} + +DECLARE_RTL_COND(rtl_phyar_cond) +{ + return RTL_R32(tp, PHYAR) & 0x80000000; +} + +static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); + + rtl_loop_wait_low(tp, &rtl_phyar_cond, 25, 20); + /* + * According to hardware specs a 20us delay is required after write + * complete indication, but before sending next command. + */ + udelay(20); +} + +static int r8169_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16); + + value = rtl_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ? + RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT; + + /* + * According to hardware specs a 20us delay is required after read + * complete indication, but before sending next command. 
+ */ + udelay(20); + + return value; +} + +DECLARE_RTL_COND(rtl_ocpar_cond) +{ + return RTL_R32(tp, OCPAR) & OCPAR_FLAG; +} + +#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 + +static void r8168dp_2_mdio_start(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_stop(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_2_mdio_start(tp); + + r8169_mdio_write(tp, reg, value); + + r8168dp_2_mdio_stop(tp); +} + +static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + /* Work around issue with chip reporting wrong PHY ID */ + if (reg == MII_PHYSID2) + return 0xc912; + + r8168dp_2_mdio_start(tp); + + value = r8169_mdio_read(tp, reg); + + r8168dp_2_mdio_stop(tp); + + return value; +} + +static void rtl_writephy(struct rtl8169_private *tp, int location, int val) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + r8168dp_2_mdio_write(tp, location, val); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + r8168g_mdio_write(tp, location, val); + break; + default: + r8169_mdio_write(tp, location, val); + break; + } +} + +static int rtl_readphy(struct rtl8169_private *tp, int location) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_2_mdio_read(tp, location); + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + return r8168g_mdio_read(tp, location); + default: + return r8169_mdio_read(tp, location); + } +} + +DECLARE_RTL_COND(rtl_ephyar_cond) +{ + return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; +} + +static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) +{ + RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + rtl_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100); + + udelay(10); +} + +static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + return rtl_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ? + RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0; +} + +static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg) +{ + RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff)); + return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? + RTL_R32(tp, OCPDR) : ~0; +} + +static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u16 reg) +{ + return _rtl_eri_read(tp, reg, ERIAR_OOB); +} + +static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + RTL_W32(tp, OCPDR, data); + RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + rtl_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); +} + +static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + _rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, + data, ERIAR_OOB); +} + +static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd) +{ + rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd); + + r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001); +} + +#define OOB_CMD_RESET 0x00 +#define OOB_CMD_DRIVER_START 0x05 +#define OOB_CMD_DRIVER_STOP 0x06 + +static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) +{ + return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 
0xb8 : 0x10; +} + +DECLARE_RTL_COND(rtl_dp_ocp_read_cond) +{ + u16 reg; + + reg = rtl8168_get_ocp_reg(tp); + + return r8168dp_ocp_read(tp, reg) & 0x00000800; +} + +DECLARE_RTL_COND(rtl_ep_ocp_read_cond) +{ + return r8168ep_ocp_read(tp, 0x124) & 0x00000001; +} + +DECLARE_RTL_COND(rtl_ocp_tx_cond) +{ + return RTL_R8(tp, IBISR0) & 0x20; +} + +static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) +{ + RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01); + rtl_loop_wait_high(tp, &rtl_ocp_tx_cond, 50000, 2000); + RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20); + RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01); +} + +static void rtl8168dp_driver_start(struct rtl8169_private *tp) +{ + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START); + rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10); +} + +static void rtl8168ep_driver_start(struct rtl8169_private *tp) +{ + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); + rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10); +} + +static void rtl8168_driver_start(struct rtl8169_private *tp) +{ + if (tp->dash_type == RTL_DASH_DP) + rtl8168dp_driver_start(tp); + else + rtl8168ep_driver_start(tp); +} + +static void rtl8168dp_driver_stop(struct rtl8169_private *tp) +{ + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP); + rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10); +} + +static void rtl8168ep_driver_stop(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); + rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10); +} + +static void rtl8168_driver_stop(struct rtl8169_private *tp) +{ + if (tp->dash_type == RTL_DASH_DP) + rtl8168dp_driver_stop(tp); + else + rtl8168ep_driver_stop(tp); +} + +static bool r8168dp_check_dash(struct rtl8169_private *tp) +{ + u16 reg = rtl8168_get_ocp_reg(tp); + + return r8168dp_ocp_read(tp, reg) & BIT(15); +} + +static bool r8168ep_check_dash(struct rtl8169_private *tp) +{ + return r8168ep_ocp_read(tp, 0x128) & BIT(0); +} + +static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE; + case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53: + return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE; + default: + return RTL_DASH_NONE; + } +} + +static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: + if (enable) + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN); + else + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | D3_NO_PLL_DOWN); + break; + default: + break; + } +} + +static void rtl_reset_packet_filter(struct rtl8169_private *tp) +{ + rtl_eri_clear_bits(tp, 0xdc, BIT(0)); + rtl_eri_set_bits(tp, 0xdc, BIT(0)); +} + +DECLARE_RTL_COND(rtl_efusear_cond) +{ + return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG; +} + +u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + return rtl_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ? 
+ RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0; +} + +static u32 rtl_get_events(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + return RTL_R32(tp, IntrStatus_8125); + else + return RTL_R16(tp, IntrStatus); +} + +static void rtl_ack_events(struct rtl8169_private *tp, u32 bits) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrStatus_8125, bits); + else + RTL_W16(tp, IntrStatus, bits); +} + +static void rtl_irq_disable(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, 0); + else + RTL_W16(tp, IntrMask, 0); +} + +static void rtl_irq_enable(struct rtl8169_private *tp) +{ + if (tp->ecdev) + return; + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, tp->irq_mask); + else + RTL_W16(tp, IntrMask, tp->irq_mask); +} + +static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) +{ + rtl_irq_disable(tp); + rtl_ack_events(tp, 0xffffffff); + rtl_pci_commit(tp); +} + +static void rtl_link_chg_patch(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + + if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else if (phydev->speed == SPEED_100) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); + } + rtl_reset_packet_filter(tp); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); + } + } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { + if (phydev->speed == SPEED_10) { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a); + } else { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); + } + } +} + +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + wol->supported = WAKE_ANY; + wol->wolopts = tp->saved_wolopts; +} + +static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) +{ + static const struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_PHY, Config3, LinkUp }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake }, + { WAKE_MAGIC, Config3, MagicPacket } + }; + unsigned int i, tmp = ARRAY_SIZE(cfg); + u8 options; + + rtl_unlock_config_regs(tp); + + if (rtl_is_8168evl_up(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2); + else + rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2); + } else if (rtl_is_8125(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0)); + else + r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0); + } + + for (i = 0; i < tmp; i++) { + options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(tp, cfg[i].reg, options); + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... 
RTL_GIGA_MAC_VER_06: + options = RTL_R8(tp, Config1) & ~PMEnable; + if (wolopts) + options |= PMEnable; + RTL_W8(tp, Config1, options); + break; + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: + options = RTL_R8(tp, Config2) & ~PME_SIGNAL; + if (wolopts) + options |= PME_SIGNAL; + RTL_W8(tp, Config2, options); + break; + default: + break; + } + + rtl_lock_config_regs(tp); + + device_set_wakeup_enable(tp_to_dev(tp), wolopts); + + if (tp->dash_type == RTL_DASH_NONE) { + rtl_set_d3_pll_down(tp, !wolopts); + tp->dev->wol_enabled = wolopts ? 1 : 0; + } +} + +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (wol->wolopts & ~WAKE_ANY) + return -EINVAL; + + tp->saved_wolopts = wol->wolopts; + __rtl8169_set_wol(tp, tp->saved_wolopts); + + return 0; +} + +static void rtl8169_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl_fw *rtl_fw = tp->rtl_fw; + + strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + if (rtl_fw) + strscpy(info->fw_version, rtl_fw->version, + sizeof(info->fw_version)); +} + +static int rtl8169_get_regs_len(struct net_device *dev) +{ + return R8169_REGS_SIZE; +} + +static netdev_features_t rtl8169_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > TD_MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + + if (dev->mtu > ETH_DATA_LEN && + tp->mac_version > RTL_GIGA_MAC_VER_06) + features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO); + + return features; +} + +static void rtl_set_rx_config_features(struct rtl8169_private *tp, + netdev_features_t features) +{ + u32 rx_config = RTL_R32(tp, RxConfig); + + if (features & NETIF_F_RXALL) + rx_config |= RX_CONFIG_ACCEPT_ERR_MASK; + else + rx_config &= ~RX_CONFIG_ACCEPT_ERR_MASK; + + if (rtl_is_8125(tp)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rx_config |= RX_VLAN_8125; + else + rx_config &= ~RX_VLAN_8125; + } + + RTL_W32(tp, RxConfig, rx_config); +} + +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_set_rx_config_features(tp, features); + + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + if (!rtl_is_8125(tp)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + } + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_pci_commit(tp); + + return 0; +} + +static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) +{ + return (skb_vlan_tag_present(skb)) ? 
+ TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; +} + +static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); +} + +static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 __iomem *data = tp->mmio_addr; + u32 *dw = p; + int i; + + for (i = 0; i < R8169_REGS_SIZE; i += 4) + memcpy_fromio(dw++, data++, 4); +} + +static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", +}; + +static int rtl8169_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8169_gstrings); + default: + return -EOPNOTSUPP; + } +} + +DECLARE_RTL_COND(rtl_counters_cond) +{ + return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump); +} + +static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd) +{ + u32 cmd = lower_32_bits(tp->counters_phys_addr); + + RTL_W32(tp, CounterAddrHigh, upper_32_bits(tp->counters_phys_addr)); + rtl_pci_commit(tp); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | counter_cmd); + + rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); +} + +static void rtl8169_update_counters(struct rtl8169_private *tp) +{ + u8 val = RTL_R8(tp, ChipCmd); + + /* + * Some chips are unable to dump tally counters when the receiver + * is disabled. If 0xff chip may be in a PCI power-save state. + */ + if (val & CmdRxEnb && val != 0xff) + rtl8169_do_counters(tp, CounterDump); +} + +static void rtl8169_init_counter_offsets(struct rtl8169_private *tp) +{ + struct rtl8169_counters *counters = tp->counters; + + /* + * rtl8169_init_counter_offsets is called from rtl_open. On chip + * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only + * reset by a power cycle, while the counter values collected by the + * driver are reset at every driver unload/load cycle. + * + * To make sure the HW values returned by @get_stats64 match the SW + * values, we collect the initial values at first open(*) and use them + * as offsets to normalize the values returned by @get_stats64. + * + * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one + * for the reason stated in rtl8169_update_counters; CmdRxEnb is only + * set at open time by rtl_hw_start. 
+ */ + + if (tp->tc_offset.inited) + return; + + if (tp->mac_version >= RTL_GIGA_MAC_VER_19) { + rtl8169_do_counters(tp, CounterReset); + } else { + rtl8169_update_counters(tp); + tp->tc_offset.tx_errors = counters->tx_errors; + tp->tc_offset.tx_multi_collision = counters->tx_multi_collision; + tp->tc_offset.tx_aborted = counters->tx_aborted; + tp->tc_offset.rx_missed = counters->rx_missed; + } + + tp->tc_offset.inited = true; +} + +static void rtl8169_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_counters *counters; + + counters = tp->counters; + rtl8169_update_counters(tp); + + data[0] = le64_to_cpu(counters->tx_packets); + data[1] = le64_to_cpu(counters->rx_packets); + data[2] = le64_to_cpu(counters->tx_errors); + data[3] = le32_to_cpu(counters->rx_errors); + data[4] = le16_to_cpu(counters->rx_missed); + data[5] = le16_to_cpu(counters->align_errors); + data[6] = le32_to_cpu(counters->tx_one_collision); + data[7] = le32_to_cpu(counters->tx_multi_collision); + data[8] = le64_to_cpu(counters->rx_unicast); + data[9] = le64_to_cpu(counters->rx_broadcast); + data[10] = le32_to_cpu(counters->rx_multicast); + data[11] = le16_to_cpu(counters->tx_aborted); + data[12] = le16_to_cpu(counters->tx_underun); +} + +static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings)); + break; + } +} + +/* + * Interrupt coalescing + * + * > 1 - the availability of the IntrMitigate (0xe2) register through the + * > 8169, 8168 and 810x line of chipsets + * + * 8169, 8168, and 8136(810x) serial chipsets support it. + * + * > 2 - the Tx timer unit at gigabit speed + * + * The unit of the timer depends on both the speed and the setting of CPlusCmd + * (0xe0) bit 1 and bit 0. 
+ * + * For 8169 + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 320ns 2.56us 40.96us + * 0 1 2.56us 20.48us 327.7us + * 1 0 5.12us 40.96us 655.4us + * 1 1 10.24us 81.92us 1.31ms + * + * For the other + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 5us 2.56us 40.96us + * 0 1 40us 20.48us 327.7us + * 1 0 80us 40.96us 655.4us + * 1 1 160us 81.92us 1.31ms + */ + +/* rx/tx scale factors for all CPlusCmd[0:1] cases */ +struct rtl_coalesce_info { + u32 speed; + u32 scale_nsecs[4]; +}; + +/* produce array with base delay *1, *8, *8*2, *8*2*2 */ +#define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) } + +static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = { + { SPEED_1000, COALESCE_DELAY(320) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, + { 0 }, +}; + +static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = { + { SPEED_1000, COALESCE_DELAY(5000) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, + { 0 }, +}; +#undef COALESCE_DELAY + +/* get rx/tx scale vector corresponding to current speed */ +static const struct rtl_coalesce_info * +rtl_coalesce_info(struct rtl8169_private *tp) +{ + const struct rtl_coalesce_info *ci; + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + ci = rtl_coalesce_info_8169; + else + ci = rtl_coalesce_info_8168_8136; + + /* if speed is unknown assume highest one */ + if (tp->phydev->speed == SPEED_UNKNOWN) + return ci; + + for (; ci->speed; ci++) { + if (tp->phydev->speed == ci->speed) + return ci; + } + + return ERR_PTR(-ELNRNG); +} + +static int rtl_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct rtl8169_private *tp = netdev_priv(dev); + const struct rtl_coalesce_info *ci; + u32 scale, c_us, c_fr; + u16 intrmit; + + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + + memset(ec, 0, sizeof(*ec)); + + /* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */ + ci = rtl_coalesce_info(tp); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + scale = ci->scale_nsecs[tp->cp_cmd & INTT_MASK]; + + intrmit = RTL_R16(tp, IntrMitigate); + + c_us = FIELD_GET(RTL_COALESCE_TX_USECS, intrmit); + ec->tx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000); + + c_fr = FIELD_GET(RTL_COALESCE_TX_FRAMES, intrmit); + /* ethtool_coalesce states usecs and max_frames must not both be 0 */ + ec->tx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1; + + c_us = FIELD_GET(RTL_COALESCE_RX_USECS, intrmit); + ec->rx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000); + + c_fr = FIELD_GET(RTL_COALESCE_RX_FRAMES, intrmit); + ec->rx_max_coalesced_frames = (c_us || c_fr) ? 
c_fr * 4 : 1; + + return 0; +} + +/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, usec) */ +static int rtl_coalesce_choose_scale(struct rtl8169_private *tp, u32 usec, + u16 *cp01) +{ + const struct rtl_coalesce_info *ci; + u16 i; + + ci = rtl_coalesce_info(tp); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + for (i = 0; i < 4; i++) { + if (usec <= ci->scale_nsecs[i] * RTL_COALESCE_T_MAX / 1000U) { + *cp01 = i; + return ci->scale_nsecs[i]; + } + } + + return -ERANGE; +} + +static int rtl_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 tx_fr = ec->tx_max_coalesced_frames; + u32 rx_fr = ec->rx_max_coalesced_frames; + u32 coal_usec_max, units; + u16 w = 0, cp01 = 0; + int scale; + + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + + if (rx_fr > RTL_COALESCE_FRAME_MAX || tx_fr > RTL_COALESCE_FRAME_MAX) + return -ERANGE; + + coal_usec_max = max(ec->rx_coalesce_usecs, ec->tx_coalesce_usecs); + scale = rtl_coalesce_choose_scale(tp, coal_usec_max, &cp01); + if (scale < 0) + return scale; + + /* Accept max_frames=1 we returned in rtl_get_coalesce. Accept it + * not only when usecs=0 because of e.g. the following scenario: + * + * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX) + * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1 + * - then user does `ethtool -C eth0 rx-usecs 100` + * + * Since ethtool sends to kernel whole ethtool_coalesce settings, + * if we want to ignore rx_frames then it has to be set to 0. + */ + if (rx_fr == 1) + rx_fr = 0; + if (tx_fr == 1) + tx_fr = 0; + + /* HW requires time limit to be set if frame limit is set */ + if ((tx_fr && !ec->tx_coalesce_usecs) || + (rx_fr && !ec->rx_coalesce_usecs)) + return -EINVAL; + + w |= FIELD_PREP(RTL_COALESCE_TX_FRAMES, DIV_ROUND_UP(tx_fr, 4)); + w |= FIELD_PREP(RTL_COALESCE_RX_FRAMES, DIV_ROUND_UP(rx_fr, 4)); + + units = DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000U, scale); + w |= FIELD_PREP(RTL_COALESCE_TX_USECS, units); + units = DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000U, scale); + w |= FIELD_PREP(RTL_COALESCE_RX_USECS, units); + + RTL_W16(tp, IntrMitigate, w); + + /* Meaning of PktCntrDisable bit changed from RTL8168e-vl */ + if (rtl_is_8168evl_up(tp)) { + if (!rx_fr && !tx_fr) + /* disable packet counter */ + tp->cp_cmd |= PktCntrDisable; + else + tp->cp_cmd &= ~PktCntrDisable; + } + + tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_pci_commit(tp); + + return 0; +} + +static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + + return phy_ethtool_get_eee(tp->phydev, data); +} + +static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + + ret = phy_ethtool_set_eee(tp->phydev, data); + + if (!ret) + tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN, + MDIO_AN_EEE_ADV); + return ret; +} + +static void rtl8169_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *data, + struct kernel_ethtool_ringparam *kernel_data, + struct netlink_ext_ack *extack) +{ + data->rx_max_pending = NUM_RX_DESC; + data->rx_pending = NUM_RX_DESC; + data->tx_max_pending = NUM_TX_DESC; + data->tx_pending = NUM_TX_DESC; +} + +static void rtl8169_get_pauseparam(struct 
net_device *dev, + struct ethtool_pauseparam *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + bool tx_pause, rx_pause; + + phy_get_pause(tp->phydev, &tx_pause, &rx_pause); + + data->autoneg = tp->phydev->autoneg; + data->tx_pause = tx_pause ? 1 : 0; + data->rx_pause = rx_pause ? 1 : 0; +} + +static int rtl8169_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > ETH_DATA_LEN) + return -EOPNOTSUPP; + + phy_set_asym_pause(tp->phydev, data->rx_pause, data->tx_pause); + + return 0; +} + +static const struct ethtool_ops rtl8169_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .get_drvinfo = rtl8169_get_drvinfo, + .get_regs_len = rtl8169_get_regs_len, + .get_link = ethtool_op_get_link, + .get_coalesce = rtl_get_coalesce, + .set_coalesce = rtl_set_coalesce, + .get_regs = rtl8169_get_regs, + .get_wol = rtl8169_get_wol, + .set_wol = rtl8169_set_wol, + .get_strings = rtl8169_get_strings, + .get_sset_count = rtl8169_get_sset_count, + .get_ethtool_stats = rtl8169_get_ethtool_stats, + .get_ts_info = ethtool_op_get_ts_info, + .nway_reset = phy_ethtool_nway_reset, + .get_eee = rtl8169_get_eee, + .set_eee = rtl8169_set_eee, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_ringparam = rtl8169_get_ringparam, + .get_pauseparam = rtl8169_get_pauseparam, + .set_pauseparam = rtl8169_set_pauseparam, +}; + +static void rtl_enable_eee(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + int adv; + + /* respect EEE advertisement the user may have set */ + if (tp->eee_adv >= 0) + adv = tp->eee_adv; + else + adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); + + if (adv >= 0) + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv); +} + +static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) +{ + /* + * The driver currently handles the 8168Bf and the 8168Be identically + * but they can be identified more specifically through the test below + * if needed: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * + * Same thing for the 8101Eb and the 8101Ec: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + */ + static const struct rtl_mac_info { + u16 mask; + u16 val; + enum mac_version ver; + } mac_info[] = { + /* 8125B family. */ + { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 }, + + /* 8125A family. */ + { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61 }, + /* It seems only XID 609 made it to the mass market. + * { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 }, + * { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 }, + */ + + /* RTL8117 */ + { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 }, + { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 }, + + /* 8168EP family. */ + { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 }, + /* It seems this chip version never made it to + * the wild. Let's disable detection. + * { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 }, + * { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 }, + */ + + /* 8168H family. */ + { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 }, + /* It seems this chip version never made it to + * the wild. Let's disable detection. + * { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 }, + */ + + /* 8168G family. */ + { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 }, + { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 }, + /* It seems this chip version never made it to + * the wild. Let's disable detection. 
+ * { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 }, + */ + { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 }, + + /* 8168F family. */ + { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 }, + { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 }, + { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 }, + + /* 8168E family. */ + { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 }, + { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 }, + { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 }, + + /* 8168D family. */ + { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 }, + { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 }, + + /* 8168DP family. */ + /* It seems this early RTL8168dp version never made it to + * the wild. Support has been removed. + * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 }, + */ + { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 }, + { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 }, + + /* 8168C family. */ + { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 }, + { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 }, + { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 }, + { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 }, + { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 }, + { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 }, + { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 }, + + /* 8168B family. */ + { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 }, + { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, + + /* 8101 family. */ + { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 }, + { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 }, + { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 }, + { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 }, + { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 }, + { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10 }, + + /* 8110 family. */ + { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 }, + { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 }, + { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 }, + { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 }, + { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 }, + + /* Catch-all */ + { 0x000, 0x000, RTL_GIGA_MAC_NONE } + }; + const struct rtl_mac_info *p = mac_info; + enum mac_version ver; + + while ((xid & p->mask) != p->val) + p++; + ver = p->ver; + + if (ver != RTL_GIGA_MAC_NONE && !gmii) { + if (ver == RTL_GIGA_MAC_VER_42) + ver = RTL_GIGA_MAC_VER_43; + else if (ver == RTL_GIGA_MAC_VER_46) + ver = RTL_GIGA_MAC_VER_48; + } + + return ver; +} + +static void rtl_release_firmware(struct rtl8169_private *tp) +{ + if (tp->rtl_fw) { + rtl_fw_release_firmware(tp->rtl_fw); + kfree(tp->rtl_fw); + tp->rtl_fw = NULL; + } +} + +void r8169_apply_firmware(struct rtl8169_private *tp) +{ + int val; + + /* TODO: release firmware if rtl_fw_write_firmware signals failure. */ + if (tp->rtl_fw) { + rtl_fw_write_firmware(tp, tp->rtl_fw); + /* At least one firmware doesn't reset tp->ocp_base. 
*/ + tp->ocp_base = OCP_STD_PHY_BASE; + + /* PHY soft reset may still be in progress */ + phy_read_poll_timeout(tp->phydev, MII_BMCR, val, + !(val & BMCR_RESET), + 50000, 600000, true); + } +} + +static void rtl8168_config_eee_mac(struct rtl8169_private *tp) +{ + /* Adjust EEE LED frequency */ + if (tp->mac_version != RTL_GIGA_MAC_VER_38) + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + + rtl_eri_set_bits(tp, 0x1b0, 0x0003); +} + +static void rtl8125a_config_eee_mac(struct rtl8169_private *tp) +{ + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); + r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1)); +} + +static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp) +{ + RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20); +} + +static void rtl8125b_config_eee_mac(struct rtl8169_private *tp) +{ + rtl8125_set_eee_txidle_timer(tp); + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); +} + +static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr) +{ + rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr)); + rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, get_unaligned_le16(addr + 4)); + rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, get_unaligned_le16(addr) << 16); + rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, get_unaligned_le32(addr + 2)); +} + +u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp) +{ + u16 data1, data2, ioffset; + + r8168_mac_ocp_write(tp, 0xdd02, 0x807d); + data1 = r8168_mac_ocp_read(tp, 0xdd02); + data2 = r8168_mac_ocp_read(tp, 0xdd00); + + ioffset = (data2 >> 1) & 0x7ff8; + ioffset |= data2 & 0x0007; + if (data1 & BIT(7)) + ioffset |= BIT(15); + + return ioffset; +} + +static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) +{ + set_bit(flag, tp->wk.flags); + if (!tp->ecdev) + schedule_work(&tp->wk.work); +} + +static void rtl8169_init_phy(struct rtl8169_private *tp) +{ + r8169_hw_phy_config(tp, tp->phydev, tp->mac_version); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); + /* set undocumented MAC Reg C+CR Offset 0x82h */ + RTL_W8(tp, 0x82, 0x01); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_05 && + tp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE && + tp->pci_dev->subsystem_device == 0xe000) + phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b); + + /* We may have called phy_speed_down before */ + phy_speed_up(tp->phydev); + + if (rtl_supports_eee(tp)) + rtl_enable_eee(tp); + + genphy_soft_reset(tp->phydev); +} + +static void rtl_rar_set(struct rtl8169_private *tp, const u8 *addr) +{ + rtl_unlock_config_regs(tp); + + RTL_W32(tp, MAC4, get_unaligned_le16(addr + 4)); + rtl_pci_commit(tp); + + RTL_W32(tp, MAC0, get_unaligned_le32(addr)); + rtl_pci_commit(tp); + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + rtl_rar_exgmac_set(tp, addr); + + rtl_lock_config_regs(tp); +} + +static int rtl_set_mac_address(struct net_device *dev, void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + ret = eth_mac_addr(dev, p); + if (ret) + return ret; + + rtl_rar_set(tp, dev->dev_addr); + + return 0; +} + +static void rtl_init_rxcfg(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: + RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_34 ... 
RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63: + RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST); + break; + default: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); + break; + } +} + +static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) +{ + tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; +} + +static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); +} + +static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); +} + +static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); +} + +static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); +} + +static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x24); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); +} + +static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x3f); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); +} + +static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); +} + +static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); +} + +static void rtl_jumbo_config(struct rtl8169_private *tp) +{ + bool jumbo = tp->dev->mtu > ETH_DATA_LEN; + int readrq = 4096; + + rtl_unlock_config_regs(tp); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_17: + if (jumbo) { + readrq = 512; + r8168b_1_hw_jumbo_enable(tp); + } else { + r8168b_1_hw_jumbo_disable(tp); + } + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26: + if (jumbo) { + readrq = 512; + r8168c_hw_jumbo_enable(tp); + } else { + r8168c_hw_jumbo_disable(tp); + } + break; + case RTL_GIGA_MAC_VER_28: + if (jumbo) + r8168dp_hw_jumbo_enable(tp); + else + r8168dp_hw_jumbo_disable(tp); + break; + case RTL_GIGA_MAC_VER_31 ... 
RTL_GIGA_MAC_VER_33: + if (jumbo) + r8168e_hw_jumbo_enable(tp); + else + r8168e_hw_jumbo_disable(tp); + break; + default: + break; + } + rtl_lock_config_regs(tp); + + if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii) + pcie_set_readrq(tp->pci_dev, readrq); + + /* Chip doesn't support pause in jumbo mode */ + if (jumbo) { + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + tp->phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + tp->phydev->advertising); + phy_start_aneg(tp->phydev); + } +} + +DECLARE_RTL_COND(rtl_chipcmd_cond) +{ + return RTL_R8(tp, ChipCmd) & CmdReset; +} + +static void rtl_hw_reset(struct rtl8169_private *tp) +{ + RTL_W8(tp, ChipCmd, CmdReset); + + rtl_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); +} + +static void rtl_request_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw; + + /* firmware loaded already or no firmware available */ + if (tp->rtl_fw || !tp->fw_name) + return; + + rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); + if (!rtl_fw) + return; + + rtl_fw->phy_write = rtl_writephy; + rtl_fw->phy_read = rtl_readphy; + rtl_fw->mac_mcu_write = mac_mcu_write; + rtl_fw->mac_mcu_read = mac_mcu_read; + rtl_fw->fw_name = tp->fw_name; + rtl_fw->dev = tp_to_dev(tp); + + if (rtl_fw_request_firmware(rtl_fw)) + kfree(rtl_fw); + else + tp->rtl_fw = rtl_fw; +} + +static void rtl_rx_close(struct rtl8169_private *tp) +{ + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK); +} + +DECLARE_RTL_COND(rtl_npq_cond) +{ + return RTL_R8(tp, TxPoll) & NPQ; +} + +DECLARE_RTL_COND(rtl_txcfg_empty_cond) +{ + return RTL_R32(tp, TxConfig) & TXCFG_EMPTY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond) +{ + return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond_2) +{ + /* IntrMitigate has new functionality on RTL8125 */ + return (RTL_R16(tp, IntrMitigate) & 0x0103) == 0x0103; +} + +static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + break; + case RTL_GIGA_MAC_VER_61 ... 
RTL_GIGA_MAC_VER_61: + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + break; + case RTL_GIGA_MAC_VER_63: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42); + break; + default: + break; + } +} + +static void rtl_disable_rxdvgate(struct rtl8169_private *tp) +{ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); +} + +static void rtl_enable_rxdvgate(struct rtl8169_private *tp) +{ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN); + fsleep(2000); + rtl_wait_txrx_fifo_empty(tp); +} + +static void rtl_wol_enable_rx(struct rtl8169_private *tp) +{ + if (tp->mac_version >= RTL_GIGA_MAC_VER_25) + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | + AcceptBroadcast | AcceptMulticast | AcceptMyPhys); + + if (tp->mac_version >= RTL_GIGA_MAC_VER_40) + rtl_disable_rxdvgate(tp); +} + +static void rtl_prepare_power_down(struct rtl8169_private *tp) +{ + if (tp->dash_type != RTL_DASH_NONE) + return; + + if (tp->mac_version == RTL_GIGA_MAC_VER_32 || + tp->mac_version == RTL_GIGA_MAC_VER_33) + rtl_ephy_write(tp, 0x19, 0xff64); + + if (device_may_wakeup(tp_to_dev(tp))) { + phy_speed_down(tp->phydev, false); + rtl_wol_enable_rx(tp); + } +} + +static void rtl_set_tx_config_registers(struct rtl8169_private *tp) +{ + u32 val = TX_DMA_BURST << TxDMAShift | + InterFrameGap << TxInterFrameGapShift; + + if (rtl_is_8168evl_up(tp)) + val |= TXCFG_AUTO_FIFO; + + RTL_W32(tp, TxConfig, val); +} + +static void rtl_set_rx_max_size(struct rtl8169_private *tp) +{ + /* Low hurts. Let's disable the filtering. */ + RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1); +} + +static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) +{ + /* + * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh + * register to be written before TxDescAddrLow to work. + * Switching from MMIO to I/O access fixes the issue as well. 
+ */ + RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); + RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); + RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); +} + +static void rtl8169_set_magic_reg(struct rtl8169_private *tp) +{ + u32 val; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + val = 0x000fff00; + else if (tp->mac_version == RTL_GIGA_MAC_VER_06) + val = 0x00ffff00; + else + return; + + if (RTL_R8(tp, Config2) & PCI_Clock_66MHz) + val |= 0xff; + + RTL_W32(tp, 0x7c, val); +} + +static void rtl_set_rx_mode(struct net_device *dev) +{ + u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast; + /* Multicast hash filter */ + u32 mc_filter[2] = { 0xffffffff, 0xffffffff }; + struct rtl8169_private *tp = netdev_priv(dev); + u32 tmp; + + if (dev->flags & IFF_PROMISC) { + rx_mode |= AcceptAllPhys; + } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT || + dev->flags & IFF_ALLMULTI || + tp->mac_version == RTL_GIGA_MAC_VER_35) { + /* accept all multicasts */ + } else if (netdev_mc_empty(dev)) { + rx_mode &= ~AcceptMulticast; + } else { + struct netdev_hw_addr *ha; + + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + u32 bit_nr = eth_hw_addr_crc(ha) >> 26; + mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31); + } + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + tmp = mc_filter[0]; + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(tmp); + } + } + + RTL_W32(tp, MAR0 + 4, mc_filter[1]); + RTL_W32(tp, MAR0 + 0, mc_filter[0]); + + tmp = RTL_R32(tp, RxConfig); + RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_OK_MASK) | rx_mode); +} + +DECLARE_RTL_COND(rtl_csiar_cond) +{ + return RTL_R32(tp, CSIAR) & CSIAR_FLAG; +} + +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIDR, value); + RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE | func << 16); + + rtl_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 | + CSIAR_BYTE_ENABLE); + + return rtl_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? + RTL_R32(tp, CSIDR) : ~0; +} + +static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val) +{ + struct pci_dev *pdev = tp->pci_dev; + u32 csi; + + /* According to Realtek the value at config space address 0x070f + * controls the L0s/L1 entrance latency. We try standard ECAM access + * first and if it fails fall back to CSI. + * bit 0..2: L0: 0 = 1us, 1 = 2us .. 6 = 7us, 7 = 7us (no typo) + * bit 3..5: L1: 0 = 1us, 1 = 2us .. 
6 = 64us, 7 = 64us + */ + if (pdev->cfg_size > 0x070f && + pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL) + return; + + netdev_notice_once(tp->dev, + "No native access to PCI extended config space, falling back to CSI\n"); + csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; + rtl_csi_write(tp, 0x070c, csi | val << 24); +} + +static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) +{ + /* L0 7us, L1 16us */ + rtl_set_aspm_entry_latency(tp, 0x27); +} + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +static void __rtl_ephy_init(struct rtl8169_private *tp, + const struct ephy_info *e, int len) +{ + u16 w; + + while (len-- > 0) { + w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits; + rtl_ephy_write(tp, e->offset, w); + e++; + } +} + +#define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a)) + +static void rtl_disable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_enable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp) +{ + /* work around an issue when PCI reset occurs during L2/L3 state */ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23); +} + +static void rtl_enable_exit_l1(struct rtl8169_private *tp) +{ + /* Bits control which events trigger ASPM L1 exit: + * Bit 12: rxdv + * Bit 11: ltr_msg + * Bit 10: txdma_poll + * Bit 9: xadm + * Bit 8: pktavi + * Bit 7: txpla + */ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: + rtl_eri_set_bits(tp, 0xd4, 0x1f00); + break; + case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38: + rtl_eri_set_bits(tp, 0xd4, 0x0c00); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80); + break; + default: + break; + } +} + +static void rtl_disable_exit_l1(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + rtl_eri_clear_bits(tp, 0xd4, 0x1f00); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0); + break; + default: + break; + } +} + +static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) +{ + /* Don't enable ASPM in the chip if OS can't control ASPM */ + if (enable && tp->aspm_manageable) { + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63: + /* reset ephy tx/rx disable timer */ + r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0); + /* chip can trigger L1.2 */ + r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, BIT(2)); + break; + default: + break; + } + } else { + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63: + r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0); + break; + default: + break; + } + + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + } + + udelay(10); +} + +static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat, + u16 tx_stat, u16 rx_dyn, u16 tx_dyn) +{ + /* Usage of dynamic vs. static FIFO is controlled by bit + * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known. 
+ */ + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn); +} + +static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp, + u8 low, u8 high) +{ + /* FIFO thresholds for pause flow control */ + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high); +} + +static void rtl_hw_start_8168b(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168cp[] = { + { 0x01, 0, 0x0001 }, + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0042 }, + { 0x06, 0x0080, 0x0000 }, + { 0x07, 0, 0x2000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168cp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + /* Magic. */ + RTL_W8(tp, DBG_REG, 0x20); +} + +static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_1[] = { + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0002 }, + { 0x06, 0x0080, 0x0000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); + + rtl_ephy_init(tp, e_info_8168c_1); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_2[] = { + { 0x01, 0, 0x0001 }, + { 0x03, 0x0400, 0x0020 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168c_2); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168d(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168d_4[] = { + { 0x0b, 0x0000, 0x0048 }, + { 0x19, 0x0020, 0x0050 }, + { 0x0c, 0x0100, 0x0020 }, + { 0x10, 0x0004, 0x0000 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168d_4); + + rtl_enable_clock_request(tp); +} + +static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_1[] = { + { 0x00, 0x0200, 0x0100 }, + { 0x00, 0x0000, 0x0004 }, + { 0x06, 0x0002, 0x0001 }, + { 0x06, 0x0000, 0x0030 }, + { 0x07, 0x0000, 0x2000 }, + { 0x00, 0x0000, 0x0020 }, + { 0x03, 0x5800, 0x2000 }, + { 0x03, 0x0000, 0x0001 }, + { 0x01, 0x0800, 0x1000 }, + { 0x07, 0x0000, 0x4000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x19, 0xffff, 0xfe6c }, + { 0x0a, 0x0000, 0x0040 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_1); + + rtl_disable_clock_request(tp); + + /* Reset tx FIFO pointer */ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST); + + RTL_W8(tp, Config5, RTL_R8(tp, 
Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_2[] = { + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0004 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_2); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_eri_set_bits(tp, 0x1d0, BIT(1)); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168f(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_set_bits(tp, 0x1d0, BIT(4) | BIT(1)); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl8168_config_eee_mac(tp); +} + +static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x08, 0x0001, 0x0002 }, + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_hw_start_8168f(tp); + + rtl_ephy_init(tp, e_info_8168f_1); +} + +static void rtl_hw_start_8411(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_hw_start_8168f(tp); + rtl_pcie_state_l2l3_disable(tp); + + rtl_ephy_init(tp, e_info_8168f_1); +} + +static void rtl_hw_start_8168g(struct rtl8169_private *tp) +{ + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f); + + rtl_disable_rxdvgate(tp); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_1[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8000, 0x0000 } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_1); + rtl_hw_aspm_clkreq_enable(tp, true); +} + 
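+/* Descriptive note on the following handler: like rtl_hw_start_8168g_1() it
+ * runs the common rtl_hw_start_8168g() setup and then applies its own EPHY
+ * init table; unlike the 8168g_1 variant, ASPM/clock request is left
+ * disabled after the EPHY writes.
+ */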
+static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_2[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x19, 0xffff, 0x7c00 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x0d, 0xffff, 0x1666 }, + { 0x00, 0xffff, 0x10a3 }, + { 0x06, 0xffff, 0xf050 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x4000, 0x0000 }, + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_2); +} + +static void rtl_hw_start_8411_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8411_2[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x37d0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x00, 0x0000, 0x0080 }, + { 0x06, 0x0000, 0x0010 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x0000, 0x4000 }, + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8411_2); + + /* The following Realtek-provided magic fixes an issue with the RX unit + * getting confused after the PHY having been powered-down. + */ + r8168_mac_ocp_write(tp, 0xFC28, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2A, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0000); + r8168_mac_ocp_write(tp, 0xFC30, 0x0000); + r8168_mac_ocp_write(tp, 0xFC32, 0x0000); + r8168_mac_ocp_write(tp, 0xFC34, 0x0000); + r8168_mac_ocp_write(tp, 0xFC36, 0x0000); + mdelay(3); + r8168_mac_ocp_write(tp, 0xFC26, 0x0000); + + r8168_mac_ocp_write(tp, 0xF800, 0xE008); + r8168_mac_ocp_write(tp, 0xF802, 0xE00A); + r8168_mac_ocp_write(tp, 0xF804, 0xE00C); + r8168_mac_ocp_write(tp, 0xF806, 0xE00E); + r8168_mac_ocp_write(tp, 0xF808, 0xE027); + r8168_mac_ocp_write(tp, 0xF80A, 0xE04F); + r8168_mac_ocp_write(tp, 0xF80C, 0xE05E); + r8168_mac_ocp_write(tp, 0xF80E, 0xE065); + r8168_mac_ocp_write(tp, 0xF810, 0xC602); + r8168_mac_ocp_write(tp, 0xF812, 0xBE00); + r8168_mac_ocp_write(tp, 0xF814, 0x0000); + r8168_mac_ocp_write(tp, 0xF816, 0xC502); + r8168_mac_ocp_write(tp, 0xF818, 0xBD00); + r8168_mac_ocp_write(tp, 0xF81A, 0x074C); + r8168_mac_ocp_write(tp, 0xF81C, 0xC302); + r8168_mac_ocp_write(tp, 0xF81E, 0xBB00); + r8168_mac_ocp_write(tp, 0xF820, 0x080A); + r8168_mac_ocp_write(tp, 0xF822, 0x6420); + r8168_mac_ocp_write(tp, 0xF824, 0x48C2); + r8168_mac_ocp_write(tp, 0xF826, 0x8C20); + r8168_mac_ocp_write(tp, 0xF828, 0xC516); + r8168_mac_ocp_write(tp, 0xF82A, 0x64A4); + r8168_mac_ocp_write(tp, 0xF82C, 0x49C0); + r8168_mac_ocp_write(tp, 0xF82E, 0xF009); + r8168_mac_ocp_write(tp, 0xF830, 0x74A2); + r8168_mac_ocp_write(tp, 0xF832, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF834, 0x74A0); + r8168_mac_ocp_write(tp, 0xF836, 0xC50E); + r8168_mac_ocp_write(tp, 0xF838, 0x9CA2); + r8168_mac_ocp_write(tp, 0xF83A, 0x1C11); + r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF83E, 0xE006); + r8168_mac_ocp_write(tp, 0xF840, 0x74F8); + r8168_mac_ocp_write(tp, 0xF842, 0x48C4); + r8168_mac_ocp_write(tp, 0xF844, 0x8CF8); + r8168_mac_ocp_write(tp, 0xF846, 0xC404); + r8168_mac_ocp_write(tp, 0xF848, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84A, 0xC403); + r8168_mac_ocp_write(tp, 0xF84C, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2); + r8168_mac_ocp_write(tp, 0xF850, 0x0C0A); + r8168_mac_ocp_write(tp, 0xF852, 0xE434); + r8168_mac_ocp_write(tp, 0xF854, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF856, 0x49D9); + 
r8168_mac_ocp_write(tp, 0xF858, 0xF01F); + r8168_mac_ocp_write(tp, 0xF85A, 0xC526); + r8168_mac_ocp_write(tp, 0xF85C, 0x64A5); + r8168_mac_ocp_write(tp, 0xF85E, 0x1400); + r8168_mac_ocp_write(tp, 0xF860, 0xF007); + r8168_mac_ocp_write(tp, 0xF862, 0x0C01); + r8168_mac_ocp_write(tp, 0xF864, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF866, 0x1C15); + r8168_mac_ocp_write(tp, 0xF868, 0xC51B); + r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF86C, 0xE013); + r8168_mac_ocp_write(tp, 0xF86E, 0xC519); + r8168_mac_ocp_write(tp, 0xF870, 0x74A0); + r8168_mac_ocp_write(tp, 0xF872, 0x48C4); + r8168_mac_ocp_write(tp, 0xF874, 0x8CA0); + r8168_mac_ocp_write(tp, 0xF876, 0xC516); + r8168_mac_ocp_write(tp, 0xF878, 0x74A4); + r8168_mac_ocp_write(tp, 0xF87A, 0x48C8); + r8168_mac_ocp_write(tp, 0xF87C, 0x48CA); + r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4); + r8168_mac_ocp_write(tp, 0xF880, 0xC512); + r8168_mac_ocp_write(tp, 0xF882, 0x1B00); + r8168_mac_ocp_write(tp, 0xF884, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF886, 0x1B1C); + r8168_mac_ocp_write(tp, 0xF888, 0x483F); + r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2); + r8168_mac_ocp_write(tp, 0xF88C, 0x1B04); + r8168_mac_ocp_write(tp, 0xF88E, 0xC508); + r8168_mac_ocp_write(tp, 0xF890, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF892, 0xC505); + r8168_mac_ocp_write(tp, 0xF894, 0xBD00); + r8168_mac_ocp_write(tp, 0xF896, 0xC502); + r8168_mac_ocp_write(tp, 0xF898, 0xBD00); + r8168_mac_ocp_write(tp, 0xF89A, 0x0300); + r8168_mac_ocp_write(tp, 0xF89C, 0x051E); + r8168_mac_ocp_write(tp, 0xF89E, 0xE434); + r8168_mac_ocp_write(tp, 0xF8A0, 0xE018); + r8168_mac_ocp_write(tp, 0xF8A2, 0xE092); + r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20); + r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F); + r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4); + r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3); + r8168_mac_ocp_write(tp, 0xF8AE, 0xF007); + r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0); + r8168_mac_ocp_write(tp, 0xF8B2, 0xF103); + r8168_mac_ocp_write(tp, 0xF8B4, 0xC607); + r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8B8, 0xC606); + r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8BC, 0xC602); + r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C); + r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28); + r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C); + r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00); + r8168_mac_ocp_write(tp, 0xF8C8, 0xC707); + r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00); + r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2); + r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1); + r8168_mac_ocp_write(tp, 0xF8D0, 0xC502); + r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA); + r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0); + r8168_mac_ocp_write(tp, 0xF8D8, 0xC502); + r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8DC, 0x0132); + + r8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + r8168_mac_ocp_write(tp, 0xFC2A, 0x0743); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0801); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9); + r8168_mac_ocp_write(tp, 0xFC30, 0x02FD); + r8168_mac_ocp_write(tp, 0xFC32, 0x0C25); + r8168_mac_ocp_write(tp, 0xFC34, 0x00A9); + r8168_mac_ocp_write(tp, 0xFC36, 0x012D); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168h_1[] = { + { 0x1e, 0x0800, 0x0001 }, + { 0x1d, 0x0000, 0x0800 }, + { 0x05, 0xffff, 0x2089 }, + { 0x06, 0xffff, 0x5881 }, + { 0x04, 0xffff, 0x854a }, + { 0x01, 0xffff, 
0x068b } + }; + int rg_saw_cnt; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168h_1); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xdc, 0x001c); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + rtl_disable_rxdvgate(tp); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = 16000000/rg_saw_cnt; + sw_cnt_1ms_ini &= 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008); + r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + rtl_disable_rxdvgate(tp); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_3[] = { + { 0x00, 0x0000, 0x0080 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_3); + + rtl_hw_start_8168ep(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8117(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8117[] = { + { 0x19, 0x0040, 0x1100 }, + { 0x59, 0x0040, 0x1100 }, + }; + int rg_saw_cnt; + + rtl8168ep_stop_cmac(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8117); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, 0x0010); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + rtl_disable_rxdvgate(tp); + + rtl_eri_write(tp, 
0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_write(tp, 0xea80, 0x0003); + r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + /* firmware is for MAC only */ + r8169_apply_firmware(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8102e_1[] = { + { 0x01, 0, 0x6e65 }, + { 0x02, 0, 0x091f }, + { 0x03, 0, 0xc2f9 }, + { 0x06, 0, 0xafb5 }, + { 0x07, 0, 0x0e00 }, + { 0x19, 0, 0xec80 }, + { 0x01, 0, 0x2e65 }, + { 0x01, 0, 0x6e65 } + }; + u8 cfg1; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, FIX_NAK_1); + + RTL_W8(tp, Config1, + LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + cfg1 = RTL_R8(tp, Config1); + if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) + RTL_W8(tp, Config1, cfg1 & ~LEDS0); + + rtl_ephy_init(tp, e_info_8102e_1); +} + +static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8102e_2(tp); + + rtl_ephy_write(tp, 0x03, 0xc2f9); +} + +static void rtl_hw_start_8401(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8401[] = { + { 0x01, 0xffff, 0x6fe5 }, + { 0x03, 0xffff, 0x0599 }, + { 0x06, 0xffff, 0xaf25 }, + { 0x07, 0xffff, 0x8e68 }, + }; + + rtl_ephy_init(tp, e_info_8401); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8105e_1[] = { + { 0x07, 0, 0x4000 }, + { 0x19, 0, 0x0200 }, + { 0x19, 0, 0x0020 }, + { 0x1e, 0, 0x2000 }, + { 0x03, 0, 0x0001 }, + { 0x19, 0, 0x0100 }, + { 0x19, 0, 0x0004 }, + { 0x0a, 0, 0x0020 } + }; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + /* Disable Early Tally Counter */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + + rtl_ephy_init(tp, e_info_8105e_1); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) +{ + rtl_hw_start_8105e_1(tp); + rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000); +} + +static void rtl_hw_start_8402(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8402[] = { + { 0x19, 0xffff, 0xff64 }, + { 0x1e, 0, 0x4000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* 
Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl_ephy_init(tp, e_info_8402); + + rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06); + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_w0w1_eri(tp, 0x0d4, 0x0e00, 0xff00); + + /* disable EEE */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8106(struct rtl8169_private *tp) +{ + rtl_hw_aspm_clkreq_enable(tp, false); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + + /* L0 7us, L1 32us - needed to avoid issues with link-up detection */ + rtl_set_aspm_entry_latency(tp, 0x2f); + + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); + + /* disable EEE */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + + rtl_pcie_state_l2l3_disable(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond) +{ + return r8168_mac_ocp_read(tp, 0xe00e) & BIT(13); +} + +static void rtl_hw_start_8125_common(struct rtl8169_private *tp) +{ + rtl_pcie_state_l2l3_disable(tp); + + RTL_W16(tp, 0x382, 0x221b); + RTL_W8(tp, 0x4500, 0); + RTL_W16(tp, 0x4800, 0); + + /* disable UPS */ + r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10); + + r8168_mac_ocp_write(tp, 0xc140, 0xffff); + r8168_mac_ocp_write(tp, 0xc142, 0xffff); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x03a9); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + /* disable new tx descriptor format */ + r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200); + else + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000); + else + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020); + + r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c); + r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033); + r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040); + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030); + r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000); + r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); + r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403); + r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068); + r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f); + + r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001); + udelay(1); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0001, 0x0000); + RTL_W16(tp, 0x1880, RTL_R16(tp, 0x1880) & ~0x0030); + + r8168_mac_ocp_write(tp, 0xe098, 0xc302); + + rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + rtl8125b_config_eee_mac(tp); + else + rtl8125a_config_eee_mac(tp); + + rtl_disable_rxdvgate(tp); +} + +static void rtl_hw_start_8125a_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125a_2[] = { + { 0x04, 0xffff, 0xd000 }, + { 0x0a, 0xffff, 0x8653 }, + { 0x23, 0xffff, 0xab66 }, + { 0x20, 0xffff, 0x9455 }, + { 0x21, 0xffff, 
0x99ff }, + { 0x29, 0xffff, 0xfe04 }, + + { 0x44, 0xffff, 0xd000 }, + { 0x4a, 0xffff, 0x8653 }, + { 0x63, 0xffff, 0xab66 }, + { 0x60, 0xffff, 0x9455 }, + { 0x61, 0xffff, 0x99ff }, + { 0x69, 0xffff, 0xfe04 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8125a_2); + + rtl_hw_start_8125_common(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8125b(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125b[] = { + { 0x0b, 0xffff, 0xa908 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x4b, 0xffff, 0xa908 }, + { 0x5e, 0xffff, 0x20eb }, + { 0x22, 0x0030, 0x0020 }, + { 0x62, 0x0030, 0x0020 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + rtl_hw_aspm_clkreq_enable(tp, false); + + rtl_ephy_init(tp, e_info_8125b); + rtl_hw_start_8125_common(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_config(struct rtl8169_private *tp) +{ + static const rtl_generic_fct hw_configs[] = { + [RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1, + [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3, + [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401, + [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1, + [RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1, + [RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2, + [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_2, + [RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4, + [RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2, + [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3, + [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4, + [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1, + [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2, + [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2, + [RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402, + [RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411, + [RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106, + [RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2, + [RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3, + [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117, + [RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117, + [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2, + [RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b, + }; + + if (hw_configs[tp->mac_version]) + hw_configs[tp->mac_version](tp); +} + +static void rtl_hw_start_8125(struct rtl8169_private *tp) +{ + int i; + + /* disable interrupt coalescing */ + for (i = 0xa00; i < 0xb00; i += 4) + RTL_W32(tp, i, 0); + + rtl_hw_config(tp); +} + +static void rtl_hw_start_8168(struct rtl8169_private *tp) +{ + if (rtl_is_8168evl_up(tp)) + RTL_W8(tp, MaxTxPacketSize, EarlySize); + else + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + rtl_hw_config(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static void rtl_hw_start_8169(struct rtl8169_private *tp) +{ + RTL_W8(tp, EarlyTxThres, NoEarlyTx); + 
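+ /* Original RTL8169 family setup: enable PCI multiple read/write (plus the
+ * analog PLL on VER_02/VER_03) via CPlusCmd, then program the per-chip
+ * magic register before disabling interrupt coalescing. */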
+ tp->cp_cmd |= PCIMulRW; + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) + tp->cp_cmd |= EnAnaPLL; + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + rtl8169_set_magic_reg(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static void rtl_hw_start(struct rtl8169_private *tp) +{ + rtl_unlock_config_regs(tp); + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + rtl_hw_start_8169(tp); + else if (rtl_is_8125(tp)) + rtl_hw_start_8125(tp); + else + rtl_hw_start_8168(tp); + + rtl_enable_exit_l1(tp); + rtl_set_rx_max_size(tp); + rtl_set_rx_tx_desc_registers(tp); + rtl_lock_config_regs(tp); + + rtl_jumbo_config(tp); + + /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ + rtl_pci_commit(tp); + + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_init_rxcfg(tp); + rtl_set_tx_config_registers(tp); + rtl_set_rx_config_features(tp, tp->dev->features); + rtl_set_rx_mode(tp->dev); + rtl_irq_enable(tp); +} + +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + dev->mtu = new_mtu; + netdev_update_features(dev); + rtl_jumbo_config(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + rtl8125_set_eee_txidle_timer(tp); + break; + default: + break; + } + + return 0; +} + +static void rtl8169_mark_to_asic(struct RxDesc *desc) +{ + u32 eor = le32_to_cpu(desc->opts1) & RingEnd; + + desc->opts2 = 0; + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + WRITE_ONCE(desc->opts1, cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE)); +} + +static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp, + struct RxDesc *desc) +{ + struct device *d = tp_to_dev(tp); + int node = dev_to_node(d); + dma_addr_t mapping; + struct page *data; + + data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE)); + if (!data) + return NULL; + + mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + netdev_err(tp->dev, "Failed to map RX DMA!\n"); + __free_pages(data, get_order(R8169_RX_BUF_SIZE)); + return NULL; + } + + desc->addr = cpu_to_le64(mapping); + rtl8169_mark_to_asic(desc); + + return data; +} + +static void rtl8169_rx_clear(struct rtl8169_private *tp) +{ + int i; + + for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) { + dma_unmap_page(tp_to_dev(tp), + le64_to_cpu(tp->RxDescArray[i].addr), + R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + __free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE)); + tp->Rx_databuff[i] = NULL; + tp->RxDescArray[i].addr = 0; + tp->RxDescArray[i].opts1 = 0; + } +} + +static int rtl8169_rx_fill(struct rtl8169_private *tp) +{ + int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + struct page *data; + + data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); + if (!data) { + rtl8169_rx_clear(tp); + return -ENOMEM; + } + tp->Rx_databuff[i] = data; + } + + /* mark as last descriptor in the ring */ + tp->RxDescArray[NUM_RX_DESC - 1].opts1 |= cpu_to_le32(RingEnd); + + return 0; +} + +static int rtl8169_init_ring(struct rtl8169_private *tp) +{ + rtl8169_init_ring_indexes(tp); + + memset(tp->tx_skb, 0, sizeof(tp->tx_skb)); + memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff)); + + return rtl8169_rx_fill(tp); +} + +static void rtl8169_unmap_tx_skb(struct rtl8169_private *tp, unsigned int entry) +{ + struct ring_info *tx_skb = tp->tx_skb + entry; + struct TxDesc *desc = 
tp->TxDescArray + entry; + + dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), tx_skb->len, + DMA_TO_DEVICE); + memset(desc, 0, sizeof(*desc)); + memset(tx_skb, 0, sizeof(*tx_skb)); +} + +static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, + unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; i++) { + unsigned int entry = (start + i) % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + unsigned int len = tx_skb->len; + + if (len) { + struct sk_buff *skb = tx_skb->skb; + + rtl8169_unmap_tx_skb(tp, entry); + if (!tp->ecdev && skb) + dev_consume_skb_any(skb); + } + } +} + +static void rtl8169_tx_clear(struct rtl8169_private *tp) +{ + rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); + if (!tp->ecdev) + netdev_reset_queue(tp->dev); +} + +static void rtl8169_cleanup(struct rtl8169_private *tp) +{ + if (!tp->ecdev) + napi_disable(&tp->napi); + + /* Give a racing hard_start_xmit a few cycles to complete. */ + synchronize_net(); + + /* Disable interrupts */ + rtl8169_irq_mask_and_ack(tp); + + rtl_rx_close(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000); + break; + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + rtl_enable_rxdvgate(tp); + fsleep(2000); + break; + default: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + fsleep(100); + break; + } + + rtl_hw_reset(tp); + + rtl8169_tx_clear(tp); + rtl8169_init_ring_indexes(tp); +} + +static void rtl_reset_work(struct rtl8169_private *tp) +{ + int i; + + if (!tp->ecdev) + netif_stop_queue(tp->dev); + + rtl8169_cleanup(tp); + + for (i = 0; i < NUM_RX_DESC; i++) + rtl8169_mark_to_asic(tp->RxDescArray + i); + + if (!tp->ecdev) + napi_enable(&tp->napi); + rtl_hw_start(tp); +} + +static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len, + void *addr, unsigned int entry, bool desc_own) +{ + struct TxDesc *txd = tp->TxDescArray + entry; + struct device *d = tp_to_dev(tp); + dma_addr_t mapping; + u32 opts1; + int ret; + + mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); + ret = dma_mapping_error(d, mapping); + if (unlikely(ret)) { + if (net_ratelimit()) + netdev_err(tp->dev, "Failed to map TX data!\n"); + return ret; + } + + txd->addr = cpu_to_le64(mapping); + txd->opts2 = cpu_to_le32(opts[1]); + + opts1 = opts[0] | len; + if (entry == NUM_TX_DESC - 1) + opts1 |= RingEnd; + if (desc_own) + opts1 |= DescOwn; + txd->opts1 = cpu_to_le32(opts1); + + tp->tx_skb[entry].len = len; + + return 0; +} + +static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, + const u32 *opts, unsigned int entry) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int cur_frag; + + for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { + const skb_frag_t *frag = info->frags + cur_frag; + void *addr = skb_frag_address(frag); + u32 len = skb_frag_size(frag); + + entry = (entry + 1) % NUM_TX_DESC; + + if (unlikely(rtl8169_tx_map(tp, opts, len, addr, entry, true))) + goto err_out; + } + + return 0; + +err_out: + rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); + return -EIO; +} + +static bool 
rtl_skb_is_udp(struct sk_buff *skb) +{ + int no = skb_network_offset(skb); + struct ipv6hdr *i6h, _i6h; + struct iphdr *ih, _ih; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih); + return ih && ih->protocol == IPPROTO_UDP; + case htons(ETH_P_IPV6): + i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h); + return i6h && i6h->nexthdr == IPPROTO_UDP; + default: + return false; + } +} + +#define RTL_MIN_PATCH_LEN 47 + +/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */ +static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + unsigned int padto = 0, len = skb->len; + + if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN && + rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) { + unsigned int trans_data_len = skb_tail_pointer(skb) - + skb_transport_header(skb); + + if (trans_data_len >= offsetof(struct udphdr, len) && + trans_data_len < RTL_MIN_PATCH_LEN) { + u16 dest = ntohs(udp_hdr(skb)->dest); + + /* dest is a standard PTP port */ + if (dest == 319 || dest == 320) + padto = len + RTL_MIN_PATCH_LEN - trans_data_len; + } + + if (trans_data_len < sizeof(struct udphdr)) + padto = max_t(unsigned int, padto, + len + sizeof(struct udphdr) - trans_data_len); + } + + return padto; +} + +static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + unsigned int padto; + + padto = rtl8125_quirk_udp_padto(tp, skb); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + padto = max_t(unsigned int, padto, ETH_ZLEN); + break; + default: + break; + } + + return padto; +} + +static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts) +{ + u32 mss = skb_shinfo(skb)->gso_size; + + if (mss) { + opts[0] |= TD_LSO; + opts[0] |= mss << TD0_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + + if (ip->protocol == IPPROTO_TCP) + opts[0] |= TD0_IP_CS | TD0_TCP_CS; + else if (ip->protocol == IPPROTO_UDP) + opts[0] |= TD0_IP_CS | TD0_UDP_CS; + else + WARN_ON_ONCE(1); + } +} + +static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, + struct sk_buff *skb, u32 *opts) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + u32 mss = shinfo->gso_size; + + if (mss) { + if (shinfo->gso_type & SKB_GSO_TCPV4) { + opts[0] |= TD1_GTSENV4; + } else if (shinfo->gso_type & SKB_GSO_TCPV6) { + if (skb_cow_head(skb, 0)) + return false; + + tcp_v6_gso_csum_prep(skb); + opts[0] |= TD1_GTSENV6; + } else { + WARN_ON_ONCE(1); + } + + opts[0] |= skb_transport_offset(skb) << GTTCPHO_SHIFT; + opts[1] |= mss << TD1_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ip_protocol; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + opts[1] |= TD1_IPv4_CS; + ip_protocol = ip_hdr(skb)->protocol; + break; + + case htons(ETH_P_IPV6): + opts[1] |= TD1_IPv6_CS; + ip_protocol = ipv6_hdr(skb)->nexthdr; + break; + + default: + ip_protocol = IPPROTO_RAW; + break; + } + + if (ip_protocol == IPPROTO_TCP) + opts[1] |= TD1_TCP_CS; + else if (ip_protocol == IPPROTO_UDP) + opts[1] |= TD1_UDP_CS; + else + WARN_ON_ONCE(1); + + opts[1] |= skb_transport_offset(skb) << TCPHO_SHIFT; + } else { + unsigned int padto = rtl_quirk_packet_padto(tp, skb); + + /* skb_padto would free the skb on error */ + return !__skb_put_padto(skb, padto, false); + } + + return true; +} + +static bool rtl_tx_slots_avail(struct rtl8169_private *tp) +{ + unsigned int slots_avail = 
READ_ONCE(tp->dirty_tx) + NUM_TX_DESC + - READ_ONCE(tp->cur_tx); + + /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ + return slots_avail > MAX_SKB_FRAGS; +} + +/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */ +static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: + return false; + default: + return true; + } +} + +static void rtl8169_doorbell(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W16(tp, TxPoll_8125, BIT(0)); + else + RTL_W8(tp, TxPoll, NPQ); +} + +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned int frags = skb_shinfo(skb)->nr_frags; + struct rtl8169_private *tp = netdev_priv(dev); + unsigned int entry = tp->cur_tx % NUM_TX_DESC; + struct TxDesc *txd_first, *txd_last; + bool stop_queue = 0, door_bell = 0; + u32 opts[2]; + + if (unlikely(!rtl_tx_slots_avail(tp))) { + if (net_ratelimit()) + netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); + goto err_stop_0; + } + + opts[1] = rtl8169_tx_vlan_tag(skb); + opts[0] = 0; + + if (!rtl_chip_supports_csum_v2(tp)) + rtl8169_tso_csum_v1(skb, opts); + else if (!rtl8169_tso_csum_v2(tp, skb, opts)) + goto err_dma_0; + + if (unlikely(rtl8169_tx_map(tp, opts, skb_headlen(skb), skb->data, + entry, false))) + goto err_dma_0; + + txd_first = tp->TxDescArray + entry; + + if (frags) { + if (rtl8169_xmit_frags(tp, skb, opts, entry)) + goto err_dma_1; + entry = (entry + frags) % NUM_TX_DESC; + } + + txd_last = tp->TxDescArray + entry; + txd_last->opts1 |= cpu_to_le32(LastFrag); + tp->tx_skb[entry].skb = skb; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + + door_bell = tp->ecdev || __netdev_sent_queue(dev, skb->len, netdev_xmit_more()); + + txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag); + + /* rtl_tx needs to see descriptor changes before updated tp->cur_tx */ + smp_wmb(); + + WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1); + + stop_queue = !tp->ecdev && !rtl_tx_slots_avail(tp); + if (unlikely(stop_queue)) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); + netif_stop_queue(dev); + /* Sync with rtl_tx: + * - publish queue status and cur_tx ring index (write barrier) + * - refresh dirty_tx ring index (read barrier). + * May the current thread have a pessimistic view of the ring + * status and forget to wake up queue, a racing rtl_tx thread + * can't. 
+ */ + smp_mb__after_atomic(); + if (rtl_tx_slots_avail(tp)) + netif_start_queue(dev); + door_bell = true; + } + + if (door_bell) + rtl8169_doorbell(tp); + + return NETDEV_TX_OK; + +err_dma_1: + rtl8169_unmap_tx_skb(tp, entry); +err_dma_0: + if (!tp->ecdev) + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + +err_stop_0: + if (!tp->ecdev) + netif_stop_queue(dev); + dev->stats.tx_dropped++; + return NETDEV_TX_BUSY; +} + +static unsigned int rtl_last_frag_len(struct sk_buff *skb) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int nr_frags = info->nr_frags; + + if (!nr_frags) + return UINT_MAX; + + return skb_frag_size(info->frags + nr_frags - 1); +} + +/* Workaround for hw issues with TSO on RTL8168evl */ +static netdev_features_t rtl8168evl_fix_tso(struct sk_buff *skb, + netdev_features_t features) +{ + /* IPv4 header has options field */ + if (vlan_get_protocol(skb) == htons(ETH_P_IP) && + ip_hdrlen(skb) > sizeof(struct iphdr)) + features &= ~NETIF_F_ALL_TSO; + + /* IPv4 TCP header has options field */ + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 && + tcp_hdrlen(skb) > sizeof(struct tcphdr)) + features &= ~NETIF_F_ALL_TSO; + + else if (rtl_last_frag_len(skb) <= 6) + features &= ~NETIF_F_ALL_TSO; + + return features; +} + +static netdev_features_t rtl8169_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (skb_is_gso(skb)) { + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + features = rtl8168evl_fix_tso(skb, features); + + if (skb_transport_offset(skb) > GTTCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_ALL_TSO; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* work around hw bug on some chip versions */ + if (skb->len < ETH_ZLEN) + features &= ~NETIF_F_CSUM_MASK; + + if (rtl_quirk_packet_padto(tp, skb)) + features &= ~NETIF_F_CSUM_MASK; + + if (skb_transport_offset(skb) > TCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_CSUM_MASK; + } + + return vlan_features_check(skb, features); +} + +static void rtl8169_pcierr_interrupt(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + int pci_status_errs; + u16 pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + + pci_status_errs = pci_status_get_and_clear_errors(pdev); + + if (net_ratelimit()) + netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n", + pci_cmd, pci_status_errs); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, + int budget) +{ + unsigned int dirty_tx, bytes_compl = 0, pkts_compl = 0; + struct sk_buff *skb; + + dirty_tx = tp->dirty_tx; + + while (READ_ONCE(tp->cur_tx) != dirty_tx) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + u32 status; + + status = le32_to_cpu(tp->TxDescArray[entry].opts1); + if (status & DescOwn) + break; + + skb = tp->tx_skb[entry].skb; + rtl8169_unmap_tx_skb(tp, entry); + + if (skb) { + pkts_compl++; + bytes_compl += skb->len; + if (!tp->ecdev) + napi_consume_skb(skb, budget); + } + dirty_tx++; + } + + if (tp->dirty_tx != dirty_tx) { + if (!tp->ecdev) { + netdev_completed_queue(dev, pkts_compl, bytes_compl); + dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl); + } + + /* Sync with rtl8169_start_xmit: + * - publish dirty_tx ring index (write barrier) + * - refresh cur_tx ring index and queue status (read barrier) + * May the current thread 
miss the stopped queue condition, + * a racing xmit thread can only have a right view of the + * ring status. + */ + smp_store_mb(tp->dirty_tx, dirty_tx); + if (!tp->ecdev && netif_queue_stopped(dev) && rtl_tx_slots_avail(tp)) + netif_wake_queue(dev); + /* + * 8168 hack: TxPoll requests are lost when the Tx packets are + * too close. Let's kick an extra TxPoll request when a burst + * of start_xmit activity is detected (if it is not detected, + * it is slow enough). -- FR + * If skb is NULL then we come here again once a tx irq is + * triggered after the last fragment is marked transmitted. + */ + if (tp->cur_tx != dirty_tx && skb) + rtl8169_doorbell(tp); + } +} + +static inline int rtl8169_fragmented_frame(u32 status) +{ + return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); +} + +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) +{ + u32 status = opts1 & (RxProtoMask | RxCSFailMask); + + if (status == RxProtoTCP || status == RxProtoUDP) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); +} + +static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget) +{ + struct device *d = tp_to_dev(tp); + int count; + + for (count = 0; count < budget; count++, tp->cur_rx++) { + unsigned int pkt_size, entry = tp->cur_rx % NUM_RX_DESC; + struct RxDesc *desc = tp->RxDescArray + entry; + struct sk_buff *skb; + const void *rx_buf; + dma_addr_t addr; + u32 status; + + status = le32_to_cpu(desc->opts1); + if (status & DescOwn) + break; + + /* This barrier is needed to keep us from reading + * any other fields out of the Rx descriptor until + * we know the status of DescOwn + */ + dma_rmb(); + + if (unlikely(status & RxRES)) { + if (net_ratelimit()) + netdev_warn(dev, "Rx ERROR. status = %08x\n", + status); + dev->stats.rx_errors++; + if (status & (RxRWT | RxRUNT)) + dev->stats.rx_length_errors++; + if (status & RxCRC) + dev->stats.rx_crc_errors++; + + if (!(dev->features & NETIF_F_RXALL)) + goto release_descriptor; + else if (status & RxRWT || !(status & (RxRUNT | RxCRC))) + goto release_descriptor; + } + + pkt_size = status & GENMASK(13, 0); + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size -= ETH_FCS_LEN; + + /* The driver does not support incoming fragmented frames. + * They are seen as a symptom of over-mtu sized frames. + */ + if (unlikely(rtl8169_fragmented_frame(status))) { + dev->stats.rx_dropped++; + dev->stats.rx_length_errors++; + goto release_descriptor; + } + + if (!tp->ecdev) { + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (unlikely(!skb)) { + dev->stats.rx_dropped++; + goto release_descriptor; + } + } else { + skb = NULL; + } + + addr = le64_to_cpu(desc->addr); + rx_buf = page_address(tp->Rx_databuff[entry]); + + dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); + prefetch(rx_buf); + if (tp->ecdev) { + ecdev_receive(tp->ecdev, rx_buf, pkt_size); + + // No need to detect link status as + // long as frames are received: Reset watchdog. 
+ tp->ec_watchdog_jiffies = jiffies; + + } else { + skb_copy_to_linear_data(skb, rx_buf, pkt_size); + skb->tail += pkt_size; + skb->len = pkt_size; + } + dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); + + if (!tp->ecdev) { + rtl8169_rx_csum(skb, status); + skb->protocol = eth_type_trans(skb, dev); + + rtl8169_rx_vlan_tag(desc, skb); + + if (skb->pkt_type == PACKET_MULTICAST) + dev->stats.multicast++; + + napi_gro_receive(&tp->napi, skb); + + dev_sw_netstats_rx_add(dev, pkt_size); + } +release_descriptor: + rtl8169_mark_to_asic(desc); + } + + return count; +} + +static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) +{ + struct rtl8169_private *tp = dev_instance; + u32 status = rtl_get_events(tp); + + if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask)) + return IRQ_NONE; + + if (unlikely(status & SYSErr)) { + rtl8169_pcierr_interrupt(tp->dev); + goto out; + } + + if (status & LinkChg) + phy_mac_interrupt(tp->phydev); + + if (unlikely(status & RxFIFOOver && + tp->mac_version == RTL_GIGA_MAC_VER_11)) { + netif_stop_queue(tp->dev); + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); + } + + if (napi_schedule_prep(&tp->napi)) { + rtl_irq_disable(tp); + __napi_schedule(&tp->napi); + } +out: + rtl_ack_events(tp, status); + + return IRQ_HANDLED; +} + +static void rtl_task(struct work_struct *work) +{ + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, wk.work); + + rtnl_lock(); + + if (!netif_running(tp->dev) || + !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) + goto out_unlock; + + if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) { + rtl_reset_work(tp); + netif_wake_queue(tp->dev); + } +out_unlock: + rtnl_unlock(); +} + +static int rtl8169_poll(struct napi_struct *napi, int budget) +{ + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); + struct net_device *dev = tp->dev; + int work_done; + + rtl_tx(dev, tp, budget); + + work_done = rtl_rx(dev, tp, budget); + + if (work_done < budget && napi_complete_done(napi, work_done)) + rtl_irq_enable(tp); + + return work_done; +} + +static void r8169_phylink_handler(struct net_device *ndev) +{ + struct rtl8169_private *tp = netdev_priv(ndev); + + if (netif_carrier_ok(ndev)) { + rtl_link_chg_patch(tp); + pm_request_resume(&tp->pci_dev->dev); + } else { + pm_runtime_idle(&tp->pci_dev->dev); + } + + phy_print_status(tp->phydev); +} + +static int r8169_phy_connect(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + phy_interface_t phy_mode; + int ret; + + phy_mode = tp->supports_gmii ? 
PHY_INTERFACE_MODE_GMII : + PHY_INTERFACE_MODE_MII; + + ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler, + phy_mode); + if (ret) + return ret; + + if (!tp->supports_gmii) + phy_set_max_speed(phydev, SPEED_100); + + phy_attached_info(phydev); + + return 0; +} + +static void rtl8169_down(struct rtl8169_private *tp) +{ + /* Clear all task flags */ + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); + + phy_stop(tp->phydev); + + rtl8169_update_counters(tp); + + pci_clear_master(tp->pci_dev); + rtl_pci_commit(tp); + + rtl8169_cleanup(tp); + rtl_disable_exit_l1(tp); + rtl_prepare_power_down(tp); +} + +static void rtl8169_up(struct rtl8169_private *tp) +{ + pci_set_master(tp->pci_dev); + phy_init_hw(tp->phydev); + phy_resume(tp->phydev); + rtl8169_init_phy(tp); + if (!tp->ecdev) + napi_enable(&tp->napi); + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_reset_work(tp); + + phy_start(tp->phydev); +} + +static int rtl8169_close(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + pm_runtime_get_sync(&pdev->dev); + + if (!tp->ecdev) + netif_stop_queue(dev); + rtl8169_down(tp); + rtl8169_rx_clear(tp); + + cancel_work_sync(&tp->wk.work); + + if (!tp->ecdev) + free_irq(tp->irq, tp); + + phy_disconnect(tp->phydev); + + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + tp->RxDescArray = NULL; + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8169_netpoll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_interrupt(tp->irq, tp); +} +#endif + +static int rtl_open(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + unsigned long irqflags; + int retval = -ENOMEM; + + pm_runtime_get_sync(&pdev->dev); + + /* + * Rx and Tx descriptors needs 256 bytes alignment. + * dma_alloc_coherent provides more. + */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto out; + + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_tx_0; + + retval = rtl8169_init_ring(tp); + if (retval < 0) + goto err_free_rx_1; + + rtl_request_firmware(tp); + + irqflags = pci_dev_msi_enabled(pdev) ? 
IRQF_NO_THREAD : IRQF_SHARED; + if (!tp->ecdev) { + retval = request_irq(tp->irq, rtl8169_interrupt, irqflags, + dev->name, tp); + if (retval < 0) + goto err_release_fw_2; + } + + retval = r8169_phy_connect(tp); + if (retval) + goto err_free_irq; + + rtl8169_up(tp); + rtl8169_init_counter_offsets(tp); + if (!tp->ecdev) + netif_start_queue(dev); + else + ecdev_set_link(tp->ecdev, netif_carrier_ok(dev)); + +out: + pm_runtime_put_sync(&pdev->dev); + + return retval; + +err_free_irq: + free_irq(tp->irq, tp); +err_release_fw_2: + rtl_release_firmware(tp); + rtl8169_rx_clear(tp); +err_free_rx_1: + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; +err_free_tx_0: + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + goto out; +} + +static void +rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + struct rtl8169_counters *counters = tp->counters; + + pm_runtime_get_noresume(&pdev->dev); + + netdev_stats_to_stats64(stats, &dev->stats); + dev_fetch_sw_netstats(stats, dev->tstats); + + /* + * Fetch additional counter values missing in stats collected by driver + * from tally counters. + */ + if (pm_runtime_active(&pdev->dev)) + rtl8169_update_counters(tp); + + /* + * Subtract values fetched during initalization. + * See rtl8169_init_counter_offsets for a description why we do that. + */ + stats->tx_errors = le64_to_cpu(counters->tx_errors) - + le64_to_cpu(tp->tc_offset.tx_errors); + stats->collisions = le32_to_cpu(counters->tx_multi_collision) - + le32_to_cpu(tp->tc_offset.tx_multi_collision); + stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) - + le16_to_cpu(tp->tc_offset.tx_aborted); + stats->rx_missed_errors = le16_to_cpu(counters->rx_missed) - + le16_to_cpu(tp->tc_offset.rx_missed); + + pm_runtime_put_noidle(&pdev->dev); +} + +static void rtl8169_net_suspend(struct rtl8169_private *tp) +{ + netif_device_detach(tp->dev); + + if (netif_running(tp->dev)) + rtl8169_down(tp); +} + +static int rtl8169_runtime_resume(struct device *dev) +{ + struct rtl8169_private *tp = dev_get_drvdata(dev); + + rtl_rar_set(tp, tp->dev->dev_addr); + __rtl8169_set_wol(tp, tp->saved_wolopts); + + if (tp->TxDescArray) + rtl8169_up(tp); + + netif_device_attach(tp->dev); + + return 0; +} + +static int rtl8169_suspend(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + if (tp->ecdev) { + return -EBUSY; + } + + rtnl_lock(); + rtl8169_net_suspend(tp); + if (!device_may_wakeup(tp_to_dev(tp))) + clk_disable_unprepare(tp->clk); + rtnl_unlock(); + + return 0; +} + +static int rtl8169_resume(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + if (tp->ecdev) { + return -EBUSY; + } + + if (!device_may_wakeup(tp_to_dev(tp))) + clk_prepare_enable(tp->clk); + + /* Reportedly at least Asus X453MA truncates packets otherwise */ + if (tp->mac_version == RTL_GIGA_MAC_VER_37) + rtl_init_rxcfg(tp); + + return rtl8169_runtime_resume(device); +} + +static int rtl8169_runtime_suspend(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + if (tp->ecdev) { + return -EBUSY; + } + + if (!tp->TxDescArray) { + netif_device_detach(tp->dev); + return 0; + } + + rtnl_lock(); + __rtl8169_set_wol(tp, WAKE_PHY); + rtl8169_net_suspend(tp); + rtnl_unlock(); + + return 0; +} + +static int rtl8169_runtime_idle(struct device 
*device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + if (tp->dash_type != RTL_DASH_NONE) + return -EBUSY; + + if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev)) + pm_schedule_suspend(device, 10000); + + return -EBUSY; +} + +static const struct dev_pm_ops rtl8169_pm_ops = { + SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume) + RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume, + rtl8169_runtime_idle) +}; + +static void rtl_shutdown(struct pci_dev *pdev) +{ + struct rtl8169_private *tp = pci_get_drvdata(pdev); + + rtnl_lock(); + rtl8169_net_suspend(tp); + rtnl_unlock(); + + /* Restore original MAC address */ + rtl_rar_set(tp, tp->dev->perm_addr); + + if (system_state == SYSTEM_POWER_OFF && + tp->dash_type == RTL_DASH_NONE) { + pci_wake_from_d3(pdev, tp->saved_wolopts); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +static void rtl_remove_one(struct pci_dev *pdev) +{ + struct rtl8169_private *tp = pci_get_drvdata(pdev); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + if (tp->ecdev) { + ecdev_close(tp->ecdev); + ecdev_withdraw(tp->ecdev); + } else { + unregister_netdev(tp->dev); + } + + if (tp->dash_type != RTL_DASH_NONE) + rtl8168_driver_stop(tp); + + rtl_release_firmware(tp); + + /* restore original MAC address */ + rtl_rar_set(tp, tp->dev->perm_addr); +} + +static const struct net_device_ops rtl_netdev_ops = { + .ndo_open = rtl_open, + .ndo_stop = rtl8169_close, + .ndo_get_stats64 = rtl8169_get_stats64, + .ndo_start_xmit = rtl8169_start_xmit, + .ndo_features_check = rtl8169_features_check, + .ndo_tx_timeout = rtl8169_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = rtl8169_change_mtu, + .ndo_fix_features = rtl8169_fix_features, + .ndo_set_features = rtl8169_set_features, + .ndo_set_mac_address = rtl_set_mac_address, + .ndo_eth_ioctl = phy_do_ioctl_running, + .ndo_set_rx_mode = rtl_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8169_netpoll, +#endif + +}; + +static void rtl_set_irq_mask(struct rtl8169_private *tp) +{ + tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg; + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver; + else if (tp->mac_version == RTL_GIGA_MAC_VER_11) + /* special workaround needed */ + tp->irq_mask |= RxFIFOOver; + else + tp->irq_mask |= RxOverflow; +} + +static int rtl_alloc_irq(struct rtl8169_private *tp) +{ + unsigned int flags; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + rtl_unlock_config_regs(tp); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); + rtl_lock_config_regs(tp); + fallthrough; + case RTL_GIGA_MAC_VER_07 ... 
RTL_GIGA_MAC_VER_17: + flags = PCI_IRQ_LEGACY; + break; + default: + flags = PCI_IRQ_ALL_TYPES; + break; + } + + return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); +} + +static void rtl_read_mac_address(struct rtl8169_private *tp, + u8 mac_addr[ETH_ALEN]) +{ + /* Get MAC address */ + if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) { + u32 value; + + value = rtl_eri_read(tp, 0xe0); + put_unaligned_le32(value, mac_addr); + value = rtl_eri_read(tp, 0xe4); + put_unaligned_le16(value, mac_addr + 4); + } else if (rtl_is_8125(tp)) { + rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP); + } +} + +DECLARE_RTL_COND(rtl_link_list_ready_cond) +{ + return RTL_R8(tp, MCU) & LINK_LIST_RDY; +} + +static void r8168g_wait_ll_share_fifo_ready(struct rtl8169_private *tp) +{ + rtl_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42); +} + +static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + return rtl_readphy(tp, phyreg); +} + +static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr, + int phyreg, u16 val) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + rtl_writephy(tp, phyreg, val); + + return 0; +} + +static int r8169_mdio_register(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + struct mii_bus *new_bus; + int ret; + + new_bus = devm_mdiobus_alloc(&pdev->dev); + if (!new_bus) + return -ENOMEM; + + new_bus->name = "r8169"; + new_bus->priv = tp; + new_bus->parent = &pdev->dev; + new_bus->irq[0] = PHY_MAC_INTERRUPT; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x", + pci_domain_nr(pdev->bus), pci_dev_id(pdev)); + + new_bus->read = r8169_mdio_read_reg; + new_bus->write = r8169_mdio_write_reg; + + ret = devm_mdiobus_register(&pdev->dev, new_bus); + if (ret) + return ret; + + tp->phydev = mdiobus_get_phy(new_bus, 0); + if (!tp->phydev) { + return -ENODEV; + } else if (!tp->phydev->drv) { + /* Most chip versions fail with the genphy driver. + * Therefore ensure that the dedicated PHY driver is loaded. + */ + dev_err(&pdev->dev, "no dedicated PHY driver found for PHY ID 0x%08x, maybe realtek.ko needs to be added to initramfs?\n", + tp->phydev->phy_id); + return -EUNATCH; + } + + tp->phydev->mac_managed_pm = 1; + + phy_support_asym_pause(tp->phydev); + + /* PHY will be woken up in rtl_open() */ + phy_suspend(tp->phydev); + + return 0; +} + +static void rtl_hw_init_8168g(struct rtl8169_private *tp) +{ + rtl_enable_rxdvgate(tp); + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + r8168g_wait_ll_share_fifo_ready(tp); + + r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15)); + r8168g_wait_ll_share_fifo_ready(tp); +} + +static void rtl_hw_init_8125(struct rtl8169_private *tp) +{ + rtl_enable_rxdvgate(tp); + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + r8168g_wait_ll_share_fifo_ready(tp); + + r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0); + r8168_mac_ocp_write(tp, 0xc0a6, 0x0150); + r8168_mac_ocp_write(tp, 0xc01e, 0x5555); + r8168g_wait_ll_share_fifo_ready(tp); +} + +static void rtl_hw_initialize(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_51 ... 
RTL_GIGA_MAC_VER_53: + rtl8168ep_stop_cmac(tp); + fallthrough; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: + rtl_hw_init_8168g(tp); + break; + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63: + rtl_hw_init_8125(tp); + break; + default: + break; + } +} + +static int rtl_jumbo_max(struct rtl8169_private *tp) +{ + /* Non-GBit versions don't support jumbo frames */ + if (!tp->supports_gmii) + return 0; + + switch (tp->mac_version) { + /* RTL8169 */ + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + return JUMBO_7K; + /* RTL8168b */ + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_17: + return JUMBO_4K; + /* RTL8168c */ + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: + return JUMBO_6K; + default: + return JUMBO_9K; + } +} + +static void rtl_init_mac_address(struct rtl8169_private *tp) +{ + u8 mac_addr[ETH_ALEN] __aligned(2) = {}; + struct net_device *dev = tp->dev; + int rc; + + rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr); + if (!rc) + goto done; + + rtl_read_mac_address(tp, mac_addr); + if (is_valid_ether_addr(mac_addr)) + goto done; + + rtl_read_mac_from_reg(tp, mac_addr, MAC0); + if (is_valid_ether_addr(mac_addr)) + goto done; + + eth_random_addr(mac_addr); + dev->addr_assign_type = NET_ADDR_RANDOM; + dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n"); +done: + eth_hw_addr_set(dev, mac_addr); + rtl_rar_set(tp, mac_addr); +} + +/* register is set if system vendor successfully tested ASPM 1.2 */ +static bool rtl_aspm_is_safe(struct rtl8169_private *tp) +{ + if (tp->mac_version >= RTL_GIGA_MAC_VER_61 && + r8168_mac_ocp_read(tp, 0xc0b2) & 0xf) + return true; + + return false; +} + +static void ec_poll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u16 status = rtl_get_events(tp); + + if (jiffies - tp->ec_watchdog_jiffies >= 2 * HZ) { + ecdev_set_link(tp->ecdev, netif_carrier_ok(dev)); + tp->ec_watchdog_jiffies = jiffies; + } + + if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask)) + return; + + rtl_tx(dev, tp, 100); + + rtl_rx(dev, tp, 100); + + if (status & LinkChg) + phy_mac_interrupt(tp->phydev); + + rtl_ack_events(tp, status); +} + + +static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct rtl8169_private *tp; + int jumbo_max, region, rc; + enum mac_version chipset; + struct net_device *dev; + u16 xid; + + dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp)); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &rtl_netdev_ops; + tp = netdev_priv(dev); + tp->dev = dev; + tp->pci_dev = pdev; + tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1; + tp->eee_adv = -1; + tp->ocp_base = OCP_STD_PHY_BASE; + + dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev, + struct pcpu_sw_netstats); + if (!dev->tstats) + return -ENOMEM; + + /* Get the *optional* external "ether_clk" used on some boards */ + tp->clk = devm_clk_get_optional_enabled(&pdev->dev, "ether_clk"); + if (IS_ERR(tp->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(tp->clk), "failed to get ether_clk\n"); + + /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ + rc = pcim_enable_device(pdev); + if (rc < 0) { + dev_err(&pdev->dev, "enable failure\n"); + return rc; + } + + if (pcim_set_mwi(pdev) < 0) + dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n"); + + /* use first MMIO region */ + region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1; + if (region < 0) { + dev_err(&pdev->dev, "no MMIO resource found\n"); + return -ENODEV; + } + + rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME); + if (rc < 0) { + dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); + return rc; + } + + tp->mmio_addr = pcim_iomap_table(pdev)[region]; + + xid = (RTL_R32(tp, TxConfig) >> 20) & 0xfcf; + + /* Identify chip attached to board */ + chipset = rtl8169_get_mac_version(xid, tp->supports_gmii); + if (chipset == RTL_GIGA_MAC_NONE) { + dev_err(&pdev->dev, "unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n", xid); + return -ENODEV; + } + + tp->mac_version = chipset; + + /* Disable ASPM L1 as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users. + * Chips from RTL8168h partially have issues with L1.2, but seem + * to work fine with L1 and L1.1. + */ + if (rtl_aspm_is_safe(tp)) + rc = 0; + else if (tp->mac_version >= RTL_GIGA_MAC_VER_46) + rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2); + else + rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1); + tp->aspm_manageable = !rc; + + tp->dash_type = rtl_check_dash(tp); + + tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK; + + if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) + dev->features |= NETIF_F_HIGHDMA; + + rtl_init_rxcfg(tp); + + rtl8169_irq_mask_and_ack(tp); + + rtl_hw_initialize(tp); + + rtl_hw_reset(tp); + + rc = rtl_alloc_irq(tp); + if (rc < 0) { + dev_err(&pdev->dev, "Can't allocate interrupt\n"); + return rc; + } + tp->irq = pci_irq_vector(pdev, 0); + + INIT_WORK(&tp->wk.work, rtl_task); + + rtl_init_mac_address(tp); + + dev->ethtool_ops = &rtl8169_ethtool_ops; + + netif_napi_add(dev, &tp->napi, rtl8169_poll); + + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. + */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + /* Disallow toggling */ + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (rtl_chip_supports_csum_v2(tp)) + dev->hw_features |= NETIF_F_IPV6_CSUM; + + dev->features |= dev->hw_features; + + /* There has been a number of reports that using SG/TSO results in + * tx timeouts. However for a lot of people SG/TSO works fine. + * Therefore disable both features by default, but allow users to + * enable them. Use at own risk! 
+ */ + if (rtl_chip_supports_csum_v2(tp)) { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; + netif_set_tso_max_size(dev, RTL_GSO_MAX_SIZE_V2); + netif_set_tso_max_segs(dev, RTL_GSO_MAX_SEGS_V2); + } else { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO; + netif_set_tso_max_size(dev, RTL_GSO_MAX_SIZE_V1); + netif_set_tso_max_segs(dev, RTL_GSO_MAX_SEGS_V1); + } + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + /* configure chip for default features */ + rtl8169_set_features(dev, dev->features); + + if (tp->dash_type == RTL_DASH_NONE) { + rtl_set_d3_pll_down(tp, true); + } else { + rtl_set_d3_pll_down(tp, false); + dev->wol_enabled = 1; + } + + jumbo_max = rtl_jumbo_max(tp); + if (jumbo_max) + dev->max_mtu = jumbo_max; + + rtl_set_irq_mask(tp); + + tp->fw_name = rtl_chip_infos[chipset].fw_name; + + tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), + &tp->counters_phys_addr, + GFP_KERNEL); + if (!tp->counters) + return -ENOMEM; + + pci_set_drvdata(pdev, tp); + + rc = r8169_mdio_register(tp); + if (rc) + return rc; + + tp->ecdev = ecdev_offer(dev, ec_poll, THIS_MODULE); + tp->ec_watchdog_jiffies = jiffies; + + if (!tp->ecdev) { + rc = register_netdev(dev); + if (rc) + return rc; + } + + netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n", + rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq); + + if (jumbo_max) + netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n", + jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ? + "ok" : "ko"); + + if (tp->dash_type != RTL_DASH_NONE) { + netdev_info(dev, "DASH enabled\n"); + rtl8168_driver_start(tp); + } + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_sync(&pdev->dev); + + if (tp->ecdev) { + rc = ecdev_open(tp->ecdev); + if (rc) { + ecdev_withdraw(tp->ecdev); + return rc; + } + } + + return 0; +} + +static struct pci_driver rtl8169_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = rtl8169_pci_tbl, + .probe = rtl_init_one, + .remove = rtl_remove_one, + .shutdown = rtl_shutdown, + .driver.pm = pm_ptr(&rtl8169_pm_ops), +}; + +module_pci_driver(rtl8169_pci_driver); diff --git a/devices/r8169/r8169_main-6.1-orig.c b/devices/r8169/r8169_main-6.1-orig.c new file mode 100644 index 00000000..cabed1b7 --- /dev/null +++ b/devices/r8169/r8169_main-6.1-orig.c @@ -0,0 +1,5344 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * r8169.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "r8169.h" +#include "r8169_firmware.h" + +#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" +#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" +#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" +#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" +#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" +#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" +#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" +#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" +#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" +#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" +#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw" +#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw" +#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" +#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" +#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" +#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" +#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw" +#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" +#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw" +#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw" + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. */ +#define MC_FILTER_LIMIT 32 + +#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ +#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ + +#define R8169_REGS_SIZE 256 +#define R8169_RX_BUF_SIZE (SZ_16K - 1) +#define NUM_TX_DESC 256 /* Number of Tx descriptor registers */ +#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */ +#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) +#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + +#define OCP_STD_PHY_BASE 0xa400 + +#define RTL_CFG_NO_GBIT 1 + +/* write/read MMIO register */ +#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) +#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) +#define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg)) +#define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg)) +#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg)) +#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg)) + +#define JUMBO_4K (4 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) + +static const struct { + const char *name; + const char *fw_name; +} rtl_chip_infos[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_02] = {"RTL8169s" }, + [RTL_GIGA_MAC_VER_03] = {"RTL8110s" }, + [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" }, + [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" }, + [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" }, + /* PCI-E devices. 
*/ + [RTL_GIGA_MAC_VER_07] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_08] = {"RTL8102e" }, + [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" }, + [RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" }, + [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_14] = {"RTL8401" }, + [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, + [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" }, + [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" }, + [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1}, + [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2}, + [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1}, + [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" }, + [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1}, + [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2}, + [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3}, + [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1}, + [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2}, + [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 }, + [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 }, + [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1}, + [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2}, + [RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3}, + [RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2}, + [RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 }, + [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2}, + [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2}, + [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3}, + [RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", }, + [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3}, + /* reserve 62 for CFG_METHOD_4 in the vendor driver */ + [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2}, +}; + +static const struct pci_device_id rtl8169_pci_tbl[] = { + { PCI_VDEVICE(REALTEK, 0x2502) }, + { PCI_VDEVICE(REALTEK, 0x2600) }, + { PCI_VDEVICE(REALTEK, 0x8129) }, + { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT }, + { PCI_VDEVICE(REALTEK, 0x8161) }, + { PCI_VDEVICE(REALTEK, 0x8162) }, + { PCI_VDEVICE(REALTEK, 0x8167) }, + { PCI_VDEVICE(REALTEK, 0x8168) }, + { PCI_VDEVICE(NCUBE, 0x8168) }, + { PCI_VDEVICE(REALTEK, 0x8169) }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 }, + { PCI_VDEVICE(DLINK, 0x4300) }, + { PCI_VDEVICE(DLINK, 0x4302) }, + { PCI_VDEVICE(AT, 0xc107) }, + { PCI_VDEVICE(USR, 0x0116) }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 }, + { 0x0001, 0x8168, PCI_ANY_ID, 0x2410 }, + { PCI_VDEVICE(REALTEK, 0x8125) }, + { PCI_VDEVICE(REALTEK, 0x3000) }, + {} +}; + +MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); + +enum rtl_registers { + MAC0 = 0, /* Ethernet hardware address. */ + MAC4 = 4, + MAR0 = 8, /* Multicast filter. 
*/ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3c, + IntrStatus = 0x3e, + + TxConfig = 0x40, +#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */ +#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */ + + RxConfig = 0x44, +#define RX128_INT_EN (1 << 15) /* 8111c and later */ +#define RX_MULTI_EN (1 << 14) /* 8111c only */ +#define RXCFG_FIFO_SHIFT 13 + /* No threshold before first PCI xfer */ +#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) +#define RX_EARLY_OFF (1 << 11) +#define RXCFG_DMA_SHIFT 8 + /* Unlimited maximum PCI burst. */ +#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) + + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, +#define PME_SIGNAL (1 << 5) /* 8168c and later */ + + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + PHYAR = 0x60, + PHYstatus = 0x6c, + RxMaxSize = 0xda, + CPlusCmd = 0xe0, + IntrMitigate = 0xe2, + +#define RTL_COALESCE_TX_USECS GENMASK(15, 12) +#define RTL_COALESCE_TX_FRAMES GENMASK(11, 8) +#define RTL_COALESCE_RX_USECS GENMASK(7, 4) +#define RTL_COALESCE_RX_FRAMES GENMASK(3, 0) + +#define RTL_COALESCE_T_MAX 0x0fU +#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_T_MAX * 4) + + RxDescAddrLow = 0xe4, + RxDescAddrHigh = 0xe8, + EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ + +#define NoEarlyTx 0x3f /* Max value : no early transmit. */ + + MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */ + +#define TxPacketMax (8064 >> 7) +#define EarlySize 0x27 + + FuncEvent = 0xf0, + FuncEventMask = 0xf4, + FuncPresetState = 0xf8, + IBCR0 = 0xf8, + IBCR2 = 0xf9, + IBIMR0 = 0xfa, + IBISR0 = 0xfb, + FuncForceEvent = 0xfc, +}; + +enum rtl8168_8101_registers { + CSIDR = 0x64, + CSIAR = 0x68, +#define CSIAR_FLAG 0x80000000 +#define CSIAR_WRITE_CMD 0x80000000 +#define CSIAR_BYTE_ENABLE 0x0000f000 +#define CSIAR_ADDR_MASK 0x00000fff + PMCH = 0x6f, +#define D3COLD_NO_PLL_DOWN BIT(7) +#define D3HOT_NO_PLL_DOWN BIT(6) +#define D3_NO_PLL_DOWN (BIT(7) | BIT(6)) + EPHYAR = 0x80, +#define EPHYAR_FLAG 0x80000000 +#define EPHYAR_WRITE_CMD 0x80000000 +#define EPHYAR_REG_MASK 0x1f +#define EPHYAR_REG_SHIFT 16 +#define EPHYAR_DATA_MASK 0xffff + DLLPR = 0xd0, +#define PFM_EN (1 << 6) +#define TX_10M_PS_EN (1 << 7) + DBG_REG = 0xd1, +#define FIX_NAK_1 (1 << 4) +#define FIX_NAK_2 (1 << 3) + TWSI = 0xd2, + MCU = 0xd3, +#define NOW_IS_OOB (1 << 7) +#define TX_EMPTY (1 << 5) +#define RX_EMPTY (1 << 4) +#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY) +#define EN_NDP (1 << 3) +#define EN_OOB_RESET (1 << 2) +#define LINK_LIST_RDY (1 << 1) + EFUSEAR = 0xdc, +#define EFUSEAR_FLAG 0x80000000 +#define EFUSEAR_WRITE_CMD 0x80000000 +#define EFUSEAR_READ_CMD 0x00000000 +#define EFUSEAR_REG_MASK 0x03ff +#define EFUSEAR_REG_SHIFT 8 +#define EFUSEAR_DATA_MASK 0xff + MISC_1 = 0xf2, +#define PFM_D3COLD_EN (1 << 6) +}; + +enum rtl8168_registers { + LED_FREQ = 0x1a, + EEE_LED = 0x1b, + ERIDR = 0x70, + ERIAR = 0x74, +#define ERIAR_FLAG 0x80000000 +#define ERIAR_WRITE_CMD 0x80000000 +#define ERIAR_READ_CMD 0x00000000 +#define ERIAR_ADDR_BYTE_ALIGN 4 +#define ERIAR_TYPE_SHIFT 16 +#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) +#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) +#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_OOB (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_MASK_SHIFT 12 +#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) 
+#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) + EPHY_RXER_NUM = 0x7c, + OCPDR = 0xb0, /* OCP GPHY access */ +#define OCPDR_WRITE_CMD 0x80000000 +#define OCPDR_READ_CMD 0x00000000 +#define OCPDR_REG_MASK 0x7f +#define OCPDR_GPHY_REG_SHIFT 16 +#define OCPDR_DATA_MASK 0xffff + OCPAR = 0xb4, +#define OCPAR_FLAG 0x80000000 +#define OCPAR_GPHY_WRITE_CMD 0x8000f060 +#define OCPAR_GPHY_READ_CMD 0x0000f060 + GPHY_OCP = 0xb8, + RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ + MISC = 0xf0, /* 8168e only. */ +#define TXPLA_RST (1 << 29) +#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */ +#define PWM_EN (1 << 22) +#define RXDV_GATED_EN (1 << 19) +#define EARLY_TALLY_EN (1 << 16) +}; + +enum rtl8125_registers { + IntrMask_8125 = 0x38, + IntrStatus_8125 = 0x3c, + TxPoll_8125 = 0x90, + MAC0_BKP = 0x19e0, + EEE_TXIDLE_TIMER_8125 = 0x6048, +}; + +#define RX_VLAN_INNER_8125 BIT(22) +#define RX_VLAN_OUTER_8125 BIT(23) +#define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125) + +#define RX_FETCH_DFLT_8125 (8 << 27) + +enum rtl_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxOverflow = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* TXPoll register p.5 */ + HPQ = 0x80, /* Poll cmd on the high prio queue */ + NPQ = 0x40, /* Poll cmd on the low prio queue */ + FSWInt = 0x01, /* Forced software interrupt */ + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xc0, + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, +#define RX_CONFIG_ACCEPT_ERR_MASK 0x30 + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +#define RX_CONFIG_ACCEPT_OK_MASK 0x0f +#define RX_CONFIG_ACCEPT_MASK 0x3f + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + /* Config1 register p.24 */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register p. 25 */ + ClkReqEn = (1 << 7), /* Clock Request Enable */ + MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ + PCI_Clock_66MHz = 0x01, + PCI_Clock_33MHz = 0x00, + + /* Config3 register p.25 */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Rdy_to_L23 = (1 << 1), /* L23 Enable */ + Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* 8168 only. 
Reserved in the 8168b */ + + /* Config5 register p.27 */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + Spi_en = (1 << 3), + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + ASPM_en = (1 << 0), /* ASPM enable */ + + /* CPlusCmd p.31 */ + EnableBist = (1 << 15), // 8168 8101 + Mac_dbgo_oe = (1 << 14), // 8168 8101 + EnAnaPLL = (1 << 14), // 8169 + Normal_mode = (1 << 13), // unused + Force_half_dup = (1 << 12), // 8168 8101 + Force_rxflow_en = (1 << 11), // 8168 8101 + Force_txflow_en = (1 << 10), // 8168 8101 + Cxpl_dbg_sel = (1 << 9), // 8168 8101 + ASF = (1 << 8), // 8168 8101 + PktCntrDisable = (1 << 7), // 8168 8101 + Mac_dbgo_sel = 0x001c, // 8168 + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + PCIDAC = (1 << 4), + PCIMulRW = (1 << 3), +#define INTT_MASK GENMASK(1, 0) +#define CPCMD_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK) + + /* rtl8169_PHYstatus */ + TBI_Enable = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* ResetCounterCommand */ + CounterReset = 0x1, + + /* DumpCounterCommand */ + CounterDump = 0x8, + + /* magic enable v2 */ + MagicPacket_v2 = (1 << 16), /* Wake up when receives a Magic Packet */ +}; + +enum rtl_desc_bit { + /* First doubleword. */ + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ +}; + +/* Generic case. */ +enum rtl_tx_desc_bit { + /* First doubleword. */ + TD_LSO = (1 << 27), /* Large Send Offload */ +#define TD_MSS_MAX 0x07ffu /* MSS value */ + + /* Second doubleword. */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ +}; + +/* 8169, 8168b and 810x except 8102e. */ +enum rtl_tx_desc_bit_0 { + /* First doubleword. */ +#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */ + TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */ + TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */ + TD0_IP_CS = (1 << 18), /* Calculate IP checksum */ +}; + +/* 8102e, 8168c and beyond. */ +enum rtl_tx_desc_bit_1 { + /* First doubleword. */ + TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */ + TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */ +#define GTTCPHO_SHIFT 18 +#define GTTCPHO_MAX 0x7f + + /* Second doubleword. 
*/ +#define TCPHO_SHIFT 18 +#define TCPHO_MAX 0x3ff +#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ + TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */ + TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */ + TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */ + TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */ +}; + +enum rtl_rx_desc_bit { + /* Rx private */ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 0/2 */ + +#define RxProtoUDP (PID1) +#define RxProtoTCP (PID0) +#define RxProtoIP (PID1 | PID0) +#define RxProtoMask RxProtoIP + + IPFail = (1 << 16), /* IP checksum failed */ + UDPFail = (1 << 15), /* UDP/IP checksum failed */ + TCPFail = (1 << 14), /* TCP/IP checksum failed */ + +#define RxCSFailMask (IPFail | UDPFail | TCPFail) + + RxVlanTag = (1 << 16), /* VLAN tag available */ +}; + +#define RTL_GSO_MAX_SIZE_V1 32000 +#define RTL_GSO_MAX_SEGS_V1 24 +#define RTL_GSO_MAX_SIZE_V2 64000 +#define RTL_GSO_MAX_SEGS_V2 64 + +struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; +}; + +struct rtl8169_tc_offsets { + bool inited; + __le64 tx_errors; + __le32 tx_multi_collision; + __le16 tx_aborted; + __le16 rx_missed; +}; + +enum rtl_flag { + RTL_FLAG_TASK_ENABLED = 0, + RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_MAX +}; + +enum rtl_dash_type { + RTL_DASH_NONE, + RTL_DASH_DP, + RTL_DASH_EP, +}; + +struct rtl8169_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; + struct net_device *dev; + struct phy_device *phydev; + struct napi_struct napi; + enum mac_version mac_version; + enum rtl_dash_type dash_type; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. 
*/ + u32 dirty_tx; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + u16 cp_cmd; + u32 irq_mask; + int irq; + struct clk *clk; + + struct { + DECLARE_BITMAP(flags, RTL_FLAG_MAX); + struct work_struct work; + } wk; + + unsigned supports_gmii:1; + unsigned aspm_manageable:1; + dma_addr_t counters_phys_addr; + struct rtl8169_counters *counters; + struct rtl8169_tc_offsets tc_offset; + u32 saved_wolopts; + int eee_adv; + + const char *fw_name; + struct rtl_fw *rtl_fw; + + u32 ocp_base; +}; + +typedef void (*rtl_generic_fct)(struct rtl8169_private *tp); + +MODULE_AUTHOR("Realtek and the Linux r8169 crew "); +MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); +MODULE_SOFTDEP("pre: realtek"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE_8168D_1); +MODULE_FIRMWARE(FIRMWARE_8168D_2); +MODULE_FIRMWARE(FIRMWARE_8168E_1); +MODULE_FIRMWARE(FIRMWARE_8168E_2); +MODULE_FIRMWARE(FIRMWARE_8168E_3); +MODULE_FIRMWARE(FIRMWARE_8105E_1); +MODULE_FIRMWARE(FIRMWARE_8168F_1); +MODULE_FIRMWARE(FIRMWARE_8168F_2); +MODULE_FIRMWARE(FIRMWARE_8402_1); +MODULE_FIRMWARE(FIRMWARE_8411_1); +MODULE_FIRMWARE(FIRMWARE_8411_2); +MODULE_FIRMWARE(FIRMWARE_8106E_1); +MODULE_FIRMWARE(FIRMWARE_8106E_2); +MODULE_FIRMWARE(FIRMWARE_8168G_2); +MODULE_FIRMWARE(FIRMWARE_8168G_3); +MODULE_FIRMWARE(FIRMWARE_8168H_2); +MODULE_FIRMWARE(FIRMWARE_8168FP_3); +MODULE_FIRMWARE(FIRMWARE_8107E_2); +MODULE_FIRMWARE(FIRMWARE_8125A_3); +MODULE_FIRMWARE(FIRMWARE_8125B_2); + +static inline struct device *tp_to_dev(struct rtl8169_private *tp) +{ + return &tp->pci_dev->dev; +} + +static void rtl_lock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Lock); +} + +static void rtl_unlock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); +} + +static void rtl_pci_commit(struct rtl8169_private *tp) +{ + /* Read an arbitrary register to commit a preceding PCI write */ + RTL_R8(tp, ChipCmd); +} + +static bool rtl_is_8125(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_61; +} + +static bool rtl_is_8168evl_up(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_39 && + tp->mac_version <= RTL_GIGA_MAC_VER_53; +} + +static bool rtl_supports_eee(struct rtl8169_private *tp) +{ + return tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_37 && + tp->mac_version != RTL_GIGA_MAC_VER_39; +} + +static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + mac[i] = RTL_R8(tp, reg + i); +} + +struct rtl_cond { + bool (*check)(struct rtl8169_private *); + const char *msg; +}; + +static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c, + unsigned long usecs, int n, bool high) +{ + int i; + + for (i = 0; i < n; i++) { + if (c->check(tp) == high) + return true; + fsleep(usecs); + } + + if (net_ratelimit()) + netdev_err(tp->dev, "%s == %d (loop: %d, delay: %lu).\n", + c->msg, !high, n, usecs); + return false; +} + +static bool rtl_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned long d, int n) +{ + return rtl_loop_wait(tp, c, d, n, true); +} + +static bool rtl_loop_wait_low(struct rtl8169_private *tp, + const 
struct rtl_cond *c, + unsigned long d, int n) +{ + return rtl_loop_wait(tp, c, d, n, false); +} + +#define DECLARE_RTL_COND(name) \ +static bool name ## _check(struct rtl8169_private *); \ + \ +static const struct rtl_cond name = { \ + .check = name ## _check, \ + .msg = #name \ +}; \ + \ +static bool name ## _check(struct rtl8169_private *tp) + +static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type) +{ + /* based on RTL8168FP_OOBMAC_BASE in vendor driver */ + if (type == ERIAR_OOB && + (tp->mac_version == RTL_GIGA_MAC_VER_52 || + tp->mac_version == RTL_GIGA_MAC_VER_53)) + *cmd |= 0xf70 << 18; +} + +DECLARE_RTL_COND(rtl_eriar_cond) +{ + return RTL_R32(tp, ERIAR) & ERIAR_FLAG; +} + +static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val, int type) +{ + u32 cmd = ERIAR_WRITE_CMD | type | mask | addr; + + if (WARN(addr & 3 || !mask, "addr: 0x%x, mask: 0x%08x\n", addr, mask)) + return; + + RTL_W32(tp, ERIDR, val); + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); + + rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); +} + +static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val) +{ + _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC); +} + +static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type) +{ + u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr; + + r8168fp_adjust_ocp_cmd(tp, &cmd, type); + RTL_W32(tp, ERIAR, cmd); + + return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? + RTL_R32(tp, ERIDR) : ~0; +} + +static u32 rtl_eri_read(struct rtl8169_private *tp, int addr) +{ + return _rtl_eri_read(tp, addr, ERIAR_EXGMAC); +} + +static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m) +{ + u32 val = rtl_eri_read(tp, addr); + + rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p); +} + +static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p) +{ + rtl_w0w1_eri(tp, addr, p, 0); +} + +static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m) +{ + rtl_w0w1_eri(tp, addr, 0, m); +} + +static bool rtl_ocp_reg_failure(u32 reg) +{ + return WARN_ONCE(reg & 0xffff0001, "Invalid ocp reg %x!\n", reg); +} + +DECLARE_RTL_COND(rtl_ocp_gphy_cond) +{ + return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG; +} + +static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(reg)) + return; + + RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); + + rtl_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10); +} + +static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(reg)) + return 0; + + RTL_W32(tp, GPHY_OCP, reg << 15); + + return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ? + (RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT; +} + +static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + if (rtl_ocp_reg_failure(reg)) + return; + + RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data); +} + +static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + if (rtl_ocp_reg_failure(reg)) + return 0; + + RTL_W32(tp, OCPDR, reg << 15); + + return RTL_R32(tp, OCPDR); +} + +static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask, + u16 set) +{ + u16 data = r8168_mac_ocp_read(tp, reg); + + r8168_mac_ocp_write(tp, reg, (data & ~mask) | set); +} + +/* Work around a hw issue with RTL8168g PHY, the quirk disables + * PHY MCU interrupts before PHY power-down. 
+ */ +static void rtl8168g_phy_suspend_quirk(struct rtl8169_private *tp, int value) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40: + if (value & BMCR_RESET || !(value & BMCR_PDOWN)) + rtl_eri_set_bits(tp, 0x1a8, 0xfc000000); + else + rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000); + break; + default: + break; + } +}; + +static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE; + return; + } + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + if (tp->ocp_base == OCP_STD_PHY_BASE && reg == MII_BMCR) + rtl8168g_phy_suspend_quirk(tp, value); + + r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value); +} + +static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) +{ + if (reg == 0x1f) + return tp->ocp_base == OCP_STD_PHY_BASE ? 0 : tp->ocp_base >> 4; + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2); +} + +static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value << 4; + return; + } + + r8168_mac_ocp_write(tp, tp->ocp_base + reg, value); +} + +static int mac_mcu_read(struct rtl8169_private *tp, int reg) +{ + return r8168_mac_ocp_read(tp, tp->ocp_base + reg); +} + +DECLARE_RTL_COND(rtl_phyar_cond) +{ + return RTL_R32(tp, PHYAR) & 0x80000000; +} + +static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); + + rtl_loop_wait_low(tp, &rtl_phyar_cond, 25, 20); + /* + * According to hardware specs a 20us delay is required after write + * complete indication, but before sending next command. + */ + udelay(20); +} + +static int r8169_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16); + + value = rtl_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ? + RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT; + + /* + * According to hardware specs a 20us delay is required after read + * complete indication, but before sending next command. + */ + udelay(20); + + return value; +} + +DECLARE_RTL_COND(rtl_ocpar_cond) +{ + return RTL_R32(tp, OCPAR) & OCPAR_FLAG; +} + +#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 + +static void r8168dp_2_mdio_start(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_stop(struct rtl8169_private *tp) +{ + RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_2_mdio_start(tp); + + r8169_mdio_write(tp, reg, value); + + r8168dp_2_mdio_stop(tp); +} + +static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) +{ + int value; + + /* Work around issue with chip reporting wrong PHY ID */ + if (reg == MII_PHYSID2) + return 0xc912; + + r8168dp_2_mdio_start(tp); + + value = r8169_mdio_read(tp, reg); + + r8168dp_2_mdio_stop(tp); + + return value; +} + +static void rtl_writephy(struct rtl8169_private *tp, int location, int val) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + r8168dp_2_mdio_write(tp, location, val); + break; + case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_63: + r8168g_mdio_write(tp, location, val); + break; + default: + r8169_mdio_write(tp, location, val); + break; + } +} + +static int rtl_readphy(struct rtl8169_private *tp, int location) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_2_mdio_read(tp, location); + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + return r8168g_mdio_read(tp, location); + default: + return r8169_mdio_read(tp, location); + } +} + +DECLARE_RTL_COND(rtl_ephyar_cond) +{ + return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; +} + +static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) +{ + RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + rtl_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100); + + udelay(10); +} + +static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + return rtl_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ? + RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0; +} + +static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg) +{ + RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff)); + return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? + RTL_R32(tp, OCPDR) : ~0; +} + +static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u16 reg) +{ + return _rtl_eri_read(tp, reg, ERIAR_OOB); +} + +static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + RTL_W32(tp, OCPDR, data); + RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + rtl_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); +} + +static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + _rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, + data, ERIAR_OOB); +} + +static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd) +{ + rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd); + + r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001); +} + +#define OOB_CMD_RESET 0x00 +#define OOB_CMD_DRIVER_START 0x05 +#define OOB_CMD_DRIVER_STOP 0x06 + +static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) +{ + return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 
0xb8 : 0x10; +} + +DECLARE_RTL_COND(rtl_dp_ocp_read_cond) +{ + u16 reg; + + reg = rtl8168_get_ocp_reg(tp); + + return r8168dp_ocp_read(tp, reg) & 0x00000800; +} + +DECLARE_RTL_COND(rtl_ep_ocp_read_cond) +{ + return r8168ep_ocp_read(tp, 0x124) & 0x00000001; +} + +DECLARE_RTL_COND(rtl_ocp_tx_cond) +{ + return RTL_R8(tp, IBISR0) & 0x20; +} + +static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) +{ + RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01); + rtl_loop_wait_high(tp, &rtl_ocp_tx_cond, 50000, 2000); + RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20); + RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01); +} + +static void rtl8168dp_driver_start(struct rtl8169_private *tp) +{ + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START); + rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10); +} + +static void rtl8168ep_driver_start(struct rtl8169_private *tp) +{ + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); + rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10); +} + +static void rtl8168_driver_start(struct rtl8169_private *tp) +{ + if (tp->dash_type == RTL_DASH_DP) + rtl8168dp_driver_start(tp); + else + rtl8168ep_driver_start(tp); +} + +static void rtl8168dp_driver_stop(struct rtl8169_private *tp) +{ + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP); + rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10); +} + +static void rtl8168ep_driver_stop(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); + r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); + rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10); +} + +static void rtl8168_driver_stop(struct rtl8169_private *tp) +{ + if (tp->dash_type == RTL_DASH_DP) + rtl8168dp_driver_stop(tp); + else + rtl8168ep_driver_stop(tp); +} + +static bool r8168dp_check_dash(struct rtl8169_private *tp) +{ + u16 reg = rtl8168_get_ocp_reg(tp); + + return r8168dp_ocp_read(tp, reg) & BIT(15); +} + +static bool r8168ep_check_dash(struct rtl8169_private *tp) +{ + return r8168ep_ocp_read(tp, 0x128) & BIT(0); +} + +static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE; + case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53: + return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE; + default: + return RTL_DASH_NONE; + } +} + +static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: + if (enable) + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN); + else + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | D3_NO_PLL_DOWN); + break; + default: + break; + } +} + +static void rtl_reset_packet_filter(struct rtl8169_private *tp) +{ + rtl_eri_clear_bits(tp, 0xdc, BIT(0)); + rtl_eri_set_bits(tp, 0xdc, BIT(0)); +} + +DECLARE_RTL_COND(rtl_efusear_cond) +{ + return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG; +} + +u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) +{ + RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + return rtl_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ? 
+ RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0; +} + +static u32 rtl_get_events(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + return RTL_R32(tp, IntrStatus_8125); + else + return RTL_R16(tp, IntrStatus); +} + +static void rtl_ack_events(struct rtl8169_private *tp, u32 bits) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrStatus_8125, bits); + else + RTL_W16(tp, IntrStatus, bits); +} + +static void rtl_irq_disable(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, 0); + else + RTL_W16(tp, IntrMask, 0); +} + +static void rtl_irq_enable(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W32(tp, IntrMask_8125, tp->irq_mask); + else + RTL_W16(tp, IntrMask, tp->irq_mask); +} + +static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) +{ + rtl_irq_disable(tp); + rtl_ack_events(tp, 0xffffffff); + rtl_pci_commit(tp); +} + +static void rtl_link_chg_patch(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + + if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else if (phydev->speed == SPEED_100) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); + } + rtl_reset_packet_filter(tp); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + if (phydev->speed == SPEED_1000) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); + } + } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { + if (phydev->speed == SPEED_10) { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a); + } else { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); + } + } +} + +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + wol->supported = WAKE_ANY; + wol->wolopts = tp->saved_wolopts; +} + +static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) +{ + static const struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_PHY, Config3, LinkUp }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake }, + { WAKE_MAGIC, Config3, MagicPacket } + }; + unsigned int i, tmp = ARRAY_SIZE(cfg); + u8 options; + + rtl_unlock_config_regs(tp); + + if (rtl_is_8168evl_up(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2); + else + rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2); + } else if (rtl_is_8125(tp)) { + tmp--; + if (wolopts & WAKE_MAGIC) + r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0)); + else + r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0); + } + + for (i = 0; i < tmp; i++) { + options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(tp, cfg[i].reg, options); + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... 
RTL_GIGA_MAC_VER_06: + options = RTL_R8(tp, Config1) & ~PMEnable; + if (wolopts) + options |= PMEnable; + RTL_W8(tp, Config1, options); + break; + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: + options = RTL_R8(tp, Config2) & ~PME_SIGNAL; + if (wolopts) + options |= PME_SIGNAL; + RTL_W8(tp, Config2, options); + break; + default: + break; + } + + rtl_lock_config_regs(tp); + + device_set_wakeup_enable(tp_to_dev(tp), wolopts); + + if (tp->dash_type == RTL_DASH_NONE) { + rtl_set_d3_pll_down(tp, !wolopts); + tp->dev->wol_enabled = wolopts ? 1 : 0; + } +} + +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (wol->wolopts & ~WAKE_ANY) + return -EINVAL; + + tp->saved_wolopts = wol->wolopts; + __rtl8169_set_wol(tp, tp->saved_wolopts); + + return 0; +} + +static void rtl8169_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl_fw *rtl_fw = tp->rtl_fw; + + strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + if (rtl_fw) + strscpy(info->fw_version, rtl_fw->version, + sizeof(info->fw_version)); +} + +static int rtl8169_get_regs_len(struct net_device *dev) +{ + return R8169_REGS_SIZE; +} + +static netdev_features_t rtl8169_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > TD_MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + + if (dev->mtu > ETH_DATA_LEN && + tp->mac_version > RTL_GIGA_MAC_VER_06) + features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO); + + return features; +} + +static void rtl_set_rx_config_features(struct rtl8169_private *tp, + netdev_features_t features) +{ + u32 rx_config = RTL_R32(tp, RxConfig); + + if (features & NETIF_F_RXALL) + rx_config |= RX_CONFIG_ACCEPT_ERR_MASK; + else + rx_config &= ~RX_CONFIG_ACCEPT_ERR_MASK; + + if (rtl_is_8125(tp)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rx_config |= RX_VLAN_8125; + else + rx_config &= ~RX_VLAN_8125; + } + + RTL_W32(tp, RxConfig, rx_config); +} + +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_set_rx_config_features(tp, features); + + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + if (!rtl_is_8125(tp)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + } + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_pci_commit(tp); + + return 0; +} + +static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) +{ + return (skb_vlan_tag_present(skb)) ? 
+ TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; +} + +static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); +} + +static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 __iomem *data = tp->mmio_addr; + u32 *dw = p; + int i; + + for (i = 0; i < R8169_REGS_SIZE; i += 4) + memcpy_fromio(dw++, data++, 4); +} + +static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", +}; + +static int rtl8169_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8169_gstrings); + default: + return -EOPNOTSUPP; + } +} + +DECLARE_RTL_COND(rtl_counters_cond) +{ + return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump); +} + +static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd) +{ + u32 cmd = lower_32_bits(tp->counters_phys_addr); + + RTL_W32(tp, CounterAddrHigh, upper_32_bits(tp->counters_phys_addr)); + rtl_pci_commit(tp); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | counter_cmd); + + rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); +} + +static void rtl8169_update_counters(struct rtl8169_private *tp) +{ + u8 val = RTL_R8(tp, ChipCmd); + + /* + * Some chips are unable to dump tally counters when the receiver + * is disabled. If 0xff chip may be in a PCI power-save state. + */ + if (val & CmdRxEnb && val != 0xff) + rtl8169_do_counters(tp, CounterDump); +} + +static void rtl8169_init_counter_offsets(struct rtl8169_private *tp) +{ + struct rtl8169_counters *counters = tp->counters; + + /* + * rtl8169_init_counter_offsets is called from rtl_open. On chip + * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only + * reset by a power cycle, while the counter values collected by the + * driver are reset at every driver unload/load cycle. + * + * To make sure the HW values returned by @get_stats64 match the SW + * values, we collect the initial values at first open(*) and use them + * as offsets to normalize the values returned by @get_stats64. + * + * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one + * for the reason stated in rtl8169_update_counters; CmdRxEnb is only + * set at open time by rtl_hw_start. 
+ */ + + if (tp->tc_offset.inited) + return; + + if (tp->mac_version >= RTL_GIGA_MAC_VER_19) { + rtl8169_do_counters(tp, CounterReset); + } else { + rtl8169_update_counters(tp); + tp->tc_offset.tx_errors = counters->tx_errors; + tp->tc_offset.tx_multi_collision = counters->tx_multi_collision; + tp->tc_offset.tx_aborted = counters->tx_aborted; + tp->tc_offset.rx_missed = counters->rx_missed; + } + + tp->tc_offset.inited = true; +} + +static void rtl8169_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_counters *counters; + + counters = tp->counters; + rtl8169_update_counters(tp); + + data[0] = le64_to_cpu(counters->tx_packets); + data[1] = le64_to_cpu(counters->rx_packets); + data[2] = le64_to_cpu(counters->tx_errors); + data[3] = le32_to_cpu(counters->rx_errors); + data[4] = le16_to_cpu(counters->rx_missed); + data[5] = le16_to_cpu(counters->align_errors); + data[6] = le32_to_cpu(counters->tx_one_collision); + data[7] = le32_to_cpu(counters->tx_multi_collision); + data[8] = le64_to_cpu(counters->rx_unicast); + data[9] = le64_to_cpu(counters->rx_broadcast); + data[10] = le32_to_cpu(counters->rx_multicast); + data[11] = le16_to_cpu(counters->tx_aborted); + data[12] = le16_to_cpu(counters->tx_underun); +} + +static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings)); + break; + } +} + +/* + * Interrupt coalescing + * + * > 1 - the availability of the IntrMitigate (0xe2) register through the + * > 8169, 8168 and 810x line of chipsets + * + * 8169, 8168, and 8136(810x) serial chipsets support it. + * + * > 2 - the Tx timer unit at gigabit speed + * + * The unit of the timer depends on both the speed and the setting of CPlusCmd + * (0xe0) bit 1 and bit 0. 
+ * + * For 8169 + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 320ns 2.56us 40.96us + * 0 1 2.56us 20.48us 327.7us + * 1 0 5.12us 40.96us 655.4us + * 1 1 10.24us 81.92us 1.31ms + * + * For the other + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 5us 2.56us 40.96us + * 0 1 40us 20.48us 327.7us + * 1 0 80us 40.96us 655.4us + * 1 1 160us 81.92us 1.31ms + */ + +/* rx/tx scale factors for all CPlusCmd[0:1] cases */ +struct rtl_coalesce_info { + u32 speed; + u32 scale_nsecs[4]; +}; + +/* produce array with base delay *1, *8, *8*2, *8*2*2 */ +#define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) } + +static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = { + { SPEED_1000, COALESCE_DELAY(320) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, + { 0 }, +}; + +static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = { + { SPEED_1000, COALESCE_DELAY(5000) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, + { 0 }, +}; +#undef COALESCE_DELAY + +/* get rx/tx scale vector corresponding to current speed */ +static const struct rtl_coalesce_info * +rtl_coalesce_info(struct rtl8169_private *tp) +{ + const struct rtl_coalesce_info *ci; + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + ci = rtl_coalesce_info_8169; + else + ci = rtl_coalesce_info_8168_8136; + + /* if speed is unknown assume highest one */ + if (tp->phydev->speed == SPEED_UNKNOWN) + return ci; + + for (; ci->speed; ci++) { + if (tp->phydev->speed == ci->speed) + return ci; + } + + return ERR_PTR(-ELNRNG); +} + +static int rtl_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct rtl8169_private *tp = netdev_priv(dev); + const struct rtl_coalesce_info *ci; + u32 scale, c_us, c_fr; + u16 intrmit; + + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + + memset(ec, 0, sizeof(*ec)); + + /* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */ + ci = rtl_coalesce_info(tp); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + scale = ci->scale_nsecs[tp->cp_cmd & INTT_MASK]; + + intrmit = RTL_R16(tp, IntrMitigate); + + c_us = FIELD_GET(RTL_COALESCE_TX_USECS, intrmit); + ec->tx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000); + + c_fr = FIELD_GET(RTL_COALESCE_TX_FRAMES, intrmit); + /* ethtool_coalesce states usecs and max_frames must not both be 0 */ + ec->tx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1; + + c_us = FIELD_GET(RTL_COALESCE_RX_USECS, intrmit); + ec->rx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000); + + c_fr = FIELD_GET(RTL_COALESCE_RX_FRAMES, intrmit); + ec->rx_max_coalesced_frames = (c_us || c_fr) ? 
c_fr * 4 : 1; + + return 0; +} + +/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, usec) */ +static int rtl_coalesce_choose_scale(struct rtl8169_private *tp, u32 usec, + u16 *cp01) +{ + const struct rtl_coalesce_info *ci; + u16 i; + + ci = rtl_coalesce_info(tp); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + for (i = 0; i < 4; i++) { + if (usec <= ci->scale_nsecs[i] * RTL_COALESCE_T_MAX / 1000U) { + *cp01 = i; + return ci->scale_nsecs[i]; + } + } + + return -ERANGE; +} + +static int rtl_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 tx_fr = ec->tx_max_coalesced_frames; + u32 rx_fr = ec->rx_max_coalesced_frames; + u32 coal_usec_max, units; + u16 w = 0, cp01 = 0; + int scale; + + if (rtl_is_8125(tp)) + return -EOPNOTSUPP; + + if (rx_fr > RTL_COALESCE_FRAME_MAX || tx_fr > RTL_COALESCE_FRAME_MAX) + return -ERANGE; + + coal_usec_max = max(ec->rx_coalesce_usecs, ec->tx_coalesce_usecs); + scale = rtl_coalesce_choose_scale(tp, coal_usec_max, &cp01); + if (scale < 0) + return scale; + + /* Accept max_frames=1 we returned in rtl_get_coalesce. Accept it + * not only when usecs=0 because of e.g. the following scenario: + * + * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX) + * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1 + * - then user does `ethtool -C eth0 rx-usecs 100` + * + * Since ethtool sends to kernel whole ethtool_coalesce settings, + * if we want to ignore rx_frames then it has to be set to 0. + */ + if (rx_fr == 1) + rx_fr = 0; + if (tx_fr == 1) + tx_fr = 0; + + /* HW requires time limit to be set if frame limit is set */ + if ((tx_fr && !ec->tx_coalesce_usecs) || + (rx_fr && !ec->rx_coalesce_usecs)) + return -EINVAL; + + w |= FIELD_PREP(RTL_COALESCE_TX_FRAMES, DIV_ROUND_UP(tx_fr, 4)); + w |= FIELD_PREP(RTL_COALESCE_RX_FRAMES, DIV_ROUND_UP(rx_fr, 4)); + + units = DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000U, scale); + w |= FIELD_PREP(RTL_COALESCE_TX_USECS, units); + units = DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000U, scale); + w |= FIELD_PREP(RTL_COALESCE_RX_USECS, units); + + RTL_W16(tp, IntrMitigate, w); + + /* Meaning of PktCntrDisable bit changed from RTL8168e-vl */ + if (rtl_is_8168evl_up(tp)) { + if (!rx_fr && !tx_fr) + /* disable packet counter */ + tp->cp_cmd |= PktCntrDisable; + else + tp->cp_cmd &= ~PktCntrDisable; + } + + tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_pci_commit(tp); + + return 0; +} + +static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + + return phy_ethtool_get_eee(tp->phydev, data); +} + +static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + if (!rtl_supports_eee(tp)) + return -EOPNOTSUPP; + + ret = phy_ethtool_set_eee(tp->phydev, data); + + if (!ret) + tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN, + MDIO_AN_EEE_ADV); + return ret; +} + +static void rtl8169_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *data, + struct kernel_ethtool_ringparam *kernel_data, + struct netlink_ext_ack *extack) +{ + data->rx_max_pending = NUM_RX_DESC; + data->rx_pending = NUM_RX_DESC; + data->tx_max_pending = NUM_TX_DESC; + data->tx_pending = NUM_TX_DESC; +} + +static void rtl8169_get_pauseparam(struct 
net_device *dev, + struct ethtool_pauseparam *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + bool tx_pause, rx_pause; + + phy_get_pause(tp->phydev, &tx_pause, &rx_pause); + + data->autoneg = tp->phydev->autoneg; + data->tx_pause = tx_pause ? 1 : 0; + data->rx_pause = rx_pause ? 1 : 0; +} + +static int rtl8169_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > ETH_DATA_LEN) + return -EOPNOTSUPP; + + phy_set_asym_pause(tp->phydev, data->rx_pause, data->tx_pause); + + return 0; +} + +static const struct ethtool_ops rtl8169_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, + .get_drvinfo = rtl8169_get_drvinfo, + .get_regs_len = rtl8169_get_regs_len, + .get_link = ethtool_op_get_link, + .get_coalesce = rtl_get_coalesce, + .set_coalesce = rtl_set_coalesce, + .get_regs = rtl8169_get_regs, + .get_wol = rtl8169_get_wol, + .set_wol = rtl8169_set_wol, + .get_strings = rtl8169_get_strings, + .get_sset_count = rtl8169_get_sset_count, + .get_ethtool_stats = rtl8169_get_ethtool_stats, + .get_ts_info = ethtool_op_get_ts_info, + .nway_reset = phy_ethtool_nway_reset, + .get_eee = rtl8169_get_eee, + .set_eee = rtl8169_set_eee, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_ringparam = rtl8169_get_ringparam, + .get_pauseparam = rtl8169_get_pauseparam, + .set_pauseparam = rtl8169_set_pauseparam, +}; + +static void rtl_enable_eee(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + int adv; + + /* respect EEE advertisement the user may have set */ + if (tp->eee_adv >= 0) + adv = tp->eee_adv; + else + adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); + + if (adv >= 0) + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv); +} + +static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) +{ + /* + * The driver currently handles the 8168Bf and the 8168Be identically + * but they can be identified more specifically through the test below + * if needed: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * + * Same thing for the 8101Eb and the 8101Ec: + * + * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + */ + static const struct rtl_mac_info { + u16 mask; + u16 val; + enum mac_version ver; + } mac_info[] = { + /* 8125B family. */ + { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 }, + + /* 8125A family. */ + { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61 }, + /* It seems only XID 609 made it to the mass market. + * { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 }, + * { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 }, + */ + + /* RTL8117 */ + { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 }, + { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 }, + + /* 8168EP family. */ + { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 }, + /* It seems this chip version never made it to + * the wild. Let's disable detection. + * { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 }, + * { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 }, + */ + + /* 8168H family. */ + { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 }, + /* It seems this chip version never made it to + * the wild. Let's disable detection. + * { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 }, + */ + + /* 8168G family. */ + { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 }, + { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 }, + /* It seems this chip version never made it to + * the wild. Let's disable detection. 
+ * { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 }, + */ + { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 }, + + /* 8168F family. */ + { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 }, + { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 }, + { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 }, + + /* 8168E family. */ + { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 }, + { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 }, + { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 }, + + /* 8168D family. */ + { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 }, + { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 }, + + /* 8168DP family. */ + /* It seems this early RTL8168dp version never made it to + * the wild. Support has been removed. + * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 }, + */ + { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 }, + { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 }, + + /* 8168C family. */ + { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 }, + { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 }, + { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 }, + { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 }, + { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 }, + { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 }, + { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 }, + + /* 8168B family. */ + { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 }, + { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, + + /* 8101 family. */ + { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 }, + { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 }, + { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 }, + { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 }, + { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 }, + { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10 }, + + /* 8110 family. */ + { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 }, + { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 }, + { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 }, + { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 }, + { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 }, + + /* Catch-all */ + { 0x000, 0x000, RTL_GIGA_MAC_NONE } + }; + const struct rtl_mac_info *p = mac_info; + enum mac_version ver; + + while ((xid & p->mask) != p->val) + p++; + ver = p->ver; + + if (ver != RTL_GIGA_MAC_NONE && !gmii) { + if (ver == RTL_GIGA_MAC_VER_42) + ver = RTL_GIGA_MAC_VER_43; + else if (ver == RTL_GIGA_MAC_VER_46) + ver = RTL_GIGA_MAC_VER_48; + } + + return ver; +} + +static void rtl_release_firmware(struct rtl8169_private *tp) +{ + if (tp->rtl_fw) { + rtl_fw_release_firmware(tp->rtl_fw); + kfree(tp->rtl_fw); + tp->rtl_fw = NULL; + } +} + +void r8169_apply_firmware(struct rtl8169_private *tp) +{ + int val; + + /* TODO: release firmware if rtl_fw_write_firmware signals failure. */ + if (tp->rtl_fw) { + rtl_fw_write_firmware(tp, tp->rtl_fw); + /* At least one firmware doesn't reset tp->ocp_base. 
*/ + tp->ocp_base = OCP_STD_PHY_BASE; + + /* PHY soft reset may still be in progress */ + phy_read_poll_timeout(tp->phydev, MII_BMCR, val, + !(val & BMCR_RESET), + 50000, 600000, true); + } +} + +static void rtl8168_config_eee_mac(struct rtl8169_private *tp) +{ + /* Adjust EEE LED frequency */ + if (tp->mac_version != RTL_GIGA_MAC_VER_38) + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + + rtl_eri_set_bits(tp, 0x1b0, 0x0003); +} + +static void rtl8125a_config_eee_mac(struct rtl8169_private *tp) +{ + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); + r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1)); +} + +static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp) +{ + RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20); +} + +static void rtl8125b_config_eee_mac(struct rtl8169_private *tp) +{ + rtl8125_set_eee_txidle_timer(tp); + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); +} + +static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr) +{ + rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr)); + rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, get_unaligned_le16(addr + 4)); + rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, get_unaligned_le16(addr) << 16); + rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, get_unaligned_le32(addr + 2)); +} + +u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp) +{ + u16 data1, data2, ioffset; + + r8168_mac_ocp_write(tp, 0xdd02, 0x807d); + data1 = r8168_mac_ocp_read(tp, 0xdd02); + data2 = r8168_mac_ocp_read(tp, 0xdd00); + + ioffset = (data2 >> 1) & 0x7ff8; + ioffset |= data2 & 0x0007; + if (data1 & BIT(7)) + ioffset |= BIT(15); + + return ioffset; +} + +static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) +{ + set_bit(flag, tp->wk.flags); + schedule_work(&tp->wk.work); +} + +static void rtl8169_init_phy(struct rtl8169_private *tp) +{ + r8169_hw_phy_config(tp, tp->phydev, tp->mac_version); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); + /* set undocumented MAC Reg C+CR Offset 0x82h */ + RTL_W8(tp, 0x82, 0x01); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_05 && + tp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE && + tp->pci_dev->subsystem_device == 0xe000) + phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b); + + /* We may have called phy_speed_down before */ + phy_speed_up(tp->phydev); + + if (rtl_supports_eee(tp)) + rtl_enable_eee(tp); + + genphy_soft_reset(tp->phydev); +} + +static void rtl_rar_set(struct rtl8169_private *tp, const u8 *addr) +{ + rtl_unlock_config_regs(tp); + + RTL_W32(tp, MAC4, get_unaligned_le16(addr + 4)); + rtl_pci_commit(tp); + + RTL_W32(tp, MAC0, get_unaligned_le32(addr)); + rtl_pci_commit(tp); + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + rtl_rar_exgmac_set(tp, addr); + + rtl_lock_config_regs(tp); +} + +static int rtl_set_mac_address(struct net_device *dev, void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + ret = eth_mac_addr(dev, p); + if (ret) + return ret; + + rtl_rar_set(tp, dev->dev_addr); + + return 0; +} + +static void rtl_init_rxcfg(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: + RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_34 ... 
RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63: + RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST); + break; + default: + RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST); + break; + } +} + +static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) +{ + tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; +} + +static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); +} + +static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); +} + +static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); +} + +static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); +} + +static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x24); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); +} + +static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, 0x3f); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); +} + +static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); +} + +static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); +} + +static void rtl_jumbo_config(struct rtl8169_private *tp) +{ + bool jumbo = tp->dev->mtu > ETH_DATA_LEN; + int readrq = 4096; + + rtl_unlock_config_regs(tp); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_17: + if (jumbo) { + readrq = 512; + r8168b_1_hw_jumbo_enable(tp); + } else { + r8168b_1_hw_jumbo_disable(tp); + } + break; + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26: + if (jumbo) { + readrq = 512; + r8168c_hw_jumbo_enable(tp); + } else { + r8168c_hw_jumbo_disable(tp); + } + break; + case RTL_GIGA_MAC_VER_28: + if (jumbo) + r8168dp_hw_jumbo_enable(tp); + else + r8168dp_hw_jumbo_disable(tp); + break; + case RTL_GIGA_MAC_VER_31 ... 
RTL_GIGA_MAC_VER_33: + if (jumbo) + r8168e_hw_jumbo_enable(tp); + else + r8168e_hw_jumbo_disable(tp); + break; + default: + break; + } + rtl_lock_config_regs(tp); + + if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii) + pcie_set_readrq(tp->pci_dev, readrq); + + /* Chip doesn't support pause in jumbo mode */ + if (jumbo) { + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + tp->phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + tp->phydev->advertising); + phy_start_aneg(tp->phydev); + } +} + +DECLARE_RTL_COND(rtl_chipcmd_cond) +{ + return RTL_R8(tp, ChipCmd) & CmdReset; +} + +static void rtl_hw_reset(struct rtl8169_private *tp) +{ + RTL_W8(tp, ChipCmd, CmdReset); + + rtl_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); +} + +static void rtl_request_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw; + + /* firmware loaded already or no firmware available */ + if (tp->rtl_fw || !tp->fw_name) + return; + + rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); + if (!rtl_fw) + return; + + rtl_fw->phy_write = rtl_writephy; + rtl_fw->phy_read = rtl_readphy; + rtl_fw->mac_mcu_write = mac_mcu_write; + rtl_fw->mac_mcu_read = mac_mcu_read; + rtl_fw->fw_name = tp->fw_name; + rtl_fw->dev = tp_to_dev(tp); + + if (rtl_fw_request_firmware(rtl_fw)) + kfree(rtl_fw); + else + tp->rtl_fw = rtl_fw; +} + +static void rtl_rx_close(struct rtl8169_private *tp) +{ + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK); +} + +DECLARE_RTL_COND(rtl_npq_cond) +{ + return RTL_R8(tp, TxPoll) & NPQ; +} + +DECLARE_RTL_COND(rtl_txcfg_empty_cond) +{ + return RTL_R32(tp, TxConfig) & TXCFG_EMPTY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond) +{ + return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond_2) +{ + /* IntrMitigate has new functionality on RTL8125 */ + return (RTL_R16(tp, IntrMitigate) & 0x0103) == 0x0103; +} + +static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + break; + case RTL_GIGA_MAC_VER_61 ... 
RTL_GIGA_MAC_VER_61: + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + break; + case RTL_GIGA_MAC_VER_63: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42); + break; + default: + break; + } +} + +static void rtl_disable_rxdvgate(struct rtl8169_private *tp) +{ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); +} + +static void rtl_enable_rxdvgate(struct rtl8169_private *tp) +{ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN); + fsleep(2000); + rtl_wait_txrx_fifo_empty(tp); +} + +static void rtl_wol_enable_rx(struct rtl8169_private *tp) +{ + if (tp->mac_version >= RTL_GIGA_MAC_VER_25) + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | + AcceptBroadcast | AcceptMulticast | AcceptMyPhys); + + if (tp->mac_version >= RTL_GIGA_MAC_VER_40) + rtl_disable_rxdvgate(tp); +} + +static void rtl_prepare_power_down(struct rtl8169_private *tp) +{ + if (tp->dash_type != RTL_DASH_NONE) + return; + + if (tp->mac_version == RTL_GIGA_MAC_VER_32 || + tp->mac_version == RTL_GIGA_MAC_VER_33) + rtl_ephy_write(tp, 0x19, 0xff64); + + if (device_may_wakeup(tp_to_dev(tp))) { + phy_speed_down(tp->phydev, false); + rtl_wol_enable_rx(tp); + } +} + +static void rtl_set_tx_config_registers(struct rtl8169_private *tp) +{ + u32 val = TX_DMA_BURST << TxDMAShift | + InterFrameGap << TxInterFrameGapShift; + + if (rtl_is_8168evl_up(tp)) + val |= TXCFG_AUTO_FIFO; + + RTL_W32(tp, TxConfig, val); +} + +static void rtl_set_rx_max_size(struct rtl8169_private *tp) +{ + /* Low hurts. Let's disable the filtering. */ + RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1); +} + +static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) +{ + /* + * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh + * register to be written before TxDescAddrLow to work. + * Switching from MMIO to I/O access fixes the issue as well. 
+ */ + RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); + RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); + RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); +} + +static void rtl8169_set_magic_reg(struct rtl8169_private *tp) +{ + u32 val; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + val = 0x000fff00; + else if (tp->mac_version == RTL_GIGA_MAC_VER_06) + val = 0x00ffff00; + else + return; + + if (RTL_R8(tp, Config2) & PCI_Clock_66MHz) + val |= 0xff; + + RTL_W32(tp, 0x7c, val); +} + +static void rtl_set_rx_mode(struct net_device *dev) +{ + u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast; + /* Multicast hash filter */ + u32 mc_filter[2] = { 0xffffffff, 0xffffffff }; + struct rtl8169_private *tp = netdev_priv(dev); + u32 tmp; + + if (dev->flags & IFF_PROMISC) { + rx_mode |= AcceptAllPhys; + } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT || + dev->flags & IFF_ALLMULTI || + tp->mac_version == RTL_GIGA_MAC_VER_35) { + /* accept all multicasts */ + } else if (netdev_mc_empty(dev)) { + rx_mode &= ~AcceptMulticast; + } else { + struct netdev_hw_addr *ha; + + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + u32 bit_nr = eth_hw_addr_crc(ha) >> 26; + mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31); + } + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + tmp = mc_filter[0]; + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(tmp); + } + } + + RTL_W32(tp, MAR0 + 4, mc_filter[1]); + RTL_W32(tp, MAR0 + 0, mc_filter[0]); + + tmp = RTL_R32(tp, RxConfig); + RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_OK_MASK) | rx_mode); +} + +DECLARE_RTL_COND(rtl_csiar_cond) +{ + return RTL_R32(tp, CSIAR) & CSIAR_FLAG; +} + +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIDR, value); + RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE | func << 16); + + rtl_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) +{ + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 | + CSIAR_BYTE_ENABLE); + + return rtl_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? + RTL_R32(tp, CSIDR) : ~0; +} + +static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val) +{ + struct pci_dev *pdev = tp->pci_dev; + u32 csi; + + /* According to Realtek the value at config space address 0x070f + * controls the L0s/L1 entrance latency. We try standard ECAM access + * first and if it fails fall back to CSI. + * bit 0..2: L0: 0 = 1us, 1 = 2us .. 6 = 7us, 7 = 7us (no typo) + * bit 3..5: L1: 0 = 1us, 1 = 2us .. 
6 = 64us, 7 = 64us + */ + if (pdev->cfg_size > 0x070f && + pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL) + return; + + netdev_notice_once(tp->dev, + "No native access to PCI extended config space, falling back to CSI\n"); + csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; + rtl_csi_write(tp, 0x070c, csi | val << 24); +} + +static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) +{ + /* L0 7us, L1 16us */ + rtl_set_aspm_entry_latency(tp, 0x27); +} + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +static void __rtl_ephy_init(struct rtl8169_private *tp, + const struct ephy_info *e, int len) +{ + u16 w; + + while (len-- > 0) { + w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits; + rtl_ephy_write(tp, e->offset, w); + e++; + } +} + +#define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a)) + +static void rtl_disable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_enable_clock_request(struct rtl8169_private *tp) +{ + pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp) +{ + /* work around an issue when PCI reset occurs during L2/L3 state */ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23); +} + +static void rtl_enable_exit_l1(struct rtl8169_private *tp) +{ + /* Bits control which events trigger ASPM L1 exit: + * Bit 12: rxdv + * Bit 11: ltr_msg + * Bit 10: txdma_poll + * Bit 9: xadm + * Bit 8: pktavi + * Bit 7: txpla + */ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: + rtl_eri_set_bits(tp, 0xd4, 0x1f00); + break; + case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38: + rtl_eri_set_bits(tp, 0xd4, 0x0c00); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80); + break; + default: + break; + } +} + +static void rtl_disable_exit_l1(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + rtl_eri_clear_bits(tp, 0xd4, 0x1f00); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0); + break; + default: + break; + } +} + +static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) +{ + /* Don't enable ASPM in the chip if OS can't control ASPM */ + if (enable && tp->aspm_manageable) { + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63: + /* reset ephy tx/rx disable timer */ + r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0); + /* chip can trigger L1.2 */ + r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, BIT(2)); + break; + default: + break; + } + } else { + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63: + r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0); + break; + default: + break; + } + + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + } + + udelay(10); +} + +static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat, + u16 tx_stat, u16 rx_dyn, u16 tx_dyn) +{ + /* Usage of dynamic vs. static FIFO is controlled by bit + * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known. 
+ */ + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn); +} + +static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp, + u8 low, u8 high) +{ + /* FIFO thresholds for pause flow control */ + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high); +} + +static void rtl_hw_start_8168b(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) +{ + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168cp[] = { + { 0x01, 0, 0x0001 }, + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0042 }, + { 0x06, 0x0080, 0x0000 }, + { 0x07, 0, 0x2000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168cp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + /* Magic. */ + RTL_W8(tp, DBG_REG, 0x20); +} + +static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_1[] = { + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0002 }, + { 0x06, 0x0080, 0x0000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); + + rtl_ephy_init(tp, e_info_8168c_1); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_2[] = { + { 0x01, 0, 0x0001 }, + { 0x03, 0x0400, 0x0020 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168c_2); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168d(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_disable_clock_request(tp); +} + +static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168d_4[] = { + { 0x0b, 0x0000, 0x0048 }, + { 0x19, 0x0020, 0x0050 }, + { 0x0c, 0x0100, 0x0020 }, + { 0x10, 0x0004, 0x0000 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168d_4); + + rtl_enable_clock_request(tp); +} + +static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_1[] = { + { 0x00, 0x0200, 0x0100 }, + { 0x00, 0x0000, 0x0004 }, + { 0x06, 0x0002, 0x0001 }, + { 0x06, 0x0000, 0x0030 }, + { 0x07, 0x0000, 0x2000 }, + { 0x00, 0x0000, 0x0020 }, + { 0x03, 0x5800, 0x2000 }, + { 0x03, 0x0000, 0x0001 }, + { 0x01, 0x0800, 0x1000 }, + { 0x07, 0x0000, 0x4000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x19, 0xffff, 0xfe6c }, + { 0x0a, 0x0000, 0x0040 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_1); + + rtl_disable_clock_request(tp); + + /* Reset tx FIFO pointer */ + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST); + + RTL_W8(tp, Config5, RTL_R8(tp, 
Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168e_2[] = { + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0004 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + rtl_ephy_init(tp, e_info_8168e_2); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_eri_set_bits(tp, 0x1d0, BIT(1)); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168f(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, BIT(4)); + rtl_eri_set_bits(tp, 0x1d0, BIT(4) | BIT(1)); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060); + + rtl_disable_clock_request(tp); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl8168_config_eee_mac(tp); +} + +static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x08, 0x0001, 0x0002 }, + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_hw_start_8168f(tp); + + rtl_ephy_init(tp, e_info_8168f_1); +} + +static void rtl_hw_start_8411(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x19, 0x0000, 0x0224 }, + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + }; + + rtl_hw_start_8168f(tp); + rtl_pcie_state_l2l3_disable(tp); + + rtl_ephy_init(tp, e_info_8168f_1); +} + +static void rtl_hw_start_8168g(struct rtl8169_private *tp) +{ + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f); + + rtl_disable_rxdvgate(tp); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_1[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8000, 0x0000 } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_1); + rtl_hw_aspm_clkreq_enable(tp, true); +} + 
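+/* Note on the chip-specific hw_start helpers that follow: they share the
+ * shape already visible in rtl_hw_start_8168g_1() above. ASPM and the
+ * CLKREQ signal are switched off via rtl_hw_aspm_clkreq_enable(tp, false)
+ * before the EPHY fixup table is written with rtl_ephy_init(), and - where
+ * a helper re-enables them at all - they are only switched back on after
+ * the remaining MAC OCP setup at the end of that helper.
+ */
+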
+static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168g_2[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x3ff0, 0x0820 }, + { 0x19, 0xffff, 0x7c00 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x0d, 0xffff, 0x1666 }, + { 0x00, 0xffff, 0x10a3 }, + { 0x06, 0xffff, 0xf050 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x4000, 0x0000 }, + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168g_2); +} + +static void rtl_hw_start_8411_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8411_2[] = { + { 0x00, 0x0008, 0x0000 }, + { 0x0c, 0x37d0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x00, 0x0000, 0x0080 }, + { 0x06, 0x0000, 0x0010 }, + { 0x04, 0x0000, 0x0010 }, + { 0x1d, 0x0000, 0x4000 }, + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8411_2); + + /* The following Realtek-provided magic fixes an issue with the RX unit + * getting confused after the PHY having been powered-down. + */ + r8168_mac_ocp_write(tp, 0xFC28, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2A, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0000); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0000); + r8168_mac_ocp_write(tp, 0xFC30, 0x0000); + r8168_mac_ocp_write(tp, 0xFC32, 0x0000); + r8168_mac_ocp_write(tp, 0xFC34, 0x0000); + r8168_mac_ocp_write(tp, 0xFC36, 0x0000); + mdelay(3); + r8168_mac_ocp_write(tp, 0xFC26, 0x0000); + + r8168_mac_ocp_write(tp, 0xF800, 0xE008); + r8168_mac_ocp_write(tp, 0xF802, 0xE00A); + r8168_mac_ocp_write(tp, 0xF804, 0xE00C); + r8168_mac_ocp_write(tp, 0xF806, 0xE00E); + r8168_mac_ocp_write(tp, 0xF808, 0xE027); + r8168_mac_ocp_write(tp, 0xF80A, 0xE04F); + r8168_mac_ocp_write(tp, 0xF80C, 0xE05E); + r8168_mac_ocp_write(tp, 0xF80E, 0xE065); + r8168_mac_ocp_write(tp, 0xF810, 0xC602); + r8168_mac_ocp_write(tp, 0xF812, 0xBE00); + r8168_mac_ocp_write(tp, 0xF814, 0x0000); + r8168_mac_ocp_write(tp, 0xF816, 0xC502); + r8168_mac_ocp_write(tp, 0xF818, 0xBD00); + r8168_mac_ocp_write(tp, 0xF81A, 0x074C); + r8168_mac_ocp_write(tp, 0xF81C, 0xC302); + r8168_mac_ocp_write(tp, 0xF81E, 0xBB00); + r8168_mac_ocp_write(tp, 0xF820, 0x080A); + r8168_mac_ocp_write(tp, 0xF822, 0x6420); + r8168_mac_ocp_write(tp, 0xF824, 0x48C2); + r8168_mac_ocp_write(tp, 0xF826, 0x8C20); + r8168_mac_ocp_write(tp, 0xF828, 0xC516); + r8168_mac_ocp_write(tp, 0xF82A, 0x64A4); + r8168_mac_ocp_write(tp, 0xF82C, 0x49C0); + r8168_mac_ocp_write(tp, 0xF82E, 0xF009); + r8168_mac_ocp_write(tp, 0xF830, 0x74A2); + r8168_mac_ocp_write(tp, 0xF832, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF834, 0x74A0); + r8168_mac_ocp_write(tp, 0xF836, 0xC50E); + r8168_mac_ocp_write(tp, 0xF838, 0x9CA2); + r8168_mac_ocp_write(tp, 0xF83A, 0x1C11); + r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF83E, 0xE006); + r8168_mac_ocp_write(tp, 0xF840, 0x74F8); + r8168_mac_ocp_write(tp, 0xF842, 0x48C4); + r8168_mac_ocp_write(tp, 0xF844, 0x8CF8); + r8168_mac_ocp_write(tp, 0xF846, 0xC404); + r8168_mac_ocp_write(tp, 0xF848, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84A, 0xC403); + r8168_mac_ocp_write(tp, 0xF84C, 0xBC00); + r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2); + r8168_mac_ocp_write(tp, 0xF850, 0x0C0A); + r8168_mac_ocp_write(tp, 0xF852, 0xE434); + r8168_mac_ocp_write(tp, 0xF854, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF856, 0x49D9); + 
r8168_mac_ocp_write(tp, 0xF858, 0xF01F); + r8168_mac_ocp_write(tp, 0xF85A, 0xC526); + r8168_mac_ocp_write(tp, 0xF85C, 0x64A5); + r8168_mac_ocp_write(tp, 0xF85E, 0x1400); + r8168_mac_ocp_write(tp, 0xF860, 0xF007); + r8168_mac_ocp_write(tp, 0xF862, 0x0C01); + r8168_mac_ocp_write(tp, 0xF864, 0x8CA5); + r8168_mac_ocp_write(tp, 0xF866, 0x1C15); + r8168_mac_ocp_write(tp, 0xF868, 0xC51B); + r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0); + r8168_mac_ocp_write(tp, 0xF86C, 0xE013); + r8168_mac_ocp_write(tp, 0xF86E, 0xC519); + r8168_mac_ocp_write(tp, 0xF870, 0x74A0); + r8168_mac_ocp_write(tp, 0xF872, 0x48C4); + r8168_mac_ocp_write(tp, 0xF874, 0x8CA0); + r8168_mac_ocp_write(tp, 0xF876, 0xC516); + r8168_mac_ocp_write(tp, 0xF878, 0x74A4); + r8168_mac_ocp_write(tp, 0xF87A, 0x48C8); + r8168_mac_ocp_write(tp, 0xF87C, 0x48CA); + r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4); + r8168_mac_ocp_write(tp, 0xF880, 0xC512); + r8168_mac_ocp_write(tp, 0xF882, 0x1B00); + r8168_mac_ocp_write(tp, 0xF884, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF886, 0x1B1C); + r8168_mac_ocp_write(tp, 0xF888, 0x483F); + r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2); + r8168_mac_ocp_write(tp, 0xF88C, 0x1B04); + r8168_mac_ocp_write(tp, 0xF88E, 0xC508); + r8168_mac_ocp_write(tp, 0xF890, 0x9BA0); + r8168_mac_ocp_write(tp, 0xF892, 0xC505); + r8168_mac_ocp_write(tp, 0xF894, 0xBD00); + r8168_mac_ocp_write(tp, 0xF896, 0xC502); + r8168_mac_ocp_write(tp, 0xF898, 0xBD00); + r8168_mac_ocp_write(tp, 0xF89A, 0x0300); + r8168_mac_ocp_write(tp, 0xF89C, 0x051E); + r8168_mac_ocp_write(tp, 0xF89E, 0xE434); + r8168_mac_ocp_write(tp, 0xF8A0, 0xE018); + r8168_mac_ocp_write(tp, 0xF8A2, 0xE092); + r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20); + r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0); + r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F); + r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4); + r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3); + r8168_mac_ocp_write(tp, 0xF8AE, 0xF007); + r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0); + r8168_mac_ocp_write(tp, 0xF8B2, 0xF103); + r8168_mac_ocp_write(tp, 0xF8B4, 0xC607); + r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8B8, 0xC606); + r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8BC, 0xC602); + r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00); + r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C); + r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28); + r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C); + r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00); + r8168_mac_ocp_write(tp, 0xF8C8, 0xC707); + r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00); + r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2); + r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1); + r8168_mac_ocp_write(tp, 0xF8D0, 0xC502); + r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA); + r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0); + r8168_mac_ocp_write(tp, 0xF8D8, 0xC502); + r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00); + r8168_mac_ocp_write(tp, 0xF8DC, 0x0132); + + r8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + r8168_mac_ocp_write(tp, 0xFC2A, 0x0743); + r8168_mac_ocp_write(tp, 0xFC2C, 0x0801); + r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9); + r8168_mac_ocp_write(tp, 0xFC30, 0x02FD); + r8168_mac_ocp_write(tp, 0xFC32, 0x0C25); + r8168_mac_ocp_write(tp, 0xFC34, 0x00A9); + r8168_mac_ocp_write(tp, 0xFC36, 0x012D); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168h_1[] = { + { 0x1e, 0x0800, 0x0001 }, + { 0x1d, 0x0000, 0x0800 }, + { 0x05, 0xffff, 0x2089 }, + { 0x06, 0xffff, 0x5881 }, + { 0x04, 0xffff, 0x854a }, + { 0x01, 0xffff, 
0x068b } + }; + int rg_saw_cnt; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168h_1); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x38, 0x48); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xdc, 0x001c); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + rtl_disable_rxdvgate(tp); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = 16000000/rg_saw_cnt; + sw_cnt_1ms_ini &= 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008); + r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8168ep(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + rtl_disable_rxdvgate(tp); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168ep_3[] = { + { 0x00, 0x0000, 0x0080 }, + { 0x0d, 0x0100, 0x0200 }, + { 0x19, 0x8021, 0x0000 }, + { 0x1e, 0x0000, 0x2000 }, + }; + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8168ep_3); + + rtl_hw_start_8168ep(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8117(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8117[] = { + { 0x19, 0x0040, 0x1100 }, + { 0x59, 0x0040, 0x1100 }, + }; + int rg_saw_cnt; + + rtl8168ep_stop_cmac(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8117); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, 0x0010); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + rtl_disable_rxdvgate(tp); + + rtl_eri_write(tp, 
0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_write(tp, 0xea80, 0x0003); + r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + /* firmware is for MAC only */ + r8169_apply_firmware(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8102e_1[] = { + { 0x01, 0, 0x6e65 }, + { 0x02, 0, 0x091f }, + { 0x03, 0, 0xc2f9 }, + { 0x06, 0, 0xafb5 }, + { 0x07, 0, 0x0e00 }, + { 0x19, 0, 0xec80 }, + { 0x01, 0, 0x2e65 }, + { 0x01, 0, 0x6e65 } + }; + u8 cfg1; + + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, DBG_REG, FIX_NAK_1); + + RTL_W8(tp, Config1, + LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + cfg1 = RTL_R8(tp, Config1); + if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) + RTL_W8(tp, Config1, cfg1 & ~LEDS0); + + rtl_ephy_init(tp, e_info_8102e_1); +} + +static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + + RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8102e_2(tp); + + rtl_ephy_write(tp, 0x03, 0xc2f9); +} + +static void rtl_hw_start_8401(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8401[] = { + { 0x01, 0xffff, 0x6fe5 }, + { 0x03, 0xffff, 0x0599 }, + { 0x06, 0xffff, 0xaf25 }, + { 0x07, 0xffff, 0x8e68 }, + }; + + rtl_ephy_init(tp, e_info_8401); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8105e_1[] = { + { 0x07, 0, 0x4000 }, + { 0x19, 0, 0x0200 }, + { 0x19, 0, 0x0020 }, + { 0x1e, 0, 0x2000 }, + { 0x03, 0, 0x0001 }, + { 0x19, 0, 0x0100 }, + { 0x19, 0, 0x0004 }, + { 0x0a, 0, 0x0020 } + }; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + /* Disable Early Tally Counter */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); + + rtl_ephy_init(tp, e_info_8105e_1); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) +{ + rtl_hw_start_8105e_1(tp); + rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000); +} + +static void rtl_hw_start_8402(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8402[] = { + { 0x19, 0xffff, 0xff64 }, + { 0x1e, 0, 0x4000 } + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* 
Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + rtl_ephy_init(tp, e_info_8402); + + rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06); + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_w0w1_eri(tp, 0x0d4, 0x0e00, 0xff00); + + /* disable EEE */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + + rtl_pcie_state_l2l3_disable(tp); +} + +static void rtl_hw_start_8106(struct rtl8169_private *tp) +{ + rtl_hw_aspm_clkreq_enable(tp, false); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); + + RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + + /* L0 7us, L1 32us - needed to avoid issues with link-up detection */ + rtl_set_aspm_entry_latency(tp, 0x2f); + + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); + + /* disable EEE */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); + + rtl_pcie_state_l2l3_disable(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond) +{ + return r8168_mac_ocp_read(tp, 0xe00e) & BIT(13); +} + +static void rtl_hw_start_8125_common(struct rtl8169_private *tp) +{ + rtl_pcie_state_l2l3_disable(tp); + + RTL_W16(tp, 0x382, 0x221b); + RTL_W8(tp, 0x4500, 0); + RTL_W16(tp, 0x4800, 0); + + /* disable UPS */ + r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10); + + r8168_mac_ocp_write(tp, 0xc140, 0xffff); + r8168_mac_ocp_write(tp, 0xc142, 0xffff); + + r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x03a9); + r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000); + r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080); + + /* disable new tx descriptor format */ + r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200); + else + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000); + else + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020); + + r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c); + r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033); + r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040); + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030); + r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000); + r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); + r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403); + r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068); + r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f); + + r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001); + udelay(1); + r8168_mac_ocp_modify(tp, 0xeb54, 0x0001, 0x0000); + RTL_W16(tp, 0x1880, RTL_R16(tp, 0x1880) & ~0x0030); + + r8168_mac_ocp_write(tp, 0xe098, 0xc302); + + rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + rtl8125b_config_eee_mac(tp); + else + rtl8125a_config_eee_mac(tp); + + rtl_disable_rxdvgate(tp); +} + +static void rtl_hw_start_8125a_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125a_2[] = { + { 0x04, 0xffff, 0xd000 }, + { 0x0a, 0xffff, 0x8653 }, + { 0x23, 0xffff, 0xab66 }, + { 0x20, 0xffff, 0x9455 }, + { 0x21, 0xffff, 
0x99ff }, + { 0x29, 0xffff, 0xfe04 }, + + { 0x44, 0xffff, 0xd000 }, + { 0x4a, 0xffff, 0x8653 }, + { 0x63, 0xffff, 0xab66 }, + { 0x60, 0xffff, 0x9455 }, + { 0x61, 0xffff, 0x99ff }, + { 0x69, 0xffff, 0xfe04 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8125a_2); + + rtl_hw_start_8125_common(tp); + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_start_8125b(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125b[] = { + { 0x0b, 0xffff, 0xa908 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x4b, 0xffff, 0xa908 }, + { 0x5e, 0xffff, 0x20eb }, + { 0x22, 0x0030, 0x0020 }, + { 0x62, 0x0030, 0x0020 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + rtl_hw_aspm_clkreq_enable(tp, false); + + rtl_ephy_init(tp, e_info_8125b); + rtl_hw_start_8125_common(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + +static void rtl_hw_config(struct rtl8169_private *tp) +{ + static const rtl_generic_fct hw_configs[] = { + [RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1, + [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3, + [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401, + [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b, + [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1, + [RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1, + [RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2, + [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_2, + [RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4, + [RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2, + [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3, + [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4, + [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1, + [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2, + [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2, + [RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402, + [RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411, + [RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106, + [RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2, + [RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3, + [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117, + [RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117, + [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2, + [RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b, + }; + + if (hw_configs[tp->mac_version]) + hw_configs[tp->mac_version](tp); +} + +static void rtl_hw_start_8125(struct rtl8169_private *tp) +{ + int i; + + /* disable interrupt coalescing */ + for (i = 0xa00; i < 0xb00; i += 4) + RTL_W32(tp, i, 0); + + rtl_hw_config(tp); +} + +static void rtl_hw_start_8168(struct rtl8169_private *tp) +{ + if (rtl_is_8168evl_up(tp)) + RTL_W8(tp, MaxTxPacketSize, EarlySize); + else + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + rtl_hw_config(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static void rtl_hw_start_8169(struct rtl8169_private *tp) +{ + RTL_W8(tp, EarlyTxThres, NoEarlyTx); + 
+ tp->cp_cmd |= PCIMulRW; + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) + tp->cp_cmd |= EnAnaPLL; + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + rtl8169_set_magic_reg(tp); + + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); +} + +static void rtl_hw_start(struct rtl8169_private *tp) +{ + rtl_unlock_config_regs(tp); + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + rtl_hw_start_8169(tp); + else if (rtl_is_8125(tp)) + rtl_hw_start_8125(tp); + else + rtl_hw_start_8168(tp); + + rtl_enable_exit_l1(tp); + rtl_set_rx_max_size(tp); + rtl_set_rx_tx_desc_registers(tp); + rtl_lock_config_regs(tp); + + rtl_jumbo_config(tp); + + /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ + rtl_pci_commit(tp); + + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_init_rxcfg(tp); + rtl_set_tx_config_registers(tp); + rtl_set_rx_config_features(tp, tp->dev->features); + rtl_set_rx_mode(tp->dev); + rtl_irq_enable(tp); +} + +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + dev->mtu = new_mtu; + netdev_update_features(dev); + rtl_jumbo_config(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + rtl8125_set_eee_txidle_timer(tp); + break; + default: + break; + } + + return 0; +} + +static void rtl8169_mark_to_asic(struct RxDesc *desc) +{ + u32 eor = le32_to_cpu(desc->opts1) & RingEnd; + + desc->opts2 = 0; + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + WRITE_ONCE(desc->opts1, cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE)); +} + +static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp, + struct RxDesc *desc) +{ + struct device *d = tp_to_dev(tp); + int node = dev_to_node(d); + dma_addr_t mapping; + struct page *data; + + data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE)); + if (!data) + return NULL; + + mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + netdev_err(tp->dev, "Failed to map RX DMA!\n"); + __free_pages(data, get_order(R8169_RX_BUF_SIZE)); + return NULL; + } + + desc->addr = cpu_to_le64(mapping); + rtl8169_mark_to_asic(desc); + + return data; +} + +static void rtl8169_rx_clear(struct rtl8169_private *tp) +{ + int i; + + for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) { + dma_unmap_page(tp_to_dev(tp), + le64_to_cpu(tp->RxDescArray[i].addr), + R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); + __free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE)); + tp->Rx_databuff[i] = NULL; + tp->RxDescArray[i].addr = 0; + tp->RxDescArray[i].opts1 = 0; + } +} + +static int rtl8169_rx_fill(struct rtl8169_private *tp) +{ + int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + struct page *data; + + data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); + if (!data) { + rtl8169_rx_clear(tp); + return -ENOMEM; + } + tp->Rx_databuff[i] = data; + } + + /* mark as last descriptor in the ring */ + tp->RxDescArray[NUM_RX_DESC - 1].opts1 |= cpu_to_le32(RingEnd); + + return 0; +} + +static int rtl8169_init_ring(struct rtl8169_private *tp) +{ + rtl8169_init_ring_indexes(tp); + + memset(tp->tx_skb, 0, sizeof(tp->tx_skb)); + memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff)); + + return rtl8169_rx_fill(tp); +} + +static void rtl8169_unmap_tx_skb(struct rtl8169_private *tp, unsigned int entry) +{ + struct ring_info *tx_skb = tp->tx_skb + entry; + struct TxDesc *desc = 
tp->TxDescArray + entry; + + dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), tx_skb->len, + DMA_TO_DEVICE); + memset(desc, 0, sizeof(*desc)); + memset(tx_skb, 0, sizeof(*tx_skb)); +} + +static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, + unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; i++) { + unsigned int entry = (start + i) % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + unsigned int len = tx_skb->len; + + if (len) { + struct sk_buff *skb = tx_skb->skb; + + rtl8169_unmap_tx_skb(tp, entry); + if (skb) + dev_consume_skb_any(skb); + } + } +} + +static void rtl8169_tx_clear(struct rtl8169_private *tp) +{ + rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); + netdev_reset_queue(tp->dev); +} + +static void rtl8169_cleanup(struct rtl8169_private *tp) +{ + napi_disable(&tp->napi); + + /* Give a racing hard_start_xmit a few cycles to complete. */ + synchronize_net(); + + /* Disable interrupts */ + rtl8169_irq_mask_and_ack(tp); + + rtl_rx_close(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000); + break; + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + rtl_enable_rxdvgate(tp); + fsleep(2000); + break; + default: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + fsleep(100); + break; + } + + rtl_hw_reset(tp); + + rtl8169_tx_clear(tp); + rtl8169_init_ring_indexes(tp); +} + +static void rtl_reset_work(struct rtl8169_private *tp) +{ + int i; + + netif_stop_queue(tp->dev); + + rtl8169_cleanup(tp); + + for (i = 0; i < NUM_RX_DESC; i++) + rtl8169_mark_to_asic(tp->RxDescArray + i); + + napi_enable(&tp->napi); + rtl_hw_start(tp); +} + +static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len, + void *addr, unsigned int entry, bool desc_own) +{ + struct TxDesc *txd = tp->TxDescArray + entry; + struct device *d = tp_to_dev(tp); + dma_addr_t mapping; + u32 opts1; + int ret; + + mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); + ret = dma_mapping_error(d, mapping); + if (unlikely(ret)) { + if (net_ratelimit()) + netdev_err(tp->dev, "Failed to map TX data!\n"); + return ret; + } + + txd->addr = cpu_to_le64(mapping); + txd->opts2 = cpu_to_le32(opts[1]); + + opts1 = opts[0] | len; + if (entry == NUM_TX_DESC - 1) + opts1 |= RingEnd; + if (desc_own) + opts1 |= DescOwn; + txd->opts1 = cpu_to_le32(opts1); + + tp->tx_skb[entry].len = len; + + return 0; +} + +static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, + const u32 *opts, unsigned int entry) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int cur_frag; + + for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { + const skb_frag_t *frag = info->frags + cur_frag; + void *addr = skb_frag_address(frag); + u32 len = skb_frag_size(frag); + + entry = (entry + 1) % NUM_TX_DESC; + + if (unlikely(rtl8169_tx_map(tp, opts, len, addr, entry, true))) + goto err_out; + } + + return 0; + +err_out: + rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); + return -EIO; +} + +static bool rtl_skb_is_udp(struct sk_buff *skb) +{ + int no = skb_network_offset(skb); + struct ipv6hdr *i6h, 
_i6h; + struct iphdr *ih, _ih; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih); + return ih && ih->protocol == IPPROTO_UDP; + case htons(ETH_P_IPV6): + i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h); + return i6h && i6h->nexthdr == IPPROTO_UDP; + default: + return false; + } +} + +#define RTL_MIN_PATCH_LEN 47 + +/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */ +static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + unsigned int padto = 0, len = skb->len; + + if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN && + rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) { + unsigned int trans_data_len = skb_tail_pointer(skb) - + skb_transport_header(skb); + + if (trans_data_len >= offsetof(struct udphdr, len) && + trans_data_len < RTL_MIN_PATCH_LEN) { + u16 dest = ntohs(udp_hdr(skb)->dest); + + /* dest is a standard PTP port */ + if (dest == 319 || dest == 320) + padto = len + RTL_MIN_PATCH_LEN - trans_data_len; + } + + if (trans_data_len < sizeof(struct udphdr)) + padto = max_t(unsigned int, padto, + len + sizeof(struct udphdr) - trans_data_len); + } + + return padto; +} + +static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + unsigned int padto; + + padto = rtl8125_quirk_udp_padto(tp, skb); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + padto = max_t(unsigned int, padto, ETH_ZLEN); + break; + default: + break; + } + + return padto; +} + +static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts) +{ + u32 mss = skb_shinfo(skb)->gso_size; + + if (mss) { + opts[0] |= TD_LSO; + opts[0] |= mss << TD0_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + + if (ip->protocol == IPPROTO_TCP) + opts[0] |= TD0_IP_CS | TD0_TCP_CS; + else if (ip->protocol == IPPROTO_UDP) + opts[0] |= TD0_IP_CS | TD0_UDP_CS; + else + WARN_ON_ONCE(1); + } +} + +static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, + struct sk_buff *skb, u32 *opts) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + u32 mss = shinfo->gso_size; + + if (mss) { + if (shinfo->gso_type & SKB_GSO_TCPV4) { + opts[0] |= TD1_GTSENV4; + } else if (shinfo->gso_type & SKB_GSO_TCPV6) { + if (skb_cow_head(skb, 0)) + return false; + + tcp_v6_gso_csum_prep(skb); + opts[0] |= TD1_GTSENV6; + } else { + WARN_ON_ONCE(1); + } + + opts[0] |= skb_transport_offset(skb) << GTTCPHO_SHIFT; + opts[1] |= mss << TD1_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ip_protocol; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + opts[1] |= TD1_IPv4_CS; + ip_protocol = ip_hdr(skb)->protocol; + break; + + case htons(ETH_P_IPV6): + opts[1] |= TD1_IPv6_CS; + ip_protocol = ipv6_hdr(skb)->nexthdr; + break; + + default: + ip_protocol = IPPROTO_RAW; + break; + } + + if (ip_protocol == IPPROTO_TCP) + opts[1] |= TD1_TCP_CS; + else if (ip_protocol == IPPROTO_UDP) + opts[1] |= TD1_UDP_CS; + else + WARN_ON_ONCE(1); + + opts[1] |= skb_transport_offset(skb) << TCPHO_SHIFT; + } else { + unsigned int padto = rtl_quirk_packet_padto(tp, skb); + + /* skb_padto would free the skb on error */ + return !__skb_put_padto(skb, padto, false); + } + + return true; +} + +static bool rtl_tx_slots_avail(struct rtl8169_private *tp) +{ + unsigned int slots_avail = READ_ONCE(tp->dirty_tx) + NUM_TX_DESC + - READ_ONCE(tp->cur_tx); + + /* A skbuff with nr_frags 
needs nr_frags+1 entries in the tx queue */ + return slots_avail > MAX_SKB_FRAGS; +} + +/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */ +static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: + return false; + default: + return true; + } +} + +static void rtl8169_doorbell(struct rtl8169_private *tp) +{ + if (rtl_is_8125(tp)) + RTL_W16(tp, TxPoll_8125, BIT(0)); + else + RTL_W8(tp, TxPoll, NPQ); +} + +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned int frags = skb_shinfo(skb)->nr_frags; + struct rtl8169_private *tp = netdev_priv(dev); + unsigned int entry = tp->cur_tx % NUM_TX_DESC; + struct TxDesc *txd_first, *txd_last; + bool stop_queue, door_bell; + u32 opts[2]; + + if (unlikely(!rtl_tx_slots_avail(tp))) { + if (net_ratelimit()) + netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); + goto err_stop_0; + } + + opts[1] = rtl8169_tx_vlan_tag(skb); + opts[0] = 0; + + if (!rtl_chip_supports_csum_v2(tp)) + rtl8169_tso_csum_v1(skb, opts); + else if (!rtl8169_tso_csum_v2(tp, skb, opts)) + goto err_dma_0; + + if (unlikely(rtl8169_tx_map(tp, opts, skb_headlen(skb), skb->data, + entry, false))) + goto err_dma_0; + + txd_first = tp->TxDescArray + entry; + + if (frags) { + if (rtl8169_xmit_frags(tp, skb, opts, entry)) + goto err_dma_1; + entry = (entry + frags) % NUM_TX_DESC; + } + + txd_last = tp->TxDescArray + entry; + txd_last->opts1 |= cpu_to_le32(LastFrag); + tp->tx_skb[entry].skb = skb; + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + + door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more()); + + txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag); + + /* rtl_tx needs to see descriptor changes before updated tp->cur_tx */ + smp_wmb(); + + WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1); + + stop_queue = !rtl_tx_slots_avail(tp); + if (unlikely(stop_queue)) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); + netif_stop_queue(dev); + /* Sync with rtl_tx: + * - publish queue status and cur_tx ring index (write barrier) + * - refresh dirty_tx ring index (read barrier). + * May the current thread have a pessimistic view of the ring + * status and forget to wake up queue, a racing rtl_tx thread + * can't. 
+ */ + smp_mb__after_atomic(); + if (rtl_tx_slots_avail(tp)) + netif_start_queue(dev); + door_bell = true; + } + + if (door_bell) + rtl8169_doorbell(tp); + + return NETDEV_TX_OK; + +err_dma_1: + rtl8169_unmap_tx_skb(tp, entry); +err_dma_0: + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + +err_stop_0: + netif_stop_queue(dev); + dev->stats.tx_dropped++; + return NETDEV_TX_BUSY; +} + +static unsigned int rtl_last_frag_len(struct sk_buff *skb) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int nr_frags = info->nr_frags; + + if (!nr_frags) + return UINT_MAX; + + return skb_frag_size(info->frags + nr_frags - 1); +} + +/* Workaround for hw issues with TSO on RTL8168evl */ +static netdev_features_t rtl8168evl_fix_tso(struct sk_buff *skb, + netdev_features_t features) +{ + /* IPv4 header has options field */ + if (vlan_get_protocol(skb) == htons(ETH_P_IP) && + ip_hdrlen(skb) > sizeof(struct iphdr)) + features &= ~NETIF_F_ALL_TSO; + + /* IPv4 TCP header has options field */ + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 && + tcp_hdrlen(skb) > sizeof(struct tcphdr)) + features &= ~NETIF_F_ALL_TSO; + + else if (rtl_last_frag_len(skb) <= 6) + features &= ~NETIF_F_ALL_TSO; + + return features; +} + +static netdev_features_t rtl8169_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (skb_is_gso(skb)) { + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + features = rtl8168evl_fix_tso(skb, features); + + if (skb_transport_offset(skb) > GTTCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_ALL_TSO; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* work around hw bug on some chip versions */ + if (skb->len < ETH_ZLEN) + features &= ~NETIF_F_CSUM_MASK; + + if (rtl_quirk_packet_padto(tp, skb)) + features &= ~NETIF_F_CSUM_MASK; + + if (skb_transport_offset(skb) > TCPHO_MAX && + rtl_chip_supports_csum_v2(tp)) + features &= ~NETIF_F_CSUM_MASK; + } + + return vlan_features_check(skb, features); +} + +static void rtl8169_pcierr_interrupt(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + int pci_status_errs; + u16 pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + + pci_status_errs = pci_status_get_and_clear_errors(pdev); + + if (net_ratelimit()) + netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n", + pci_cmd, pci_status_errs); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, + int budget) +{ + unsigned int dirty_tx, bytes_compl = 0, pkts_compl = 0; + struct sk_buff *skb; + + dirty_tx = tp->dirty_tx; + + while (READ_ONCE(tp->cur_tx) != dirty_tx) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + u32 status; + + status = le32_to_cpu(tp->TxDescArray[entry].opts1); + if (status & DescOwn) + break; + + skb = tp->tx_skb[entry].skb; + rtl8169_unmap_tx_skb(tp, entry); + + if (skb) { + pkts_compl++; + bytes_compl += skb->len; + napi_consume_skb(skb, budget); + } + dirty_tx++; + } + + if (tp->dirty_tx != dirty_tx) { + netdev_completed_queue(dev, pkts_compl, bytes_compl); + dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl); + + /* Sync with rtl8169_start_xmit: + * - publish dirty_tx ring index (write barrier) + * - refresh cur_tx ring index and queue status (read barrier) + * May the current thread miss the stopped queue condition, + * a racing xmit thread can only have a right 
view of the + * ring status. + */ + smp_store_mb(tp->dirty_tx, dirty_tx); + if (netif_queue_stopped(dev) && rtl_tx_slots_avail(tp)) + netif_wake_queue(dev); + /* + * 8168 hack: TxPoll requests are lost when the Tx packets are + * too close. Let's kick an extra TxPoll request when a burst + * of start_xmit activity is detected (if it is not detected, + * it is slow enough). -- FR + * If skb is NULL then we come here again once a tx irq is + * triggered after the last fragment is marked transmitted. + */ + if (tp->cur_tx != dirty_tx && skb) + rtl8169_doorbell(tp); + } +} + +static inline int rtl8169_fragmented_frame(u32 status) +{ + return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); +} + +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) +{ + u32 status = opts1 & (RxProtoMask | RxCSFailMask); + + if (status == RxProtoTCP || status == RxProtoUDP) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); +} + +static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget) +{ + struct device *d = tp_to_dev(tp); + int count; + + for (count = 0; count < budget; count++, tp->cur_rx++) { + unsigned int pkt_size, entry = tp->cur_rx % NUM_RX_DESC; + struct RxDesc *desc = tp->RxDescArray + entry; + struct sk_buff *skb; + const void *rx_buf; + dma_addr_t addr; + u32 status; + + status = le32_to_cpu(desc->opts1); + if (status & DescOwn) + break; + + /* This barrier is needed to keep us from reading + * any other fields out of the Rx descriptor until + * we know the status of DescOwn + */ + dma_rmb(); + + if (unlikely(status & RxRES)) { + if (net_ratelimit()) + netdev_warn(dev, "Rx ERROR. status = %08x\n", + status); + dev->stats.rx_errors++; + if (status & (RxRWT | RxRUNT)) + dev->stats.rx_length_errors++; + if (status & RxCRC) + dev->stats.rx_crc_errors++; + + if (!(dev->features & NETIF_F_RXALL)) + goto release_descriptor; + else if (status & RxRWT || !(status & (RxRUNT | RxCRC))) + goto release_descriptor; + } + + pkt_size = status & GENMASK(13, 0); + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size -= ETH_FCS_LEN; + + /* The driver does not support incoming fragmented frames. + * They are seen as a symptom of over-mtu sized frames. 
+ */ + if (unlikely(rtl8169_fragmented_frame(status))) { + dev->stats.rx_dropped++; + dev->stats.rx_length_errors++; + goto release_descriptor; + } + + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (unlikely(!skb)) { + dev->stats.rx_dropped++; + goto release_descriptor; + } + + addr = le64_to_cpu(desc->addr); + rx_buf = page_address(tp->Rx_databuff[entry]); + + dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); + prefetch(rx_buf); + skb_copy_to_linear_data(skb, rx_buf, pkt_size); + skb->tail += pkt_size; + skb->len = pkt_size; + dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); + + rtl8169_rx_csum(skb, status); + skb->protocol = eth_type_trans(skb, dev); + + rtl8169_rx_vlan_tag(desc, skb); + + if (skb->pkt_type == PACKET_MULTICAST) + dev->stats.multicast++; + + napi_gro_receive(&tp->napi, skb); + + dev_sw_netstats_rx_add(dev, pkt_size); +release_descriptor: + rtl8169_mark_to_asic(desc); + } + + return count; +} + +static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) +{ + struct rtl8169_private *tp = dev_instance; + u32 status = rtl_get_events(tp); + + if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask)) + return IRQ_NONE; + + if (unlikely(status & SYSErr)) { + rtl8169_pcierr_interrupt(tp->dev); + goto out; + } + + if (status & LinkChg) + phy_mac_interrupt(tp->phydev); + + if (unlikely(status & RxFIFOOver && + tp->mac_version == RTL_GIGA_MAC_VER_11)) { + netif_stop_queue(tp->dev); + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); + } + + if (napi_schedule_prep(&tp->napi)) { + rtl_irq_disable(tp); + __napi_schedule(&tp->napi); + } +out: + rtl_ack_events(tp, status); + + return IRQ_HANDLED; +} + +static void rtl_task(struct work_struct *work) +{ + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, wk.work); + + rtnl_lock(); + + if (!netif_running(tp->dev) || + !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) + goto out_unlock; + + if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) { + rtl_reset_work(tp); + netif_wake_queue(tp->dev); + } +out_unlock: + rtnl_unlock(); +} + +static int rtl8169_poll(struct napi_struct *napi, int budget) +{ + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); + struct net_device *dev = tp->dev; + int work_done; + + rtl_tx(dev, tp, budget); + + work_done = rtl_rx(dev, tp, budget); + + if (work_done < budget && napi_complete_done(napi, work_done)) + rtl_irq_enable(tp); + + return work_done; +} + +static void r8169_phylink_handler(struct net_device *ndev) +{ + struct rtl8169_private *tp = netdev_priv(ndev); + + if (netif_carrier_ok(ndev)) { + rtl_link_chg_patch(tp); + pm_request_resume(&tp->pci_dev->dev); + } else { + pm_runtime_idle(&tp->pci_dev->dev); + } + + phy_print_status(tp->phydev); +} + +static int r8169_phy_connect(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + phy_interface_t phy_mode; + int ret; + + phy_mode = tp->supports_gmii ? 
PHY_INTERFACE_MODE_GMII : + PHY_INTERFACE_MODE_MII; + + ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler, + phy_mode); + if (ret) + return ret; + + if (!tp->supports_gmii) + phy_set_max_speed(phydev, SPEED_100); + + phy_attached_info(phydev); + + return 0; +} + +static void rtl8169_down(struct rtl8169_private *tp) +{ + /* Clear all task flags */ + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); + + phy_stop(tp->phydev); + + rtl8169_update_counters(tp); + + pci_clear_master(tp->pci_dev); + rtl_pci_commit(tp); + + rtl8169_cleanup(tp); + rtl_disable_exit_l1(tp); + rtl_prepare_power_down(tp); +} + +static void rtl8169_up(struct rtl8169_private *tp) +{ + pci_set_master(tp->pci_dev); + phy_init_hw(tp->phydev); + phy_resume(tp->phydev); + rtl8169_init_phy(tp); + napi_enable(&tp->napi); + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_reset_work(tp); + + phy_start(tp->phydev); +} + +static int rtl8169_close(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + pm_runtime_get_sync(&pdev->dev); + + netif_stop_queue(dev); + rtl8169_down(tp); + rtl8169_rx_clear(tp); + + cancel_work_sync(&tp->wk.work); + + free_irq(tp->irq, tp); + + phy_disconnect(tp->phydev); + + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + tp->RxDescArray = NULL; + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8169_netpoll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_interrupt(tp->irq, tp); +} +#endif + +static int rtl_open(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + unsigned long irqflags; + int retval = -ENOMEM; + + pm_runtime_get_sync(&pdev->dev); + + /* + * Rx and Tx descriptors needs 256 bytes alignment. + * dma_alloc_coherent provides more. + */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto out; + + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_tx_0; + + retval = rtl8169_init_ring(tp); + if (retval < 0) + goto err_free_rx_1; + + rtl_request_firmware(tp); + + irqflags = pci_dev_msi_enabled(pdev) ? 
IRQF_NO_THREAD : IRQF_SHARED; + retval = request_irq(tp->irq, rtl8169_interrupt, irqflags, dev->name, tp); + if (retval < 0) + goto err_release_fw_2; + + retval = r8169_phy_connect(tp); + if (retval) + goto err_free_irq; + + rtl8169_up(tp); + rtl8169_init_counter_offsets(tp); + netif_start_queue(dev); +out: + pm_runtime_put_sync(&pdev->dev); + + return retval; + +err_free_irq: + free_irq(tp->irq, tp); +err_release_fw_2: + rtl_release_firmware(tp); + rtl8169_rx_clear(tp); +err_free_rx_1: + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; +err_free_tx_0: + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + goto out; +} + +static void +rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + struct rtl8169_counters *counters = tp->counters; + + pm_runtime_get_noresume(&pdev->dev); + + netdev_stats_to_stats64(stats, &dev->stats); + dev_fetch_sw_netstats(stats, dev->tstats); + + /* + * Fetch additional counter values missing in stats collected by driver + * from tally counters. + */ + if (pm_runtime_active(&pdev->dev)) + rtl8169_update_counters(tp); + + /* + * Subtract values fetched during initalization. + * See rtl8169_init_counter_offsets for a description why we do that. + */ + stats->tx_errors = le64_to_cpu(counters->tx_errors) - + le64_to_cpu(tp->tc_offset.tx_errors); + stats->collisions = le32_to_cpu(counters->tx_multi_collision) - + le32_to_cpu(tp->tc_offset.tx_multi_collision); + stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) - + le16_to_cpu(tp->tc_offset.tx_aborted); + stats->rx_missed_errors = le16_to_cpu(counters->rx_missed) - + le16_to_cpu(tp->tc_offset.rx_missed); + + pm_runtime_put_noidle(&pdev->dev); +} + +static void rtl8169_net_suspend(struct rtl8169_private *tp) +{ + netif_device_detach(tp->dev); + + if (netif_running(tp->dev)) + rtl8169_down(tp); +} + +static int rtl8169_runtime_resume(struct device *dev) +{ + struct rtl8169_private *tp = dev_get_drvdata(dev); + + rtl_rar_set(tp, tp->dev->dev_addr); + __rtl8169_set_wol(tp, tp->saved_wolopts); + + if (tp->TxDescArray) + rtl8169_up(tp); + + netif_device_attach(tp->dev); + + return 0; +} + +static int rtl8169_suspend(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + rtnl_lock(); + rtl8169_net_suspend(tp); + if (!device_may_wakeup(tp_to_dev(tp))) + clk_disable_unprepare(tp->clk); + rtnl_unlock(); + + return 0; +} + +static int rtl8169_resume(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + if (!device_may_wakeup(tp_to_dev(tp))) + clk_prepare_enable(tp->clk); + + /* Reportedly at least Asus X453MA truncates packets otherwise */ + if (tp->mac_version == RTL_GIGA_MAC_VER_37) + rtl_init_rxcfg(tp); + + return rtl8169_runtime_resume(device); +} + +static int rtl8169_runtime_suspend(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + if (!tp->TxDescArray) { + netif_device_detach(tp->dev); + return 0; + } + + rtnl_lock(); + __rtl8169_set_wol(tp, WAKE_PHY); + rtl8169_net_suspend(tp); + rtnl_unlock(); + + return 0; +} + +static int rtl8169_runtime_idle(struct device *device) +{ + struct rtl8169_private *tp = dev_get_drvdata(device); + + if (tp->dash_type != RTL_DASH_NONE) + return -EBUSY; + + if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev)) + pm_schedule_suspend(device, 
10000); + + return -EBUSY; +} + +static const struct dev_pm_ops rtl8169_pm_ops = { + SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume) + RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume, + rtl8169_runtime_idle) +}; + +static void rtl_shutdown(struct pci_dev *pdev) +{ + struct rtl8169_private *tp = pci_get_drvdata(pdev); + + rtnl_lock(); + rtl8169_net_suspend(tp); + rtnl_unlock(); + + /* Restore original MAC address */ + rtl_rar_set(tp, tp->dev->perm_addr); + + if (system_state == SYSTEM_POWER_OFF && + tp->dash_type == RTL_DASH_NONE) { + pci_wake_from_d3(pdev, tp->saved_wolopts); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +static void rtl_remove_one(struct pci_dev *pdev) +{ + struct rtl8169_private *tp = pci_get_drvdata(pdev); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + unregister_netdev(tp->dev); + + if (tp->dash_type != RTL_DASH_NONE) + rtl8168_driver_stop(tp); + + rtl_release_firmware(tp); + + /* restore original MAC address */ + rtl_rar_set(tp, tp->dev->perm_addr); +} + +static const struct net_device_ops rtl_netdev_ops = { + .ndo_open = rtl_open, + .ndo_stop = rtl8169_close, + .ndo_get_stats64 = rtl8169_get_stats64, + .ndo_start_xmit = rtl8169_start_xmit, + .ndo_features_check = rtl8169_features_check, + .ndo_tx_timeout = rtl8169_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = rtl8169_change_mtu, + .ndo_fix_features = rtl8169_fix_features, + .ndo_set_features = rtl8169_set_features, + .ndo_set_mac_address = rtl_set_mac_address, + .ndo_eth_ioctl = phy_do_ioctl_running, + .ndo_set_rx_mode = rtl_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8169_netpoll, +#endif + +}; + +static void rtl_set_irq_mask(struct rtl8169_private *tp) +{ + tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg; + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver; + else if (tp->mac_version == RTL_GIGA_MAC_VER_11) + /* special workaround needed */ + tp->irq_mask |= RxFIFOOver; + else + tp->irq_mask |= RxOverflow; +} + +static int rtl_alloc_irq(struct rtl8169_private *tp) +{ + unsigned int flags; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + rtl_unlock_config_regs(tp); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); + rtl_lock_config_regs(tp); + fallthrough; + case RTL_GIGA_MAC_VER_07 ... 
RTL_GIGA_MAC_VER_17: + flags = PCI_IRQ_LEGACY; + break; + default: + flags = PCI_IRQ_ALL_TYPES; + break; + } + + return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); +} + +static void rtl_read_mac_address(struct rtl8169_private *tp, + u8 mac_addr[ETH_ALEN]) +{ + /* Get MAC address */ + if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) { + u32 value; + + value = rtl_eri_read(tp, 0xe0); + put_unaligned_le32(value, mac_addr); + value = rtl_eri_read(tp, 0xe4); + put_unaligned_le16(value, mac_addr + 4); + } else if (rtl_is_8125(tp)) { + rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP); + } +} + +DECLARE_RTL_COND(rtl_link_list_ready_cond) +{ + return RTL_R8(tp, MCU) & LINK_LIST_RDY; +} + +static void r8168g_wait_ll_share_fifo_ready(struct rtl8169_private *tp) +{ + rtl_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42); +} + +static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + return rtl_readphy(tp, phyreg); +} + +static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr, + int phyreg, u16 val) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + rtl_writephy(tp, phyreg, val); + + return 0; +} + +static int r8169_mdio_register(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + struct mii_bus *new_bus; + int ret; + + new_bus = devm_mdiobus_alloc(&pdev->dev); + if (!new_bus) + return -ENOMEM; + + new_bus->name = "r8169"; + new_bus->priv = tp; + new_bus->parent = &pdev->dev; + new_bus->irq[0] = PHY_MAC_INTERRUPT; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x", + pci_domain_nr(pdev->bus), pci_dev_id(pdev)); + + new_bus->read = r8169_mdio_read_reg; + new_bus->write = r8169_mdio_write_reg; + + ret = devm_mdiobus_register(&pdev->dev, new_bus); + if (ret) + return ret; + + tp->phydev = mdiobus_get_phy(new_bus, 0); + if (!tp->phydev) { + return -ENODEV; + } else if (!tp->phydev->drv) { + /* Most chip versions fail with the genphy driver. + * Therefore ensure that the dedicated PHY driver is loaded. + */ + dev_err(&pdev->dev, "no dedicated PHY driver found for PHY ID 0x%08x, maybe realtek.ko needs to be added to initramfs?\n", + tp->phydev->phy_id); + return -EUNATCH; + } + + tp->phydev->mac_managed_pm = 1; + + phy_support_asym_pause(tp->phydev); + + /* PHY will be woken up in rtl_open() */ + phy_suspend(tp->phydev); + + return 0; +} + +static void rtl_hw_init_8168g(struct rtl8169_private *tp) +{ + rtl_enable_rxdvgate(tp); + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + r8168g_wait_ll_share_fifo_ready(tp); + + r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15)); + r8168g_wait_ll_share_fifo_ready(tp); +} + +static void rtl_hw_init_8125(struct rtl8169_private *tp) +{ + rtl_enable_rxdvgate(tp); + + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); + + r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0); + r8168g_wait_ll_share_fifo_ready(tp); + + r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0); + r8168_mac_ocp_write(tp, 0xc0a6, 0x0150); + r8168_mac_ocp_write(tp, 0xc01e, 0x5555); + r8168g_wait_ll_share_fifo_ready(tp); +} + +static void rtl_hw_initialize(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_51 ... 
RTL_GIGA_MAC_VER_53: + rtl8168ep_stop_cmac(tp); + fallthrough; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: + rtl_hw_init_8168g(tp); + break; + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63: + rtl_hw_init_8125(tp); + break; + default: + break; + } +} + +static int rtl_jumbo_max(struct rtl8169_private *tp) +{ + /* Non-GBit versions don't support jumbo frames */ + if (!tp->supports_gmii) + return 0; + + switch (tp->mac_version) { + /* RTL8169 */ + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: + return JUMBO_7K; + /* RTL8168b */ + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_17: + return JUMBO_4K; + /* RTL8168c */ + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: + return JUMBO_6K; + default: + return JUMBO_9K; + } +} + +static void rtl_init_mac_address(struct rtl8169_private *tp) +{ + u8 mac_addr[ETH_ALEN] __aligned(2) = {}; + struct net_device *dev = tp->dev; + int rc; + + rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr); + if (!rc) + goto done; + + rtl_read_mac_address(tp, mac_addr); + if (is_valid_ether_addr(mac_addr)) + goto done; + + rtl_read_mac_from_reg(tp, mac_addr, MAC0); + if (is_valid_ether_addr(mac_addr)) + goto done; + + eth_random_addr(mac_addr); + dev->addr_assign_type = NET_ADDR_RANDOM; + dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n"); +done: + eth_hw_addr_set(dev, mac_addr); + rtl_rar_set(tp, mac_addr); +} + +/* register is set if system vendor successfully tested ASPM 1.2 */ +static bool rtl_aspm_is_safe(struct rtl8169_private *tp) +{ + if (tp->mac_version >= RTL_GIGA_MAC_VER_61 && + r8168_mac_ocp_read(tp, 0xc0b2) & 0xf) + return true; + + return false; +} + +static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct rtl8169_private *tp; + int jumbo_max, region, rc; + enum mac_version chipset; + struct net_device *dev; + u16 xid; + + dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp)); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &rtl_netdev_ops; + tp = netdev_priv(dev); + tp->dev = dev; + tp->pci_dev = pdev; + tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1; + tp->eee_adv = -1; + tp->ocp_base = OCP_STD_PHY_BASE; + + dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev, + struct pcpu_sw_netstats); + if (!dev->tstats) + return -ENOMEM; + + /* Get the *optional* external "ether_clk" used on some boards */ + tp->clk = devm_clk_get_optional_enabled(&pdev->dev, "ether_clk"); + if (IS_ERR(tp->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(tp->clk), "failed to get ether_clk\n"); + + /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ + rc = pcim_enable_device(pdev); + if (rc < 0) { + dev_err(&pdev->dev, "enable failure\n"); + return rc; + } + + if (pcim_set_mwi(pdev) < 0) + dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n"); + + /* use first MMIO region */ + region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1; + if (region < 0) { + dev_err(&pdev->dev, "no MMIO resource found\n"); + return -ENODEV; + } + + rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME); + if (rc < 0) { + dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); + return rc; + } + + tp->mmio_addr = pcim_iomap_table(pdev)[region]; + + xid = (RTL_R32(tp, TxConfig) >> 20) & 0xfcf; + + /* Identify chip attached to board */ + chipset = rtl8169_get_mac_version(xid, tp->supports_gmii); + if (chipset == RTL_GIGA_MAC_NONE) { + dev_err(&pdev->dev, "unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n", xid); + return -ENODEV; + } + + tp->mac_version = chipset; + + /* Disable ASPM L1 as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users. + * Chips from RTL8168h partially have issues with L1.2, but seem + * to work fine with L1 and L1.1. + */ + if (rtl_aspm_is_safe(tp)) + rc = 0; + else if (tp->mac_version >= RTL_GIGA_MAC_VER_46) + rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2); + else + rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1); + tp->aspm_manageable = !rc; + + tp->dash_type = rtl_check_dash(tp); + + tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK; + + if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) + dev->features |= NETIF_F_HIGHDMA; + + rtl_init_rxcfg(tp); + + rtl8169_irq_mask_and_ack(tp); + + rtl_hw_initialize(tp); + + rtl_hw_reset(tp); + + rc = rtl_alloc_irq(tp); + if (rc < 0) { + dev_err(&pdev->dev, "Can't allocate interrupt\n"); + return rc; + } + tp->irq = pci_irq_vector(pdev, 0); + + INIT_WORK(&tp->wk.work, rtl_task); + + rtl_init_mac_address(tp); + + dev->ethtool_ops = &rtl8169_ethtool_ops; + + netif_napi_add(dev, &tp->napi, rtl8169_poll); + + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. + */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + /* Disallow toggling */ + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (rtl_chip_supports_csum_v2(tp)) + dev->hw_features |= NETIF_F_IPV6_CSUM; + + dev->features |= dev->hw_features; + + /* There has been a number of reports that using SG/TSO results in + * tx timeouts. However for a lot of people SG/TSO works fine. + * Therefore disable both features by default, but allow users to + * enable them. Use at own risk! 
+ */ + if (rtl_chip_supports_csum_v2(tp)) { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; + netif_set_tso_max_size(dev, RTL_GSO_MAX_SIZE_V2); + netif_set_tso_max_segs(dev, RTL_GSO_MAX_SEGS_V2); + } else { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO; + netif_set_tso_max_size(dev, RTL_GSO_MAX_SIZE_V1); + netif_set_tso_max_segs(dev, RTL_GSO_MAX_SEGS_V1); + } + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + /* configure chip for default features */ + rtl8169_set_features(dev, dev->features); + + if (tp->dash_type == RTL_DASH_NONE) { + rtl_set_d3_pll_down(tp, true); + } else { + rtl_set_d3_pll_down(tp, false); + dev->wol_enabled = 1; + } + + jumbo_max = rtl_jumbo_max(tp); + if (jumbo_max) + dev->max_mtu = jumbo_max; + + rtl_set_irq_mask(tp); + + tp->fw_name = rtl_chip_infos[chipset].fw_name; + + tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), + &tp->counters_phys_addr, + GFP_KERNEL); + if (!tp->counters) + return -ENOMEM; + + pci_set_drvdata(pdev, tp); + + rc = r8169_mdio_register(tp); + if (rc) + return rc; + + rc = register_netdev(dev); + if (rc) + return rc; + + netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n", + rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq); + + if (jumbo_max) + netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n", + jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ? + "ok" : "ko"); + + if (tp->dash_type != RTL_DASH_NONE) { + netdev_info(dev, "DASH enabled\n"); + rtl8168_driver_start(tp); + } + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +static struct pci_driver rtl8169_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = rtl8169_pci_tbl, + .probe = rtl_init_one, + .remove = rtl_remove_one, + .shutdown = rtl_shutdown, + .driver.pm = pm_ptr(&rtl8169_pm_ops), +}; + +module_pci_driver(rtl8169_pci_driver); diff --git a/devices/r8169/r8169_phy_config-5.10-ethercat.c b/devices/r8169/r8169_phy_config-5.10-ethercat.c new file mode 100644 index 00000000..f6831ffd --- /dev/null +++ b/devices/r8169/r8169_phy_config-5.10-ethercat.c @@ -0,0 +1,1368 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * r8169_phy_config.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */
+
+#include <linux/delay.h>
+#include <linux/phy.h>
+
+#include "r8169-5.10-ethercat.h"
+
+typedef void (*rtl_phy_cfg_fct)(struct rtl8169_private *tp,
+ struct phy_device *phydev);
+
+static void r8168d_modify_extpage(struct phy_device *phydev, int extpage,
+ int reg, u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0007);
+
+ __phy_write(phydev, 0x1e, extpage);
+ __phy_modify(phydev, reg, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
+static void r8168d_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0005);
+
+ __phy_write(phydev, 0x05, parm);
+ __phy_modify(phydev, 0x06, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
+static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ int oldpage = phy_select_page(phydev, 0x0a43);
+
+ __phy_write(phydev, 0x13, parm);
+ __phy_modify(phydev, 0x14, mask, val);
+
+ phy_restore_page(phydev, oldpage, 0);
+}
+
+struct phy_reg {
+ u16 reg;
+ u16 val;
+};
+
+static void __rtl_writephy_batch(struct phy_device *phydev,
+ const struct phy_reg *regs, int len)
+{
+ phy_lock_mdio_bus(phydev);
+
+ while (len-- > 0) {
+ __phy_write(phydev, regs->reg, regs->val);
+ regs++;
+ }
+
+ phy_unlock_mdio_bus(phydev);
+}
+
+#define rtl_writephy_batch(p, a) __rtl_writephy_batch(p, a, ARRAY_SIZE(a))
+
+static void rtl8168f_config_eee_phy(struct phy_device *phydev)
+{
+ r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8));
+ r8168d_phy_param(phydev, 0x8b85, 0, BIT(13));
+}
+
+static void rtl8168g_config_eee_phy(struct phy_device *phydev)
+{
+ phy_modify_paged(phydev, 0x0a43, 0x11, 0, BIT(4));
+}
+
+static void rtl8168h_config_eee_phy(struct phy_device *phydev)
+{
+ rtl8168g_config_eee_phy(phydev);
+
+ phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200);
+ phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080);
+}
+
+static void rtl8125a_config_eee_phy(struct phy_device *phydev)
+{
+ rtl8168h_config_eee_phy(phydev);
+
+ phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000);
+}
+
+static void rtl8125b_config_eee_phy(struct phy_device *phydev)
+{
+ phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000);
+ phy_modify_paged(phydev, 0xa42, 0x14, 0x0080, 0x0000);
+ phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000);
+}
+
+static void rtl8169s_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x06, 0x006e },
+ { 0x08, 0x0708 },
+ { 0x15, 0x4000 },
+ { 0x18, 0x65c7 },
+
+ { 0x1f, 0x0001 },
+ { 0x03, 0x00a1 },
+ { 0x02, 0x0008 },
+ { 0x01, 0x0120 },
+ { 0x00, 0x1000 },
+ { 0x04, 0x0800 },
+ { 0x04, 0x0000 },
+
+ { 0x03, 0xff41 },
+ { 0x02, 0xdf60 },
+ { 0x01, 0x0140 },
+ { 0x00, 0x0077 },
+ { 0x04, 0x7800 },
+ { 0x04, 0x7000 },
+
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf0f9 },
+ { 0x04, 0x9800 },
+ { 0x04, 0x9000 },
+
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0xff95 },
+ { 0x00, 0xba00 },
+ { 0x04, 0xa800 },
+ { 0x04, 0xa000 },
+
+ { 0x03, 0xff41 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x0140 },
+ { 0x00, 0x00bb },
+ { 0x04, 0xb800 },
+ { 0x04, 0xb000 },
+
+ { 0x03, 0xdf41 },
+ { 0x02, 0xdc60 },
+ { 0x01, 0x6340 },
+ { 0x00, 0x007d },
+ { 0x04, 0xd800 },
+ { 0x04, 0xd000 },
+
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x100a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0xf000 },
+
+ { 0x1f, 0x0000 },
+ { 0x0b,
0x0000 }, + { 0x00, 0x9200 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0002, 0x01, 0x90d0); +} + +static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x14, 0xfb54 }, + { 0x18, 0xf5c7 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0x8480 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x18, 0x67c7 }, + { 0x04, 0x2000 }, + { 0x03, 0x002f }, + { 0x02, 0x4360 }, + { 0x01, 0x0109 }, + { 0x00, 0x3022 }, + { 0x04, 0x2800 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write(phydev, 0x1f, 0x0001); + phy_set_bits(phydev, 0x16, BIT(0)); + phy_write(phydev, 0x10, 0xf41b); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0001, 0x10, 0xf41b); +} + +static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write(phydev, 0x1d, 0x0f00); + phy_write_paged(phydev, 0x0002, 0x0c, 0x1ec8); +} + +static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); + phy_write_paged(phydev, 0x0001, 0x1d, 0x3d98); +} + +static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1f, 0x0002 }, + { 0x00, 0x88d4 }, + { 0x01, 0x82b1 }, + { 0x03, 0x7002 }, + { 0x08, 0x9e30 }, + { 0x09, 0x01f0 }, + { 0x0a, 0x5500 }, + { 0x0c, 0x00c8 }, + { 0x1f, 0x0003 }, + { 0x12, 0xc096 }, + { 0x16, 0x000a }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x09, 0x2000 }, + { 0x09, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + 
phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x0761 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x16, BIT(0)); + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x5461 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x16, BIT(0)); + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } +}; + +static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + { 0x1f, 0x0002 } +}; + +static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp, + struct phy_device *phydev, + u16 val) +{ + u16 reg_val; + + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x001b); + reg_val = phy_read(phydev, 0x06); + phy_write(phydev, 0x1f, 0x0000); + + if (reg_val != val) + phydev_warn(phydev, "chipset not ready for firmware\n"); + else + r8169_apply_firmware(tp); +} + +static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0); + + /* + * Rx Error Issue + * Fine Tune Switching regulator parameter + */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x0b, 0x00ef, 0x0010); + phy_modify(phydev, 0x0c, 0x5d00, 0xa200); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + int val; + + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1); + + val = phy_read(phydev, 0x0d); + + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + phy_write(phydev, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + phy_write(phydev, 0x0d, val | set[i]); + } + } else { + phy_write_paged(phydev, 0x0002, 0x05, 0x6662); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x6662); + } + + /* RSET couple improve */ + phy_write(phydev, 0x1f, 0x0002); + phy_set_bits(phydev, 
0x0d, 0x0300); + phy_set_bits(phydev, 0x0f, 0x0010); + + /* Fine tune PLL performance */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x02, 0x0600, 0x0100); + phy_clear_bits(phydev, 0x03, 0xe000); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168d_apply_firmware_cond(tp, phydev, 0xbf00); +} + +static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + int val; + + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1); + + val = phy_read(phydev, 0x0d); + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + phy_write(phydev, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + phy_write(phydev, 0x0d, val | set[i]); + } + } else { + phy_write_paged(phydev, 0x0002, 0x05, 0x2642); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x2642); + } + + /* Fine tune PLL performance */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x02, 0x0600, 0x0100); + phy_clear_bits(phydev, 0x03, 0xe000); + phy_write(phydev, 0x1f, 0x0000); + + /* Switching regulator Slew rate */ + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0017); + + rtl8168d_apply_firmware_cond(tp, phydev, 0xb300); +} + +static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x10, 0x0008 }, + { 0x0d, 0x006c }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0xa4d8 }, + { 0x09, 0x281c }, + { 0x07, 0x2883 }, + { 0x0a, 0x6b35 }, + { 0x1d, 0x3da4 }, + { 0x1c, 0xeffd }, + { 0x14, 0x7f52 }, + { 0x18, 0x7fc6 }, + { 0x08, 0x0601 }, + { 0x06, 0x4063 }, + { 0x10, 0xf074 }, + { 0x1f, 0x0003 }, + { 0x13, 0x0789 }, + { 0x12, 0xf4bd }, + { 0x1a, 0x04fd }, + { 0x14, 0x84b0 }, + { 0x1f, 0x0000 }, + { 0x00, 0x9200 }, + + { 0x1f, 0x0005 }, + { 0x01, 0x0340 }, + { 0x1f, 0x0001 }, + { 0x04, 0x4000 }, + { 0x03, 0x1d21 }, + { 0x02, 0x0c32 }, + { 0x01, 0x0200 }, + { 0x00, 0x5554 }, + { 0x04, 0x4800 }, + { 0x04, 0x4000 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + { 0x1f, 0x0000 }, + }; + + rtl_writephy_batch(phydev, phy_reg_init); + r8168d_modify_extpage(phydev, 0x0023, 0x16, 0xffff, 0x0000); +} + +static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0001, 0x17, 0x0cc0); + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0xffff, 0x0040); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0001 }, + { 0x0b, 0x6c20 }, + { 0x07, 0x2872 }, + { 0x1c, 0xefff }, + { 0x1f, 0x0003 }, + { 0x14, 0x6420 }, + { 0x1f, 0x0000 }, + }; + + r8169_apply_firmware(tp); + + /* Enable Delay cap */ + r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896); + + rtl_writephy_batch(phydev, phy_reg_init); + + /* Update PFM & 10M TX idle timer */ + r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919); + + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + + /* DCO enable for 10M IDLE Power */ + r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006); + + /* For impedance matching */ 
+ phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050); + phy_set_bits(phydev, 0x14, BIT(15)); + + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000); + + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000); + phy_write_paged(phydev, 0x0006, 0x00, 0x5a00); + + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000); +} + +static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + /* Enable Delay cap */ + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Green Setting */ + r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222); + r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000); + r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000); + + /* For 4-corner performance improve */ + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x8b80); + phy_set_bits(phydev, 0x17, 0x0006); + phy_write(phydev, 0x1f, 0x0000); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); + + /* improve 10M EEE waveform */ + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); + + rtl8168f_config_eee_phy(phydev); + + /* Green feature */ + phy_write(phydev, 0x1f, 0x0003); + phy_set_bits(phydev, 0x19, BIT(0)); + phy_set_bits(phydev, 0x10, BIT(10)); + phy_write(phydev, 0x1f, 0x0000); + phy_modify_paged(phydev, 0x0005, 0x01, 0, BIT(8)); +} + +static void rtl8168f_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* For 4-corner performance improve */ + r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); + + /* Improve 10M EEE waveform */ + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + + rtl8168f_config_eee_phy(phydev); +} + +static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); + + rtl8168f_hw_phy_config(tp, phydev); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); +} + +static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp, phydev); +} + +static void rtl8411_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp, phydev); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); + + /* Channel 
estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); + + /* Modify green table for giga */ + r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100); + r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000); + + /* uc same-seed solution */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000); + + /* Green feature */ + phy_write(phydev, 0x1f, 0x0003); + phy_clear_bits(phydev, 0x19, BIT(0)); + phy_clear_bits(phydev, 0x10, BIT(10)); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168g_disable_aldps(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a43, 0x10, BIT(2), 0); +} + +static void rtl8168g_enable_gphy_10m(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11)); +} + +static void rtl8168g_phy_adjust_10m_aldps(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0); + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6)); + r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000); + phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003); +} + +static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + int ret; + + r8169_apply_firmware(tp); + + ret = phy_read_paged(phydev, 0x0a46, 0x10); + if (ret & BIT(8)) + phy_modify_paged(phydev, 0x0bcc, 0x12, BIT(15), 0); + else + phy_modify_paged(phydev, 0x0bcc, 0x12, 0, BIT(15)); + + ret = phy_read_paged(phydev, 0x0a46, 0x13); + if (ret & BIT(8)) + phy_modify_paged(phydev, 0x0c41, 0x15, 0, BIT(1)); + else + phy_modify_paged(phydev, 0x0c41, 0x15, BIT(1), 0); + + /* Enable PHY auto speed down */ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); + + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* EEE auto-fallback function */ + phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2)); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + /* Improve SWR Efficiency */ + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x5065); + phy_write(phydev, 0x14, 0xd065); + phy_write(phydev, 0x1f, 0x0bc8); + phy_write(phydev, 0x11, 0x5655); + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x14, 0x9065); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + u16 dout_tapbin; + u32 data; + + r8169_apply_firmware(tp); + + /* CHN EST parameters 
adjust - giga master */ + r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000); + r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000); + r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500); + r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00); + + /* CHN EST parameters adjust - giga slave */ + r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000); + r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000); + + /* CHN EST parameters adjust - fnet */ + r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200); + r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500); + r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00); + + /* enable R-tune & PGA-retune function */ + dout_tapbin = 0; + data = phy_read_paged(phydev, 0x0a46, 0x13); + data &= 3; + data <<= 2; + dout_tapbin |= data; + data = phy_read_paged(phydev, 0x0a46, 0x12); + data &= 0xc000; + data >>= 14; + dout_tapbin |= data; + dout_tapbin = ~(dout_tapbin ^ 0x08); + dout_tapbin <<= 12; + dout_tapbin &= 0xf000; + + r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800); + phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002); + + rtl8168g_enable_gphy_10m(phydev); + + /* SAR ADC performance */ + phy_modify_paged(phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14)); + + r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000); + + /* disable phy pfm mode */ + phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0); + + rtl8168g_disable_aldps(phydev); + rtl8168h_config_eee_phy(phydev); +} + +static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + u16 ioffset, rlen; + u32 data; + + r8169_apply_firmware(tp); + + /* CHIN EST parameter update */ + r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a); + + /* enable R-tune & PGA-retune function */ + r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800); + phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002); + + rtl8168g_enable_gphy_10m(phydev); + + ioffset = rtl8168h_2_get_adc_bias_ioffset(tp); + if (ioffset != 0xffff) + phy_write_paged(phydev, 0x0bcf, 0x16, ioffset); + + /* Modify rlen (TX LPF corner frequency) level */ + data = phy_read_paged(phydev, 0x0bcd, 0x16); + data &= 0x000f; + rlen = 0; + if (data > 3) + rlen = data - 3; + data = rlen | (rlen << 4) | (rlen << 8) | (rlen << 12); + phy_write_paged(phydev, 0x0bcd, 0x17, data); + + /* disable phy pfm mode */ + phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Enable PHY auto speed down */ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); + + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* Enable EEE auto-fallback function */ + phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2)); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + /* set rg_sel_sdm_rate */ + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + rtl8168g_disable_aldps(phydev); + 
rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + /* Set rg_sel_sdm_rate */ + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + /* Channel estimation parameters */ + r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00); + r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00); + r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500); + r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00); + r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800); + r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400); + r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500); + r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800); + r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00); + r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500); + r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100); + r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200); + r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400); + r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00); + r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00); + r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00); + r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00); + r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00); + r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400); + + /* Force PWM-mode */ + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x5065); + phy_write(phydev, 0x14, 0xd065); + phy_write(phydev, 0x1f, 0x0bc8); + phy_write(phydev, 0x12, 0x00ed); + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x14, 0x9065); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8117_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* CHN EST parameters adjust - fnet */ + r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800); + r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00); + r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000); + + r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000); + r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00); + r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600); + r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000); + r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800); + r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000); + r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00); + r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000); + r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800); + r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200); + r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800); + r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00); + r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300); + + r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800); + + rtl8168g_enable_gphy_10m(phydev); + + r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400); + + rtl8168g_disable_aldps(phydev); + rtl8168h_config_eee_phy(phydev); +} + +static void rtl8102e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0003 
}, + { 0x08, 0x441d }, + { 0x01, 0x9100 }, + { 0x1f, 0x0000 } + }; + + phy_set_bits(phydev, 0x11, BIT(12)); + phy_set_bits(phydev, 0x19, BIT(13)); + phy_set_bits(phydev, 0x10, BIT(15)); + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8401_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_set_bits(phydev, 0x11, BIT(12)); + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0003); +} + +static void rtl8105e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Disable ALDPS before ram code */ + phy_write(phydev, 0x18, 0x0310); + msleep(100); + + r8169_apply_firmware(tp); + + phy_write_paged(phydev, 0x0005, 0x1a, 0x0000); + phy_write_paged(phydev, 0x0004, 0x1c, 0x0000); + phy_write_paged(phydev, 0x0001, 0x15, 0x7701); +} + +static void rtl8402_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Disable ALDPS before setting firmware */ + phy_write(phydev, 0x18, 0x0310); + msleep(20); + + r8169_apply_firmware(tp); + + /* EEE setting */ + phy_write(phydev, 0x1f, 0x0004); + phy_write(phydev, 0x10, 0x401f); + phy_write(phydev, 0x19, 0x7030); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8106e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0004 }, + { 0x10, 0xc07f }, + { 0x19, 0x7030 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + phy_write(phydev, 0x18, 0x0310); + msleep(100); + + r8169_apply_firmware(tp); + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8125_legacy_force_mode(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa5b, 0x12, BIT(15), 0); +} + +static void rtl8125a_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084); + phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); + phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006); + phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006); + phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100); + phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000); + phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400); + phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff); + phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff); + + r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400); + r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300); + r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00); + r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000); + r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500); + r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300); + r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000); + r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000); + r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500); + r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00); + r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100); + r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038); + r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6); + + phy_write_paged(phydev, 0xbc3, 0x12, 0x5555); + phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00); + phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000); + rtl8168g_enable_gphy_10m(phydev); + + rtl8125a_config_eee_phy(phydev); +} + +static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + int i; + + phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); + phy_modify_paged(phydev, 
0xad1, 0x13, 0x03ff, 0x03ff); + phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006); + phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000); + phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002); + phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044); + phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000); + phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002); + phy_write_paged(phydev, 0xad4, 0x16, 0x00a8); + phy_write_paged(phydev, 0xac5, 0x16, 0x01ff); + phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80a2); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x16, 0x809c); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x1f, 0x0000); + + phy_write(phydev, 0x1f, 0x0a43); + phy_write(phydev, 0x13, 0x81B3); + phy_write(phydev, 0x14, 0x0043); + phy_write(phydev, 0x14, 0x00A7); + phy_write(phydev, 0x14, 0x00D6); + phy_write(phydev, 0x14, 0x00EC); + phy_write(phydev, 0x14, 0x00F6); + phy_write(phydev, 0x14, 0x00FB); + phy_write(phydev, 0x14, 0x00FD); + phy_write(phydev, 0x14, 0x00FF); + phy_write(phydev, 0x14, 0x00BB); + phy_write(phydev, 0x14, 0x0058); + phy_write(phydev, 0x14, 0x0029); + phy_write(phydev, 0x14, 0x0013); + phy_write(phydev, 0x14, 0x0009); + phy_write(phydev, 0x14, 0x0004); + phy_write(phydev, 0x14, 0x0002); + for (i = 0; i < 25; i++) + phy_write(phydev, 0x14, 0x0000); + phy_write(phydev, 0x1f, 0x0000); + + r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020F); + r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843); + + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000); + + r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100); + + phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00); + phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020); + phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000); + rtl8168g_enable_gphy_10m(phydev); + + rtl8125a_config_eee_phy(phydev); +} + +static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800); + phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090); + phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80f5); + phy_write(phydev, 0x17, 0x760e); + phy_write(phydev, 0x16, 0x8107); + phy_write(phydev, 0x17, 0x360e); + phy_write(phydev, 0x16, 0x8551); + phy_modify(phydev, 0x17, 0xff00, 0x0800); + phy_write(phydev, 0x1f, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000); + phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300); + + r8168g_phy_param(phydev, 0x8044, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x804a, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8050, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8056, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x805c, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8062, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8068, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x806e, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8074, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x807a, 0xffff, 0x2417); + + phy_modify_paged(phydev, 0xa4c, 0x15, 0x0000, 0x0040); + phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000); + + rtl8125_legacy_force_mode(phydev); + rtl8125b_config_eee_phy(phydev); +} + +void 
r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, + enum mac_version ver) +{ + static const rtl_phy_cfg_fct phy_configs[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config, + [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config, + [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config, + /* PCI-E devices. */ + [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config, + [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config, + [RTL_GIGA_MAC_VER_16] = NULL, + [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config, + [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config, + [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config, + [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config, + [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config, + [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config, + [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config, + [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_31] = NULL, + [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config, + [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config, + [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config, + [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config, + [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config, + [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config, + [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config, + [RTL_GIGA_MAC_VER_41] = NULL, + [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config, + [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config, + [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config, + [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config, + [RTL_GIGA_MAC_VER_60] = rtl8125a_1_hw_phy_config, + [RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config, + [RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config, + }; + + if (phy_configs[ver]) + phy_configs[ver](tp, phydev); +} diff --git a/devices/r8169/r8169_phy_config-5.10-orig.c b/devices/r8169/r8169_phy_config-5.10-orig.c new file mode 100644 index 00000000..913d030d --- /dev/null +++ b/devices/r8169/r8169_phy_config-5.10-orig.c @@ -0,0 +1,1368 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * r8169_phy_config.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include <linux/delay.h> +#include <linux/phy.h> + +#include "r8169.h" + +typedef void (*rtl_phy_cfg_fct)(struct rtl8169_private *tp, + struct phy_device *phydev); + +static void r8168d_modify_extpage(struct phy_device *phydev, int extpage, + int reg, u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0007); + + __phy_write(phydev, 0x1e, extpage); + __phy_modify(phydev, reg, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +static void r8168d_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0005); + + __phy_write(phydev, 0x05, parm); + __phy_modify(phydev, 0x06, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +static void r8168g_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0a43); + + __phy_write(phydev, 0x13, parm); + __phy_modify(phydev, 0x14, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +struct phy_reg { + u16 reg; + u16 val; +}; + +static void __rtl_writephy_batch(struct phy_device *phydev, + const struct phy_reg *regs, int len) +{ + phy_lock_mdio_bus(phydev); + + while (len-- > 0) { + __phy_write(phydev, regs->reg, regs->val); + regs++; + } + + phy_unlock_mdio_bus(phydev); +} + +#define rtl_writephy_batch(p, a) __rtl_writephy_batch(p, a, ARRAY_SIZE(a)) + +static void rtl8168f_config_eee_phy(struct phy_device *phydev) +{ + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8)); + r8168d_phy_param(phydev, 0x8b85, 0, BIT(13)); +} + +static void rtl8168g_config_eee_phy(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a43, 0x11, 0, BIT(4)); +} + +static void rtl8168h_config_eee_phy(struct phy_device *phydev) +{ + rtl8168g_config_eee_phy(phydev); + + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080); +} + +static void rtl8125a_config_eee_phy(struct phy_device *phydev) +{ + rtl8168h_config_eee_phy(phydev); + + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); +} + +static void rtl8125b_config_eee_phy(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000); +} + +static void rtl8169s_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x06, 0x006e }, + { 0x08, 0x0708 }, + { 0x15, 0x4000 }, + { 0x18, 0x65c7 }, + + { 0x1f, 0x0001 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x0000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf60 }, + { 0x01, 0x0140 }, + { 0x00, 0x0077 }, + { 0x04, 0x7800 }, + { 0x04, 0x7000 }, + + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf0f9 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xa000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf20 }, + { 0x01, 0x0140 }, + { 0x00, 0x00bb }, + { 0x04, 0xb800 }, + { 0x04, 0xb000 }, + + { 0x03, 0xdf41 }, + { 0x02, 0xdc60 }, + { 0x01, 0x6340 }, + { 0x00, 0x007d }, + { 0x04, 0xd800 }, + { 0x04, 0xd000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x100a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + + { 0x1f, 0x0000 }, + { 0x0b, 0x0000 }, + { 0x00, 
0x9200 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0002, 0x01, 0x90d0); +} + +static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x14, 0xfb54 }, + { 0x18, 0xf5c7 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0x8480 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x18, 0x67c7 }, + { 0x04, 0x2000 }, + { 0x03, 0x002f }, + { 0x02, 0x4360 }, + { 0x01, 0x0109 }, + { 0x00, 0x3022 }, + { 0x04, 0x2800 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write(phydev, 0x1f, 0x0001); + phy_set_bits(phydev, 0x16, BIT(0)); + phy_write(phydev, 0x10, 0xf41b); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0001, 0x10, 0xf41b); +} + +static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write(phydev, 0x1d, 0x0f00); + phy_write_paged(phydev, 0x0002, 0x0c, 0x1ec8); +} + +static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); + phy_write_paged(phydev, 0x0001, 0x1d, 0x3d98); +} + +static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1f, 0x0002 }, + { 0x00, 0x88d4 }, + { 0x01, 0x82b1 }, + { 0x03, 0x7002 }, + { 0x08, 0x9e30 }, + { 0x09, 0x01f0 }, + { 0x0a, 0x5500 }, + { 0x0c, 0x00c8 }, + { 0x1f, 0x0003 }, + { 0x12, 0xc096 }, + { 0x16, 0x000a }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x09, 0x2000 }, + { 0x09, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x14, 
BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x0761 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x16, BIT(0)); + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x5461 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x16, BIT(0)); + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } +}; + +static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + { 0x1f, 0x0002 } +}; + +static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp, + struct phy_device *phydev, + u16 val) +{ + u16 reg_val; + + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x001b); + reg_val = phy_read(phydev, 0x06); + phy_write(phydev, 0x1f, 0x0000); + + if (reg_val != val) + phydev_warn(phydev, "chipset not ready for firmware\n"); + else + r8169_apply_firmware(tp); +} + +static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0); + + /* + * Rx Error Issue + * Fine Tune Switching regulator parameter + */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x0b, 0x00ef, 0x0010); + phy_modify(phydev, 0x0c, 0x5d00, 0xa200); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + int val; + + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1); + + val = phy_read(phydev, 0x0d); + + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + phy_write(phydev, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + phy_write(phydev, 0x0d, val | set[i]); + } + } else { + phy_write_paged(phydev, 0x0002, 0x05, 0x6662); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x6662); + } + + /* RSET couple improve */ + phy_write(phydev, 0x1f, 0x0002); + phy_set_bits(phydev, 0x0d, 0x0300); + 
phy_set_bits(phydev, 0x0f, 0x0010); + + /* Fine tune PLL performance */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x02, 0x0600, 0x0100); + phy_clear_bits(phydev, 0x03, 0xe000); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168d_apply_firmware_cond(tp, phydev, 0xbf00); +} + +static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + int val; + + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1); + + val = phy_read(phydev, 0x0d); + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + phy_write(phydev, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + phy_write(phydev, 0x0d, val | set[i]); + } + } else { + phy_write_paged(phydev, 0x0002, 0x05, 0x2642); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x2642); + } + + /* Fine tune PLL performance */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x02, 0x0600, 0x0100); + phy_clear_bits(phydev, 0x03, 0xe000); + phy_write(phydev, 0x1f, 0x0000); + + /* Switching regulator Slew rate */ + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0017); + + rtl8168d_apply_firmware_cond(tp, phydev, 0xb300); +} + +static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x10, 0x0008 }, + { 0x0d, 0x006c }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0xa4d8 }, + { 0x09, 0x281c }, + { 0x07, 0x2883 }, + { 0x0a, 0x6b35 }, + { 0x1d, 0x3da4 }, + { 0x1c, 0xeffd }, + { 0x14, 0x7f52 }, + { 0x18, 0x7fc6 }, + { 0x08, 0x0601 }, + { 0x06, 0x4063 }, + { 0x10, 0xf074 }, + { 0x1f, 0x0003 }, + { 0x13, 0x0789 }, + { 0x12, 0xf4bd }, + { 0x1a, 0x04fd }, + { 0x14, 0x84b0 }, + { 0x1f, 0x0000 }, + { 0x00, 0x9200 }, + + { 0x1f, 0x0005 }, + { 0x01, 0x0340 }, + { 0x1f, 0x0001 }, + { 0x04, 0x4000 }, + { 0x03, 0x1d21 }, + { 0x02, 0x0c32 }, + { 0x01, 0x0200 }, + { 0x00, 0x5554 }, + { 0x04, 0x4800 }, + { 0x04, 0x4000 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + { 0x1f, 0x0000 }, + }; + + rtl_writephy_batch(phydev, phy_reg_init); + r8168d_modify_extpage(phydev, 0x0023, 0x16, 0xffff, 0x0000); +} + +static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0001, 0x17, 0x0cc0); + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0xffff, 0x0040); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0001 }, + { 0x0b, 0x6c20 }, + { 0x07, 0x2872 }, + { 0x1c, 0xefff }, + { 0x1f, 0x0003 }, + { 0x14, 0x6420 }, + { 0x1f, 0x0000 }, + }; + + r8169_apply_firmware(tp); + + /* Enable Delay cap */ + r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896); + + rtl_writephy_batch(phydev, phy_reg_init); + + /* Update PFM & 10M TX idle timer */ + r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919); + + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + + /* DCO enable for 10M IDLE Power */ + r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006); + + /* For impedance matching */ + 
phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050); + phy_set_bits(phydev, 0x14, BIT(15)); + + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000); + + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000); + phy_write_paged(phydev, 0x0006, 0x00, 0x5a00); + + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000); +} + +static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + /* Enable Delay cap */ + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Green Setting */ + r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222); + r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000); + r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000); + + /* For 4-corner performance improve */ + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x8b80); + phy_set_bits(phydev, 0x17, 0x0006); + phy_write(phydev, 0x1f, 0x0000); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); + + /* improve 10M EEE waveform */ + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); + + rtl8168f_config_eee_phy(phydev); + + /* Green feature */ + phy_write(phydev, 0x1f, 0x0003); + phy_set_bits(phydev, 0x19, BIT(0)); + phy_set_bits(phydev, 0x10, BIT(10)); + phy_write(phydev, 0x1f, 0x0000); + phy_modify_paged(phydev, 0x0005, 0x01, 0, BIT(8)); +} + +static void rtl8168f_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* For 4-corner performance improve */ + r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); + + /* Improve 10M EEE waveform */ + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + + rtl8168f_config_eee_phy(phydev); +} + +static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); + + rtl8168f_hw_phy_config(tp, phydev); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); +} + +static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp, phydev); +} + +static void rtl8411_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp, phydev); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); + + /* Channel 
estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); + + /* Modify green table for giga */ + r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100); + r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000); + + /* uc same-seed solution */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000); + + /* Green feature */ + phy_write(phydev, 0x1f, 0x0003); + phy_clear_bits(phydev, 0x19, BIT(0)); + phy_clear_bits(phydev, 0x10, BIT(10)); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168g_disable_aldps(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a43, 0x10, BIT(2), 0); +} + +static void rtl8168g_enable_gphy_10m(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11)); +} + +static void rtl8168g_phy_adjust_10m_aldps(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0); + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6)); + r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000); + phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003); +} + +static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + int ret; + + r8169_apply_firmware(tp); + + ret = phy_read_paged(phydev, 0x0a46, 0x10); + if (ret & BIT(8)) + phy_modify_paged(phydev, 0x0bcc, 0x12, BIT(15), 0); + else + phy_modify_paged(phydev, 0x0bcc, 0x12, 0, BIT(15)); + + ret = phy_read_paged(phydev, 0x0a46, 0x13); + if (ret & BIT(8)) + phy_modify_paged(phydev, 0x0c41, 0x15, 0, BIT(1)); + else + phy_modify_paged(phydev, 0x0c41, 0x15, BIT(1), 0); + + /* Enable PHY auto speed down */ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); + + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* EEE auto-fallback function */ + phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2)); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + /* Improve SWR Efficiency */ + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x5065); + phy_write(phydev, 0x14, 0xd065); + phy_write(phydev, 0x1f, 0x0bc8); + phy_write(phydev, 0x11, 0x5655); + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x14, 0x9065); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + u16 dout_tapbin; + u32 data; + + r8169_apply_firmware(tp); + + /* CHN EST parameters 
adjust - giga master */ + r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000); + r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000); + r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500); + r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00); + + /* CHN EST parameters adjust - giga slave */ + r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000); + r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000); + + /* CHN EST parameters adjust - fnet */ + r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200); + r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500); + r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00); + + /* enable R-tune & PGA-retune function */ + dout_tapbin = 0; + data = phy_read_paged(phydev, 0x0a46, 0x13); + data &= 3; + data <<= 2; + dout_tapbin |= data; + data = phy_read_paged(phydev, 0x0a46, 0x12); + data &= 0xc000; + data >>= 14; + dout_tapbin |= data; + dout_tapbin = ~(dout_tapbin ^ 0x08); + dout_tapbin <<= 12; + dout_tapbin &= 0xf000; + + r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800); + phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002); + + rtl8168g_enable_gphy_10m(phydev); + + /* SAR ADC performance */ + phy_modify_paged(phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14)); + + r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000); + + /* disable phy pfm mode */ + phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0); + + rtl8168g_disable_aldps(phydev); + rtl8168h_config_eee_phy(phydev); +} + +static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + u16 ioffset, rlen; + u32 data; + + r8169_apply_firmware(tp); + + /* CHIN EST parameter update */ + r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a); + + /* enable R-tune & PGA-retune function */ + r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800); + phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002); + + rtl8168g_enable_gphy_10m(phydev); + + ioffset = rtl8168h_2_get_adc_bias_ioffset(tp); + if (ioffset != 0xffff) + phy_write_paged(phydev, 0x0bcf, 0x16, ioffset); + + /* Modify rlen (TX LPF corner frequency) level */ + data = phy_read_paged(phydev, 0x0bcd, 0x16); + data &= 0x000f; + rlen = 0; + if (data > 3) + rlen = data - 3; + data = rlen | (rlen << 4) | (rlen << 8) | (rlen << 12); + phy_write_paged(phydev, 0x0bcd, 0x17, data); + + /* disable phy pfm mode */ + phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Enable PHY auto speed down */ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); + + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* Enable EEE auto-fallback function */ + phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2)); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + /* set rg_sel_sdm_rate */ + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + rtl8168g_disable_aldps(phydev); + 
rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + /* Set rg_sel_sdm_rate */ + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + /* Channel estimation parameters */ + r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00); + r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00); + r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500); + r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00); + r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800); + r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400); + r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500); + r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800); + r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00); + r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500); + r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100); + r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200); + r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400); + r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00); + r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00); + r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00); + r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00); + r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00); + r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400); + + /* Force PWM-mode */ + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x5065); + phy_write(phydev, 0x14, 0xd065); + phy_write(phydev, 0x1f, 0x0bc8); + phy_write(phydev, 0x12, 0x00ed); + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x14, 0x9065); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8117_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* CHN EST parameters adjust - fnet */ + r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800); + r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00); + r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000); + + r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000); + r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00); + r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600); + r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000); + r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800); + r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000); + r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00); + r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000); + r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800); + r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200); + r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800); + r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00); + r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300); + + r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800); + + rtl8168g_enable_gphy_10m(phydev); + + r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400); + + rtl8168g_disable_aldps(phydev); + rtl8168h_config_eee_phy(phydev); +} + +static void rtl8102e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0003 
}, + { 0x08, 0x441d }, + { 0x01, 0x9100 }, + { 0x1f, 0x0000 } + }; + + phy_set_bits(phydev, 0x11, BIT(12)); + phy_set_bits(phydev, 0x19, BIT(13)); + phy_set_bits(phydev, 0x10, BIT(15)); + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8401_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_set_bits(phydev, 0x11, BIT(12)); + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0003); +} + +static void rtl8105e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Disable ALDPS before ram code */ + phy_write(phydev, 0x18, 0x0310); + msleep(100); + + r8169_apply_firmware(tp); + + phy_write_paged(phydev, 0x0005, 0x1a, 0x0000); + phy_write_paged(phydev, 0x0004, 0x1c, 0x0000); + phy_write_paged(phydev, 0x0001, 0x15, 0x7701); +} + +static void rtl8402_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Disable ALDPS before setting firmware */ + phy_write(phydev, 0x18, 0x0310); + msleep(20); + + r8169_apply_firmware(tp); + + /* EEE setting */ + phy_write(phydev, 0x1f, 0x0004); + phy_write(phydev, 0x10, 0x401f); + phy_write(phydev, 0x19, 0x7030); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8106e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0004 }, + { 0x10, 0xc07f }, + { 0x19, 0x7030 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + phy_write(phydev, 0x18, 0x0310); + msleep(100); + + r8169_apply_firmware(tp); + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8125_legacy_force_mode(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa5b, 0x12, BIT(15), 0); +} + +static void rtl8125a_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084); + phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); + phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006); + phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006); + phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100); + phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000); + phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400); + phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff); + phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff); + + r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400); + r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300); + r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00); + r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000); + r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500); + r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300); + r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000); + r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000); + r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500); + r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00); + r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100); + r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038); + r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6); + + phy_write_paged(phydev, 0xbc3, 0x12, 0x5555); + phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00); + phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000); + rtl8168g_enable_gphy_10m(phydev); + + rtl8125a_config_eee_phy(phydev); +} + +static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + int i; + + phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); + phy_modify_paged(phydev, 
0xad1, 0x13, 0x03ff, 0x03ff); + phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006); + phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000); + phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002); + phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044); + phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000); + phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002); + phy_write_paged(phydev, 0xad4, 0x16, 0x00a8); + phy_write_paged(phydev, 0xac5, 0x16, 0x01ff); + phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80a2); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x16, 0x809c); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x1f, 0x0000); + + phy_write(phydev, 0x1f, 0x0a43); + phy_write(phydev, 0x13, 0x81B3); + phy_write(phydev, 0x14, 0x0043); + phy_write(phydev, 0x14, 0x00A7); + phy_write(phydev, 0x14, 0x00D6); + phy_write(phydev, 0x14, 0x00EC); + phy_write(phydev, 0x14, 0x00F6); + phy_write(phydev, 0x14, 0x00FB); + phy_write(phydev, 0x14, 0x00FD); + phy_write(phydev, 0x14, 0x00FF); + phy_write(phydev, 0x14, 0x00BB); + phy_write(phydev, 0x14, 0x0058); + phy_write(phydev, 0x14, 0x0029); + phy_write(phydev, 0x14, 0x0013); + phy_write(phydev, 0x14, 0x0009); + phy_write(phydev, 0x14, 0x0004); + phy_write(phydev, 0x14, 0x0002); + for (i = 0; i < 25; i++) + phy_write(phydev, 0x14, 0x0000); + phy_write(phydev, 0x1f, 0x0000); + + r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020F); + r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843); + + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000); + + r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100); + + phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00); + phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020); + phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000); + rtl8168g_enable_gphy_10m(phydev); + + rtl8125a_config_eee_phy(phydev); +} + +static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800); + phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090); + phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80f5); + phy_write(phydev, 0x17, 0x760e); + phy_write(phydev, 0x16, 0x8107); + phy_write(phydev, 0x17, 0x360e); + phy_write(phydev, 0x16, 0x8551); + phy_modify(phydev, 0x17, 0xff00, 0x0800); + phy_write(phydev, 0x1f, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000); + phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300); + + r8168g_phy_param(phydev, 0x8044, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x804a, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8050, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8056, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x805c, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8062, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8068, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x806e, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8074, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x807a, 0xffff, 0x2417); + + phy_modify_paged(phydev, 0xa4c, 0x15, 0x0000, 0x0040); + phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000); + + rtl8125_legacy_force_mode(phydev); + rtl8125b_config_eee_phy(phydev); +} + +void 
r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, + enum mac_version ver) +{ + static const rtl_phy_cfg_fct phy_configs[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config, + [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config, + [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config, + /* PCI-E devices. */ + [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config, + [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config, + [RTL_GIGA_MAC_VER_16] = NULL, + [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config, + [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config, + [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config, + [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config, + [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config, + [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config, + [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config, + [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_31] = NULL, + [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config, + [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config, + [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config, + [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config, + [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config, + [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config, + [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config, + [RTL_GIGA_MAC_VER_41] = NULL, + [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config, + [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config, + [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config, + [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config, + [RTL_GIGA_MAC_VER_60] = rtl8125a_1_hw_phy_config, + [RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config, + [RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config, + }; + + if (phy_configs[ver]) + phy_configs[ver](tp, phydev); +} diff --git a/devices/r8169/r8169_phy_config-5.14-ethercat.c b/devices/r8169/r8169_phy_config-5.14-ethercat.c new file mode 100644 index 00000000..11caba68 --- /dev/null +++ b/devices/r8169/r8169_phy_config-5.14-ethercat.c @@ -0,0 +1,1369 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * r8169_phy_config.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +#include "r8169-5.14-ethercat.h" + +typedef void (*rtl_phy_cfg_fct)(struct rtl8169_private *tp, + struct phy_device *phydev); + +static void r8168d_modify_extpage(struct phy_device *phydev, int extpage, + int reg, u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0007); + + __phy_write(phydev, 0x1e, extpage); + __phy_modify(phydev, reg, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +static void r8168d_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0005); + + __phy_write(phydev, 0x05, parm); + __phy_modify(phydev, 0x06, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +static void r8168g_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0a43); + + __phy_write(phydev, 0x13, parm); + __phy_modify(phydev, 0x14, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +struct phy_reg { + u16 reg; + u16 val; +}; + +static void __rtl_writephy_batch(struct phy_device *phydev, + const struct phy_reg *regs, int len) +{ + phy_lock_mdio_bus(phydev); + + while (len-- > 0) { + __phy_write(phydev, regs->reg, regs->val); + regs++; + } + + phy_unlock_mdio_bus(phydev); +} + +#define rtl_writephy_batch(p, a) __rtl_writephy_batch(p, a, ARRAY_SIZE(a)) + +static void rtl8168f_config_eee_phy(struct phy_device *phydev) +{ + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8)); + r8168d_phy_param(phydev, 0x8b85, 0, BIT(13)); +} + +static void rtl8168g_config_eee_phy(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a43, 0x11, 0, BIT(4)); +} + +static void rtl8168h_config_eee_phy(struct phy_device *phydev) +{ + rtl8168g_config_eee_phy(phydev); + + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080); +} + +static void rtl8125a_config_eee_phy(struct phy_device *phydev) +{ + rtl8168h_config_eee_phy(phydev); + + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); +} + +static void rtl8125b_config_eee_phy(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000); +} + +static void rtl8169s_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x06, 0x006e }, + { 0x08, 0x0708 }, + { 0x15, 0x4000 }, + { 0x18, 0x65c7 }, + + { 0x1f, 0x0001 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x0000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf60 }, + { 0x01, 0x0140 }, + { 0x00, 0x0077 }, + { 0x04, 0x7800 }, + { 0x04, 0x7000 }, + + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf0f9 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xa000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf20 }, + { 0x01, 0x0140 }, + { 0x00, 0x00bb }, + { 0x04, 0xb800 }, + { 0x04, 0xb000 }, + + { 0x03, 0xdf41 }, + { 0x02, 0xdc60 }, + { 0x01, 0x6340 }, + { 0x00, 0x007d }, + { 0x04, 0xd800 }, + { 0x04, 0xd000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x100a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + + { 0x1f, 0x0000 }, + { 0x0b, 
0x0000 }, + { 0x00, 0x9200 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0002, 0x01, 0x90d0); +} + +static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x14, 0xfb54 }, + { 0x18, 0xf5c7 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0x8480 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x18, 0x67c7 }, + { 0x04, 0x2000 }, + { 0x03, 0x002f }, + { 0x02, 0x4360 }, + { 0x01, 0x0109 }, + { 0x00, 0x3022 }, + { 0x04, 0x2800 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write(phydev, 0x1f, 0x0001); + phy_set_bits(phydev, 0x16, BIT(0)); + phy_write(phydev, 0x10, 0xf41b); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0001, 0x10, 0xf41b); +} + +static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write(phydev, 0x1d, 0x0f00); + phy_write_paged(phydev, 0x0002, 0x0c, 0x1ec8); +} + +static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); + phy_write_paged(phydev, 0x0001, 0x1d, 0x3d98); +} + +static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1f, 0x0002 }, + { 0x00, 0x88d4 }, + { 0x01, 0x82b1 }, + { 0x03, 0x7002 }, + { 0x08, 0x9e30 }, + { 0x09, 0x01f0 }, + { 0x0a, 0x5500 }, + { 0x0c, 0x00c8 }, + { 0x1f, 0x0003 }, + { 0x12, 0xc096 }, + { 0x16, 0x000a }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x09, 0x2000 }, + { 0x09, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + 
phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x0761 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x16, BIT(0)); + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x5461 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x16, BIT(0)); + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } +}; + +static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + { 0x1f, 0x0002 } +}; + +static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp, + struct phy_device *phydev, + u16 val) +{ + u16 reg_val; + + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x001b); + reg_val = phy_read(phydev, 0x06); + phy_write(phydev, 0x1f, 0x0000); + + if (reg_val != val) + phydev_warn(phydev, "chipset not ready for firmware\n"); + else + r8169_apply_firmware(tp); +} + +static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0); + + /* + * Rx Error Issue + * Fine Tune Switching regulator parameter + */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x0b, 0x00ef, 0x0010); + phy_modify(phydev, 0x0c, 0x5d00, 0xa200); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + int val; + + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1); + + val = phy_read(phydev, 0x0d); + + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + phy_write(phydev, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + phy_write(phydev, 0x0d, val | set[i]); + } + } else { + phy_write_paged(phydev, 0x0002, 0x05, 0x6662); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x6662); + } + + /* RSET couple improve */ + phy_write(phydev, 0x1f, 0x0002); + phy_set_bits(phydev, 
0x0d, 0x0300); + phy_set_bits(phydev, 0x0f, 0x0010); + + /* Fine tune PLL performance */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x02, 0x0600, 0x0100); + phy_clear_bits(phydev, 0x03, 0xe000); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168d_apply_firmware_cond(tp, phydev, 0xbf00); +} + +static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + int val; + + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1); + + val = phy_read(phydev, 0x0d); + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + phy_write(phydev, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + phy_write(phydev, 0x0d, val | set[i]); + } + } else { + phy_write_paged(phydev, 0x0002, 0x05, 0x2642); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x2642); + } + + /* Fine tune PLL performance */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x02, 0x0600, 0x0100); + phy_clear_bits(phydev, 0x03, 0xe000); + phy_write(phydev, 0x1f, 0x0000); + + /* Switching regulator Slew rate */ + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0017); + + rtl8168d_apply_firmware_cond(tp, phydev, 0xb300); +} + +static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x10, 0x0008 }, + { 0x0d, 0x006c }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0xa4d8 }, + { 0x09, 0x281c }, + { 0x07, 0x2883 }, + { 0x0a, 0x6b35 }, + { 0x1d, 0x3da4 }, + { 0x1c, 0xeffd }, + { 0x14, 0x7f52 }, + { 0x18, 0x7fc6 }, + { 0x08, 0x0601 }, + { 0x06, 0x4063 }, + { 0x10, 0xf074 }, + { 0x1f, 0x0003 }, + { 0x13, 0x0789 }, + { 0x12, 0xf4bd }, + { 0x1a, 0x04fd }, + { 0x14, 0x84b0 }, + { 0x1f, 0x0000 }, + { 0x00, 0x9200 }, + + { 0x1f, 0x0005 }, + { 0x01, 0x0340 }, + { 0x1f, 0x0001 }, + { 0x04, 0x4000 }, + { 0x03, 0x1d21 }, + { 0x02, 0x0c32 }, + { 0x01, 0x0200 }, + { 0x00, 0x5554 }, + { 0x04, 0x4800 }, + { 0x04, 0x4000 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + { 0x1f, 0x0000 }, + }; + + rtl_writephy_batch(phydev, phy_reg_init); + r8168d_modify_extpage(phydev, 0x0023, 0x16, 0xffff, 0x0000); +} + +static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0001, 0x17, 0x0cc0); + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0xffff, 0x0040); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0001 }, + { 0x0b, 0x6c20 }, + { 0x07, 0x2872 }, + { 0x1c, 0xefff }, + { 0x1f, 0x0003 }, + { 0x14, 0x6420 }, + { 0x1f, 0x0000 }, + }; + + r8169_apply_firmware(tp); + + /* Enable Delay cap */ + r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896); + + rtl_writephy_batch(phydev, phy_reg_init); + + /* Update PFM & 10M TX idle timer */ + r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919); + + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + + /* DCO enable for 10M IDLE Power */ + r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006); + + /* For impedance matching */ 
+ phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050); + phy_set_bits(phydev, 0x14, BIT(15)); + + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000); + + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000); + phy_write_paged(phydev, 0x0006, 0x00, 0x5a00); + + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000); +} + +static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + /* Enable Delay cap */ + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Green Setting */ + r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222); + r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000); + r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000); + + /* For 4-corner performance improve */ + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x8b80); + phy_set_bits(phydev, 0x17, 0x0006); + phy_write(phydev, 0x1f, 0x0000); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); + + /* improve 10M EEE waveform */ + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); + + rtl8168f_config_eee_phy(phydev); + + /* Green feature */ + phy_write(phydev, 0x1f, 0x0003); + phy_set_bits(phydev, 0x19, BIT(0)); + phy_set_bits(phydev, 0x10, BIT(10)); + phy_write(phydev, 0x1f, 0x0000); + phy_modify_paged(phydev, 0x0005, 0x01, 0, BIT(8)); +} + +static void rtl8168f_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* For 4-corner performance improve */ + r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); + + /* Improve 10M EEE waveform */ + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + + rtl8168f_config_eee_phy(phydev); +} + +static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); + + rtl8168f_hw_phy_config(tp, phydev); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); +} + +static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp, phydev); +} + +static void rtl8411_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp, phydev); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); + + /* Channel 
estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); + + /* Modify green table for giga */ + r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100); + r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000); + + /* uc same-seed solution */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000); + + /* Green feature */ + phy_write(phydev, 0x1f, 0x0003); + phy_clear_bits(phydev, 0x19, BIT(0)); + phy_clear_bits(phydev, 0x10, BIT(10)); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168g_disable_aldps(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a43, 0x10, BIT(2), 0); +} + +static void rtl8168g_enable_gphy_10m(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11)); +} + +static void rtl8168g_phy_adjust_10m_aldps(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0); + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6)); + r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000); + phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003); +} + +static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + int ret; + + r8169_apply_firmware(tp); + + ret = phy_read_paged(phydev, 0x0a46, 0x10); + if (ret & BIT(8)) + phy_modify_paged(phydev, 0x0bcc, 0x12, BIT(15), 0); + else + phy_modify_paged(phydev, 0x0bcc, 0x12, 0, BIT(15)); + + ret = phy_read_paged(phydev, 0x0a46, 0x13); + if (ret & BIT(8)) + phy_modify_paged(phydev, 0x0c41, 0x15, 0, BIT(1)); + else + phy_modify_paged(phydev, 0x0c41, 0x15, BIT(1), 0); + + /* Enable PHY auto speed down */ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); + + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* EEE auto-fallback function */ + phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2)); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + /* Improve SWR Efficiency */ + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x5065); + phy_write(phydev, 0x14, 0xd065); + phy_write(phydev, 0x1f, 0x0bc8); + phy_write(phydev, 0x11, 0x5655); + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x14, 0x9065); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + u16 dout_tapbin; + u32 data; + + r8169_apply_firmware(tp); + + /* CHN EST parameters 
adjust - giga master */ + r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000); + r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000); + r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500); + r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00); + + /* CHN EST parameters adjust - giga slave */ + r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000); + r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000); + + /* CHN EST parameters adjust - fnet */ + r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200); + r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500); + r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00); + + /* enable R-tune & PGA-retune function */ + dout_tapbin = 0; + data = phy_read_paged(phydev, 0x0a46, 0x13); + data &= 3; + data <<= 2; + dout_tapbin |= data; + data = phy_read_paged(phydev, 0x0a46, 0x12); + data &= 0xc000; + data >>= 14; + dout_tapbin |= data; + dout_tapbin = ~(dout_tapbin ^ 0x08); + dout_tapbin <<= 12; + dout_tapbin &= 0xf000; + + r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800); + phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002); + + rtl8168g_enable_gphy_10m(phydev); + + /* SAR ADC performance */ + phy_modify_paged(phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14)); + + r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000); + + /* disable phy pfm mode */ + phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0); + + rtl8168g_disable_aldps(phydev); + rtl8168h_config_eee_phy(phydev); +} + +static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + u16 ioffset, rlen; + u32 data; + + r8169_apply_firmware(tp); + + /* CHIN EST parameter update */ + r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a); + + /* enable R-tune & PGA-retune function */ + r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800); + phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002); + + rtl8168g_enable_gphy_10m(phydev); + + ioffset = rtl8168h_2_get_adc_bias_ioffset(tp); + if (ioffset != 0xffff) + phy_write_paged(phydev, 0x0bcf, 0x16, ioffset); + + /* Modify rlen (TX LPF corner frequency) level */ + data = phy_read_paged(phydev, 0x0bcd, 0x16); + data &= 0x000f; + rlen = 0; + if (data > 3) + rlen = data - 3; + data = rlen | (rlen << 4) | (rlen << 8) | (rlen << 12); + phy_write_paged(phydev, 0x0bcd, 0x17, data); + + /* disable phy pfm mode */ + phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Enable PHY auto speed down */ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); + + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* Enable EEE auto-fallback function */ + phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2)); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + /* set rg_sel_sdm_rate */ + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + rtl8168g_disable_aldps(phydev); + 
rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + /* Set rg_sel_sdm_rate */ + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + /* Channel estimation parameters */ + r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00); + r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00); + r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500); + r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00); + r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800); + r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400); + r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500); + r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800); + r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00); + r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500); + r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100); + r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200); + r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400); + r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00); + r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00); + r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00); + r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00); + r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00); + r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400); + + /* Force PWM-mode */ + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x5065); + phy_write(phydev, 0x14, 0xd065); + phy_write(phydev, 0x1f, 0x0bc8); + phy_write(phydev, 0x12, 0x00ed); + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x14, 0x9065); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8117_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* CHN EST parameters adjust - fnet */ + r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800); + r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00); + r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000); + + r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000); + r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00); + r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600); + r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000); + r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800); + r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000); + r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00); + r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000); + r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800); + r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200); + r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800); + r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00); + r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300); + + r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800); + + rtl8168g_enable_gphy_10m(phydev); + + r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400); + + rtl8168g_disable_aldps(phydev); + rtl8168h_config_eee_phy(phydev); +} + +static void rtl8102e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0003 
}, + { 0x08, 0x441d }, + { 0x01, 0x9100 }, + { 0x1f, 0x0000 } + }; + + phy_set_bits(phydev, 0x11, BIT(12)); + phy_set_bits(phydev, 0x19, BIT(13)); + phy_set_bits(phydev, 0x10, BIT(15)); + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8401_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_set_bits(phydev, 0x11, BIT(12)); + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0003); +} + +static void rtl8105e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Disable ALDPS before ram code */ + phy_write(phydev, 0x18, 0x0310); + msleep(100); + + r8169_apply_firmware(tp); + + phy_write_paged(phydev, 0x0005, 0x1a, 0x0000); + phy_write_paged(phydev, 0x0004, 0x1c, 0x0000); + phy_write_paged(phydev, 0x0001, 0x15, 0x7701); +} + +static void rtl8402_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Disable ALDPS before setting firmware */ + phy_write(phydev, 0x18, 0x0310); + msleep(20); + + r8169_apply_firmware(tp); + + /* EEE setting */ + phy_write(phydev, 0x1f, 0x0004); + phy_write(phydev, 0x10, 0x401f); + phy_write(phydev, 0x19, 0x7030); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8106e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0004 }, + { 0x10, 0xc07f }, + { 0x19, 0x7030 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + phy_write(phydev, 0x18, 0x0310); + msleep(100); + + r8169_apply_firmware(tp); + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8125_legacy_force_mode(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa5b, 0x12, BIT(15), 0); +} + +static void rtl8125a_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084); + phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); + phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006); + phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006); + phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100); + phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000); + phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400); + phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff); + phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff); + + r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400); + r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300); + r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00); + r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000); + r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500); + r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300); + r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000); + r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000); + r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500); + r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00); + r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100); + r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038); + r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6); + + phy_write_paged(phydev, 0xbc3, 0x12, 0x5555); + phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00); + phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000); + rtl8168g_enable_gphy_10m(phydev); + + rtl8125a_config_eee_phy(phydev); +} + +static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + int i; + + phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); + phy_modify_paged(phydev, 
0xad1, 0x13, 0x03ff, 0x03ff); + phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006); + phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000); + phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002); + phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044); + phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000); + phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002); + phy_write_paged(phydev, 0xad4, 0x16, 0x00a8); + phy_write_paged(phydev, 0xac5, 0x16, 0x01ff); + phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80a2); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x16, 0x809c); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x1f, 0x0000); + + phy_write(phydev, 0x1f, 0x0a43); + phy_write(phydev, 0x13, 0x81B3); + phy_write(phydev, 0x14, 0x0043); + phy_write(phydev, 0x14, 0x00A7); + phy_write(phydev, 0x14, 0x00D6); + phy_write(phydev, 0x14, 0x00EC); + phy_write(phydev, 0x14, 0x00F6); + phy_write(phydev, 0x14, 0x00FB); + phy_write(phydev, 0x14, 0x00FD); + phy_write(phydev, 0x14, 0x00FF); + phy_write(phydev, 0x14, 0x00BB); + phy_write(phydev, 0x14, 0x0058); + phy_write(phydev, 0x14, 0x0029); + phy_write(phydev, 0x14, 0x0013); + phy_write(phydev, 0x14, 0x0009); + phy_write(phydev, 0x14, 0x0004); + phy_write(phydev, 0x14, 0x0002); + for (i = 0; i < 25; i++) + phy_write(phydev, 0x14, 0x0000); + phy_write(phydev, 0x1f, 0x0000); + + r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020F); + r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843); + + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000); + + r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100); + + phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00); + phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020); + phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000); + rtl8168g_enable_gphy_10m(phydev); + + rtl8125a_config_eee_phy(phydev); +} + +static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800); + phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090); + phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80f5); + phy_write(phydev, 0x17, 0x760e); + phy_write(phydev, 0x16, 0x8107); + phy_write(phydev, 0x17, 0x360e); + phy_write(phydev, 0x16, 0x8551); + phy_modify(phydev, 0x17, 0xff00, 0x0800); + phy_write(phydev, 0x1f, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000); + phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300); + + r8168g_phy_param(phydev, 0x8044, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x804a, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8050, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8056, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x805c, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8062, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8068, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x806e, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8074, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x807a, 0xffff, 0x2417); + + phy_modify_paged(phydev, 0xa4c, 0x15, 0x0000, 0x0040); + phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000); + + rtl8125_legacy_force_mode(phydev); + rtl8125b_config_eee_phy(phydev); +} + +void 
r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, + enum mac_version ver) +{ + static const rtl_phy_cfg_fct phy_configs[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config, + [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config, + [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config, + /* PCI-E devices. */ + [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config, + [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config, + [RTL_GIGA_MAC_VER_16] = NULL, + [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config, + [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config, + [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config, + [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config, + [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config, + [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config, + [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config, + [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_31] = NULL, + [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config, + [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config, + [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config, + [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config, + [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config, + [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config, + [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config, + [RTL_GIGA_MAC_VER_41] = NULL, + [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config, + [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config, + [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config, + [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config, + [RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config, + [RTL_GIGA_MAC_VER_60] = rtl8125a_1_hw_phy_config, + [RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config, + [RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config, + }; + + if (phy_configs[ver]) + phy_configs[ver](tp, phydev); +} diff --git a/devices/r8169/r8169_phy_config-5.14-orig.c b/devices/r8169/r8169_phy_config-5.14-orig.c new file mode 100644 index 00000000..50f0f621 --- /dev/null +++ b/devices/r8169/r8169_phy_config-5.14-orig.c @@ -0,0 +1,1369 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * r8169_phy_config.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +#include "r8169.h" + +typedef void (*rtl_phy_cfg_fct)(struct rtl8169_private *tp, + struct phy_device *phydev); + +static void r8168d_modify_extpage(struct phy_device *phydev, int extpage, + int reg, u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0007); + + __phy_write(phydev, 0x1e, extpage); + __phy_modify(phydev, reg, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +static void r8168d_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0005); + + __phy_write(phydev, 0x05, parm); + __phy_modify(phydev, 0x06, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +static void r8168g_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0a43); + + __phy_write(phydev, 0x13, parm); + __phy_modify(phydev, 0x14, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +struct phy_reg { + u16 reg; + u16 val; +}; + +static void __rtl_writephy_batch(struct phy_device *phydev, + const struct phy_reg *regs, int len) +{ + phy_lock_mdio_bus(phydev); + + while (len-- > 0) { + __phy_write(phydev, regs->reg, regs->val); + regs++; + } + + phy_unlock_mdio_bus(phydev); +} + +#define rtl_writephy_batch(p, a) __rtl_writephy_batch(p, a, ARRAY_SIZE(a)) + +static void rtl8168f_config_eee_phy(struct phy_device *phydev) +{ + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8)); + r8168d_phy_param(phydev, 0x8b85, 0, BIT(13)); +} + +static void rtl8168g_config_eee_phy(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a43, 0x11, 0, BIT(4)); +} + +static void rtl8168h_config_eee_phy(struct phy_device *phydev) +{ + rtl8168g_config_eee_phy(phydev); + + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080); +} + +static void rtl8125a_config_eee_phy(struct phy_device *phydev) +{ + rtl8168h_config_eee_phy(phydev); + + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); +} + +static void rtl8125b_config_eee_phy(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000); +} + +static void rtl8169s_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x06, 0x006e }, + { 0x08, 0x0708 }, + { 0x15, 0x4000 }, + { 0x18, 0x65c7 }, + + { 0x1f, 0x0001 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x0000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf60 }, + { 0x01, 0x0140 }, + { 0x00, 0x0077 }, + { 0x04, 0x7800 }, + { 0x04, 0x7000 }, + + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf0f9 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xa000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf20 }, + { 0x01, 0x0140 }, + { 0x00, 0x00bb }, + { 0x04, 0xb800 }, + { 0x04, 0xb000 }, + + { 0x03, 0xdf41 }, + { 0x02, 0xdc60 }, + { 0x01, 0x6340 }, + { 0x00, 0x007d }, + { 0x04, 0xd800 }, + { 0x04, 0xd000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x100a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + + { 0x1f, 0x0000 }, + { 0x0b, 0x0000 }, + { 0x00, 
0x9200 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0002, 0x01, 0x90d0); +} + +static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x14, 0xfb54 }, + { 0x18, 0xf5c7 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0x8480 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x18, 0x67c7 }, + { 0x04, 0x2000 }, + { 0x03, 0x002f }, + { 0x02, 0x4360 }, + { 0x01, 0x0109 }, + { 0x00, 0x3022 }, + { 0x04, 0x2800 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write(phydev, 0x1f, 0x0001); + phy_set_bits(phydev, 0x16, BIT(0)); + phy_write(phydev, 0x10, 0xf41b); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0001, 0x10, 0xf41b); +} + +static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write(phydev, 0x1d, 0x0f00); + phy_write_paged(phydev, 0x0002, 0x0c, 0x1ec8); +} + +static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); + phy_write_paged(phydev, 0x0001, 0x1d, 0x3d98); +} + +static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1f, 0x0002 }, + { 0x00, 0x88d4 }, + { 0x01, 0x82b1 }, + { 0x03, 0x7002 }, + { 0x08, 0x9e30 }, + { 0x09, 0x01f0 }, + { 0x0a, 0x5500 }, + { 0x0c, 0x00c8 }, + { 0x1f, 0x0003 }, + { 0x12, 0xc096 }, + { 0x16, 0x000a }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x09, 0x2000 }, + { 0x09, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x14, 
BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x0761 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x16, BIT(0)); + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x5461 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x16, BIT(0)); + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } +}; + +static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + { 0x1f, 0x0002 } +}; + +static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp, + struct phy_device *phydev, + u16 val) +{ + u16 reg_val; + + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x001b); + reg_val = phy_read(phydev, 0x06); + phy_write(phydev, 0x1f, 0x0000); + + if (reg_val != val) + phydev_warn(phydev, "chipset not ready for firmware\n"); + else + r8169_apply_firmware(tp); +} + +static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0); + + /* + * Rx Error Issue + * Fine Tune Switching regulator parameter + */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x0b, 0x00ef, 0x0010); + phy_modify(phydev, 0x0c, 0x5d00, 0xa200); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + int val; + + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1); + + val = phy_read(phydev, 0x0d); + + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + phy_write(phydev, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + phy_write(phydev, 0x0d, val | set[i]); + } + } else { + phy_write_paged(phydev, 0x0002, 0x05, 0x6662); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x6662); + } + + /* RSET couple improve */ + phy_write(phydev, 0x1f, 0x0002); + phy_set_bits(phydev, 0x0d, 0x0300); + 
phy_set_bits(phydev, 0x0f, 0x0010); + + /* Fine tune PLL performance */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x02, 0x0600, 0x0100); + phy_clear_bits(phydev, 0x03, 0xe000); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168d_apply_firmware_cond(tp, phydev, 0xbf00); +} + +static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + int val; + + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1); + + val = phy_read(phydev, 0x0d); + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + phy_write(phydev, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + phy_write(phydev, 0x0d, val | set[i]); + } + } else { + phy_write_paged(phydev, 0x0002, 0x05, 0x2642); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x2642); + } + + /* Fine tune PLL performance */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x02, 0x0600, 0x0100); + phy_clear_bits(phydev, 0x03, 0xe000); + phy_write(phydev, 0x1f, 0x0000); + + /* Switching regulator Slew rate */ + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0017); + + rtl8168d_apply_firmware_cond(tp, phydev, 0xb300); +} + +static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x10, 0x0008 }, + { 0x0d, 0x006c }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0xa4d8 }, + { 0x09, 0x281c }, + { 0x07, 0x2883 }, + { 0x0a, 0x6b35 }, + { 0x1d, 0x3da4 }, + { 0x1c, 0xeffd }, + { 0x14, 0x7f52 }, + { 0x18, 0x7fc6 }, + { 0x08, 0x0601 }, + { 0x06, 0x4063 }, + { 0x10, 0xf074 }, + { 0x1f, 0x0003 }, + { 0x13, 0x0789 }, + { 0x12, 0xf4bd }, + { 0x1a, 0x04fd }, + { 0x14, 0x84b0 }, + { 0x1f, 0x0000 }, + { 0x00, 0x9200 }, + + { 0x1f, 0x0005 }, + { 0x01, 0x0340 }, + { 0x1f, 0x0001 }, + { 0x04, 0x4000 }, + { 0x03, 0x1d21 }, + { 0x02, 0x0c32 }, + { 0x01, 0x0200 }, + { 0x00, 0x5554 }, + { 0x04, 0x4800 }, + { 0x04, 0x4000 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + { 0x1f, 0x0000 }, + }; + + rtl_writephy_batch(phydev, phy_reg_init); + r8168d_modify_extpage(phydev, 0x0023, 0x16, 0xffff, 0x0000); +} + +static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0001, 0x17, 0x0cc0); + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0xffff, 0x0040); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0001 }, + { 0x0b, 0x6c20 }, + { 0x07, 0x2872 }, + { 0x1c, 0xefff }, + { 0x1f, 0x0003 }, + { 0x14, 0x6420 }, + { 0x1f, 0x0000 }, + }; + + r8169_apply_firmware(tp); + + /* Enable Delay cap */ + r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896); + + rtl_writephy_batch(phydev, phy_reg_init); + + /* Update PFM & 10M TX idle timer */ + r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919); + + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + + /* DCO enable for 10M IDLE Power */ + r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006); + + /* For impedance matching */ + 
phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050); + phy_set_bits(phydev, 0x14, BIT(15)); + + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000); + + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000); + phy_write_paged(phydev, 0x0006, 0x00, 0x5a00); + + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000); +} + +static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + /* Enable Delay cap */ + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Green Setting */ + r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222); + r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000); + r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000); + + /* For 4-corner performance improve */ + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x8b80); + phy_set_bits(phydev, 0x17, 0x0006); + phy_write(phydev, 0x1f, 0x0000); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); + + /* improve 10M EEE waveform */ + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); + + rtl8168f_config_eee_phy(phydev); + + /* Green feature */ + phy_write(phydev, 0x1f, 0x0003); + phy_set_bits(phydev, 0x19, BIT(0)); + phy_set_bits(phydev, 0x10, BIT(10)); + phy_write(phydev, 0x1f, 0x0000); + phy_modify_paged(phydev, 0x0005, 0x01, 0, BIT(8)); +} + +static void rtl8168f_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* For 4-corner performance improve */ + r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); + + /* Improve 10M EEE waveform */ + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + + rtl8168f_config_eee_phy(phydev); +} + +static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); + + rtl8168f_hw_phy_config(tp, phydev); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); +} + +static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp, phydev); +} + +static void rtl8411_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp, phydev); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); + + /* Channel 
estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); + + /* Modify green table for giga */ + r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100); + r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000); + + /* uc same-seed solution */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000); + + /* Green feature */ + phy_write(phydev, 0x1f, 0x0003); + phy_clear_bits(phydev, 0x19, BIT(0)); + phy_clear_bits(phydev, 0x10, BIT(10)); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168g_disable_aldps(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a43, 0x10, BIT(2), 0); +} + +static void rtl8168g_enable_gphy_10m(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11)); +} + +static void rtl8168g_phy_adjust_10m_aldps(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0); + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6)); + r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000); + phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003); +} + +static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + int ret; + + r8169_apply_firmware(tp); + + ret = phy_read_paged(phydev, 0x0a46, 0x10); + if (ret & BIT(8)) + phy_modify_paged(phydev, 0x0bcc, 0x12, BIT(15), 0); + else + phy_modify_paged(phydev, 0x0bcc, 0x12, 0, BIT(15)); + + ret = phy_read_paged(phydev, 0x0a46, 0x13); + if (ret & BIT(8)) + phy_modify_paged(phydev, 0x0c41, 0x15, 0, BIT(1)); + else + phy_modify_paged(phydev, 0x0c41, 0x15, BIT(1), 0); + + /* Enable PHY auto speed down */ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); + + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* EEE auto-fallback function */ + phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2)); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + /* Improve SWR Efficiency */ + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x5065); + phy_write(phydev, 0x14, 0xd065); + phy_write(phydev, 0x1f, 0x0bc8); + phy_write(phydev, 0x11, 0x5655); + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x14, 0x9065); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + u16 dout_tapbin; + u32 data; + + r8169_apply_firmware(tp); + + /* CHN EST parameters 
adjust - giga master */ + r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000); + r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000); + r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500); + r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00); + + /* CHN EST parameters adjust - giga slave */ + r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000); + r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000); + + /* CHN EST parameters adjust - fnet */ + r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200); + r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500); + r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00); + + /* enable R-tune & PGA-retune function */ + dout_tapbin = 0; + data = phy_read_paged(phydev, 0x0a46, 0x13); + data &= 3; + data <<= 2; + dout_tapbin |= data; + data = phy_read_paged(phydev, 0x0a46, 0x12); + data &= 0xc000; + data >>= 14; + dout_tapbin |= data; + dout_tapbin = ~(dout_tapbin ^ 0x08); + dout_tapbin <<= 12; + dout_tapbin &= 0xf000; + + r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800); + phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002); + + rtl8168g_enable_gphy_10m(phydev); + + /* SAR ADC performance */ + phy_modify_paged(phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14)); + + r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000); + + /* disable phy pfm mode */ + phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0); + + rtl8168g_disable_aldps(phydev); + rtl8168h_config_eee_phy(phydev); +} + +static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + u16 ioffset, rlen; + u32 data; + + r8169_apply_firmware(tp); + + /* CHIN EST parameter update */ + r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a); + + /* enable R-tune & PGA-retune function */ + r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800); + phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002); + + rtl8168g_enable_gphy_10m(phydev); + + ioffset = rtl8168h_2_get_adc_bias_ioffset(tp); + if (ioffset != 0xffff) + phy_write_paged(phydev, 0x0bcf, 0x16, ioffset); + + /* Modify rlen (TX LPF corner frequency) level */ + data = phy_read_paged(phydev, 0x0bcd, 0x16); + data &= 0x000f; + rlen = 0; + if (data > 3) + rlen = data - 3; + data = rlen | (rlen << 4) | (rlen << 8) | (rlen << 12); + phy_write_paged(phydev, 0x0bcd, 0x17, data); + + /* disable phy pfm mode */ + phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Enable PHY auto speed down */ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); + + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* Enable EEE auto-fallback function */ + phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2)); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + /* set rg_sel_sdm_rate */ + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + rtl8168g_disable_aldps(phydev); + 
rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + /* Set rg_sel_sdm_rate */ + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + /* Channel estimation parameters */ + r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00); + r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00); + r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500); + r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00); + r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800); + r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400); + r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500); + r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800); + r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00); + r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500); + r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100); + r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200); + r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400); + r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00); + r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00); + r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00); + r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00); + r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00); + r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400); + + /* Force PWM-mode */ + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x5065); + phy_write(phydev, 0x14, 0xd065); + phy_write(phydev, 0x1f, 0x0bc8); + phy_write(phydev, 0x12, 0x00ed); + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x14, 0x9065); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8117_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* CHN EST parameters adjust - fnet */ + r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800); + r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00); + r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000); + + r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000); + r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00); + r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600); + r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000); + r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800); + r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000); + r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00); + r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000); + r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800); + r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200); + r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800); + r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00); + r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300); + + r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800); + + rtl8168g_enable_gphy_10m(phydev); + + r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400); + + rtl8168g_disable_aldps(phydev); + rtl8168h_config_eee_phy(phydev); +} + +static void rtl8102e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0003 
}, + { 0x08, 0x441d }, + { 0x01, 0x9100 }, + { 0x1f, 0x0000 } + }; + + phy_set_bits(phydev, 0x11, BIT(12)); + phy_set_bits(phydev, 0x19, BIT(13)); + phy_set_bits(phydev, 0x10, BIT(15)); + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8401_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_set_bits(phydev, 0x11, BIT(12)); + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0003); +} + +static void rtl8105e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Disable ALDPS before ram code */ + phy_write(phydev, 0x18, 0x0310); + msleep(100); + + r8169_apply_firmware(tp); + + phy_write_paged(phydev, 0x0005, 0x1a, 0x0000); + phy_write_paged(phydev, 0x0004, 0x1c, 0x0000); + phy_write_paged(phydev, 0x0001, 0x15, 0x7701); +} + +static void rtl8402_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Disable ALDPS before setting firmware */ + phy_write(phydev, 0x18, 0x0310); + msleep(20); + + r8169_apply_firmware(tp); + + /* EEE setting */ + phy_write(phydev, 0x1f, 0x0004); + phy_write(phydev, 0x10, 0x401f); + phy_write(phydev, 0x19, 0x7030); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8106e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0004 }, + { 0x10, 0xc07f }, + { 0x19, 0x7030 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + phy_write(phydev, 0x18, 0x0310); + msleep(100); + + r8169_apply_firmware(tp); + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8125_legacy_force_mode(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa5b, 0x12, BIT(15), 0); +} + +static void rtl8125a_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084); + phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); + phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006); + phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006); + phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100); + phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000); + phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400); + phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff); + phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff); + + r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400); + r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300); + r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00); + r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000); + r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500); + r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300); + r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000); + r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000); + r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500); + r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00); + r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100); + r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038); + r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6); + + phy_write_paged(phydev, 0xbc3, 0x12, 0x5555); + phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00); + phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000); + rtl8168g_enable_gphy_10m(phydev); + + rtl8125a_config_eee_phy(phydev); +} + +static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + int i; + + phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); + phy_modify_paged(phydev, 
0xad1, 0x13, 0x03ff, 0x03ff); + phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006); + phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000); + phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002); + phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044); + phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000); + phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002); + phy_write_paged(phydev, 0xad4, 0x16, 0x00a8); + phy_write_paged(phydev, 0xac5, 0x16, 0x01ff); + phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80a2); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x16, 0x809c); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x1f, 0x0000); + + phy_write(phydev, 0x1f, 0x0a43); + phy_write(phydev, 0x13, 0x81B3); + phy_write(phydev, 0x14, 0x0043); + phy_write(phydev, 0x14, 0x00A7); + phy_write(phydev, 0x14, 0x00D6); + phy_write(phydev, 0x14, 0x00EC); + phy_write(phydev, 0x14, 0x00F6); + phy_write(phydev, 0x14, 0x00FB); + phy_write(phydev, 0x14, 0x00FD); + phy_write(phydev, 0x14, 0x00FF); + phy_write(phydev, 0x14, 0x00BB); + phy_write(phydev, 0x14, 0x0058); + phy_write(phydev, 0x14, 0x0029); + phy_write(phydev, 0x14, 0x0013); + phy_write(phydev, 0x14, 0x0009); + phy_write(phydev, 0x14, 0x0004); + phy_write(phydev, 0x14, 0x0002); + for (i = 0; i < 25; i++) + phy_write(phydev, 0x14, 0x0000); + phy_write(phydev, 0x1f, 0x0000); + + r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020F); + r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843); + + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000); + + r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100); + + phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00); + phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020); + phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000); + rtl8168g_enable_gphy_10m(phydev); + + rtl8125a_config_eee_phy(phydev); +} + +static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800); + phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090); + phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80f5); + phy_write(phydev, 0x17, 0x760e); + phy_write(phydev, 0x16, 0x8107); + phy_write(phydev, 0x17, 0x360e); + phy_write(phydev, 0x16, 0x8551); + phy_modify(phydev, 0x17, 0xff00, 0x0800); + phy_write(phydev, 0x1f, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000); + phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300); + + r8168g_phy_param(phydev, 0x8044, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x804a, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8050, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8056, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x805c, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8062, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8068, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x806e, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8074, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x807a, 0xffff, 0x2417); + + phy_modify_paged(phydev, 0xa4c, 0x15, 0x0000, 0x0040); + phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000); + + rtl8125_legacy_force_mode(phydev); + rtl8125b_config_eee_phy(phydev); +} + +void 
r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, + enum mac_version ver) +{ + static const rtl_phy_cfg_fct phy_configs[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config, + [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config, + [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config, + /* PCI-E devices. */ + [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config, + [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config, + [RTL_GIGA_MAC_VER_16] = NULL, + [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config, + [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config, + [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config, + [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config, + [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config, + [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config, + [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config, + [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_31] = NULL, + [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config, + [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config, + [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config, + [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config, + [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config, + [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config, + [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config, + [RTL_GIGA_MAC_VER_41] = NULL, + [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config, + [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config, + [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config, + [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config, + [RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config, + [RTL_GIGA_MAC_VER_60] = rtl8125a_1_hw_phy_config, + [RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config, + [RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config, + }; + + if (phy_configs[ver]) + phy_configs[ver](tp, phydev); +} diff --git a/devices/r8169/r8169_phy_config-5.15-ethercat.c b/devices/r8169/r8169_phy_config-5.15-ethercat.c new file mode 100644 index 00000000..c8fef192 --- /dev/null +++ b/devices/r8169/r8169_phy_config-5.15-ethercat.c @@ -0,0 +1,1369 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * r8169_phy_config.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include <linux/delay.h> +#include <linux/phy.h> + +#include "r8169-5.15-ethercat.h" + +typedef void (*rtl_phy_cfg_fct)(struct rtl8169_private *tp, + struct phy_device *phydev); + +static void r8168d_modify_extpage(struct phy_device *phydev, int extpage, + int reg, u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0007); + + __phy_write(phydev, 0x1e, extpage); + __phy_modify(phydev, reg, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +static void r8168d_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0005); + + __phy_write(phydev, 0x05, parm); + __phy_modify(phydev, 0x06, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +static void r8168g_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0a43); + + __phy_write(phydev, 0x13, parm); + __phy_modify(phydev, 0x14, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +struct phy_reg { + u16 reg; + u16 val; +}; + +static void __rtl_writephy_batch(struct phy_device *phydev, + const struct phy_reg *regs, int len) +{ + phy_lock_mdio_bus(phydev); + + while (len-- > 0) { + __phy_write(phydev, regs->reg, regs->val); + regs++; + } + + phy_unlock_mdio_bus(phydev); +} + +#define rtl_writephy_batch(p, a) __rtl_writephy_batch(p, a, ARRAY_SIZE(a)) + +static void rtl8168f_config_eee_phy(struct phy_device *phydev) +{ + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8)); + r8168d_phy_param(phydev, 0x8b85, 0, BIT(13)); +} + +static void rtl8168g_config_eee_phy(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a43, 0x11, 0, BIT(4)); +} + +static void rtl8168h_config_eee_phy(struct phy_device *phydev) +{ + rtl8168g_config_eee_phy(phydev); + + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080); +} + +static void rtl8125a_config_eee_phy(struct phy_device *phydev) +{ + rtl8168h_config_eee_phy(phydev); + + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); +} + +static void rtl8125b_config_eee_phy(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000); +} + +static void rtl8169s_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x06, 0x006e }, + { 0x08, 0x0708 }, + { 0x15, 0x4000 }, + { 0x18, 0x65c7 }, + + { 0x1f, 0x0001 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x0000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf60 }, + { 0x01, 0x0140 }, + { 0x00, 0x0077 }, + { 0x04, 0x7800 }, + { 0x04, 0x7000 }, + + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf0f9 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xa000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf20 }, + { 0x01, 0x0140 }, + { 0x00, 0x00bb }, + { 0x04, 0xb800 }, + { 0x04, 0xb000 }, + + { 0x03, 0xdf41 }, + { 0x02, 0xdc60 }, + { 0x01, 0x6340 }, + { 0x00, 0x007d }, + { 0x04, 0xd800 }, + { 0x04, 0xd000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x100a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + + { 0x1f, 0x0000 }, + { 0x0b,
0x0000 }, + { 0x00, 0x9200 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0002, 0x01, 0x90d0); +} + +static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x14, 0xfb54 }, + { 0x18, 0xf5c7 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0x8480 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x18, 0x67c7 }, + { 0x04, 0x2000 }, + { 0x03, 0x002f }, + { 0x02, 0x4360 }, + { 0x01, 0x0109 }, + { 0x00, 0x3022 }, + { 0x04, 0x2800 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write(phydev, 0x1f, 0x0001); + phy_set_bits(phydev, 0x16, BIT(0)); + phy_write(phydev, 0x10, 0xf41b); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0001, 0x10, 0xf41b); +} + +static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write(phydev, 0x1d, 0x0f00); + phy_write_paged(phydev, 0x0002, 0x0c, 0x1ec8); +} + +static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); + phy_write_paged(phydev, 0x0001, 0x1d, 0x3d98); +} + +static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1f, 0x0002 }, + { 0x00, 0x88d4 }, + { 0x01, 0x82b1 }, + { 0x03, 0x7002 }, + { 0x08, 0x9e30 }, + { 0x09, 0x01f0 }, + { 0x0a, 0x5500 }, + { 0x0c, 0x00c8 }, + { 0x1f, 0x0003 }, + { 0x12, 0xc096 }, + { 0x16, 0x000a }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x09, 0x2000 }, + { 0x09, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + 
phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x0761 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x16, BIT(0)); + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x5461 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x16, BIT(0)); + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } +}; + +static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + { 0x1f, 0x0002 } +}; + +static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp, + struct phy_device *phydev, + u16 val) +{ + u16 reg_val; + + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x001b); + reg_val = phy_read(phydev, 0x06); + phy_write(phydev, 0x1f, 0x0000); + + if (reg_val != val) + phydev_warn(phydev, "chipset not ready for firmware\n"); + else + r8169_apply_firmware(tp); +} + +static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0); + + /* + * Rx Error Issue + * Fine Tune Switching regulator parameter + */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x0b, 0x00ef, 0x0010); + phy_modify(phydev, 0x0c, 0x5d00, 0xa200); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + int val; + + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1); + + val = phy_read(phydev, 0x0d); + + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + phy_write(phydev, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + phy_write(phydev, 0x0d, val | set[i]); + } + } else { + phy_write_paged(phydev, 0x0002, 0x05, 0x6662); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x6662); + } + + /* RSET couple improve */ + phy_write(phydev, 0x1f, 0x0002); + phy_set_bits(phydev, 
0x0d, 0x0300); + phy_set_bits(phydev, 0x0f, 0x0010); + + /* Fine tune PLL performance */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x02, 0x0600, 0x0100); + phy_clear_bits(phydev, 0x03, 0xe000); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168d_apply_firmware_cond(tp, phydev, 0xbf00); +} + +static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + int val; + + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1); + + val = phy_read(phydev, 0x0d); + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + phy_write(phydev, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + phy_write(phydev, 0x0d, val | set[i]); + } + } else { + phy_write_paged(phydev, 0x0002, 0x05, 0x2642); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x2642); + } + + /* Fine tune PLL performance */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x02, 0x0600, 0x0100); + phy_clear_bits(phydev, 0x03, 0xe000); + phy_write(phydev, 0x1f, 0x0000); + + /* Switching regulator Slew rate */ + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0017); + + rtl8168d_apply_firmware_cond(tp, phydev, 0xb300); +} + +static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x10, 0x0008 }, + { 0x0d, 0x006c }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0xa4d8 }, + { 0x09, 0x281c }, + { 0x07, 0x2883 }, + { 0x0a, 0x6b35 }, + { 0x1d, 0x3da4 }, + { 0x1c, 0xeffd }, + { 0x14, 0x7f52 }, + { 0x18, 0x7fc6 }, + { 0x08, 0x0601 }, + { 0x06, 0x4063 }, + { 0x10, 0xf074 }, + { 0x1f, 0x0003 }, + { 0x13, 0x0789 }, + { 0x12, 0xf4bd }, + { 0x1a, 0x04fd }, + { 0x14, 0x84b0 }, + { 0x1f, 0x0000 }, + { 0x00, 0x9200 }, + + { 0x1f, 0x0005 }, + { 0x01, 0x0340 }, + { 0x1f, 0x0001 }, + { 0x04, 0x4000 }, + { 0x03, 0x1d21 }, + { 0x02, 0x0c32 }, + { 0x01, 0x0200 }, + { 0x00, 0x5554 }, + { 0x04, 0x4800 }, + { 0x04, 0x4000 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + { 0x1f, 0x0000 }, + }; + + rtl_writephy_batch(phydev, phy_reg_init); + r8168d_modify_extpage(phydev, 0x0023, 0x16, 0xffff, 0x0000); +} + +static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0001, 0x17, 0x0cc0); + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0xffff, 0x0040); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0001 }, + { 0x0b, 0x6c20 }, + { 0x07, 0x2872 }, + { 0x1c, 0xefff }, + { 0x1f, 0x0003 }, + { 0x14, 0x6420 }, + { 0x1f, 0x0000 }, + }; + + r8169_apply_firmware(tp); + + /* Enable Delay cap */ + r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896); + + rtl_writephy_batch(phydev, phy_reg_init); + + /* Update PFM & 10M TX idle timer */ + r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919); + + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + + /* DCO enable for 10M IDLE Power */ + r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006); + + /* For impedance matching */ 
+ phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050); + phy_set_bits(phydev, 0x14, BIT(15)); + + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000); + + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000); + phy_write_paged(phydev, 0x0006, 0x00, 0x5a00); + + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000); +} + +static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + /* Enable Delay cap */ + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Green Setting */ + r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222); + r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000); + r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000); + + /* For 4-corner performance improve */ + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x8b80); + phy_set_bits(phydev, 0x17, 0x0006); + phy_write(phydev, 0x1f, 0x0000); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); + + /* improve 10M EEE waveform */ + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); + + rtl8168f_config_eee_phy(phydev); + + /* Green feature */ + phy_write(phydev, 0x1f, 0x0003); + phy_set_bits(phydev, 0x19, BIT(0)); + phy_set_bits(phydev, 0x10, BIT(10)); + phy_write(phydev, 0x1f, 0x0000); + phy_modify_paged(phydev, 0x0005, 0x01, 0, BIT(8)); +} + +static void rtl8168f_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* For 4-corner performance improve */ + r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); + + /* Improve 10M EEE waveform */ + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + + rtl8168f_config_eee_phy(phydev); +} + +static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); + + rtl8168f_hw_phy_config(tp, phydev); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); +} + +static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp, phydev); +} + +static void rtl8411_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp, phydev); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); + + /* Channel 
estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); + + /* Modify green table for giga */ + r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100); + r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000); + + /* uc same-seed solution */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000); + + /* Green feature */ + phy_write(phydev, 0x1f, 0x0003); + phy_clear_bits(phydev, 0x19, BIT(0)); + phy_clear_bits(phydev, 0x10, BIT(10)); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168g_disable_aldps(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a43, 0x10, BIT(2), 0); +} + +static void rtl8168g_enable_gphy_10m(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11)); +} + +static void rtl8168g_phy_adjust_10m_aldps(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0); + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6)); + r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000); + phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003); +} + +static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + int ret; + + r8169_apply_firmware(tp); + + ret = phy_read_paged(phydev, 0x0a46, 0x10); + if (ret & BIT(8)) + phy_modify_paged(phydev, 0x0bcc, 0x12, BIT(15), 0); + else + phy_modify_paged(phydev, 0x0bcc, 0x12, 0, BIT(15)); + + ret = phy_read_paged(phydev, 0x0a46, 0x13); + if (ret & BIT(8)) + phy_modify_paged(phydev, 0x0c41, 0x15, 0, BIT(1)); + else + phy_modify_paged(phydev, 0x0c41, 0x15, BIT(1), 0); + + /* Enable PHY auto speed down */ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); + + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* EEE auto-fallback function */ + phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2)); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + /* Improve SWR Efficiency */ + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x5065); + phy_write(phydev, 0x14, 0xd065); + phy_write(phydev, 0x1f, 0x0bc8); + phy_write(phydev, 0x11, 0x5655); + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x14, 0x9065); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + u16 dout_tapbin; + u32 data; + + r8169_apply_firmware(tp); + + /* CHN EST parameters 
adjust - giga master */ + r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000); + r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000); + r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500); + r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00); + + /* CHN EST parameters adjust - giga slave */ + r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000); + r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000); + + /* CHN EST parameters adjust - fnet */ + r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200); + r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500); + r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00); + + /* enable R-tune & PGA-retune function */ + dout_tapbin = 0; + data = phy_read_paged(phydev, 0x0a46, 0x13); + data &= 3; + data <<= 2; + dout_tapbin |= data; + data = phy_read_paged(phydev, 0x0a46, 0x12); + data &= 0xc000; + data >>= 14; + dout_tapbin |= data; + dout_tapbin = ~(dout_tapbin ^ 0x08); + dout_tapbin <<= 12; + dout_tapbin &= 0xf000; + + r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800); + phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002); + + rtl8168g_enable_gphy_10m(phydev); + + /* SAR ADC performance */ + phy_modify_paged(phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14)); + + r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000); + + /* disable phy pfm mode */ + phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0); + + rtl8168g_disable_aldps(phydev); + rtl8168h_config_eee_phy(phydev); +} + +static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + u16 ioffset, rlen; + u32 data; + + r8169_apply_firmware(tp); + + /* CHIN EST parameter update */ + r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a); + + /* enable R-tune & PGA-retune function */ + r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800); + phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002); + + rtl8168g_enable_gphy_10m(phydev); + + ioffset = rtl8168h_2_get_adc_bias_ioffset(tp); + if (ioffset != 0xffff) + phy_write_paged(phydev, 0x0bcf, 0x16, ioffset); + + /* Modify rlen (TX LPF corner frequency) level */ + data = phy_read_paged(phydev, 0x0bcd, 0x16); + data &= 0x000f; + rlen = 0; + if (data > 3) + rlen = data - 3; + data = rlen | (rlen << 4) | (rlen << 8) | (rlen << 12); + phy_write_paged(phydev, 0x0bcd, 0x17, data); + + /* disable phy pfm mode */ + phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Enable PHY auto speed down */ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); + + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* Enable EEE auto-fallback function */ + phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2)); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + /* set rg_sel_sdm_rate */ + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + rtl8168g_disable_aldps(phydev); + 
rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + /* Set rg_sel_sdm_rate */ + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + /* Channel estimation parameters */ + r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00); + r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00); + r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500); + r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00); + r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800); + r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400); + r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500); + r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800); + r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00); + r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500); + r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100); + r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200); + r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400); + r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00); + r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00); + r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00); + r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00); + r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00); + r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400); + + /* Force PWM-mode */ + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x5065); + phy_write(phydev, 0x14, 0xd065); + phy_write(phydev, 0x1f, 0x0bc8); + phy_write(phydev, 0x12, 0x00ed); + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x14, 0x9065); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8117_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* CHN EST parameters adjust - fnet */ + r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800); + r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00); + r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000); + + r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000); + r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00); + r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600); + r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000); + r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800); + r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000); + r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00); + r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000); + r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800); + r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200); + r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800); + r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00); + r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300); + + r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800); + + rtl8168g_enable_gphy_10m(phydev); + + r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400); + + rtl8168g_disable_aldps(phydev); + rtl8168h_config_eee_phy(phydev); +} + +static void rtl8102e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0003 
}, + { 0x08, 0x441d }, + { 0x01, 0x9100 }, + { 0x1f, 0x0000 } + }; + + phy_set_bits(phydev, 0x11, BIT(12)); + phy_set_bits(phydev, 0x19, BIT(13)); + phy_set_bits(phydev, 0x10, BIT(15)); + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8401_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_set_bits(phydev, 0x11, BIT(12)); + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0003); +} + +static void rtl8105e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Disable ALDPS before ram code */ + phy_write(phydev, 0x18, 0x0310); + msleep(100); + + r8169_apply_firmware(tp); + + phy_write_paged(phydev, 0x0005, 0x1a, 0x0000); + phy_write_paged(phydev, 0x0004, 0x1c, 0x0000); + phy_write_paged(phydev, 0x0001, 0x15, 0x7701); +} + +static void rtl8402_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Disable ALDPS before setting firmware */ + phy_write(phydev, 0x18, 0x0310); + msleep(20); + + r8169_apply_firmware(tp); + + /* EEE setting */ + phy_write(phydev, 0x1f, 0x0004); + phy_write(phydev, 0x10, 0x401f); + phy_write(phydev, 0x19, 0x7030); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8106e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0004 }, + { 0x10, 0xc07f }, + { 0x19, 0x7030 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + phy_write(phydev, 0x18, 0x0310); + msleep(100); + + r8169_apply_firmware(tp); + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8125_legacy_force_mode(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa5b, 0x12, BIT(15), 0); +} + +static void rtl8125a_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084); + phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); + phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006); + phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006); + phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100); + phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000); + phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400); + phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff); + phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff); + + r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400); + r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300); + r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00); + r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000); + r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500); + r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300); + r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000); + r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000); + r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500); + r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00); + r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100); + r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038); + r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6); + + phy_write_paged(phydev, 0xbc3, 0x12, 0x5555); + phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00); + phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000); + rtl8168g_enable_gphy_10m(phydev); + + rtl8125a_config_eee_phy(phydev); +} + +static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + int i; + + phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); + phy_modify_paged(phydev, 
0xad1, 0x13, 0x03ff, 0x03ff); + phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006); + phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000); + phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002); + phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044); + phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000); + phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002); + phy_write_paged(phydev, 0xad4, 0x16, 0x00a8); + phy_write_paged(phydev, 0xac5, 0x16, 0x01ff); + phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80a2); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x16, 0x809c); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x1f, 0x0000); + + phy_write(phydev, 0x1f, 0x0a43); + phy_write(phydev, 0x13, 0x81B3); + phy_write(phydev, 0x14, 0x0043); + phy_write(phydev, 0x14, 0x00A7); + phy_write(phydev, 0x14, 0x00D6); + phy_write(phydev, 0x14, 0x00EC); + phy_write(phydev, 0x14, 0x00F6); + phy_write(phydev, 0x14, 0x00FB); + phy_write(phydev, 0x14, 0x00FD); + phy_write(phydev, 0x14, 0x00FF); + phy_write(phydev, 0x14, 0x00BB); + phy_write(phydev, 0x14, 0x0058); + phy_write(phydev, 0x14, 0x0029); + phy_write(phydev, 0x14, 0x0013); + phy_write(phydev, 0x14, 0x0009); + phy_write(phydev, 0x14, 0x0004); + phy_write(phydev, 0x14, 0x0002); + for (i = 0; i < 25; i++) + phy_write(phydev, 0x14, 0x0000); + phy_write(phydev, 0x1f, 0x0000); + + r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020F); + r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843); + + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000); + + r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100); + + phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00); + phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020); + phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000); + rtl8168g_enable_gphy_10m(phydev); + + rtl8125a_config_eee_phy(phydev); +} + +static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800); + phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090); + phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80f5); + phy_write(phydev, 0x17, 0x760e); + phy_write(phydev, 0x16, 0x8107); + phy_write(phydev, 0x17, 0x360e); + phy_write(phydev, 0x16, 0x8551); + phy_modify(phydev, 0x17, 0xff00, 0x0800); + phy_write(phydev, 0x1f, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000); + phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300); + + r8168g_phy_param(phydev, 0x8044, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x804a, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8050, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8056, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x805c, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8062, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8068, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x806e, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8074, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x807a, 0xffff, 0x2417); + + phy_modify_paged(phydev, 0xa4c, 0x15, 0x0000, 0x0040); + phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000); + + rtl8125_legacy_force_mode(phydev); + rtl8125b_config_eee_phy(phydev); +} + +void 
r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, + enum mac_version ver) +{ + static const rtl_phy_cfg_fct phy_configs[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config, + [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config, + [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config, + /* PCI-E devices. */ + [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config, + [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config, + [RTL_GIGA_MAC_VER_16] = NULL, + [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config, + [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config, + [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config, + [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config, + [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config, + [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config, + [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config, + [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_31] = NULL, + [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config, + [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config, + [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config, + [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config, + [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config, + [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config, + [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config, + [RTL_GIGA_MAC_VER_41] = NULL, + [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config, + [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config, + [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config, + [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config, + [RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config, + [RTL_GIGA_MAC_VER_60] = rtl8125a_1_hw_phy_config, + [RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config, + [RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config, + }; + + if (phy_configs[ver]) + phy_configs[ver](tp, phydev); +} diff --git a/devices/r8169/r8169_phy_config-5.15-orig.c b/devices/r8169/r8169_phy_config-5.15-orig.c new file mode 100644 index 00000000..50f0f621 --- /dev/null +++ b/devices/r8169/r8169_phy_config-5.15-orig.c @@ -0,0 +1,1369 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * r8169_phy_config.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +#include "r8169.h" + +typedef void (*rtl_phy_cfg_fct)(struct rtl8169_private *tp, + struct phy_device *phydev); + +static void r8168d_modify_extpage(struct phy_device *phydev, int extpage, + int reg, u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0007); + + __phy_write(phydev, 0x1e, extpage); + __phy_modify(phydev, reg, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +static void r8168d_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0005); + + __phy_write(phydev, 0x05, parm); + __phy_modify(phydev, 0x06, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +static void r8168g_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0a43); + + __phy_write(phydev, 0x13, parm); + __phy_modify(phydev, 0x14, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +struct phy_reg { + u16 reg; + u16 val; +}; + +static void __rtl_writephy_batch(struct phy_device *phydev, + const struct phy_reg *regs, int len) +{ + phy_lock_mdio_bus(phydev); + + while (len-- > 0) { + __phy_write(phydev, regs->reg, regs->val); + regs++; + } + + phy_unlock_mdio_bus(phydev); +} + +#define rtl_writephy_batch(p, a) __rtl_writephy_batch(p, a, ARRAY_SIZE(a)) + +static void rtl8168f_config_eee_phy(struct phy_device *phydev) +{ + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8)); + r8168d_phy_param(phydev, 0x8b85, 0, BIT(13)); +} + +static void rtl8168g_config_eee_phy(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a43, 0x11, 0, BIT(4)); +} + +static void rtl8168h_config_eee_phy(struct phy_device *phydev) +{ + rtl8168g_config_eee_phy(phydev); + + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080); +} + +static void rtl8125a_config_eee_phy(struct phy_device *phydev) +{ + rtl8168h_config_eee_phy(phydev); + + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); +} + +static void rtl8125b_config_eee_phy(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000); +} + +static void rtl8169s_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x06, 0x006e }, + { 0x08, 0x0708 }, + { 0x15, 0x4000 }, + { 0x18, 0x65c7 }, + + { 0x1f, 0x0001 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x0000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf60 }, + { 0x01, 0x0140 }, + { 0x00, 0x0077 }, + { 0x04, 0x7800 }, + { 0x04, 0x7000 }, + + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf0f9 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xa000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf20 }, + { 0x01, 0x0140 }, + { 0x00, 0x00bb }, + { 0x04, 0xb800 }, + { 0x04, 0xb000 }, + + { 0x03, 0xdf41 }, + { 0x02, 0xdc60 }, + { 0x01, 0x6340 }, + { 0x00, 0x007d }, + { 0x04, 0xd800 }, + { 0x04, 0xd000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x100a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + + { 0x1f, 0x0000 }, + { 0x0b, 0x0000 }, + { 0x00, 
0x9200 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0002, 0x01, 0x90d0); +} + +static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x14, 0xfb54 }, + { 0x18, 0xf5c7 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0x8480 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x18, 0x67c7 }, + { 0x04, 0x2000 }, + { 0x03, 0x002f }, + { 0x02, 0x4360 }, + { 0x01, 0x0109 }, + { 0x00, 0x3022 }, + { 0x04, 0x2800 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write(phydev, 0x1f, 0x0001); + phy_set_bits(phydev, 0x16, BIT(0)); + phy_write(phydev, 0x10, 0xf41b); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0001, 0x10, 0xf41b); +} + +static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write(phydev, 0x1d, 0x0f00); + phy_write_paged(phydev, 0x0002, 0x0c, 0x1ec8); +} + +static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); + phy_write_paged(phydev, 0x0001, 0x1d, 0x3d98); +} + +static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1f, 0x0002 }, + { 0x00, 0x88d4 }, + { 0x01, 0x82b1 }, + { 0x03, 0x7002 }, + { 0x08, 0x9e30 }, + { 0x09, 0x01f0 }, + { 0x0a, 0x5500 }, + { 0x0c, 0x00c8 }, + { 0x1f, 0x0003 }, + { 0x12, 0xc096 }, + { 0x16, 0x000a }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x09, 0x2000 }, + { 0x09, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x14, 
BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x0761 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x16, BIT(0)); + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x5461 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x16, BIT(0)); + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } +}; + +static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + { 0x1f, 0x0002 } +}; + +static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp, + struct phy_device *phydev, + u16 val) +{ + u16 reg_val; + + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x001b); + reg_val = phy_read(phydev, 0x06); + phy_write(phydev, 0x1f, 0x0000); + + if (reg_val != val) + phydev_warn(phydev, "chipset not ready for firmware\n"); + else + r8169_apply_firmware(tp); +} + +static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0); + + /* + * Rx Error Issue + * Fine Tune Switching regulator parameter + */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x0b, 0x00ef, 0x0010); + phy_modify(phydev, 0x0c, 0x5d00, 0xa200); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + int val; + + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1); + + val = phy_read(phydev, 0x0d); + + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + phy_write(phydev, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + phy_write(phydev, 0x0d, val | set[i]); + } + } else { + phy_write_paged(phydev, 0x0002, 0x05, 0x6662); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x6662); + } + + /* RSET couple improve */ + phy_write(phydev, 0x1f, 0x0002); + phy_set_bits(phydev, 0x0d, 0x0300); + 
phy_set_bits(phydev, 0x0f, 0x0010); + + /* Fine tune PLL performance */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x02, 0x0600, 0x0100); + phy_clear_bits(phydev, 0x03, 0xe000); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168d_apply_firmware_cond(tp, phydev, 0xbf00); +} + +static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + int val; + + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1); + + val = phy_read(phydev, 0x0d); + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + phy_write(phydev, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + phy_write(phydev, 0x0d, val | set[i]); + } + } else { + phy_write_paged(phydev, 0x0002, 0x05, 0x2642); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x2642); + } + + /* Fine tune PLL performance */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x02, 0x0600, 0x0100); + phy_clear_bits(phydev, 0x03, 0xe000); + phy_write(phydev, 0x1f, 0x0000); + + /* Switching regulator Slew rate */ + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0017); + + rtl8168d_apply_firmware_cond(tp, phydev, 0xb300); +} + +static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x10, 0x0008 }, + { 0x0d, 0x006c }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0xa4d8 }, + { 0x09, 0x281c }, + { 0x07, 0x2883 }, + { 0x0a, 0x6b35 }, + { 0x1d, 0x3da4 }, + { 0x1c, 0xeffd }, + { 0x14, 0x7f52 }, + { 0x18, 0x7fc6 }, + { 0x08, 0x0601 }, + { 0x06, 0x4063 }, + { 0x10, 0xf074 }, + { 0x1f, 0x0003 }, + { 0x13, 0x0789 }, + { 0x12, 0xf4bd }, + { 0x1a, 0x04fd }, + { 0x14, 0x84b0 }, + { 0x1f, 0x0000 }, + { 0x00, 0x9200 }, + + { 0x1f, 0x0005 }, + { 0x01, 0x0340 }, + { 0x1f, 0x0001 }, + { 0x04, 0x4000 }, + { 0x03, 0x1d21 }, + { 0x02, 0x0c32 }, + { 0x01, 0x0200 }, + { 0x00, 0x5554 }, + { 0x04, 0x4800 }, + { 0x04, 0x4000 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + { 0x1f, 0x0000 }, + }; + + rtl_writephy_batch(phydev, phy_reg_init); + r8168d_modify_extpage(phydev, 0x0023, 0x16, 0xffff, 0x0000); +} + +static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0001, 0x17, 0x0cc0); + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0xffff, 0x0040); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0001 }, + { 0x0b, 0x6c20 }, + { 0x07, 0x2872 }, + { 0x1c, 0xefff }, + { 0x1f, 0x0003 }, + { 0x14, 0x6420 }, + { 0x1f, 0x0000 }, + }; + + r8169_apply_firmware(tp); + + /* Enable Delay cap */ + r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896); + + rtl_writephy_batch(phydev, phy_reg_init); + + /* Update PFM & 10M TX idle timer */ + r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919); + + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + + /* DCO enable for 10M IDLE Power */ + r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006); + + /* For impedance matching */ + 
phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050); + phy_set_bits(phydev, 0x14, BIT(15)); + + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000); + + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000); + phy_write_paged(phydev, 0x0006, 0x00, 0x5a00); + + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000); +} + +static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + /* Enable Delay cap */ + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Green Setting */ + r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222); + r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000); + r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000); + + /* For 4-corner performance improve */ + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x8b80); + phy_set_bits(phydev, 0x17, 0x0006); + phy_write(phydev, 0x1f, 0x0000); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); + + /* improve 10M EEE waveform */ + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); + + rtl8168f_config_eee_phy(phydev); + + /* Green feature */ + phy_write(phydev, 0x1f, 0x0003); + phy_set_bits(phydev, 0x19, BIT(0)); + phy_set_bits(phydev, 0x10, BIT(10)); + phy_write(phydev, 0x1f, 0x0000); + phy_modify_paged(phydev, 0x0005, 0x01, 0, BIT(8)); +} + +static void rtl8168f_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* For 4-corner performance improve */ + r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); + + /* Improve 10M EEE waveform */ + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + + rtl8168f_config_eee_phy(phydev); +} + +static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); + + rtl8168f_hw_phy_config(tp, phydev); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); +} + +static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp, phydev); +} + +static void rtl8411_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp, phydev); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); + + /* Channel 
estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); + + /* Modify green table for giga */ + r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100); + r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000); + + /* uc same-seed solution */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000); + + /* Green feature */ + phy_write(phydev, 0x1f, 0x0003); + phy_clear_bits(phydev, 0x19, BIT(0)); + phy_clear_bits(phydev, 0x10, BIT(10)); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168g_disable_aldps(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a43, 0x10, BIT(2), 0); +} + +static void rtl8168g_enable_gphy_10m(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11)); +} + +static void rtl8168g_phy_adjust_10m_aldps(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0); + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6)); + r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000); + phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003); +} + +static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + int ret; + + r8169_apply_firmware(tp); + + ret = phy_read_paged(phydev, 0x0a46, 0x10); + if (ret & BIT(8)) + phy_modify_paged(phydev, 0x0bcc, 0x12, BIT(15), 0); + else + phy_modify_paged(phydev, 0x0bcc, 0x12, 0, BIT(15)); + + ret = phy_read_paged(phydev, 0x0a46, 0x13); + if (ret & BIT(8)) + phy_modify_paged(phydev, 0x0c41, 0x15, 0, BIT(1)); + else + phy_modify_paged(phydev, 0x0c41, 0x15, BIT(1), 0); + + /* Enable PHY auto speed down */ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); + + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* EEE auto-fallback function */ + phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2)); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + /* Improve SWR Efficiency */ + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x5065); + phy_write(phydev, 0x14, 0xd065); + phy_write(phydev, 0x1f, 0x0bc8); + phy_write(phydev, 0x11, 0x5655); + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x14, 0x9065); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + u16 dout_tapbin; + u32 data; + + r8169_apply_firmware(tp); + + /* CHN EST parameters 
adjust - giga master */ + r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000); + r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000); + r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500); + r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00); + + /* CHN EST parameters adjust - giga slave */ + r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000); + r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000); + + /* CHN EST parameters adjust - fnet */ + r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200); + r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500); + r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00); + + /* enable R-tune & PGA-retune function */ + dout_tapbin = 0; + data = phy_read_paged(phydev, 0x0a46, 0x13); + data &= 3; + data <<= 2; + dout_tapbin |= data; + data = phy_read_paged(phydev, 0x0a46, 0x12); + data &= 0xc000; + data >>= 14; + dout_tapbin |= data; + dout_tapbin = ~(dout_tapbin ^ 0x08); + dout_tapbin <<= 12; + dout_tapbin &= 0xf000; + + r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800); + phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002); + + rtl8168g_enable_gphy_10m(phydev); + + /* SAR ADC performance */ + phy_modify_paged(phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14)); + + r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000); + + /* disable phy pfm mode */ + phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0); + + rtl8168g_disable_aldps(phydev); + rtl8168h_config_eee_phy(phydev); +} + +static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + u16 ioffset, rlen; + u32 data; + + r8169_apply_firmware(tp); + + /* CHIN EST parameter update */ + r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a); + + /* enable R-tune & PGA-retune function */ + r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800); + phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002); + + rtl8168g_enable_gphy_10m(phydev); + + ioffset = rtl8168h_2_get_adc_bias_ioffset(tp); + if (ioffset != 0xffff) + phy_write_paged(phydev, 0x0bcf, 0x16, ioffset); + + /* Modify rlen (TX LPF corner frequency) level */ + data = phy_read_paged(phydev, 0x0bcd, 0x16); + data &= 0x000f; + rlen = 0; + if (data > 3) + rlen = data - 3; + data = rlen | (rlen << 4) | (rlen << 8) | (rlen << 12); + phy_write_paged(phydev, 0x0bcd, 0x17, data); + + /* disable phy pfm mode */ + phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Enable PHY auto speed down */ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); + + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* Enable EEE auto-fallback function */ + phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2)); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + /* set rg_sel_sdm_rate */ + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + rtl8168g_disable_aldps(phydev); + 
rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + /* Set rg_sel_sdm_rate */ + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + /* Channel estimation parameters */ + r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00); + r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00); + r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500); + r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00); + r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800); + r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400); + r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500); + r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800); + r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00); + r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500); + r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100); + r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200); + r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400); + r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00); + r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00); + r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00); + r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00); + r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00); + r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400); + + /* Force PWM-mode */ + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x5065); + phy_write(phydev, 0x14, 0xd065); + phy_write(phydev, 0x1f, 0x0bc8); + phy_write(phydev, 0x12, 0x00ed); + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x14, 0x9065); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8117_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* CHN EST parameters adjust - fnet */ + r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800); + r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00); + r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000); + + r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000); + r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00); + r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600); + r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000); + r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800); + r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000); + r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00); + r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000); + r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800); + r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200); + r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800); + r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00); + r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300); + + r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800); + + rtl8168g_enable_gphy_10m(phydev); + + r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400); + + rtl8168g_disable_aldps(phydev); + rtl8168h_config_eee_phy(phydev); +} + +static void rtl8102e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0003 
}, + { 0x08, 0x441d }, + { 0x01, 0x9100 }, + { 0x1f, 0x0000 } + }; + + phy_set_bits(phydev, 0x11, BIT(12)); + phy_set_bits(phydev, 0x19, BIT(13)); + phy_set_bits(phydev, 0x10, BIT(15)); + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8401_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_set_bits(phydev, 0x11, BIT(12)); + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0003); +} + +static void rtl8105e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Disable ALDPS before ram code */ + phy_write(phydev, 0x18, 0x0310); + msleep(100); + + r8169_apply_firmware(tp); + + phy_write_paged(phydev, 0x0005, 0x1a, 0x0000); + phy_write_paged(phydev, 0x0004, 0x1c, 0x0000); + phy_write_paged(phydev, 0x0001, 0x15, 0x7701); +} + +static void rtl8402_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Disable ALDPS before setting firmware */ + phy_write(phydev, 0x18, 0x0310); + msleep(20); + + r8169_apply_firmware(tp); + + /* EEE setting */ + phy_write(phydev, 0x1f, 0x0004); + phy_write(phydev, 0x10, 0x401f); + phy_write(phydev, 0x19, 0x7030); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8106e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0004 }, + { 0x10, 0xc07f }, + { 0x19, 0x7030 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + phy_write(phydev, 0x18, 0x0310); + msleep(100); + + r8169_apply_firmware(tp); + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8125_legacy_force_mode(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa5b, 0x12, BIT(15), 0); +} + +static void rtl8125a_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084); + phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); + phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006); + phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006); + phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100); + phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000); + phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400); + phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff); + phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff); + + r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400); + r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300); + r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00); + r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000); + r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500); + r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300); + r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000); + r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000); + r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500); + r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00); + r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100); + r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038); + r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6); + + phy_write_paged(phydev, 0xbc3, 0x12, 0x5555); + phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00); + phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000); + rtl8168g_enable_gphy_10m(phydev); + + rtl8125a_config_eee_phy(phydev); +} + +static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + int i; + + phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); + phy_modify_paged(phydev, 
0xad1, 0x13, 0x03ff, 0x03ff); + phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006); + phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000); + phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002); + phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044); + phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000); + phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002); + phy_write_paged(phydev, 0xad4, 0x16, 0x00a8); + phy_write_paged(phydev, 0xac5, 0x16, 0x01ff); + phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80a2); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x16, 0x809c); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x1f, 0x0000); + + phy_write(phydev, 0x1f, 0x0a43); + phy_write(phydev, 0x13, 0x81B3); + phy_write(phydev, 0x14, 0x0043); + phy_write(phydev, 0x14, 0x00A7); + phy_write(phydev, 0x14, 0x00D6); + phy_write(phydev, 0x14, 0x00EC); + phy_write(phydev, 0x14, 0x00F6); + phy_write(phydev, 0x14, 0x00FB); + phy_write(phydev, 0x14, 0x00FD); + phy_write(phydev, 0x14, 0x00FF); + phy_write(phydev, 0x14, 0x00BB); + phy_write(phydev, 0x14, 0x0058); + phy_write(phydev, 0x14, 0x0029); + phy_write(phydev, 0x14, 0x0013); + phy_write(phydev, 0x14, 0x0009); + phy_write(phydev, 0x14, 0x0004); + phy_write(phydev, 0x14, 0x0002); + for (i = 0; i < 25; i++) + phy_write(phydev, 0x14, 0x0000); + phy_write(phydev, 0x1f, 0x0000); + + r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020F); + r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843); + + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000); + + r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100); + + phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00); + phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020); + phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000); + rtl8168g_enable_gphy_10m(phydev); + + rtl8125a_config_eee_phy(phydev); +} + +static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800); + phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090); + phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80f5); + phy_write(phydev, 0x17, 0x760e); + phy_write(phydev, 0x16, 0x8107); + phy_write(phydev, 0x17, 0x360e); + phy_write(phydev, 0x16, 0x8551); + phy_modify(phydev, 0x17, 0xff00, 0x0800); + phy_write(phydev, 0x1f, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000); + phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300); + + r8168g_phy_param(phydev, 0x8044, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x804a, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8050, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8056, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x805c, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8062, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8068, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x806e, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8074, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x807a, 0xffff, 0x2417); + + phy_modify_paged(phydev, 0xa4c, 0x15, 0x0000, 0x0040); + phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000); + + rtl8125_legacy_force_mode(phydev); + rtl8125b_config_eee_phy(phydev); +} + +void 
r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, + enum mac_version ver) +{ + static const rtl_phy_cfg_fct phy_configs[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config, + [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config, + [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config, + /* PCI-E devices. */ + [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config, + [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config, + [RTL_GIGA_MAC_VER_16] = NULL, + [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config, + [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config, + [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config, + [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config, + [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config, + [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config, + [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config, + [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_31] = NULL, + [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config, + [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config, + [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config, + [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config, + [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config, + [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config, + [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config, + [RTL_GIGA_MAC_VER_41] = NULL, + [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config, + [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config, + [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config, + [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config, + [RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config, + [RTL_GIGA_MAC_VER_60] = rtl8125a_1_hw_phy_config, + [RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config, + [RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config, + }; + + if (phy_configs[ver]) + phy_configs[ver](tp, phydev); +} diff --git a/devices/r8169/r8169_phy_config-6.1-ethercat.c b/devices/r8169/r8169_phy_config-6.1-ethercat.c new file mode 100644 index 00000000..4062b61b --- /dev/null +++ b/devices/r8169/r8169_phy_config-6.1-ethercat.c @@ -0,0 +1,1159 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * r8169_phy_config.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include +#include + +#include "r8169-6.1-ethercat.h" + +typedef void (*rtl_phy_cfg_fct)(struct rtl8169_private *tp, + struct phy_device *phydev); + +static void r8168d_modify_extpage(struct phy_device *phydev, int extpage, + int reg, u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0007); + + __phy_write(phydev, 0x1e, extpage); + __phy_modify(phydev, reg, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +static void r8168d_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0005); + + __phy_write(phydev, 0x05, parm); + __phy_modify(phydev, 0x06, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +static void r8168g_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0a43); + + __phy_write(phydev, 0x13, parm); + __phy_modify(phydev, 0x14, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +struct phy_reg { + u16 reg; + u16 val; +}; + +static void __rtl_writephy_batch(struct phy_device *phydev, + const struct phy_reg *regs, int len) +{ + phy_lock_mdio_bus(phydev); + + while (len-- > 0) { + __phy_write(phydev, regs->reg, regs->val); + regs++; + } + + phy_unlock_mdio_bus(phydev); +} + +#define rtl_writephy_batch(p, a) __rtl_writephy_batch(p, a, ARRAY_SIZE(a)) + +static void rtl8168f_config_eee_phy(struct phy_device *phydev) +{ + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8)); + r8168d_phy_param(phydev, 0x8b85, 0, BIT(13)); +} + +static void rtl8168g_config_eee_phy(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a43, 0x11, 0, BIT(4)); +} + +static void rtl8168h_config_eee_phy(struct phy_device *phydev) +{ + rtl8168g_config_eee_phy(phydev); + + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080); +} + +static void rtl8125a_config_eee_phy(struct phy_device *phydev) +{ + rtl8168h_config_eee_phy(phydev); + + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); +} + +static void rtl8125b_config_eee_phy(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000); +} + +static void rtl8169s_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x06, 0x006e }, + { 0x08, 0x0708 }, + { 0x15, 0x4000 }, + { 0x18, 0x65c7 }, + + { 0x1f, 0x0001 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x0000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf60 }, + { 0x01, 0x0140 }, + { 0x00, 0x0077 }, + { 0x04, 0x7800 }, + { 0x04, 0x7000 }, + + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf0f9 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xa000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf20 }, + { 0x01, 0x0140 }, + { 0x00, 0x00bb }, + { 0x04, 0xb800 }, + { 0x04, 0xb000 }, + + { 0x03, 0xdf41 }, + { 0x02, 0xdc60 }, + { 0x01, 0x6340 }, + { 0x00, 0x007d }, + { 0x04, 0xd800 }, + { 0x04, 0xd000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x100a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + + { 0x1f, 0x0000 }, + { 0x0b, 0x0000 
}, + { 0x00, 0x9200 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0002, 0x01, 0x90d0); +} + +static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x14, 0xfb54 }, + { 0x18, 0xf5c7 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0x8480 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x18, 0x67c7 }, + { 0x04, 0x2000 }, + { 0x03, 0x002f }, + { 0x02, 0x4360 }, + { 0x01, 0x0109 }, + { 0x00, 0x3022 }, + { 0x04, 0x2800 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write(phydev, 0x1f, 0x0001); + phy_set_bits(phydev, 0x16, BIT(0)); + phy_write(phydev, 0x10, 0xf41b); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0001, 0x10, 0xf41b); +} + +static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write(phydev, 0x1d, 0x0f00); + phy_write_paged(phydev, 0x0002, 0x0c, 0x1ec8); +} + +static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); + phy_write_paged(phydev, 0x0001, 0x1d, 0x3d98); +} + +static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1f, 0x0002 }, + { 0x00, 0x88d4 }, + { 0x01, 0x82b1 }, + { 0x03, 0x7002 }, + { 0x08, 0x9e30 }, + { 0x09, 0x01f0 }, + { 0x0a, 0x5500 }, + { 0x0c, 0x00c8 }, + { 0x1f, 0x0003 }, + { 0x12, 0xc096 }, + { 0x16, 0x000a }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x09, 0x2000 }, + { 0x09, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + 
phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x0761 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x16, BIT(0)); + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x5461 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x16, BIT(0)); + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } +}; + +static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp, + struct phy_device *phydev, + u16 val) +{ + u16 reg_val; + + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x001b); + reg_val = phy_read(phydev, 0x06); + phy_write(phydev, 0x1f, 0x0000); + + if (reg_val != val) + phydev_warn(phydev, "chipset not ready for firmware\n"); + else + r8169_apply_firmware(tp); +} + +static void rtl8168d_1_common(struct phy_device *phydev) +{ + u16 val; + + phy_write_paged(phydev, 0x0002, 0x05, 0x669a); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x669a); + phy_write(phydev, 0x1f, 0x0002); + + val = phy_read(phydev, 0x0d); + + if ((val & 0x00ff) != 0x006c) { + static const u16 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + phy_write(phydev, 0x0d, val | set[i]); + } +} + +static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0); + + /* + * Rx Error Issue + * Fine Tune Switching regulator parameter + */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x0b, 0x00ef, 0x0010); + phy_modify(phydev, 0x0c, 0x5d00, 0xa200); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + rtl8168d_1_common(phydev); + } else { + phy_write_paged(phydev, 0x0002, 0x05, 0x6662); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x6662); + } + + /* RSET couple improve */ + phy_write(phydev, 0x1f, 0x0002); + phy_set_bits(phydev, 0x0d, 0x0300); + phy_set_bits(phydev, 0x0f, 
0x0010); + + /* Fine tune PLL performance */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x02, 0x0600, 0x0100); + phy_clear_bits(phydev, 0x03, 0xe000); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168d_apply_firmware_cond(tp, phydev, 0xbf00); +} + +static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + rtl8168d_1_common(phydev); + } else { + phy_write_paged(phydev, 0x0002, 0x05, 0x2642); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x2642); + } + + /* Fine tune PLL performance */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x02, 0x0600, 0x0100); + phy_clear_bits(phydev, 0x03, 0xe000); + phy_write(phydev, 0x1f, 0x0000); + + /* Switching regulator Slew rate */ + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0017); + + rtl8168d_apply_firmware_cond(tp, phydev, 0xb300); +} + +static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0001, 0x17, 0x0cc0); + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0xffff, 0x0040); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0001 }, + { 0x0b, 0x6c20 }, + { 0x07, 0x2872 }, + { 0x1c, 0xefff }, + { 0x1f, 0x0003 }, + { 0x14, 0x6420 }, + { 0x1f, 0x0000 }, + }; + + r8169_apply_firmware(tp); + + /* Enable Delay cap */ + r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896); + + rtl_writephy_batch(phydev, phy_reg_init); + + /* Update PFM & 10M TX idle timer */ + r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919); + + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + + /* DCO enable for 10M IDLE Power */ + r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006); + + /* For impedance matching */ + phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050); + phy_set_bits(phydev, 0x14, BIT(15)); + + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000); + + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000); + phy_write_paged(phydev, 0x0006, 0x00, 0x5a00); + + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000); +} + +static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + /* Enable Delay cap */ + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Green Setting */ + r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222); + r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000); + r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000); + + /* For 4-corner performance improve */ + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x8b80); + phy_set_bits(phydev, 0x17, 0x0006); + phy_write(phydev, 0x1f, 0x0000); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); + + /* improve 10M EEE waveform */ + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); + + rtl8168f_config_eee_phy(phydev); + + /* Green feature */ + phy_write(phydev, 0x1f, 
0x0003); + phy_set_bits(phydev, 0x19, BIT(0)); + phy_set_bits(phydev, 0x10, BIT(10)); + phy_write(phydev, 0x1f, 0x0000); + phy_modify_paged(phydev, 0x0005, 0x01, 0, BIT(8)); +} + +static void rtl8168f_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* For 4-corner performance improve */ + r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); + + /* Improve 10M EEE waveform */ + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + + rtl8168f_config_eee_phy(phydev); +} + +static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); + + rtl8168f_hw_phy_config(tp, phydev); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); +} + +static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp, phydev); +} + +static void rtl8411_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp, phydev); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); + + /* Modify green table for giga */ + r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100); + r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000); + + /* uc same-seed solution */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000); + + /* Green feature */ + phy_write(phydev, 0x1f, 0x0003); + phy_clear_bits(phydev, 0x19, BIT(0)); + phy_clear_bits(phydev, 0x10, BIT(10)); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168g_disable_aldps(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a43, 0x10, BIT(2), 0); +} + +static void rtl8168g_enable_gphy_10m(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11)); +} + 
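Note (illustration only, not part of this patch): the one-line helpers above use phylib's paged register access. phy_modify_paged() performs a read-modify-write of one register on a given PHY page and can be composed from the phy_select_page(), __phy_modify() and phy_restore_page() primitives that the r8168d_*/r8168g_* helpers in this file already use explicitly. A minimal sketch, with a made-up function name:

	/* Illustration only; not part of the driver. Shows how a paged
	 * read-modify-write is built from phylib primitives. */
	static int example_modify_paged(struct phy_device *phydev, int page,
			u32 regnum, u16 mask, u16 set)
	{
		/* remember the current page and switch to the target page */
		int oldpage = phy_select_page(phydev, page);
		int ret = 0;

		if (oldpage >= 0)
			/* clear the bits in 'mask', then set the bits in 'set' */
			ret = __phy_modify(phydev, regnum, mask, set);

		/* switch back to the previous page, propagating any error */
		return phy_restore_page(phydev, oldpage, ret);
	}

For example, the call phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11)) above sets bit 11 of register 0x11 on page 0x0a44 and leaves the remaining bits untouched.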
+static void rtl8168g_phy_adjust_10m_aldps(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0); + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6)); + r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000); + phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003); +} + +static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + int ret; + + r8169_apply_firmware(tp); + + ret = phy_read_paged(phydev, 0x0a46, 0x10); + if (ret & BIT(8)) + phy_modify_paged(phydev, 0x0bcc, 0x12, BIT(15), 0); + else + phy_modify_paged(phydev, 0x0bcc, 0x12, 0, BIT(15)); + + ret = phy_read_paged(phydev, 0x0a46, 0x13); + if (ret & BIT(8)) + phy_modify_paged(phydev, 0x0c41, 0x15, 0, BIT(1)); + else + phy_modify_paged(phydev, 0x0c41, 0x15, BIT(1), 0); + + /* Enable PHY auto speed down */ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); + + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* EEE auto-fallback function */ + phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2)); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + /* Improve SWR Efficiency */ + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x5065); + phy_write(phydev, 0x14, 0xd065); + phy_write(phydev, 0x1f, 0x0bc8); + phy_write(phydev, 0x11, 0x5655); + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x14, 0x9065); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + u16 ioffset, rlen; + u32 data; + + r8169_apply_firmware(tp); + + /* CHIN EST parameter update */ + r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a); + + /* enable R-tune & PGA-retune function */ + r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800); + phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002); + + rtl8168g_enable_gphy_10m(phydev); + + ioffset = rtl8168h_2_get_adc_bias_ioffset(tp); + if (ioffset != 0xffff) + phy_write_paged(phydev, 0x0bcf, 0x16, ioffset); + + /* Modify rlen (TX LPF corner frequency) level */ + data = phy_read_paged(phydev, 0x0bcd, 0x16); + data &= 0x000f; + rlen = 0; + if (data > 3) + rlen = data - 3; + data = rlen | (rlen << 4) | (rlen << 8) | (rlen << 12); + phy_write_paged(phydev, 0x0bcd, 0x17, data); + + /* disable phy pfm mode */ + phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0); + + /* disable 10m pll off */ + phy_modify_paged(phydev, 0x0a43, 0x10, BIT(0), 0); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + /* Set rg_sel_sdm_rate */ + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + /* Channel estimation parameters */ + r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00); + r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00); + r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500); + r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00); + r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800); + r8168g_phy_param(phydev, 0x80ed, 
0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400); + r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500); + r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800); + r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00); + r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500); + r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100); + r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200); + r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400); + r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00); + r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00); + r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00); + r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00); + r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00); + r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400); + + /* Force PWM-mode */ + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x5065); + phy_write(phydev, 0x14, 0xd065); + phy_write(phydev, 0x1f, 0x0bc8); + phy_write(phydev, 0x12, 0x00ed); + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x14, 0x9065); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8117_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* CHN EST parameters adjust - fnet */ + r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800); + r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00); + r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000); + + r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000); + r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00); + r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600); + r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000); + r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800); + r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000); + r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00); + r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000); + r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800); + r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200); + r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800); + r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00); + r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300); + + r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800); + + rtl8168g_enable_gphy_10m(phydev); + + r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400); + + rtl8168g_disable_aldps(phydev); + rtl8168h_config_eee_phy(phydev); +} + +static void rtl8102e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0003 }, + { 0x08, 0x441d }, + { 0x01, 0x9100 }, + { 0x1f, 0x0000 } + }; + + phy_set_bits(phydev, 0x11, BIT(12)); + phy_set_bits(phydev, 0x19, BIT(13)); + phy_set_bits(phydev, 0x10, BIT(15)); + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8401_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_set_bits(phydev, 0x11, BIT(12)); + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0003); +} + +static void rtl8105e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Disable ALDPS before ram code */ + phy_write(phydev, 0x18, 0x0310); + msleep(100); + + r8169_apply_firmware(tp); + + phy_write_paged(phydev, 0x0005, 0x1a, 
0x0000); + phy_write_paged(phydev, 0x0004, 0x1c, 0x0000); + phy_write_paged(phydev, 0x0001, 0x15, 0x7701); +} + +static void rtl8402_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Disable ALDPS before setting firmware */ + phy_write(phydev, 0x18, 0x0310); + msleep(20); + + r8169_apply_firmware(tp); + + /* EEE setting */ + phy_write(phydev, 0x1f, 0x0004); + phy_write(phydev, 0x10, 0x401f); + phy_write(phydev, 0x19, 0x7030); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8106e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0004 }, + { 0x10, 0xc07f }, + { 0x19, 0x7030 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + phy_write(phydev, 0x18, 0x0310); + msleep(100); + + r8169_apply_firmware(tp); + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8125_legacy_force_mode(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa5b, 0x12, BIT(15), 0); +} + +static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + int i; + + phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); + phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x03ff); + phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006); + phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000); + phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002); + phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044); + phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000); + phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002); + phy_write_paged(phydev, 0xad4, 0x16, 0x00a8); + phy_write_paged(phydev, 0xac5, 0x16, 0x01ff); + phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80a2); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x16, 0x809c); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x1f, 0x0000); + + phy_write(phydev, 0x1f, 0x0a43); + phy_write(phydev, 0x13, 0x81B3); + phy_write(phydev, 0x14, 0x0043); + phy_write(phydev, 0x14, 0x00A7); + phy_write(phydev, 0x14, 0x00D6); + phy_write(phydev, 0x14, 0x00EC); + phy_write(phydev, 0x14, 0x00F6); + phy_write(phydev, 0x14, 0x00FB); + phy_write(phydev, 0x14, 0x00FD); + phy_write(phydev, 0x14, 0x00FF); + phy_write(phydev, 0x14, 0x00BB); + phy_write(phydev, 0x14, 0x0058); + phy_write(phydev, 0x14, 0x0029); + phy_write(phydev, 0x14, 0x0013); + phy_write(phydev, 0x14, 0x0009); + phy_write(phydev, 0x14, 0x0004); + phy_write(phydev, 0x14, 0x0002); + for (i = 0; i < 25; i++) + phy_write(phydev, 0x14, 0x0000); + phy_write(phydev, 0x1f, 0x0000); + + r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020F); + r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843); + + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000); + + r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100); + + phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00); + phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020); + phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000); + rtl8168g_enable_gphy_10m(phydev); + + rtl8125a_config_eee_phy(phydev); +} + +static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800); + phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 
0x0090); + phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80f5); + phy_write(phydev, 0x17, 0x760e); + phy_write(phydev, 0x16, 0x8107); + phy_write(phydev, 0x17, 0x360e); + phy_write(phydev, 0x16, 0x8551); + phy_modify(phydev, 0x17, 0xff00, 0x0800); + phy_write(phydev, 0x1f, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000); + phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300); + + r8168g_phy_param(phydev, 0x8044, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x804a, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8050, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8056, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x805c, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8062, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8068, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x806e, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8074, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x807a, 0xffff, 0x2417); + + phy_modify_paged(phydev, 0xa4c, 0x15, 0x0000, 0x0040); + phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000); + + rtl8125_legacy_force_mode(phydev); + rtl8125b_config_eee_phy(phydev); +} + +void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, + enum mac_version ver) +{ + static const rtl_phy_cfg_fct phy_configs[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config, + [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config, + [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config, + /* PCI-E devices. */ + [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config, + [RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config, + [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config, + [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config, + [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config, + [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config, + [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config, + [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config, + [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_31] = NULL, + [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config, + [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config, + [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config, + [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config, + [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config, + [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config, + [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config, + [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config, + [RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config, + [RTL_GIGA_MAC_VER_61] = 
rtl8125a_2_hw_phy_config, + [RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config, + }; + + if (phy_configs[ver]) + phy_configs[ver](tp, phydev); +} diff --git a/devices/r8169/r8169_phy_config-6.1-orig.c b/devices/r8169/r8169_phy_config-6.1-orig.c new file mode 100644 index 00000000..b50f1678 --- /dev/null +++ b/devices/r8169/r8169_phy_config-6.1-orig.c @@ -0,0 +1,1159 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * r8169_phy_config.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. + */ + +#include +#include + +#include "r8169.h" + +typedef void (*rtl_phy_cfg_fct)(struct rtl8169_private *tp, + struct phy_device *phydev); + +static void r8168d_modify_extpage(struct phy_device *phydev, int extpage, + int reg, u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0007); + + __phy_write(phydev, 0x1e, extpage); + __phy_modify(phydev, reg, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +static void r8168d_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0005); + + __phy_write(phydev, 0x05, parm); + __phy_modify(phydev, 0x06, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +static void r8168g_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0a43); + + __phy_write(phydev, 0x13, parm); + __phy_modify(phydev, 0x14, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +struct phy_reg { + u16 reg; + u16 val; +}; + +static void __rtl_writephy_batch(struct phy_device *phydev, + const struct phy_reg *regs, int len) +{ + phy_lock_mdio_bus(phydev); + + while (len-- > 0) { + __phy_write(phydev, regs->reg, regs->val); + regs++; + } + + phy_unlock_mdio_bus(phydev); +} + +#define rtl_writephy_batch(p, a) __rtl_writephy_batch(p, a, ARRAY_SIZE(a)) + +static void rtl8168f_config_eee_phy(struct phy_device *phydev) +{ + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8)); + r8168d_phy_param(phydev, 0x8b85, 0, BIT(13)); +} + +static void rtl8168g_config_eee_phy(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a43, 0x11, 0, BIT(4)); +} + +static void rtl8168h_config_eee_phy(struct phy_device *phydev) +{ + rtl8168g_config_eee_phy(phydev); + + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080); +} + +static void rtl8125a_config_eee_phy(struct phy_device *phydev) +{ + rtl8168h_config_eee_phy(phydev); + + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); +} + +static void rtl8125b_config_eee_phy(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000); +} + +static void rtl8169s_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x06, 0x006e }, + { 0x08, 0x0708 }, + { 0x15, 0x4000 }, + { 0x18, 0x65c7 }, + + { 0x1f, 0x0001 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x0000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf60 }, + { 0x01, 0x0140 }, + { 0x00, 0x0077 }, + { 0x04, 
0x7800 }, + { 0x04, 0x7000 }, + + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf0f9 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xa000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf20 }, + { 0x01, 0x0140 }, + { 0x00, 0x00bb }, + { 0x04, 0xb800 }, + { 0x04, 0xb000 }, + + { 0x03, 0xdf41 }, + { 0x02, 0xdc60 }, + { 0x01, 0x6340 }, + { 0x00, 0x007d }, + { 0x04, 0xd800 }, + { 0x04, 0xd000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x100a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + + { 0x1f, 0x0000 }, + { 0x0b, 0x0000 }, + { 0x00, 0x9200 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0002, 0x01, 0x90d0); +} + +static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x14, 0xfb54 }, + { 0x18, 0xf5c7 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0x8480 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x18, 0x67c7 }, + { 0x04, 0x2000 }, + { 0x03, 0x002f }, + { 0x02, 0x4360 }, + { 0x01, 0x0109 }, + { 0x00, 0x3022 }, + { 0x04, 0x2800 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write(phydev, 0x1f, 0x0001); + phy_set_bits(phydev, 0x16, BIT(0)); + phy_write(phydev, 0x10, 0xf41b); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0001, 0x10, 0xf41b); +} + +static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write(phydev, 0x1d, 0x0f00); + phy_write_paged(phydev, 0x0002, 0x0c, 0x1ec8); +} + +static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + 
phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); + phy_write_paged(phydev, 0x0001, 0x1d, 0x3d98); +} + +static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1f, 0x0002 }, + { 0x00, 0x88d4 }, + { 0x01, 0x82b1 }, + { 0x03, 0x7002 }, + { 0x08, 0x9e30 }, + { 0x09, 0x01f0 }, + { 0x0a, 0x5500 }, + { 0x0c, 0x00c8 }, + { 0x1f, 0x0003 }, + { 0x12, 0xc096 }, + { 0x16, 0x000a }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x09, 0x2000 }, + { 0x09, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x0761 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x16, BIT(0)); + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x5461 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(phydev, phy_reg_init); + + phy_set_bits(phydev, 0x16, BIT(0)); + phy_set_bits(phydev, 0x14, BIT(5)); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } +}; + +static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp, + struct phy_device *phydev, + u16 val) +{ + u16 reg_val; + + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x001b); + reg_val = phy_read(phydev, 0x06); + phy_write(phydev, 0x1f, 0x0000); + + if (reg_val != val) + phydev_warn(phydev, "chipset not ready for firmware\n"); + else + r8169_apply_firmware(tp); +} + +static void rtl8168d_1_common(struct phy_device *phydev) +{ + u16 val; + + phy_write_paged(phydev, 0x0002, 0x05, 0x669a); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x669a); + phy_write(phydev, 0x1f, 0x0002); + + val = phy_read(phydev, 0x0d); + + if ((val & 0x00ff) != 0x006c) { + static const u16 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + phy_write(phydev, 0x0d, val | set[i]); + } +} + +static void 
rtl8168d_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0); + + /* + * Rx Error Issue + * Fine Tune Switching regulator parameter + */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x0b, 0x00ef, 0x0010); + phy_modify(phydev, 0x0c, 0x5d00, 0xa200); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + rtl8168d_1_common(phydev); + } else { + phy_write_paged(phydev, 0x0002, 0x05, 0x6662); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x6662); + } + + /* RSET couple improve */ + phy_write(phydev, 0x1f, 0x0002); + phy_set_bits(phydev, 0x0d, 0x0300); + phy_set_bits(phydev, 0x0f, 0x0010); + + /* Fine tune PLL performance */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x02, 0x0600, 0x0100); + phy_clear_bits(phydev, 0x03, 0xe000); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168d_apply_firmware_cond(tp, phydev, 0xbf00); +} + +static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + rtl8168d_1_common(phydev); + } else { + phy_write_paged(phydev, 0x0002, 0x05, 0x2642); + r8168d_phy_param(phydev, 0x8330, 0xffff, 0x2642); + } + + /* Fine tune PLL performance */ + phy_write(phydev, 0x1f, 0x0002); + phy_modify(phydev, 0x02, 0x0600, 0x0100); + phy_clear_bits(phydev, 0x03, 0xe000); + phy_write(phydev, 0x1f, 0x0000); + + /* Switching regulator Slew rate */ + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0017); + + rtl8168d_apply_firmware_cond(tp, phydev, 0xb300); +} + +static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_write_paged(phydev, 0x0001, 0x17, 0x0cc0); + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0xffff, 0x0040); + phy_set_bits(phydev, 0x0d, BIT(5)); +} + +static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0001 }, + { 0x0b, 0x6c20 }, + { 0x07, 0x2872 }, + { 0x1c, 0xefff }, + { 0x1f, 0x0003 }, + { 0x14, 0x6420 }, + { 0x1f, 0x0000 }, + }; + + r8169_apply_firmware(tp); + + /* Enable Delay cap */ + r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896); + + rtl_writephy_batch(phydev, phy_reg_init); + + /* Update PFM & 10M TX idle timer */ + r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919); + + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + + /* DCO enable for 10M IDLE Power */ + r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006); + + /* For impedance matching */ + phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050); + phy_set_bits(phydev, 0x14, BIT(15)); + + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000); + + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000); + phy_write_paged(phydev, 0x0006, 0x00, 0x5a00); + + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000); +} + +static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + /* Enable Delay cap */ + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Green Setting */ + r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222); + r8168d_phy_param(phydev, 
0x8b6d, 0xffff, 0x8000); + r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000); + + /* For 4-corner performance improve */ + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x8b80); + phy_set_bits(phydev, 0x17, 0x0006); + phy_write(phydev, 0x1f, 0x0000); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); + + /* improve 10M EEE waveform */ + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); + + rtl8168f_config_eee_phy(phydev); + + /* Green feature */ + phy_write(phydev, 0x1f, 0x0003); + phy_set_bits(phydev, 0x19, BIT(0)); + phy_set_bits(phydev, 0x10, BIT(10)); + phy_write(phydev, 0x1f, 0x0000); + phy_modify_paged(phydev, 0x0005, 0x01, 0, BIT(8)); +} + +static void rtl8168f_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* For 4-corner performance improve */ + r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006); + + /* PHY auto speed down */ + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); + + /* Improve 10M EEE waveform */ + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + + rtl8168f_config_eee_phy(phydev); +} + +static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); + + rtl8168f_hw_phy_config(tp, phydev); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); +} + +static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp, phydev); +} + +static void rtl8411_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp, phydev); + + /* Improve 2-pair detection performance */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); + + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); + + /* Modify green table for giga */ + r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100); + 
r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000); + + /* uc same-seed solution */ + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000); + + /* Green feature */ + phy_write(phydev, 0x1f, 0x0003); + phy_clear_bits(phydev, 0x19, BIT(0)); + phy_clear_bits(phydev, 0x10, BIT(10)); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168g_disable_aldps(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a43, 0x10, BIT(2), 0); +} + +static void rtl8168g_enable_gphy_10m(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11)); +} + +static void rtl8168g_phy_adjust_10m_aldps(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0); + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6)); + r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000); + phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003); +} + +static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + int ret; + + r8169_apply_firmware(tp); + + ret = phy_read_paged(phydev, 0x0a46, 0x10); + if (ret & BIT(8)) + phy_modify_paged(phydev, 0x0bcc, 0x12, BIT(15), 0); + else + phy_modify_paged(phydev, 0x0bcc, 0x12, 0, BIT(15)); + + ret = phy_read_paged(phydev, 0x0a46, 0x13); + if (ret & BIT(8)) + phy_modify_paged(phydev, 0x0c41, 0x15, 0, BIT(1)); + else + phy_modify_paged(phydev, 0x0c41, 0x15, BIT(1), 0); + + /* Enable PHY auto speed down */ + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); + + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* EEE auto-fallback function */ + phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2)); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + /* Improve SWR Efficiency */ + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x5065); + phy_write(phydev, 0x14, 0xd065); + phy_write(phydev, 0x1f, 0x0bc8); + phy_write(phydev, 0x11, 0x5655); + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x14, 0x9065); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + u16 ioffset, rlen; + u32 data; + + r8169_apply_firmware(tp); + + /* CHIN EST parameter update */ + r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a); + + /* enable R-tune & PGA-retune function */ + r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800); + phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002); + + rtl8168g_enable_gphy_10m(phydev); + + ioffset = rtl8168h_2_get_adc_bias_ioffset(tp); + if (ioffset != 0xffff) + phy_write_paged(phydev, 0x0bcf, 0x16, ioffset); + + /* Modify rlen (TX LPF corner frequency) level */ + data = phy_read_paged(phydev, 0x0bcd, 0x16); + data &= 0x000f; + rlen = 0; + if (data > 3) + rlen = data - 3; + data = rlen | (rlen << 4) | (rlen << 8) | (rlen << 12); + phy_write_paged(phydev, 0x0bcd, 0x17, data); + + /* disable phy pfm mode */ + phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0); + + /* disable 10m pll off */ + phy_modify_paged(phydev, 0x0a43, 0x10, BIT(0), 0); + + rtl8168g_disable_aldps(phydev); + 
rtl8168g_config_eee_phy(phydev); +} + +static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + rtl8168g_phy_adjust_10m_aldps(phydev); + + /* Enable UC LPF tune function */ + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); + + /* Set rg_sel_sdm_rate */ + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + + /* Channel estimation parameters */ + r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00); + r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00); + r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500); + r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00); + r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800); + r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400); + r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500); + r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800); + r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00); + r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500); + r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100); + r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200); + r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400); + r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00); + r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00); + r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00); + r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00); + r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00); + r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400); + + /* Force PWM-mode */ + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x5065); + phy_write(phydev, 0x14, 0xd065); + phy_write(phydev, 0x1f, 0x0bc8); + phy_write(phydev, 0x12, 0x00ed); + phy_write(phydev, 0x1f, 0x0bcd); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x14, 0x9065); + phy_write(phydev, 0x14, 0x1065); + phy_write(phydev, 0x1f, 0x0000); + + rtl8168g_disable_aldps(phydev); + rtl8168g_config_eee_phy(phydev); +} + +static void rtl8117_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* CHN EST parameters adjust - fnet */ + r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800); + r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00); + r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000); + + r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000); + r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00); + r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600); + r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000); + r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800); + r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000); + r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00); + r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000); + r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800); + r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200); + r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800); + r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00); + r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300); + + r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800); + + rtl8168g_enable_gphy_10m(phydev); + + r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400); + + rtl8168g_disable_aldps(phydev); + rtl8168h_config_eee_phy(phydev); +} + +static void rtl8102e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0003 
}, + { 0x08, 0x441d }, + { 0x01, 0x9100 }, + { 0x1f, 0x0000 } + }; + + phy_set_bits(phydev, 0x11, BIT(12)); + phy_set_bits(phydev, 0x19, BIT(13)); + phy_set_bits(phydev, 0x10, BIT(15)); + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8401_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_set_bits(phydev, 0x11, BIT(12)); + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0003); +} + +static void rtl8105e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Disable ALDPS before ram code */ + phy_write(phydev, 0x18, 0x0310); + msleep(100); + + r8169_apply_firmware(tp); + + phy_write_paged(phydev, 0x0005, 0x1a, 0x0000); + phy_write_paged(phydev, 0x0004, 0x1c, 0x0000); + phy_write_paged(phydev, 0x0001, 0x15, 0x7701); +} + +static void rtl8402_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + /* Disable ALDPS before setting firmware */ + phy_write(phydev, 0x18, 0x0310); + msleep(20); + + r8169_apply_firmware(tp); + + /* EEE setting */ + phy_write(phydev, 0x1f, 0x0004); + phy_write(phydev, 0x10, 0x401f); + phy_write(phydev, 0x19, 0x7030); + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8106e_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0004 }, + { 0x10, 0xc07f }, + { 0x19, 0x7030 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + phy_write(phydev, 0x18, 0x0310); + msleep(100); + + r8169_apply_firmware(tp); + + rtl_writephy_batch(phydev, phy_reg_init); +} + +static void rtl8125_legacy_force_mode(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa5b, 0x12, BIT(15), 0); +} + +static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + int i; + + phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); + phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x03ff); + phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006); + phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000); + phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002); + phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044); + phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000); + phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002); + phy_write_paged(phydev, 0xad4, 0x16, 0x00a8); + phy_write_paged(phydev, 0xac5, 0x16, 0x01ff); + phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80a2); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x16, 0x809c); + phy_write(phydev, 0x17, 0x0153); + phy_write(phydev, 0x1f, 0x0000); + + phy_write(phydev, 0x1f, 0x0a43); + phy_write(phydev, 0x13, 0x81B3); + phy_write(phydev, 0x14, 0x0043); + phy_write(phydev, 0x14, 0x00A7); + phy_write(phydev, 0x14, 0x00D6); + phy_write(phydev, 0x14, 0x00EC); + phy_write(phydev, 0x14, 0x00F6); + phy_write(phydev, 0x14, 0x00FB); + phy_write(phydev, 0x14, 0x00FD); + phy_write(phydev, 0x14, 0x00FF); + phy_write(phydev, 0x14, 0x00BB); + phy_write(phydev, 0x14, 0x0058); + phy_write(phydev, 0x14, 0x0029); + phy_write(phydev, 0x14, 0x0013); + phy_write(phydev, 0x14, 0x0009); + phy_write(phydev, 0x14, 0x0004); + phy_write(phydev, 0x14, 0x0002); + for (i = 0; i < 25; i++) + phy_write(phydev, 0x14, 0x0000); + phy_write(phydev, 0x1f, 0x0000); + + r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020F); + r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843); + + r8169_apply_firmware(tp); + + 
phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000); + + r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100); + + phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00); + phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020); + phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000); + rtl8168g_enable_gphy_10m(phydev); + + rtl8125a_config_eee_phy(phydev); +} + +static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800); + phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090); + phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80f5); + phy_write(phydev, 0x17, 0x760e); + phy_write(phydev, 0x16, 0x8107); + phy_write(phydev, 0x17, 0x360e); + phy_write(phydev, 0x16, 0x8551); + phy_modify(phydev, 0x17, 0xff00, 0x0800); + phy_write(phydev, 0x1f, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000); + phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300); + + r8168g_phy_param(phydev, 0x8044, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x804a, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8050, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8056, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x805c, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8062, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8068, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x806e, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8074, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x807a, 0xffff, 0x2417); + + phy_modify_paged(phydev, 0xa4c, 0x15, 0x0000, 0x0040); + phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000); + + rtl8125_legacy_force_mode(phydev); + rtl8125b_config_eee_phy(phydev); +} + +void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, + enum mac_version ver) +{ + static const rtl_phy_cfg_fct phy_configs[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config, + [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config, + [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config, + /* PCI-E devices. 
*/ + [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config, + [RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config, + [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config, + [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config, + [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config, + [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config, + [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config, + [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config, + [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_31] = NULL, + [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config, + [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config, + [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config, + [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config, + [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config, + [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config, + [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config, + [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config, + [RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config, + [RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config, + [RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config, + }; + + if (phy_configs[ver]) + phy_configs[ver](tp, phydev); +} diff --git a/include/ecrt.h b/include/ecrt.h index 3f6b4b19..6978a37d 100644 --- a/include/ecrt.h +++ b/include/ecrt.h @@ -1,8 +1,6 @@ /****************************************************************************** * - * $Id$ - * - * Copyright (C) 2006-2012 Florian Pose, Ingenieurgemeinschaft IgH + * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH * * This file is part of the IgH EtherCAT master userspace library. * @@ -43,6 +41,13 @@ * * - Added the ecrt_slave_config_flag() method and the EC_HAVE_FLAGS * definition to check for its existence. + * - Added SoE IDN requests, including the datatype ec_soe_request_t and the + * methods ecrt_slave_config_create_soe_request(), + * ecrt_soe_request_object(), ecrt_soe_request_timeout(), + * ecrt_soe_request_data(), ecrt_soe_request_data_size(), + * ecrt_soe_request_state(), ecrt_soe_request_write() and + * ecrt_soe_request_read(). Use the EC_HAVE_SOE_REQUESTS to check, if the + * functionality is available. * * Changes in version 1.5.2: * @@ -52,7 +57,7 @@ * - Added the EC_HAVE_REDUNDANCY define, to check, if the interface contains * redundancy features. * - Added ecrt_sdo_request_index() to change SDO index and subindex after - * handler creation. + * request creation. * - Added interface for retrieving CoE emergency messages, i. e. 
* ecrt_slave_config_emerg_size(), ecrt_slave_config_emerg_pop(), * ecrt_slave_config_emerg_clear(), ecrt_slave_config_emerg_overruns() and @@ -200,6 +205,14 @@ */ #define EC_HAVE_FLAGS +/** Defined if the methods ecrt_slave_config_create_soe_request(), + * ecrt_soe_request_object(), ecrt_soe_request_timeout(), + * ecrt_soe_request_data(), ecrt_soe_request_data_size(), + * ecrt_soe_request_state(), ecrt_soe_request_write() and + * ecrt_soe_request_read() and the datatype ec_soe_request_t are available. + */ +#define EC_HAVE_SOE_REQUESTS + /*****************************************************************************/ /** End of list marker. @@ -254,11 +267,14 @@ typedef struct ec_domain ec_domain_t; /**< \see ec_domain */ struct ec_sdo_request; typedef struct ec_sdo_request ec_sdo_request_t; /**< \see ec_sdo_request. */ +struct ec_soe_request; +typedef struct ec_soe_request ec_soe_request_t; /**< \see ec_soe_request. */ + struct ec_voe_handler; typedef struct ec_voe_handler ec_voe_handler_t; /**< \see ec_voe_handler. */ struct ec_reg_request; -typedef struct ec_reg_request ec_reg_request_t; /**< \see ec_sdo_request. */ +typedef struct ec_reg_request ec_reg_request_t; /**< \see ec_reg_request. */ /*****************************************************************************/ @@ -1559,6 +1575,23 @@ ec_sdo_request_t *ecrt_slave_config_create_sdo_request( size_t size /**< Data size to reserve. */ ); +/** Create an SoE request to exchange SoE IDNs during realtime operation. + * + * The created SoE request object is freed automatically when the master is + * released. + * + * This method has to be called in non-realtime context before + * ecrt_master_activate(). + * + * \return New SoE request, or NULL on error. + */ +ec_soe_request_t *ecrt_slave_config_create_soe_request( + ec_slave_config_t *sc, /**< Slave configuration. */ + uint8_t drive_no, /**< Drive number. */ + uint16_t idn, /**< Sercos ID-Number. */ + size_t size /**< Data size to reserve. */ + ); + /** Create an VoE handler to exchange vendor-specific data during realtime * operation. * @@ -1656,6 +1689,9 @@ int ecrt_slave_config_idn( * assigned to EtherCAT (except during transition to PREOP). Non-zero * assigns the SII to the slave controller side before going to PREOP and * leaves it there until a write command happens. + * - WaitBeforeSAFEOPms: Number of milliseconds to wait before commanding the + * transition from PREOP to SAFEOP. This can be used as a workaround for + * slaves that need a little time to initialize. * * This method has to be called in non-realtime context before * ecrt_master_activate(). @@ -1874,6 +1910,105 @@ void ecrt_sdo_request_read( ec_sdo_request_t *req /**< SDO request. */ ); +/***************************************************************************** + * SoE request methods. + ****************************************************************************/ + +/** Set the request's drive and Sercos ID numbers. + * + * \attention If the drive number and/or IDN is changed while + * ecrt_soe_request_state() returns EC_REQUEST_BUSY, this may lead to + * unexpected results. + */ +void ecrt_soe_request_idn( + ec_soe_request_t *req, /**< IDN request. */ + uint8_t drive_no, /**< Drive number. */ + uint16_t idn /**< SoE IDN. */ + ); + +/** Set the timeout for an SoE request. + * + * If the request cannot be processed in the specified time, it will be marked + * as failed. + * + * The timeout is permanently stored in the request object and is valid until + * the next call of this method. 
+ */ +void ecrt_soe_request_timeout( + ec_soe_request_t *req, /**< SoE request. */ + uint32_t timeout /**< Timeout in milliseconds. Zero means no + timeout. */ + ); + +/** Access to the SoE request's data. + * + * This function returns a pointer to the request's internal IDN data memory. + * + * - After a read operation was successful, integer data can be evaluated + * using the EC_READ_*() macros as usual. Example: + * \code + * uint16_t value = EC_READ_U16(ecrt_soe_request_data(idn_req)); + * \endcode + * - If a write operation shall be triggered, the data have to be written to + * the internal memory. Use the EC_WRITE_*() macros, if you are writing + * integer data. Be sure, that the data fit into the memory. The memory size + * is a parameter of ecrt_slave_config_create_soe_request(). + * \code + * EC_WRITE_U16(ecrt_soe_request_data(idn_req), 0xFFFF); + * \endcode + * + * \attention The return value can be invalidated during a read operation, + * because the internal IDN data memory could be re-allocated if the read IDN + * data do not fit inside. + * + * \return Pointer to the internal IDN data memory. + */ +uint8_t *ecrt_soe_request_data( + ec_soe_request_t *req /**< SoE request. */ + ); + +/** Returns the current IDN data size. + * + * When the SoE request is created, the data size is set to the size of the + * reserved memory. After a read operation the size is set to the size of the + * read data. The size is not modified in any other situation. + * + * \return IDN data size in bytes. + */ +size_t ecrt_soe_request_data_size( + const ec_soe_request_t *req /**< SoE request. */ + ); + +/** Get the current state of the SoE request. + * + * \return Request state. + */ +ec_request_state_t ecrt_soe_request_state( + ec_soe_request_t *req /**< SoE request. */ + ); + +/** Schedule an SoE IDN write operation. + * + * \attention This method may not be called while ecrt_soe_request_state() + * returns EC_REQUEST_BUSY. + */ +void ecrt_soe_request_write( + ec_soe_request_t *req /**< SoE request. */ + ); + +/** Schedule an SoE IDN read operation. + * + * \attention This method may not be called while ecrt_soe_request_state() + * returns EC_REQUEST_BUSY. + * + * \attention After calling this function, the return value of + * ecrt_soe_request_data() must be considered as invalid while + * ecrt_soe_request_state() returns EC_REQUEST_BUSY. + */ +void ecrt_soe_request_read( + ec_soe_request_t *req /**< SoE request. */ + ); + /***************************************************************************** * VoE handler methods. ****************************************************************************/ diff --git a/lib/Makefile.am b/lib/Makefile.am index 330c1e34..ebad06b4 100644 --- a/lib/Makefile.am +++ b/lib/Makefile.am @@ -1,8 +1,6 @@ #------------------------------------------------------------------------------ # -# $Id$ -# -# Copyright (C) 2006-2012 Florian Pose, Ingenieurgemeinschaft IgH +# Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH # # This file is part of the IgH EtherCAT master userspace library. # @@ -39,6 +37,7 @@ libethercat_la_SOURCES = \ reg_request.c \ sdo_request.c \ slave_config.c \ + soe_request.c \ voe_handler.c noinst_HEADERS = \ @@ -48,6 +47,7 @@ noinst_HEADERS = \ reg_request.h \ sdo_request.h \ slave_config.h \ + soe_request.h \ voe_handler.h libethercat_la_CFLAGS = -fno-strict-aliasing -Wall -I$(top_srcdir) @@ -70,8 +70,10 @@ libethercat_la_CFLAGS = -fno-strict-aliasing -Wall -I$(top_srcdir) # 1:0.0 # ecrt_master_sync_reference_clock_to() added. 
 # 2:0:1
+# SoE requests added
+# 3:0:2
 #
-libethercat_la_LDFLAGS = -version-info 2:0:1
+libethercat_la_LDFLAGS = -version-info 3:0:2
 
 pkgconfig_DATA = libethercat.pc
 
diff --git a/lib/slave_config.c b/lib/slave_config.c
index ea28f75a..4e00fcf9 100644
--- a/lib/slave_config.c
+++ b/lib/slave_config.c
@@ -35,6 +35,7 @@
 #include "slave_config.h"
 #include "domain.h"
 #include "sdo_request.h"
+#include "soe_request.h"
 #include "reg_request.h"
 #include "voe_handler.h"
 #include "master.h"
@@ -44,6 +45,7 @@ void ec_slave_config_clear(ec_slave_config_t *sc)
 {
     ec_sdo_request_t *r, *next_r;
+    ec_soe_request_t *s, *next_s;
     ec_reg_request_t *e, *next_e;
     ec_voe_handler_t *v, *next_v;
 
@@ -56,6 +58,15 @@ void ec_slave_config_clear(ec_slave_config_t *sc)
     }
     sc->first_sdo_request = NULL;
 
+    s = sc->first_soe_request;
+    while (s) {
+        next_s = s->next;
+        ec_soe_request_clear(s);
+        free(s);
+        s = next_s;
+    }
+    sc->first_soe_request = NULL;
+
     e = sc->first_reg_request;
     while (e) {
         next_e = e->next;
@@ -607,6 +618,76 @@ ec_sdo_request_t *ecrt_slave_config_create_sdo_request(ec_slave_config_t *sc,
 
 /*****************************************************************************/
 
+void ec_slave_config_add_soe_request(ec_slave_config_t *sc,
+        ec_soe_request_t *req)
+{
+    if (sc->first_soe_request) {
+        ec_soe_request_t *r = sc->first_soe_request;
+        while (r->next) {
+            r = r->next;
+        }
+        r->next = req;
+    } else {
+        sc->first_soe_request = req;
+    }
+}
+
+/*****************************************************************************/
+
+ec_soe_request_t *ecrt_slave_config_create_soe_request(ec_slave_config_t *sc,
+        uint8_t drive_no, uint16_t idn, size_t size)
+{
+    ec_ioctl_soe_request_t data;
+    ec_soe_request_t *req;
+    int ret;
+
+    req = malloc(sizeof(ec_soe_request_t));
+    if (!req) {
+        fprintf(stderr, "Failed to allocate memory.\n");
+        return NULL;
+    }
+
+    if (size) {
+        req->data = malloc(size);
+        if (!req->data) {
+            fprintf(stderr, "Failed to allocate %zu bytes of SoE data"
+                    " memory.\n", size);
+            free(req);
+            return NULL;
+        }
+    } else {
+        req->data = NULL;
+    }
+
+    data.config_index = sc->index;
+    data.drive_no = drive_no;
+    data.idn = idn;
+    data.size = size;
+
+    ret = ioctl(sc->master->fd, EC_IOCTL_SC_SOE_REQUEST, &data);
+    if (EC_IOCTL_IS_ERROR(ret)) {
+        fprintf(stderr, "Failed to create SoE request: %s\n",
+                strerror(EC_IOCTL_ERRNO(ret)));
+        ec_soe_request_clear(req);
+        free(req);
+        return NULL;
+    }
+
+    req->next = NULL;
+    req->config = sc;
+    req->index = data.request_index;
+    req->drive_no = data.drive_no;
+    req->idn = data.idn;
+    req->data_size = size;
+    req->mem_size = size;
+
+    ec_slave_config_add_soe_request(sc, req);
+
+    return req;
+}
+
+/*****************************************************************************/
+
 void ec_slave_config_add_reg_request(ec_slave_config_t *sc,
         ec_reg_request_t *reg)
 {
diff --git a/lib/slave_config.h b/lib/slave_config.h
index 7d855a5f..6e3a4355 100644
--- a/lib/slave_config.h
+++ b/lib/slave_config.h
@@ -1,8 +1,6 @@
 /******************************************************************************
  *
- * $Id$
- *
- * Copyright (C) 2006-2012 Florian Pose, Ingenieurgemeinschaft IgH
+ * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH
 *
 * This file is part of the IgH EtherCAT master userspace library.
* @@ -39,6 +37,7 @@ struct ec_slave_config { uint16_t alias; uint16_t position; ec_sdo_request_t *first_sdo_request; + ec_soe_request_t *first_soe_request; ec_reg_request_t *first_reg_request; ec_voe_handler_t *first_voe_handler; }; diff --git a/lib/soe_request.c b/lib/soe_request.c new file mode 100644 index 00000000..99bc311e --- /dev/null +++ b/lib/soe_request.c @@ -0,0 +1,182 @@ +/****************************************************************************** + * + * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH + * + * This file is part of the IgH EtherCAT master userspace library. + * + * The IgH EtherCAT master userspace library is free software; you can + * redistribute it and/or modify it under the terms of the GNU Lesser General + * Public License as published by the Free Software Foundation; version 2.1 + * of the License. + * + * The IgH EtherCAT master userspace library is distributed in the hope that + * it will be useful, but WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with the IgH EtherCAT master userspace library. If not, see + * . + * + * --- + * + * The license mentioned above concerns the source code only. Using the + * EtherCAT technology and brand is only permitted in compliance with the + * industrial property and similar rights of Beckhoff Automation GmbH. + * + *****************************************************************************/ + +/** \file + * Canopen over EtherCAT SoE request functions. + */ + +/*****************************************************************************/ + +#include +#include + +#include "ioctl.h" +#include "soe_request.h" +#include "slave_config.h" +#include "master.h" + +/*****************************************************************************/ + +void ec_soe_request_clear(ec_soe_request_t *req) +{ + if (req->data) { + free(req->data); + req->data = NULL; + } +} + +/***************************************************************************** + * Application interface. 
+ ****************************************************************************/
+
+void ecrt_soe_request_idn(ec_soe_request_t *req, uint8_t drive_no,
+        uint16_t idn)
+{
+    ec_ioctl_soe_request_t data;
+    int ret;
+
+    data.config_index = req->config->index;
+    data.request_index = req->index;
+    data.drive_no = drive_no;
+    data.idn = idn;
+
+    ret = ioctl(req->config->master->fd, EC_IOCTL_SOE_REQUEST_IDN, &data);
+    if (EC_IOCTL_IS_ERROR(ret)) {
+        fprintf(stderr, "Failed to set SoE request IDN: %s\n",
+                strerror(EC_IOCTL_ERRNO(ret)));
+    }
+}
+
+/*****************************************************************************/
+
+void ecrt_soe_request_timeout(ec_soe_request_t *req, uint32_t timeout)
+{
+    ec_ioctl_soe_request_t data;
+    int ret;
+
+    data.config_index = req->config->index;
+    data.request_index = req->index;
+    data.timeout = timeout;
+
+    ret = ioctl(req->config->master->fd, EC_IOCTL_SOE_REQUEST_TIMEOUT, &data);
+    if (EC_IOCTL_IS_ERROR(ret)) {
+        fprintf(stderr, "Failed to set SoE request timeout: %s\n",
+                strerror(EC_IOCTL_ERRNO(ret)));
+    }
+}
+
+/*****************************************************************************/
+
+uint8_t *ecrt_soe_request_data(ec_soe_request_t *req)
+{
+    return req->data;
+}
+
+/*****************************************************************************/
+
+size_t ecrt_soe_request_data_size(const ec_soe_request_t *req)
+{
+    return req->data_size;
+}
+
+/*****************************************************************************/
+
+ec_request_state_t ecrt_soe_request_state(ec_soe_request_t *req)
+{
+    ec_ioctl_soe_request_t data;
+    int ret;
+
+    data.config_index = req->config->index;
+    data.request_index = req->index;
+
+    ret = ioctl(req->config->master->fd, EC_IOCTL_SOE_REQUEST_STATE, &data);
+    if (EC_IOCTL_IS_ERROR(ret)) {
+        fprintf(stderr, "Failed to get SoE request state: %s\n",
+                strerror(EC_IOCTL_ERRNO(ret)));
+        return EC_REQUEST_ERROR;
+    }
+
+    if (data.size) { // new data waiting to be copied
+        if (req->mem_size < data.size) {
+            fprintf(stderr, "Received %zu bytes do not fit into SoE data"
+                    " memory (%zu bytes)!\n", data.size, req->mem_size);
+            return EC_REQUEST_ERROR;
+        }
+
+        data.data = req->data;
+
+        ret = ioctl(req->config->master->fd,
+                EC_IOCTL_SOE_REQUEST_DATA, &data);
+        if (EC_IOCTL_IS_ERROR(ret)) {
+            fprintf(stderr, "Failed to get SoE data: %s\n",
+                    strerror(EC_IOCTL_ERRNO(ret)));
+            return EC_REQUEST_ERROR;
+        }
+        req->data_size = data.size;
+    }
+
+    return data.state;
+}
+
+/*****************************************************************************/
+
+void ecrt_soe_request_read(ec_soe_request_t *req)
+{
+    ec_ioctl_soe_request_t data;
+    int ret;
+
+    data.config_index = req->config->index;
+    data.request_index = req->index;
+
+    ret = ioctl(req->config->master->fd, EC_IOCTL_SOE_REQUEST_READ, &data);
+    if (EC_IOCTL_IS_ERROR(ret)) {
+        fprintf(stderr, "Failed to command an SoE read operation: %s\n",
+                strerror(EC_IOCTL_ERRNO(ret)));
+    }
+}
+
+/*****************************************************************************/
+
+void ecrt_soe_request_write(ec_soe_request_t *req)
+{
+    ec_ioctl_soe_request_t data;
+    int ret;
+
+    data.config_index = req->config->index;
+    data.request_index = req->index;
+    data.data = req->data;
+    data.size = req->data_size;
+
+    ret = ioctl(req->config->master->fd, EC_IOCTL_SOE_REQUEST_WRITE, &data);
+    if (EC_IOCTL_IS_ERROR(ret)) {
+        fprintf(stderr, "Failed to command an SoE write operation: %s\n",
+                strerror(EC_IOCTL_ERRNO(ret)));
+    }
+}
+
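The userspace API above follows the same workflow as the existing SDO requests. As a minimal illustrative sketch (not part of the patch), an application could create an SoE request before activation and poll it from its cyclic task; the slave configuration sc, drive number 0, IDN 0x0020 and the two-byte size are placeholder values:

#include <stdio.h>
#include <ecrt.h>

static ec_soe_request_t *idn_req;

/* Non-realtime setup, before ecrt_master_activate(): reserve two bytes for
 * drive 0, IDN 0x0020 (placeholder values); NULL is returned on error. */
void setup_soe(ec_slave_config_t *sc)
{
    idn_req = ecrt_slave_config_create_soe_request(sc, 0, 0x0020, 2);
    if (!idn_req) {
        fprintf(stderr, "Failed to create SoE request.\n");
    }
}

/* Cyclic task: poll the request state and keep a read operation running. */
void poll_soe(void)
{
    switch (ecrt_soe_request_state(idn_req)) {
        case EC_REQUEST_UNUSED:
        case EC_REQUEST_ERROR:
            ecrt_soe_request_read(idn_req); /* (re)start a read */
            break;
        case EC_REQUEST_BUSY:
            break; /* still processed by the master */
        case EC_REQUEST_SUCCESS:
            /* data and data size were updated by the finished read */
            printf("IDN value: 0x%04X\n",
                    EC_READ_U16(ecrt_soe_request_data(idn_req)));
            break;
    }
}

/* Schedule a write; only call while the request is not EC_REQUEST_BUSY. */
void write_idn(uint16_t value)
{
    EC_WRITE_U16(ecrt_soe_request_data(idn_req), value);
    ecrt_soe_request_write(idn_req);
}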
+/*****************************************************************************/ diff --git a/lib/soe_request.h b/lib/soe_request.h new file mode 100644 index 00000000..43f8e72d --- /dev/null +++ b/lib/soe_request.h @@ -0,0 +1,48 @@ +/****************************************************************************** + * + * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH + * + * This file is part of the IgH EtherCAT master userspace library. + * + * The IgH EtherCAT master userspace library is free software; you can + * redistribute it and/or modify it under the terms of the GNU Lesser General + * Public License as published by the Free Software Foundation; version 2.1 + * of the License. + * + * The IgH EtherCAT master userspace library is distributed in the hope that + * it will be useful, but WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with the IgH EtherCAT master userspace library. If not, see + * . + * + * --- + * + * The license mentioned above concerns the source code only. Using the + * EtherCAT technology and brand is only permitted in compliance with the + * industrial property and similar rights of Beckhoff Automation GmbH. + * + *****************************************************************************/ + +#include "include/ecrt.h" + +/*****************************************************************************/ + +struct ec_soe_request { + ec_soe_request_t *next; /**< List header. */ + ec_slave_config_t *config; /**< Parent slave configuration. */ + unsigned int index; /**< Request index (identifier). */ + uint8_t drive_no; /**< SoE drive number. */ + uint16_t idn; /**< SoE ID number. */ + uint8_t *data; /**< Pointer to SoE data. */ + size_t mem_size; /**< Size of SoE data memory. */ + size_t data_size; /**< Size of SoE data. */ +}; + +/*****************************************************************************/ + +void ec_soe_request_clear(ec_soe_request_t *); + +/*****************************************************************************/ diff --git a/master/device.c b/master/device.c index deb938fa..f23cd776 100644 --- a/master/device.c +++ b/master/device.c @@ -56,6 +56,11 @@ /*****************************************************************************/ +enum { + /* genet driver needs extra headroom in skb for status block */ + EXTRA_HEADROOM = 64, +}; + /** Constructor. 
* * \return 0 in case of success, else < 0 @@ -127,14 +132,14 @@ int ec_device_init( #endif for (i = 0; i < EC_TX_RING_SIZE; i++) { - if (!(device->tx_skb[i] = dev_alloc_skb(ETH_FRAME_LEN))) { + if (!(device->tx_skb[i] = dev_alloc_skb(ETH_FRAME_LEN + EXTRA_HEADROOM))) { EC_MASTER_ERR(master, "Error allocating device socket buffer!\n"); ret = -ENOMEM; goto out_tx_ring; } // add Ethernet-II-header - skb_reserve(device->tx_skb[i], ETH_HLEN); + skb_reserve(device->tx_skb[i], ETH_HLEN + EXTRA_HEADROOM); eth = (struct ethhdr *) skb_push(device->tx_skb[i], ETH_HLEN); eth->h_proto = htons(0x88A4); memset(eth->h_dest, 0xFF, ETH_ALEN); diff --git a/master/fsm_master.c b/master/fsm_master.c index d2532510..92595dda 100644 --- a/master/fsm_master.c +++ b/master/fsm_master.c @@ -1,8 +1,6 @@ /****************************************************************************** * - * $Id$ - * - * Copyright (C) 2006-2012 Florian Pose, Ingenieurgemeinschaft IgH + * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH * * This file is part of the IgH EtherCAT Master. * @@ -66,6 +64,7 @@ void ec_fsm_master_state_assign_sii(ec_fsm_master_t *); void ec_fsm_master_state_write_sii(ec_fsm_master_t *); void ec_fsm_master_state_sdo_dictionary(ec_fsm_master_t *); void ec_fsm_master_state_sdo_request(ec_fsm_master_t *); +void ec_fsm_master_state_soe_request(ec_fsm_master_t *); void ec_fsm_master_enter_clear_addresses(ec_fsm_master_t *); void ec_fsm_master_enter_write_system_times(ec_fsm_master_t *); @@ -83,8 +82,18 @@ void ec_fsm_master_init( fsm->master = master; fsm->datagram = datagram; + // inits the member variables state, idle, dev_idx, link_state, + // slaves_responding, slave_states and rescan_required ec_fsm_master_reset(fsm); + fsm->retries = 0; + fsm->scan_jiffies = 0; + fsm->slave = NULL; + fsm->sii_request = NULL; + fsm->sii_index = 0; + fsm->sdo_request = NULL; + fsm->soe_request = NULL; + // init sub-state-machines ec_fsm_coe_init(&fsm->fsm_coe); ec_fsm_soe_init(&fsm->fsm_soe); @@ -492,17 +501,18 @@ int ec_fsm_master_action_process_sii( /*****************************************************************************/ -/** Check for pending SDO requests and process one. +/** Check for pending internal SDO/SoE requests and process one. * * \return non-zero, if an SDO request is processed. */ -int ec_fsm_master_action_process_sdo( +int ec_fsm_master_action_process_int_request( ec_fsm_master_t *fsm /**< Master state machine. 
*/ ) { ec_master_t *master = fsm->master; ec_slave_t *slave; - ec_sdo_request_t *req; + ec_sdo_request_t *sdo_req; + ec_soe_request_t *soe_req; // search for internal requests to be processed for (slave = master->slaves; @@ -513,33 +523,61 @@ int ec_fsm_master_action_process_sdo( continue; } - list_for_each_entry(req, &slave->config->sdo_requests, list) { - if (req->state == EC_INT_REQUEST_QUEUED) { + list_for_each_entry(sdo_req, &slave->config->sdo_requests, list) { + if (sdo_req->state == EC_INT_REQUEST_QUEUED) { - if (ec_sdo_request_timed_out(req)) { - req->state = EC_INT_REQUEST_FAILURE; + if (ec_sdo_request_timed_out(sdo_req)) { + sdo_req->state = EC_INT_REQUEST_FAILURE; EC_SLAVE_DBG(slave, 1, "Internal SDO request" " timed out.\n"); continue; } if (slave->current_state == EC_SLAVE_STATE_INIT) { - req->state = EC_INT_REQUEST_FAILURE; + sdo_req->state = EC_INT_REQUEST_FAILURE; continue; } - req->state = EC_INT_REQUEST_BUSY; + sdo_req->state = EC_INT_REQUEST_BUSY; EC_SLAVE_DBG(slave, 1, "Processing internal" " SDO request...\n"); fsm->idle = 0; - fsm->sdo_request = req; + fsm->sdo_request = sdo_req; fsm->slave = slave; fsm->state = ec_fsm_master_state_sdo_request; - ec_fsm_coe_transfer(&fsm->fsm_coe, slave, req); + ec_fsm_coe_transfer(&fsm->fsm_coe, slave, sdo_req); ec_fsm_coe_exec(&fsm->fsm_coe, fsm->datagram); return 1; } } + + list_for_each_entry(soe_req, &slave->config->soe_requests, list) { + if (soe_req->state == EC_INT_REQUEST_QUEUED) { + + if (ec_soe_request_timed_out(soe_req)) { + soe_req->state = EC_INT_REQUEST_FAILURE; + EC_SLAVE_DBG(slave, 1, "Internal SoE request" + " timed out.\n"); + continue; + } + + if (slave->current_state == EC_SLAVE_STATE_INIT) { + soe_req->state = EC_INT_REQUEST_FAILURE; + continue; + } + + soe_req->state = EC_INT_REQUEST_BUSY; + EC_SLAVE_DBG(slave, 1, "Processing internal" + " SoE request...\n"); + fsm->idle = 0; + fsm->soe_request = soe_req; + fsm->slave = slave; + fsm->state = ec_fsm_master_state_soe_request; + ec_fsm_soe_transfer(&fsm->fsm_soe, slave, soe_req); + ec_fsm_soe_exec(&fsm->fsm_soe, fsm->datagram); + return 1; + } + } } return 0; } @@ -557,8 +595,8 @@ void ec_fsm_master_action_idle( ec_master_t *master = fsm->master; ec_slave_t *slave; - // Check for pending internal SDO requests - if (ec_fsm_master_action_process_sdo(fsm)) { + // Check for pending internal SDO or SoE requests + if (ec_fsm_master_action_process_int_request(fsm)) { return; } @@ -1357,8 +1395,51 @@ void ec_fsm_master_state_sdo_request( EC_SLAVE_DBG(fsm->slave, 1, "Finished internal SDO request.\n"); - // check for another SDO request - if (ec_fsm_master_action_process_sdo(fsm)) { + // check for another SDO/SoE request + if (ec_fsm_master_action_process_int_request(fsm)) { + return; // processing another request + } + + ec_fsm_master_restart(fsm); +} + +/*****************************************************************************/ + +/** Master state: SoE REQUEST. + */ +void ec_fsm_master_state_soe_request( + ec_fsm_master_t *fsm /**< Master state machine. 
*/ + ) +{ + ec_soe_request_t *request = fsm->soe_request; + + if (!request) { + // configuration was cleared in the meantime + ec_fsm_master_restart(fsm); + return; + } + + if (ec_fsm_soe_exec(&fsm->fsm_soe, fsm->datagram)) { + return; + } + + if (!ec_fsm_soe_success(&fsm->fsm_soe)) { + EC_SLAVE_DBG(fsm->slave, 1, + "Failed to process internal SoE request.\n"); + request->state = EC_INT_REQUEST_FAILURE; + wake_up_all(&fsm->master->request_queue); + ec_fsm_master_restart(fsm); + return; + } + + // SoE request finished + request->state = EC_INT_REQUEST_SUCCESS; + wake_up_all(&fsm->master->request_queue); + + EC_SLAVE_DBG(fsm->slave, 1, "Finished internal SoE request.\n"); + + // check for another CoE/SoE request + if (ec_fsm_master_action_process_int_request(fsm)) { return; // processing another request } diff --git a/master/fsm_master.h b/master/fsm_master.h index a41b949c..3e461b43 100644 --- a/master/fsm_master.h +++ b/master/fsm_master.h @@ -1,8 +1,6 @@ /****************************************************************************** * - * $Id$ - * - * Copyright (C) 2006-2012 Florian Pose, Ingenieurgemeinschaft IgH + * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH * * This file is part of the IgH EtherCAT Master. * @@ -88,6 +86,7 @@ struct ec_fsm_master { ec_sii_write_request_t *sii_request; /**< SII write request */ off_t sii_index; /**< index to SII write request data */ ec_sdo_request_t *sdo_request; /**< SDO request to process. */ + ec_soe_request_t *soe_request; /**< SoE request to process. */ ec_fsm_coe_t fsm_coe; /**< CoE state machine */ ec_fsm_soe_t fsm_soe; /**< SoE state machine */ diff --git a/master/fsm_slave.c b/master/fsm_slave.c index 11229d55..14a1040e 100644 --- a/master/fsm_slave.c +++ b/master/fsm_slave.c @@ -1,8 +1,6 @@ /****************************************************************************** * - * $Id$ - * - * Copyright (C) 2006-2012 Florian Pose, Ingenieurgemeinschaft IgH + * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH * * This file is part of the IgH EtherCAT Master. * diff --git a/master/fsm_slave_config.c b/master/fsm_slave_config.c index 81a1ecc6..52f5a2d3 100644 --- a/master/fsm_slave_config.c +++ b/master/fsm_slave_config.c @@ -1,8 +1,6 @@ /****************************************************************************** * - * $Id$ - * - * Copyright (C) 2006-2008 Florian Pose, Ingenieurgemeinschaft IgH + * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH * * This file is part of the IgH EtherCAT Master. 
* @@ -85,6 +83,7 @@ void ec_fsm_slave_config_state_dc_cycle(ec_fsm_slave_config_t *); void ec_fsm_slave_config_state_dc_sync_check(ec_fsm_slave_config_t *); void ec_fsm_slave_config_state_dc_start(ec_fsm_slave_config_t *); void ec_fsm_slave_config_state_dc_assign(ec_fsm_slave_config_t *); +void ec_fsm_slave_config_state_wait_safeop(ec_fsm_slave_config_t *); void ec_fsm_slave_config_state_safeop(ec_fsm_slave_config_t *); void ec_fsm_slave_config_state_soe_conf_safeop(ec_fsm_slave_config_t *); void ec_fsm_slave_config_state_op(ec_fsm_slave_config_t *); @@ -105,6 +104,7 @@ void ec_fsm_slave_config_enter_watchdog(ec_fsm_slave_config_t *); void ec_fsm_slave_config_enter_pdo_sync(ec_fsm_slave_config_t *); void ec_fsm_slave_config_enter_fmmu(ec_fsm_slave_config_t *); void ec_fsm_slave_config_enter_dc_cycle(ec_fsm_slave_config_t *); +void ec_fsm_slave_config_enter_wait_safeop(ec_fsm_slave_config_t *); void ec_fsm_slave_config_enter_safeop(ec_fsm_slave_config_t *); void ec_fsm_slave_config_enter_soe_conf_safeop(ec_fsm_slave_config_t *); void ec_fsm_slave_config_enter_op(ec_fsm_slave_config_t *); @@ -135,6 +135,8 @@ void ec_fsm_slave_config_init( fsm->fsm_coe = fsm_coe; fsm->fsm_soe = fsm_soe; fsm->fsm_pdo = fsm_pdo; + + fsm->wait_ms = 0; } /*****************************************************************************/ @@ -1224,7 +1226,7 @@ void ec_fsm_slave_config_enter_fmmu( const ec_sync_t *sync; if (!slave->config) { - ec_fsm_slave_config_enter_safeop(fsm); + ec_fsm_slave_config_enter_wait_safeop(fsm); return; } @@ -1329,7 +1331,7 @@ void ec_fsm_slave_config_enter_dc_cycle( fsm->state = ec_fsm_slave_config_state_dc_cycle; } else { // DC are unused - ec_fsm_slave_config_enter_safeop(fsm); + ec_fsm_slave_config_enter_wait_safeop(fsm); } } @@ -1551,6 +1553,59 @@ void ec_fsm_slave_config_state_dc_assign( return; } + ec_fsm_slave_config_enter_wait_safeop(fsm); +} + +/*****************************************************************************/ + +/** Wait before SAFEOP transition. + * + * The feature flag WaitBeforeSAFEOPms can be used to add a wait time before + * going to SAFEOP. This can be used as a workaround for slaves that need some + * extra time for initialisation. + */ +void ec_fsm_slave_config_enter_wait_safeop( + ec_fsm_slave_config_t *fsm /**< slave state machine */ + ) +{ + ec_slave_config_t *config = fsm->slave->config; + fsm->wait_ms = 0UL; + if (config) { + ec_flag_t *flag = ec_slave_config_find_flag(config, + "WaitBeforeSAFEOPms"); + if (flag && flag->value > 0) { + fsm->wait_ms = (unsigned long) flag->value; + } + } + + if (fsm->wait_ms > 0) { + fsm->state = ec_fsm_slave_config_state_wait_safeop; + + /* dummy read */ + ec_datagram_fprd(fsm->datagram, fsm->slave->station_address, + 0x0600, 1); + + fsm->jiffies_start = jiffies; + } + else { + ec_fsm_slave_config_enter_safeop(fsm); + } +} + +/*****************************************************************************/ + +/** Slave configuration state: WAIT SAFEOP. 
+ */ +void ec_fsm_slave_config_state_wait_safeop( + ec_fsm_slave_config_t *fsm /**< slave state machine */ + ) +{ + unsigned long diff = jiffies - fsm->jiffies_start; + + if (diff * 1000 / HZ < fsm->wait_ms) { + return; + } + ec_fsm_slave_config_enter_safeop(fsm); } diff --git a/master/fsm_slave_config.h b/master/fsm_slave_config.h index 0111be7b..a89037bc 100644 --- a/master/fsm_slave_config.h +++ b/master/fsm_slave_config.h @@ -1,8 +1,6 @@ /****************************************************************************** * - * $Id$ - * - * Copyright (C) 2006-2008 Florian Pose, Ingenieurgemeinschaft IgH + * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH * * This file is part of the IgH EtherCAT Master. * @@ -68,6 +66,7 @@ struct ec_fsm_slave_config ec_soe_request_t soe_request_copy; /**< Copied SDO request. */ unsigned long jiffies_start; /**< For timeout calculations. */ unsigned int take_time; /**< Store jiffies after datagram reception. */ + unsigned long wait_ms; /**< Wait time (used to wait before SAFEOP). */ }; /*****************************************************************************/ diff --git a/master/ioctl.c b/master/ioctl.c index 3d0e1d81..96eae878 100644 --- a/master/ioctl.c +++ b/master/ioctl.c @@ -1,8 +1,6 @@ /****************************************************************************** * - * $Id$ - * - * Copyright (C) 2006-2012 Florian Pose, Ingenieurgemeinschaft IgH + * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH * * This file is part of the IgH EtherCAT Master. * @@ -1482,7 +1480,7 @@ static ATTRIBUTES int ec_ioctl_config_idn( ioctl->drive_no = req->drive_no; ioctl->idn = req->idn; - ioctl->state = req->state; + ioctl->state = req->al_state; ioctl->size = req->data_size; memcpy(ioctl->data, req->data, min((u32) ioctl->size, (u32) EC_MAX_IDN_DATA_SIZE)); @@ -2906,6 +2904,59 @@ static ATTRIBUTES int ec_ioctl_sc_create_sdo_request( /*****************************************************************************/ +/** Create an SoE request. + * + * \return Zero on success, otherwise a negative error code. + */ +static ATTRIBUTES int ec_ioctl_sc_create_soe_request( + ec_master_t *master, /**< EtherCAT master. */ + void *arg, /**< ioctl() argument. */ + ec_ioctl_context_t *ctx /**< Private data structure of file handle. */ + ) +{ + ec_ioctl_soe_request_t data; + ec_slave_config_t *sc; + ec_soe_request_t *req; + + if (unlikely(!ctx->requested)) + return -EPERM; + + if (copy_from_user(&data, (void __user *) arg, sizeof(data))) { + return -EFAULT; + } + + data.request_index = 0; + + if (down_interruptible(&master->master_sem)) + return -EINTR; + + sc = ec_master_get_config(master, data.config_index); + if (!sc) { + up(&master->master_sem); + return -ENOENT; + } + + list_for_each_entry(req, &sc->soe_requests, list) { + data.request_index++; + } + + up(&master->master_sem); /** \todo sc could be invalidated */ + + req = ecrt_slave_config_create_soe_request_err(sc, data.drive_no, + data.idn, data.size); + if (IS_ERR(req)) { + return PTR_ERR(req); + } + + if (copy_to_user((void __user *) arg, &data, sizeof(data))) { + return -EFAULT; + } + + return 0; +} + +/*****************************************************************************/ + /** Create a register request. * * \return Zero on success, otherwise a negative error code. 
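The wait state above is driven by the per-slave-config WaitBeforeSAFEOPms feature flag: the enter function prepares a dummy FPRD datagram so the state machine keeps being executed, and the wait state compares the elapsed jiffies (converted via HZ) against the configured number of milliseconds. As a hedged sketch (not part of the patch), an application could request a 200 ms delay for a slow-starting slave; the flag setter ecrt_slave_config_flag() and its return convention are assumed here from the EC_HAVE_FLAGS feature:

#include <stdio.h>
#include <ecrt.h>

/* sc is the slave configuration of the slow-starting slave (placeholder).
 * 200 ms is an arbitrary example value; ecrt_slave_config_flag() is the
 * generic flag setter assumed to accompany EC_HAVE_FLAGS. */
void request_safeop_delay(ec_slave_config_t *sc)
{
    if (ecrt_slave_config_flag(sc, "WaitBeforeSAFEOPms", 200)) {
        fprintf(stderr, "Failed to set WaitBeforeSAFEOPms flag.\n");
    }
}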
@@ -3137,6 +3188,7 @@ static ATTRIBUTES int ec_ioctl_sc_flag( kfree(key); return -EFAULT; } + key[ioctl.key_size] = '\0'; if (down_interruptible(&master->master_sem)) { kfree(key); @@ -3569,6 +3621,255 @@ static ATTRIBUTES int ec_ioctl_sdo_request_data( /*****************************************************************************/ +/** Sets an SoE request's drive number and IDN. + * + * \return Zero on success, otherwise a negative error code. + */ +static ATTRIBUTES int ec_ioctl_soe_request_index( + ec_master_t *master, /**< EtherCAT master. */ + void *arg, /**< ioctl() argument. */ + ec_ioctl_context_t *ctx /**< Private data structure of file handle. */ + ) +{ + ec_ioctl_soe_request_t data; + ec_slave_config_t *sc; + ec_soe_request_t *req; + + if (unlikely(!ctx->requested)) + return -EPERM; + + if (copy_from_user(&data, (void __user *) arg, sizeof(data))) + return -EFAULT; + + /* no locking of master_sem needed, because neither sc nor req will not be + * deleted in the meantime. */ + + if (!(sc = ec_master_get_config(master, data.config_index))) { + return -ENOENT; + } + + if (!(req = ec_slave_config_find_soe_request(sc, data.request_index))) { + return -ENOENT; + } + + ecrt_soe_request_idn(req, data.drive_no, data.idn); + return 0; +} + +/*****************************************************************************/ + +/** Sets an CoE request's timeout. + * + * \return Zero on success, otherwise a negative error code. + */ +static ATTRIBUTES int ec_ioctl_soe_request_timeout( + ec_master_t *master, /**< EtherCAT master. */ + void *arg, /**< ioctl() argument. */ + ec_ioctl_context_t *ctx /**< Private data structure of file handle. */ + ) +{ + ec_ioctl_soe_request_t data; + ec_slave_config_t *sc; + ec_soe_request_t *req; + + if (unlikely(!ctx->requested)) + return -EPERM; + + if (copy_from_user(&data, (void __user *) arg, sizeof(data))) + return -EFAULT; + + /* no locking of master_sem needed, because neither sc nor req will not be + * deleted in the meantime. */ + + if (!(sc = ec_master_get_config(master, data.config_index))) { + return -ENOENT; + } + + if (!(req = ec_slave_config_find_soe_request(sc, data.request_index))) { + return -ENOENT; + } + + ecrt_soe_request_timeout(req, data.timeout); + return 0; +} + +/*****************************************************************************/ + +/** Gets an SoE request's state. + * + * \return Zero on success, otherwise a negative error code. + */ +static ATTRIBUTES int ec_ioctl_soe_request_state( + ec_master_t *master, /**< EtherCAT master. */ + void *arg, /**< ioctl() argument. */ + ec_ioctl_context_t *ctx /**< Private data structure of file handle. */ + ) +{ + ec_ioctl_soe_request_t data; + ec_slave_config_t *sc; + ec_soe_request_t *req; + + if (unlikely(!ctx->requested)) + return -EPERM; + + if (copy_from_user(&data, (void __user *) arg, sizeof(data))) + return -EFAULT; + + /* no locking of master_sem needed, because neither sc nor req will not be + * deleted in the meantime. 
*/ + + if (!(sc = ec_master_get_config(master, data.config_index))) { + return -ENOENT; + } + + if (!(req = ec_slave_config_find_soe_request(sc, data.request_index))) { + return -ENOENT; + } + + data.state = ecrt_soe_request_state(req); + if (data.state == EC_REQUEST_SUCCESS && req->dir == EC_DIR_INPUT) { + data.size = ecrt_soe_request_data_size(req); + } + else { + data.size = 0; + } + + if (copy_to_user((void __user *) arg, &data, sizeof(data))) + return -EFAULT; + + return 0; +} + +/*****************************************************************************/ + +/** Starts an SoE IDN read operation. + * + * \return Zero on success, otherwise a negative error code. + */ +static ATTRIBUTES int ec_ioctl_soe_request_read( + ec_master_t *master, /**< EtherCAT master. */ + void *arg, /**< ioctl() argument. */ + ec_ioctl_context_t *ctx /**< Private data structure of file handle. */ + ) +{ + ec_ioctl_soe_request_t data; + ec_slave_config_t *sc; + ec_soe_request_t *req; + + if (unlikely(!ctx->requested)) + return -EPERM; + + if (copy_from_user(&data, (void __user *) arg, sizeof(data))) + return -EFAULT; + + /* no locking of master_sem needed, because neither sc nor req will not be + * deleted in the meantime. */ + + if (!(sc = ec_master_get_config(master, data.config_index))) { + return -ENOENT; + } + + if (!(req = ec_slave_config_find_soe_request(sc, data.request_index))) { + return -ENOENT; + } + + ecrt_soe_request_read(req); + return 0; +} + +/*****************************************************************************/ + +/** Starts an SoE IDN write operation. + * + * \return Zero on success, otherwise a negative error code. + */ +static ATTRIBUTES int ec_ioctl_soe_request_write( + ec_master_t *master, /**< EtherCAT master. */ + void *arg, /**< ioctl() argument. */ + ec_ioctl_context_t *ctx /**< Private data structure of file handle. */ + ) +{ + ec_ioctl_soe_request_t data; + ec_slave_config_t *sc; + ec_soe_request_t *req; + int ret; + + if (unlikely(!ctx->requested)) + return -EPERM; + + if (copy_from_user(&data, (void __user *) arg, sizeof(data))) + return -EFAULT; + + if (!data.size) { + EC_MASTER_ERR(master, "IDN write: Data size may not be zero!\n"); + return -EINVAL; + } + + /* no locking of master_sem needed, because neither sc nor req will not be + * deleted in the meantime. */ + + if (!(sc = ec_master_get_config(master, data.config_index))) { + return -ENOENT; + } + + if (!(req = ec_slave_config_find_soe_request(sc, data.request_index))) { + return -ENOENT; + } + + ret = ec_soe_request_alloc(req, data.size); + if (ret) + return ret; + + if (copy_from_user(req->data, (void __user *) data.data, data.size)) + return -EFAULT; + + req->data_size = data.size; + ecrt_soe_request_write(req); + return 0; +} + +/*****************************************************************************/ + +/** Read SoE IDN data. + * + * \return Zero on success, otherwise a negative error code. + */ +static ATTRIBUTES int ec_ioctl_soe_request_data( + ec_master_t *master, /**< EtherCAT master. */ + void *arg, /**< ioctl() argument. */ + ec_ioctl_context_t *ctx /**< Private data structure of file handle. */ + ) +{ + ec_ioctl_soe_request_t data; + ec_slave_config_t *sc; + ec_soe_request_t *req; + + if (unlikely(!ctx->requested)) + return -EPERM; + + if (copy_from_user(&data, (void __user *) arg, sizeof(data))) + return -EFAULT; + + /* no locking of master_sem needed, because neither sc nor req will not be + * deleted in the meantime. 
*/ + + if (!(sc = ec_master_get_config(master, data.config_index))) { + return -ENOENT; + } + + if (!(req = ec_slave_config_find_soe_request(sc, data.request_index))) { + return -ENOENT; + } + + if (copy_to_user((void __user *) data.data, ecrt_soe_request_data(req), + ecrt_soe_request_data_size(req))) + return -EFAULT; + + return 0; +} + +/*****************************************************************************/ + /** Read register data. * * \return Zero on success, otherwise a negative error code. @@ -4712,6 +5013,13 @@ long EC_IOCTL( } ret = ec_ioctl_sc_create_sdo_request(master, arg, ctx); break; + case EC_IOCTL_SC_SOE_REQUEST: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_sc_create_soe_request(master, arg, ctx); + break; case EC_IOCTL_SC_REG_REQUEST: if (!ctx->writable) { ret = -EPERM; @@ -4800,6 +5108,40 @@ long EC_IOCTL( case EC_IOCTL_SDO_REQUEST_DATA: ret = ec_ioctl_sdo_request_data(master, arg, ctx); break; + case EC_IOCTL_SOE_REQUEST_IDN: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_soe_request_index(master, arg, ctx); + break; + case EC_IOCTL_SOE_REQUEST_TIMEOUT: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_soe_request_timeout(master, arg, ctx); + break; + case EC_IOCTL_SOE_REQUEST_STATE: + ret = ec_ioctl_soe_request_state(master, arg, ctx); + break; + case EC_IOCTL_SOE_REQUEST_READ: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_soe_request_read(master, arg, ctx); + break; + case EC_IOCTL_SOE_REQUEST_WRITE: + if (!ctx->writable) { + ret = -EPERM; + break; + } + ret = ec_ioctl_soe_request_write(master, arg, ctx); + break; + case EC_IOCTL_SOE_REQUEST_DATA: + ret = ec_ioctl_soe_request_data(master, arg, ctx); + break; case EC_IOCTL_REG_REQUEST_DATA: ret = ec_ioctl_reg_request_data(master, arg, ctx); break; diff --git a/master/ioctl.h b/master/ioctl.h index 539d1f66..d011ecae 100644 --- a/master/ioctl.h +++ b/master/ioctl.h @@ -47,7 +47,7 @@ * * Increment this when changing the ioctl interface! 
*/ -#define EC_IOCTL_VERSION_MAGIC 33 +#define EC_IOCTL_VERSION_MAGIC 34 // Command-line tool #define EC_IOCTL_MODULE EC_IOR(0x00, ec_ioctl_module_t) @@ -121,34 +121,41 @@ #define EC_IOCTL_SC_EMERG_CLEAR EC_IOW(0x3e, ec_ioctl_sc_emerg_t) #define EC_IOCTL_SC_EMERG_OVERRUNS EC_IOWR(0x3f, ec_ioctl_sc_emerg_t) #define EC_IOCTL_SC_SDO_REQUEST EC_IOWR(0x40, ec_ioctl_sdo_request_t) -#define EC_IOCTL_SC_REG_REQUEST EC_IOWR(0x41, ec_ioctl_reg_request_t) -#define EC_IOCTL_SC_VOE EC_IOWR(0x42, ec_ioctl_voe_t) -#define EC_IOCTL_SC_STATE EC_IOWR(0x43, ec_ioctl_sc_state_t) -#define EC_IOCTL_SC_IDN EC_IOW(0x44, ec_ioctl_sc_idn_t) -#define EC_IOCTL_SC_FLAG EC_IOW(0x45, ec_ioctl_sc_flag_t) -#define EC_IOCTL_DOMAIN_SIZE EC_IO(0x46) -#define EC_IOCTL_DOMAIN_OFFSET EC_IO(0x47) -#define EC_IOCTL_DOMAIN_PROCESS EC_IO(0x48) -#define EC_IOCTL_DOMAIN_QUEUE EC_IO(0x49) -#define EC_IOCTL_DOMAIN_STATE EC_IOWR(0x4a, ec_ioctl_domain_state_t) -#define EC_IOCTL_SDO_REQUEST_INDEX EC_IOWR(0x4b, ec_ioctl_sdo_request_t) -#define EC_IOCTL_SDO_REQUEST_TIMEOUT EC_IOWR(0x4c, ec_ioctl_sdo_request_t) -#define EC_IOCTL_SDO_REQUEST_STATE EC_IOWR(0x4d, ec_ioctl_sdo_request_t) -#define EC_IOCTL_SDO_REQUEST_READ EC_IOWR(0x4e, ec_ioctl_sdo_request_t) -#define EC_IOCTL_SDO_REQUEST_WRITE EC_IOWR(0x4f, ec_ioctl_sdo_request_t) -#define EC_IOCTL_SDO_REQUEST_DATA EC_IOWR(0x50, ec_ioctl_sdo_request_t) -#define EC_IOCTL_REG_REQUEST_DATA EC_IOWR(0x51, ec_ioctl_reg_request_t) -#define EC_IOCTL_REG_REQUEST_STATE EC_IOWR(0x52, ec_ioctl_reg_request_t) -#define EC_IOCTL_REG_REQUEST_WRITE EC_IOWR(0x53, ec_ioctl_reg_request_t) -#define EC_IOCTL_REG_REQUEST_READ EC_IOWR(0x54, ec_ioctl_reg_request_t) -#define EC_IOCTL_VOE_SEND_HEADER EC_IOW(0x55, ec_ioctl_voe_t) -#define EC_IOCTL_VOE_REC_HEADER EC_IOWR(0x56, ec_ioctl_voe_t) -#define EC_IOCTL_VOE_READ EC_IOW(0x57, ec_ioctl_voe_t) -#define EC_IOCTL_VOE_READ_NOSYNC EC_IOW(0x58, ec_ioctl_voe_t) -#define EC_IOCTL_VOE_WRITE EC_IOWR(0x59, ec_ioctl_voe_t) -#define EC_IOCTL_VOE_EXEC EC_IOWR(0x5a, ec_ioctl_voe_t) -#define EC_IOCTL_VOE_DATA EC_IOWR(0x5b, ec_ioctl_voe_t) -#define EC_IOCTL_SET_SEND_INTERVAL EC_IOW(0x5c, size_t) +#define EC_IOCTL_SC_SOE_REQUEST EC_IOWR(0x41, ec_ioctl_soe_request_t) +#define EC_IOCTL_SC_REG_REQUEST EC_IOWR(0x42, ec_ioctl_reg_request_t) +#define EC_IOCTL_SC_VOE EC_IOWR(0x43, ec_ioctl_voe_t) +#define EC_IOCTL_SC_STATE EC_IOWR(0x44, ec_ioctl_sc_state_t) +#define EC_IOCTL_SC_IDN EC_IOW(0x45, ec_ioctl_sc_idn_t) +#define EC_IOCTL_SC_FLAG EC_IOW(0x46, ec_ioctl_sc_flag_t) +#define EC_IOCTL_DOMAIN_SIZE EC_IO(0x47) +#define EC_IOCTL_DOMAIN_OFFSET EC_IO(0x48) +#define EC_IOCTL_DOMAIN_PROCESS EC_IO(0x49) +#define EC_IOCTL_DOMAIN_QUEUE EC_IO(0x4a) +#define EC_IOCTL_DOMAIN_STATE EC_IOWR(0x4b, ec_ioctl_domain_state_t) +#define EC_IOCTL_SDO_REQUEST_INDEX EC_IOWR(0x4c, ec_ioctl_sdo_request_t) +#define EC_IOCTL_SDO_REQUEST_TIMEOUT EC_IOWR(0x4d, ec_ioctl_sdo_request_t) +#define EC_IOCTL_SDO_REQUEST_STATE EC_IOWR(0x4e, ec_ioctl_sdo_request_t) +#define EC_IOCTL_SDO_REQUEST_READ EC_IOWR(0x4f, ec_ioctl_sdo_request_t) +#define EC_IOCTL_SDO_REQUEST_WRITE EC_IOWR(0x50, ec_ioctl_sdo_request_t) +#define EC_IOCTL_SDO_REQUEST_DATA EC_IOWR(0x51, ec_ioctl_sdo_request_t) +#define EC_IOCTL_SOE_REQUEST_IDN EC_IOWR(0x52, ec_ioctl_soe_request_t) +#define EC_IOCTL_SOE_REQUEST_TIMEOUT EC_IOWR(0x53, ec_ioctl_soe_request_t) +#define EC_IOCTL_SOE_REQUEST_STATE EC_IOWR(0x54, ec_ioctl_soe_request_t) +#define EC_IOCTL_SOE_REQUEST_READ EC_IOWR(0x55, ec_ioctl_soe_request_t) +#define EC_IOCTL_SOE_REQUEST_WRITE EC_IOWR(0x56, 
ec_ioctl_soe_request_t) +#define EC_IOCTL_SOE_REQUEST_DATA EC_IOWR(0x57, ec_ioctl_soe_request_t) +#define EC_IOCTL_REG_REQUEST_DATA EC_IOWR(0x58, ec_ioctl_reg_request_t) +#define EC_IOCTL_REG_REQUEST_STATE EC_IOWR(0x59, ec_ioctl_reg_request_t) +#define EC_IOCTL_REG_REQUEST_WRITE EC_IOWR(0x5a, ec_ioctl_reg_request_t) +#define EC_IOCTL_REG_REQUEST_READ EC_IOWR(0x5b, ec_ioctl_reg_request_t) +#define EC_IOCTL_VOE_SEND_HEADER EC_IOW(0x5c, ec_ioctl_voe_t) +#define EC_IOCTL_VOE_REC_HEADER EC_IOWR(0x5d, ec_ioctl_voe_t) +#define EC_IOCTL_VOE_READ EC_IOW(0x5e, ec_ioctl_voe_t) +#define EC_IOCTL_VOE_READ_NOSYNC EC_IOW(0x5f, ec_ioctl_voe_t) +#define EC_IOCTL_VOE_WRITE EC_IOWR(0x60, ec_ioctl_voe_t) +#define EC_IOCTL_VOE_EXEC EC_IOWR(0x61, ec_ioctl_voe_t) +#define EC_IOCTL_VOE_DATA EC_IOWR(0x62, ec_ioctl_voe_t) +#define EC_IOCTL_SET_SEND_INTERVAL EC_IOW(0x63, size_t) /*****************************************************************************/ @@ -762,6 +769,22 @@ typedef struct { /*****************************************************************************/ +typedef struct { + // inputs + uint32_t config_index; + + // inputs/outputs + uint32_t request_index; + uint8_t drive_no; + uint16_t idn; + size_t size; + uint8_t *data; + uint32_t timeout; + ec_request_state_t state; +} ec_ioctl_soe_request_t; + +/*****************************************************************************/ + typedef struct { // inputs uint32_t config_index; diff --git a/master/sdo_request.c b/master/sdo_request.c index a1ba8997..dc1ff8a0 100644 --- a/master/sdo_request.c +++ b/master/sdo_request.c @@ -1,8 +1,6 @@ /****************************************************************************** * - * $Id$ - * - * Copyright (C) 2006-2008 Florian Pose, Ingenieurgemeinschaft IgH + * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH * * This file is part of the IgH EtherCAT Master. * @@ -61,10 +59,12 @@ void ec_sdo_request_init( req->data = NULL; req->mem_size = 0; req->data_size = 0; - req->dir = EC_DIR_INVALID; req->issue_timeout = 0; // no timeout req->response_timeout = EC_SDO_REQUEST_RESPONSE_TIMEOUT; + req->dir = EC_DIR_INVALID; req->state = EC_INT_REQUEST_INIT; + req->jiffies_start = 0U; + req->jiffies_sent = 0U; req->errno = 0; req->abort_code = 0x00000000; } diff --git a/master/sdo_request.h b/master/sdo_request.h index 80b1c101..d1c39135 100644 --- a/master/sdo_request.h +++ b/master/sdo_request.h @@ -1,8 +1,6 @@ /****************************************************************************** * - * $Id$ - * - * Copyright (C) 2006-2008 Florian Pose, Ingenieurgemeinschaft IgH + * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH * * This file is part of the IgH EtherCAT Master. * diff --git a/master/slave_config.c b/master/slave_config.c index f2c45a30..2617ef49 100644 --- a/master/slave_config.c +++ b/master/slave_config.c @@ -1,8 +1,6 @@ /****************************************************************************** * - * $Id$ - * - * Copyright (C) 2006-2012 Florian Pose, Ingenieurgemeinschaft IgH + * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH * * This file is part of the IgH EtherCAT Master. 
* @@ -87,6 +85,7 @@ void ec_slave_config_init( INIT_LIST_HEAD(&sc->sdo_configs); INIT_LIST_HEAD(&sc->sdo_requests); + INIT_LIST_HEAD(&sc->soe_requests); INIT_LIST_HEAD(&sc->reg_requests); INIT_LIST_HEAD(&sc->voe_handlers); INIT_LIST_HEAD(&sc->soe_configs); @@ -132,6 +131,13 @@ void ec_slave_config_clear( kfree(req); } + // free all SoE requests + list_for_each_entry_safe(soe, next_soe, &sc->soe_requests, list) { + list_del(&soe->list); + ec_soe_request_clear(soe); + kfree(soe); + } + // free all register requests list_for_each_entry_safe(reg, next_reg, &sc->reg_requests, list) { list_del(®->list); @@ -507,7 +513,7 @@ const ec_flag_t *ec_slave_config_get_flag_by_pos_const( /*****************************************************************************/ -/** Finds a CoE handler via its position in the list. +/** Finds a CoE SDO request via its position in the list. * * \return Search result, or NULL. */ @@ -529,6 +535,28 @@ ec_sdo_request_t *ec_slave_config_find_sdo_request( /*****************************************************************************/ +/** Finds a SoE request via its position in the list. + * + * \return Search result, or NULL. + */ +ec_soe_request_t *ec_slave_config_find_soe_request( + ec_slave_config_t *sc, /**< Slave configuration. */ + unsigned int pos /**< Position in the list. */ + ) +{ + ec_soe_request_t *req; + + list_for_each_entry(req, &sc->soe_requests, list) { + if (pos--) + continue; + return req; + } + + return NULL; +} + +/*****************************************************************************/ + /** Finds a register handler via its position in the list. * * \return Search result, or NULL. @@ -1158,6 +1186,58 @@ ec_sdo_request_t *ecrt_slave_config_create_sdo_request( /*****************************************************************************/ +/** Same as ecrt_slave_config_create_soe_request(), but with ERR_PTR() return + * value. + */ +ec_soe_request_t *ecrt_slave_config_create_soe_request_err( + ec_slave_config_t *sc, uint8_t drive_no, uint16_t idn, size_t size) +{ + ec_soe_request_t *req; + int ret; + + EC_CONFIG_DBG(sc, 1, "%s(sc = 0x%p, " + "drive_no = 0x%02X, idn = 0x%04X, size = %zu)\n", + __func__, sc, drive_no, idn, size); + + if (!(req = (ec_soe_request_t *) + kmalloc(sizeof(ec_soe_request_t), GFP_KERNEL))) { + EC_CONFIG_ERR(sc, "Failed to allocate IDN request memory!\n"); + return ERR_PTR(-ENOMEM); + } + + ec_soe_request_init(req); + ecrt_soe_request_idn(req, drive_no, idn); + + ret = ec_soe_request_alloc(req, size); + if (ret < 0) { + ec_soe_request_clear(req); + kfree(req); + return ERR_PTR(ret); + } + + // prepare data for optional writing + memset(req->data, 0x00, size); + req->data_size = size; + + down(&sc->master->master_sem); + list_add_tail(&req->list, &sc->soe_requests); + up(&sc->master->master_sem); + + return req; +} + +/*****************************************************************************/ + +ec_soe_request_t *ecrt_slave_config_create_soe_request( + ec_slave_config_t *sc, uint8_t drive_no, uint16_t idn, size_t size) +{ + ec_soe_request_t *req = ecrt_slave_config_create_soe_request_err(sc, + drive_no, idn, size); + return IS_ERR(req) ? NULL : req; +} + +/*****************************************************************************/ + /** Same as ecrt_slave_config_create_reg_request(), but with ERR_PTR() return * value. 
*/ @@ -1373,6 +1453,7 @@ EXPORT_SYMBOL(ecrt_slave_config_emerg_pop); EXPORT_SYMBOL(ecrt_slave_config_emerg_clear); EXPORT_SYMBOL(ecrt_slave_config_emerg_overruns); EXPORT_SYMBOL(ecrt_slave_config_create_sdo_request); +EXPORT_SYMBOL(ecrt_slave_config_create_soe_request); EXPORT_SYMBOL(ecrt_slave_config_create_voe_handler); EXPORT_SYMBOL(ecrt_slave_config_create_reg_request); EXPORT_SYMBOL(ecrt_slave_config_state); diff --git a/master/slave_config.h b/master/slave_config.h index ad22e4ed..59cbecde 100644 --- a/master/slave_config.h +++ b/master/slave_config.h @@ -1,8 +1,6 @@ /****************************************************************************** * - * $Id$ - * - * Copyright (C) 2006-2012 Florian Pose, Ingenieurgemeinschaft IgH + * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH * * This file is part of the IgH EtherCAT Master. * @@ -143,6 +141,7 @@ struct ec_slave_config { struct list_head sdo_configs; /**< List of SDO configurations. */ struct list_head sdo_requests; /**< List of SDO requests. */ + struct list_head soe_requests; /**< List of SoE requests. */ struct list_head voe_handlers; /**< List of VoE handlers. */ struct list_head reg_requests; /**< List of register requests. */ struct list_head soe_configs; /**< List of SoE configurations. */ @@ -173,6 +172,8 @@ const ec_flag_t *ec_slave_config_get_flag_by_pos_const( const ec_slave_config_t *, unsigned int); ec_sdo_request_t *ec_slave_config_find_sdo_request(ec_slave_config_t *, unsigned int); +ec_soe_request_t *ec_slave_config_find_soe_request(ec_slave_config_t *, + unsigned int); ec_reg_request_t *ec_slave_config_find_reg_request(ec_slave_config_t *, unsigned int); ec_voe_handler_t *ec_slave_config_find_voe_handler(ec_slave_config_t *, @@ -181,6 +182,8 @@ ec_flag_t *ec_slave_config_find_flag(ec_slave_config_t *, const char *); ec_sdo_request_t *ecrt_slave_config_create_sdo_request_err( ec_slave_config_t *, uint16_t, uint8_t, size_t); +ec_soe_request_t *ecrt_slave_config_create_soe_request_err( + ec_slave_config_t *, uint8_t, uint16_t, size_t); ec_voe_handler_t *ecrt_slave_config_create_voe_handler_err( ec_slave_config_t *, size_t); ec_reg_request_t *ecrt_slave_config_create_reg_request_err( diff --git a/master/soe_request.c b/master/soe_request.c index 4ad90353..1dd28b2c 100644 --- a/master/soe_request.c +++ b/master/soe_request.c @@ -1,8 +1,6 @@ /****************************************************************************** * - * $Id$ - * - * Copyright (C) 2006-2008 Florian Pose, Ingenieurgemeinschaft IgH + * Copyright (C) 2006-2023 Florian Pose, Ingenieurgemeinschaft IgH * * This file is part of the IgH EtherCAT Master. * @@ -64,8 +62,10 @@ void ec_soe_request_init( req->data = NULL; req->mem_size = 0; req->data_size = 0; + req->issue_timeout = 0; // no timeout req->dir = EC_DIR_INVALID; req->state = EC_INT_REQUEST_INIT; + req->jiffies_start = 0U; req->jiffies_sent = 0U; req->error_code = 0x0000; } @@ -236,6 +236,7 @@ void ec_soe_request_read( req->dir = EC_DIR_INPUT; req->state = EC_INT_REQUEST_QUEUED; req->error_code = 0x0000; + req->jiffies_start = jiffies; } /*****************************************************************************/ @@ -249,6 +250,86 @@ void ec_soe_request_write( req->dir = EC_DIR_OUTPUT; req->state = EC_INT_REQUEST_QUEUED; req->error_code = 0x0000; + req->jiffies_start = jiffies; } /*****************************************************************************/ + +/** Checks, if the timeout was exceeded. + * + * \return non-zero if the timeout was exceeded, else zero. 
+ */ +int ec_soe_request_timed_out(const ec_soe_request_t *req /**< SDO request. */) +{ + return req->issue_timeout + && jiffies - req->jiffies_start > HZ * req->issue_timeout / 1000; +} + +/***************************************************************************** + * Application interface. + ****************************************************************************/ + +void ecrt_soe_request_idn(ec_soe_request_t *req, uint8_t drive_no, + uint16_t idn) +{ + req->drive_no = drive_no; + req->idn = idn; +} + +/*****************************************************************************/ + +void ecrt_soe_request_timeout(ec_soe_request_t *req, uint32_t timeout) +{ + req->issue_timeout = timeout; +} + +/*****************************************************************************/ + +uint8_t *ecrt_soe_request_data(ec_soe_request_t *req) +{ + return req->data; +} + +/*****************************************************************************/ + +size_t ecrt_soe_request_data_size(const ec_soe_request_t *req) +{ + return req->data_size; +} + +/*****************************************************************************/ + +ec_request_state_t ecrt_soe_request_state(ec_soe_request_t *req) +{ + return ec_request_state_translation_table[req->state]; +} + +/*****************************************************************************/ + +void ecrt_soe_request_read(ec_soe_request_t *req) +{ + ec_soe_request_read(req); +} + +/*****************************************************************************/ + +void ecrt_soe_request_write(ec_soe_request_t *req) +{ + ec_soe_request_write(req); +} + +/*****************************************************************************/ + +/** \cond */ + +EXPORT_SYMBOL(ecrt_soe_request_idn); +EXPORT_SYMBOL(ecrt_soe_request_timeout); +EXPORT_SYMBOL(ecrt_soe_request_data); +EXPORT_SYMBOL(ecrt_soe_request_data_size); +EXPORT_SYMBOL(ecrt_soe_request_state); +EXPORT_SYMBOL(ecrt_soe_request_read); +EXPORT_SYMBOL(ecrt_soe_request_write); + +/** \endcond */ + +/*****************************************************************************/ diff --git a/master/soe_request.h b/master/soe_request.h index 40f61aa6..c5ff9404 100644 --- a/master/soe_request.h +++ b/master/soe_request.h @@ -45,7 +45,7 @@ /** Sercos-over-EtherCAT request. */ -typedef struct { +struct ec_soe_request { struct list_head list; /**< List item. */ uint8_t drive_no; /**< Drive number. */ uint16_t idn; /**< Sercos ID-Number. */ @@ -53,13 +53,16 @@ typedef struct { uint8_t *data; /**< Pointer to SDO data. */ size_t mem_size; /**< Size of SDO data memory. */ size_t data_size; /**< Size of SDO data. */ + uint32_t issue_timeout; /**< Maximum time in ms, the processing of the + request may take. */ ec_direction_t dir; /**< Direction. EC_DIR_OUTPUT means writing to the slave, EC_DIR_INPUT means reading from the slave. */ ec_internal_request_state_t state; /**< Request state. */ + unsigned long jiffies_start; /**< Jiffies, when the request was issued. */ unsigned long jiffies_sent; /**< Jiffies, when the upload/download request was sent. */ uint16_t error_code; /**< SoE error code. 
*/ -} ec_soe_request_t; +}; /*****************************************************************************/ @@ -74,6 +77,7 @@ int ec_soe_request_copy_data(ec_soe_request_t *, const uint8_t *, size_t); int ec_soe_request_append_data(ec_soe_request_t *, const uint8_t *, size_t); void ec_soe_request_read(ec_soe_request_t *); void ec_soe_request_write(ec_soe_request_t *); +int ec_soe_request_timed_out(const ec_soe_request_t *); /*****************************************************************************/ diff --git a/script/ethercat.conf b/script/ethercat.conf index f6f0a549..0be604af 100644 --- a/script/ethercat.conf +++ b/script/ethercat.conf @@ -55,10 +55,10 @@ MASTER0_DEVICE="" # the EtherCAT-capable ones. If a certain (EtherCAT-capable) driver is not # found, a warning will appear. # -# Possible values: 8139too, e100, e1000, e1000e, r8169, generic, ccat, igb. +# Possible values: 8139too, e100, e1000, e1000e, r8169, generic, ccat, igb, igc. # Separate multiple drivers with spaces. # -# Note: The e100, e1000, e1000e, r8169, ccat and igb drivers are not built by +# Note: The e100, e1000, e1000e, r8169, ccat, igb and igc drivers are not built by # default. Enable them with the --enable- configure switches. # # Attention: When using the generic driver, the corresponding Ethernet device diff --git a/script/ethercatctl.in b/script/ethercatctl.in index 2da6e95b..7516b42d 100755 --- a/script/ethercatctl.in +++ b/script/ethercatctl.in @@ -102,6 +102,14 @@ start) MASTER_INDEX=$((${MASTER_INDEX} + 1)) done + if [ -z "${DEVICES}" ]; then + echo "ERROR: No network cards for EtherCAT specified." + echo -n "Please edit ${ETHERCAT_CONFIG} with root permissions" + echo -n " and set MASTER0_DEVICE variable to either a " + echo "network interface name (like eth0) or to a MAC address." + exit 1 + fi + # load master module if ! ${MODPROBE} ${MODPROBE_FLAGS} ec_master \ main_devices="${DEVICES}" backup_devices="${BACKUPS}"; then @@ -118,12 +126,12 @@ start) fi if [ "${MODULE}" != "generic" ] && [ "${MODULE}" != "ccat" ]; then - # try to unload standard module + # unload standard module and check if unloading was successful + ${RMMOD} "${MODULE}" 2> /dev/null || true if ${LSMOD} | grep "^${MODULE} " > /dev/null; then - if ! ${RMMOD} "${MODULE}"; then - ${RMMOD} ${LOADED_MODULES} - exit 1 - fi + # could not unload module + ${RMMOD} ${LOADED_MODULES} + exit 1 fi fi
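Finally, a small hedged sketch of the timeout semantics added for SoE requests: ec_soe_request_timed_out() converts the millisecond timeout to jiffies (HZ * timeout / 1000), so with HZ set to 250 a 500 ms timeout expires after 125 jiffies, and a value of zero disables the check. The request idn_req and the 500 ms value below are placeholders from the earlier sketch:

/* Give pending operations on this request 500 ms before they are marked as
 * failed; zero would mean "no timeout". */
ecrt_soe_request_timeout(idn_req, 500);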